power.c

/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <kernel.h>
#include <timeout_q.h>
#include <init.h>
#include <string.h>
#include <pm/pm.h>
#include <pm/state.h>
#include <pm/policy.h>
#include <tracing/tracing.h>

#include "pm_priv.h"

#define PM_STATES_LEN (1 + PM_STATE_SOFT_OFF - PM_STATE_ACTIVE)

#define LOG_LEVEL CONFIG_PM_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(power);

static int post_ops_done = 1;
static struct pm_state_info z_power_state;
static sys_slist_t pm_notifiers = SYS_SLIST_STATIC_INIT(&pm_notifiers);
static struct k_spinlock pm_notifier_lock;

/* #define CONFIG_PM_DEBUG */
#ifdef CONFIG_PM_DEBUG
static uint32_t timer_start;

static inline void pm_debug_start_timer(void)
{
	timer_start = k_cycle_get_32();
}

static void pm_debug_stop_timer(const char *msg)
{
	uint32_t res = k_cycle_get_32() - timer_start;

	printk("%s: took %u us\n", msg, k_cyc_to_us_ceil32(res));
}
#else
static inline void pm_debug_start_timer(void) { }
static void pm_debug_stop_timer(const char *msg) { }
#endif

__weak void pm_power_state_exit_post_ops(struct pm_state_info info)
{
	irq_unlock(0);
}

static inline void exit_pos_ops(struct pm_state_info info)
{
	pm_power_state_exit_post_ops(info);
}

__weak void pm_power_state_set(struct pm_state_info info)
{
}

static inline void pm_state_set(struct pm_state_info info)
{
	pm_power_state_set(info);
}
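
/*
 * Example (illustration only, not part of this file): a SoC or board layer
 * is expected to override the two __weak hooks above with its own low-power
 * entry/exit code. A minimal sketch, assuming a hypothetical
 * soc_deep_sleep() helper provided by the SoC HAL:
 *
 *	void pm_power_state_set(struct pm_state_info info)
 *	{
 *		if (info.state == PM_STATE_SUSPEND_TO_RAM) {
 *			soc_deep_sleep();	// hypothetical SoC-specific entry point
 *		}
 *	}
 *
 *	void pm_power_state_exit_post_ops(struct pm_state_info info)
 *	{
 *		ARG_UNUSED(info);
 *		irq_unlock(0);	// interrupts were left locked across the state change
 *	}
 */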

/*
 * Function called to notify when the system is entering / exiting a
 * power state
 */
static inline void pm_state_notify(bool entering_state)
{
	struct pm_notifier *notifier;
	k_spinlock_key_t pm_notifier_key;
	void (*callback)(enum pm_state state);

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	SYS_SLIST_FOR_EACH_CONTAINER(&pm_notifiers, notifier, _node) {
		if (entering_state) {
			callback = notifier->state_entry;
		} else {
			callback = notifier->state_exit;
		}

		if (callback) {
			callback(z_power_state.state);
		}
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}

void pm_system_resume(void)
{
	/*
	 * This notification is called from the ISR of the event
	 * that caused exit from kernel idling after PM operations.
	 *
	 * Some CPU low power states require enabling of interrupts
	 * atomically when entering those states. The wake up from
	 * such a state first executes code in the ISR of the interrupt
	 * that caused the wake. This hook will be called from the ISR.
	 * For such CPU low power states, do post operations and restores here.
	 * The kernel scheduler will get control after the ISR finishes
	 * and it may schedule another thread.
	 *
	 * Call pm_idle_exit_notification_disable() if this
	 * notification is not required.
	 */
	if (!post_ops_done) {
		post_ops_done = 1;
		pm_state_notify(false);
		exit_pos_ops(z_power_state);
	}
}
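
/*
 * Example (illustration only, not part of this file): as the comment above
 * describes, pm_system_resume() is meant to run from the ISR path of the
 * wake-up event for states that re-enable interrupts atomically on entry.
 * A minimal sketch, assuming a hypothetical arch-level interrupt-exit hook:
 *
 *	void arch_isr_exit_hook(void)	// hypothetical hook name
 *	{
 *		pm_system_resume();	// run post ops and send the exit notification,
 *					// if they were not done already
 *	}
 */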

void pm_power_state_force(struct pm_state_info info)
{
	__ASSERT(info.state < PM_STATES_LEN,
		 "Invalid power state %d!", info.state);

	if (info.state == PM_STATE_ACTIVE) {
		return;
	}

	(void)arch_irq_lock();
	z_power_state = info;
	post_ops_done = 0;
	pm_state_notify(true);

	k_sched_lock();
	pm_debug_start_timer();
	/* Enter power state */
	pm_state_set(z_power_state);
	pm_debug_stop_timer("--sleep--");
	pm_system_resume();
	k_sched_unlock();
}
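
/*
 * Example (illustration only, not part of this file): an application can
 * force a state directly instead of waiting for the policy manager, e.g.
 * to power the system off. A minimal sketch:
 *
 *	struct pm_state_info off = { .state = PM_STATE_SOFT_OFF };
 *
 *	pm_power_state_force(off);	// does not return if the SoC enters soft-off
 */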

#if CONFIG_PM_DEVICE
static enum pm_state _handle_device_abort(struct pm_state_info info)
{
	LOG_DBG("Some devices didn't enter suspend state!");
	pm_resume_devices();
	z_power_state.state = PM_STATE_ACTIVE;
	return PM_STATE_ACTIVE;
}
#endif

void pm_abort_enter_sleep(void)
{
	/*
	 * For idle, let pm_state_notify() handle the exit; standby needs
	 * the notification to exit S3.
	 */
	post_ops_done = 0;
}

enum pm_state pm_system_suspend(int32_t ticks)
{
	SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, ticks);

	z_power_state = pm_policy_next_state(ticks);
	if (z_power_state.state == PM_STATE_ACTIVE) {
		LOG_DBG("No PM operations done.");
		SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, z_power_state.state);
		return z_power_state.state;
	}
	post_ops_done = 0;

	if (ticks != K_TICKS_FOREVER) {
		/*
		 * Just a sanity check in case the policy manager does not
		 * handle this error condition properly.
		 */
		__ASSERT(z_power_state.min_residency_us >=
			 z_power_state.exit_latency_us,
			 "min_residency_us < exit_latency_us");

		/*
		 * We need to set the timer to interrupt a little bit early to
		 * accommodate the time required by the CPU to fully wake up.
		 */
		z_set_timeout_expiry(ticks -
			k_us_to_ticks_ceil32(z_power_state.exit_latency_us), true);
	}
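
	/*
	 * Worked example (illustrative only): with exit_latency_us = 500 and
	 * CONFIG_SYS_CLOCK_TICKS_PER_SEC = 10000, k_us_to_ticks_ceil32(500)
	 * is 5, so the wake-up timer fires 5 ticks early to absorb the CPU's
	 * exit latency.
	 */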

#if CONFIG_PM_DEVICE
	bool should_resume_devices = true;

	pm_debug_start_timer();
	switch (z_power_state.state) {
	case PM_STATE_SUSPEND_TO_IDLE:
		__fallthrough;
	case PM_STATE_STANDBY:
		/* low power peripherals. */
		if (pm_low_power_devices()) {
			SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend,
				ticks, _handle_device_abort(z_power_state));
			return _handle_device_abort(z_power_state);
		}
		break;
	case PM_STATE_SUSPEND_TO_RAM:
		__fallthrough;
	case PM_STATE_SUSPEND_TO_DISK:
		__fallthrough;
	case PM_STATE_SOFT_OFF:
		if (pm_suspend_devices()) {
			SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend,
				ticks, _handle_device_abort(z_power_state));
			return _handle_device_abort(z_power_state);
		}
		break;
	default:
		should_resume_devices = false;
		break;
	}
	pm_debug_stop_timer("--suspend--");
#endif

	/*
	 * This function runs with interrupts locked, and the SoC is
	 * expected to unlock them in pm_power_state_exit_post_ops() when
	 * returning to the active state. We don't want to be scheduled out
	 * yet: first we need to send a notification about leaving the idle
	 * state. So we lock the scheduler here and unlock it just after
	 * the notification has been sent in pm_system_resume().
	 */
	k_sched_lock();
	pm_debug_start_timer();
	/* Enter power state */
	pm_state_notify(true);
	pm_debug_stop_timer("--notify--");

	pm_debug_start_timer();
	pm_state_set(z_power_state);
	pm_debug_stop_timer("--s3--");

	pm_debug_start_timer();
	/* Wake up sequence starts here */
#if CONFIG_PM_DEVICE
	if (should_resume_devices) {
		/* Turn on peripherals and restore device states as necessary */
		pm_resume_devices();
	}
#endif
	pm_system_resume();
	pm_debug_stop_timer("--resume--");
	k_sched_unlock();

	SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, z_power_state.state);
	return z_power_state.state;
}
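
/*
 * Example (illustration only, not part of this file): pm_system_suspend()
 * is normally driven from the kernel idle path with the number of ticks
 * until the next scheduled event. A minimal sketch, assuming a hypothetical
 * idle hook that already knows the remaining ticks:
 *
 *	void idle_enter(int32_t ticks_until_next_event)	// hypothetical caller
 *	{
 *		if (pm_system_suspend(ticks_until_next_event) == PM_STATE_ACTIVE) {
 *			k_cpu_idle();	// no low-power state chosen, plain idle
 *		}
 *	}
 */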

void pm_notifier_register(struct pm_notifier *notifier)
{
	k_spinlock_key_t pm_notifier_key = k_spin_lock(&pm_notifier_lock);

	sys_slist_append(&pm_notifiers, &notifier->_node);
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}

int pm_notifier_unregister(struct pm_notifier *notifier)
{
	int ret = -EINVAL;
	k_spinlock_key_t pm_notifier_key;

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	if (sys_slist_find_and_remove(&pm_notifiers, &(notifier->_node))) {
		ret = 0;
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);

	return ret;
}
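
/*
 * Example (illustration only, not part of this file): an application or
 * driver can learn when the system enters and leaves a power state by
 * registering a notifier. A minimal sketch, assuming hypothetical
 * on_pm_entry()/on_pm_exit() callbacks:
 *
 *	static void on_pm_entry(enum pm_state state)
 *	{
 *		printk("entering state %d\n", state);
 *	}
 *
 *	static void on_pm_exit(enum pm_state state)
 *	{
 *		printk("leaving state %d\n", state);
 *	}
 *
 *	static struct pm_notifier app_notifier = {
 *		.state_entry = on_pm_entry,
 *		.state_exit = on_pm_exit,
 *	};
 *
 *	pm_notifier_register(&app_notifier);
 *	...
 *	pm_notifier_unregister(&app_notifier);	// returns 0 if it was registered
 */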