/* power.c */
  1. /*
  2. * Copyright (c) 2018 Intel Corporation.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <zephyr.h>
  7. #include <kernel.h>
  8. #include <timeout_q.h>
  9. #include <init.h>
  10. #include <string.h>
  11. #include <pm/pm.h>
  12. #include <pm/state.h>
  13. #include <pm/policy.h>
  14. #include <tracing/tracing.h>
  15. #include "pm_priv.h"
/* Number of distinct power states: PM_STATE_ACTIVE .. PM_STATE_SOFT_OFF. */
#define PM_STATES_LEN (1 + PM_STATE_SOFT_OFF - PM_STATE_ACTIVE)
#define LOG_LEVEL CONFIG_PM_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(power);

/*
 * Set to 0 when entering a low power state and back to 1 once the
 * exit post-ops have run; pm_system_resume() uses it so the post-ops
 * execute exactly once per suspend cycle.
 */
static int post_ops_done = 1;
/* State selected by the policy manager (or forced) for the current cycle. */
static struct pm_state_info z_power_state;
/* Registered entry/exit notifiers, guarded by pm_notifier_lock. */
static sys_slist_t pm_notifiers = SYS_SLIST_STATIC_INIT(&pm_notifiers);
static struct k_spinlock pm_notifier_lock;
#ifdef CONFIG_PM_DEBUG
/* Per-state residency bookkeeping, updated on every suspend cycle. */
struct pm_debug_info {
	uint32_t count;     /* number of times this state was entered */
	uint32_t last_res;  /* residency of the most recent entry, in cycles */
	uint32_t total_res; /* accumulated residency, in cycles */
};

static struct pm_debug_info pm_dbg_info[PM_STATES_LEN];
static uint32_t timer_start, timer_end;

/* Snapshot the cycle counter just before entering a low power state. */
static inline void pm_debug_start_timer(void)
{
	timer_start = k_cycle_get_32();
}

/* Snapshot the cycle counter right after resuming. */
static inline void pm_debug_stop_timer(void)
{
	timer_end = k_cycle_get_32();
}

/*
 * Fold the last start/stop snapshots into the per-state statistics.
 * NOTE(review): timer_end - timer_start relies on unsigned wrap-around
 * to stay correct across a 32-bit cycle-counter rollover.
 */
static void pm_log_debug_info(enum pm_state state)
{
	uint32_t res = timer_end - timer_start;

	pm_dbg_info[state].count++;
	pm_dbg_info[state].last_res = res;
	pm_dbg_info[state].total_res += res;
}

/* Log the accumulated residency statistics for every power state. */
void pm_dump_debug_info(void)
{
	for (int i = 0; i < PM_STATES_LEN; i++) {
		LOG_DBG("PM:state = %d, count = %d last_res = %d, "
			"total_res = %d\n", i, pm_dbg_info[i].count,
			pm_dbg_info[i].last_res, pm_dbg_info[i].total_res);
	}
}
#else
/* Debug instrumentation compiles away when CONFIG_PM_DEBUG is disabled. */
static inline void pm_debug_start_timer(void) { }
static inline void pm_debug_stop_timer(void) { }
static void pm_log_debug_info(enum pm_state state) { }
#endif
  60. static inline void exit_pos_ops(struct pm_state_info info)
  61. {
  62. extern __weak void
  63. pm_power_state_exit_post_ops(struct pm_state_info info);
  64. if (pm_power_state_exit_post_ops != NULL) {
  65. pm_power_state_exit_post_ops(info);
  66. } else {
  67. /*
  68. * This function is supposed to be overridden to do SoC or
  69. * architecture specific post ops after sleep state exits.
  70. *
  71. * The kernel expects that irqs are unlocked after this.
  72. */
  73. irq_unlock(0);
  74. }
  75. }
  76. static inline void pm_state_set(struct pm_state_info info)
  77. {
  78. extern __weak void
  79. pm_power_state_set(struct pm_state_info info);
  80. if (pm_power_state_set != NULL) {
  81. pm_power_state_set(info);
  82. }
  83. }
  84. /*
  85. * Function called to notify when the system is entering / exiting a
  86. * power state
  87. */
  88. static inline void pm_state_notify(bool entering_state)
  89. {
  90. struct pm_notifier *notifier;
  91. k_spinlock_key_t pm_notifier_key;
  92. void (*callback)(enum pm_state state);
  93. pm_notifier_key = k_spin_lock(&pm_notifier_lock);
  94. SYS_SLIST_FOR_EACH_CONTAINER(&pm_notifiers, notifier, _node) {
  95. if (entering_state) {
  96. callback = notifier->state_entry;
  97. } else {
  98. callback = notifier->state_exit;
  99. }
  100. if (callback) {
  101. callback(z_power_state.state);
  102. }
  103. }
  104. k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
  105. }
  106. void pm_system_resume(void)
  107. {
  108. /*
  109. * This notification is called from the ISR of the event
  110. * that caused exit from kernel idling after PM operations.
  111. *
  112. * Some CPU low power states require enabling of interrupts
  113. * atomically when entering those states. The wake up from
  114. * such a state first executes code in the ISR of the interrupt
  115. * that caused the wake. This hook will be called from the ISR.
  116. * For such CPU LPS states, do post operations and restores here.
  117. * The kernel scheduler will get control after the ISR finishes
  118. * and it may schedule another thread.
  119. *
  120. * Call pm_idle_exit_notification_disable() if this
  121. * notification is not required.
  122. */
  123. if (!post_ops_done) {
  124. post_ops_done = 1;
  125. exit_pos_ops(z_power_state);
  126. pm_state_notify(false);
  127. }
  128. }
/*
 * Force the system into the given power state, bypassing the policy
 * manager. No-op if the requested state is PM_STATE_ACTIVE.
 *
 * Interrupts are locked here and are expected to be re-enabled by the
 * SoC's pm_power_state_exit_post_ops() (via pm_system_resume()) when
 * the system comes back to the active state.
 */
void pm_power_state_force(struct pm_state_info info)
{
	__ASSERT(info.state < PM_STATES_LEN,
		"Invalid power state %d!", info.state);

	if (info.state == PM_STATE_ACTIVE) {
		return;
	}

	(void)arch_irq_lock();
	z_power_state = info;
	/* Arm pm_system_resume() to run the exit post-ops exactly once. */
	post_ops_done = 0;
	pm_state_notify(true);

	/* Keep the scheduler away until the exit notification is sent. */
	k_sched_lock();
	pm_debug_start_timer();
	/* Enter power state */
	pm_state_set(z_power_state);
	pm_debug_stop_timer();
	pm_system_resume();
	k_sched_unlock();
}
  148. #if CONFIG_PM_DEVICE
  149. static enum pm_state _handle_device_abort(struct pm_state_info info)
  150. {
  151. LOG_DBG("Some devices didn't enter suspend state!");
  152. pm_resume_devices();
  153. z_power_state.state = PM_STATE_ACTIVE;
  154. return PM_STATE_ACTIVE;
  155. }
  156. #endif
  157. enum pm_state pm_system_suspend(int32_t ticks)
  158. {
  159. SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, ticks);
  160. z_power_state = pm_policy_next_state(ticks);
  161. if (z_power_state.state == PM_STATE_ACTIVE) {
  162. LOG_DBG("No PM operations done.");
  163. SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, z_power_state.state);
  164. return z_power_state.state;
  165. }
  166. post_ops_done = 0;
  167. if (ticks != K_TICKS_FOREVER) {
  168. /*
  169. * Just a sanity check in case the policy manager does not
  170. * handle this error condition properly.
  171. */
  172. __ASSERT(z_power_state.min_residency_us >=
  173. z_power_state.exit_latency_us,
  174. "min_residency_us < exit_latency_us");
  175. /*
  176. * We need to set the timer to interrupt a little bit early to
  177. * accommodate the time required by the CPU to fully wake up.
  178. */
  179. z_set_timeout_expiry(ticks -
  180. k_us_to_ticks_ceil32(z_power_state.exit_latency_us), true);
  181. }
  182. #if CONFIG_PM_DEVICE
  183. bool should_resume_devices = true;
  184. switch (z_power_state.state) {
  185. case PM_STATE_SUSPEND_TO_IDLE:
  186. __fallthrough;
  187. case PM_STATE_STANDBY:
  188. /* low power peripherals. */
  189. if (pm_low_power_devices()) {
  190. SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend,
  191. ticks, _handle_device_abort(z_power_state));
  192. return _handle_device_abort(z_power_state);
  193. }
  194. break;
  195. case PM_STATE_SUSPEND_TO_RAM:
  196. __fallthrough;
  197. case PM_STATE_SUSPEND_TO_DISK:
  198. __fallthrough;
  199. case PM_STATE_SOFT_OFF:
  200. if (pm_suspend_devices()) {
  201. SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend,
  202. ticks, _handle_device_abort(z_power_state));
  203. return _handle_device_abort(z_power_state);
  204. }
  205. break;
  206. default:
  207. should_resume_devices = false;
  208. break;
  209. }
  210. #endif
  211. /*
  212. * This function runs with interruptions locked but it is
  213. * expected the SoC to unlock them in
  214. * pm_power_state_exit_post_ops() when returning to active
  215. * state. We don't want to be scheduled out yet, first we need
  216. * to send a notification about leaving the idle state. So,
  217. * we lock the scheduler here and unlock just after we have
  218. * sent the notification in pm_system_resume().
  219. */
  220. k_sched_lock();
  221. pm_debug_start_timer();
  222. /* Enter power state */
  223. pm_state_notify(true);
  224. pm_state_set(z_power_state);
  225. pm_debug_stop_timer();
  226. /* Wake up sequence starts here */
  227. #if CONFIG_PM_DEVICE
  228. if (should_resume_devices) {
  229. /* Turn on peripherals and restore device states as necessary */
  230. pm_resume_devices();
  231. }
  232. #endif
  233. pm_log_debug_info(z_power_state.state);
  234. pm_system_resume();
  235. k_sched_unlock();
  236. SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, z_power_state.state);
  237. return z_power_state.state;
  238. }
  239. void pm_notifier_register(struct pm_notifier *notifier)
  240. {
  241. k_spinlock_key_t pm_notifier_key = k_spin_lock(&pm_notifier_lock);
  242. sys_slist_append(&pm_notifiers, &notifier->_node);
  243. k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
  244. }
  245. int pm_notifier_unregister(struct pm_notifier *notifier)
  246. {
  247. int ret = -EINVAL;
  248. k_spinlock_key_t pm_notifier_key;
  249. pm_notifier_key = k_spin_lock(&pm_notifier_lock);
  250. if (sys_slist_find_and_remove(&pm_notifiers, &(notifier->_node))) {
  251. ret = 0;
  252. }
  253. k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
  254. return ret;
  255. }