idle.c

/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <drivers/timer/system_timer.h>
#include <wait_q.h>
#include <pm/pm.h>
#include <stdbool.h>
#include <logging/log.h>
#include <ksched.h>
#include <kswap.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/**
 * @brief Indicate that the kernel is idling in tickless mode
 *
 * Sets the kernel data structure idle field to either a positive value or
 * K_FOREVER.
 */
static void pm_save_idle(void)
{
#ifdef CONFIG_PM
	int32_t ticks = z_get_next_timeout_expiry();

	_kernel.idle = ticks;

	/*
	 * Call the suspend hook function of the SoC interface to allow
	 * entry into a low power state. The function returns
	 * PM_STATE_ACTIVE if a low power state was not entered, in which
	 * case the kernel does normal idle processing.
	 *
	 * This function is entered with interrupts disabled. If a low power
	 * state was entered, then the hook function should enable interrupts
	 * before exiting. This is because the kernel does not do its own idle
	 * processing in those cases, i.e. it skips k_cpu_idle(). The kernel's
	 * idle processing re-enables interrupts, which is essential for
	 * the kernel's scheduling logic.
	 */
	if (pm_system_suspend(ticks) == PM_STATE_ACTIVE) {
		k_cpu_idle();
	}
#endif
}
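
/*
 * Illustrative sketch only, not part of this file: one way a SoC layer
 * could honour the contract described in pm_save_idle() above. Every name
 * below is hypothetical; the real hooks are provided through the pm
 * subsystem (see pm/pm.h).
 *
 *	enum pm_state soc_suspend(int32_t ticks)     // hypothetical hook
 *	{
 *		if (ticks < SOC_MIN_SLEEP_TICKS) {   // hypothetical threshold
 *			// Not worth sleeping: report "no state entered" so
 *			// the kernel falls back to k_cpu_idle().
 *			return PM_STATE_ACTIVE;
 *		}
 *
 *		soc_enter_sleep();        // hypothetical: enter low power state
 *		soc_unmask_interrupts();  // re-enable interrupts before
 *					  // returning, since k_cpu_idle()
 *					  // (which would do it) is skipped
 *		return PM_STATE_SUSPEND_TO_IDLE;
 *	}
 */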
void z_pm_save_idle_exit(int32_t ticks)
{
#ifdef CONFIG_PM
	/* Some CPU low power states require notification at the ISR
	 * to allow any operations that need to be done before the kernel
	 * switches tasks or processes nested interrupts.
	 * This can simply be ignored if not required.
	 */
	pm_system_resume();
#endif /* CONFIG_PM */
	sys_clock_idle_exit();
}
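
/*
 * Illustrative note, not part of this file: z_pm_save_idle_exit() is
 * expected to be invoked from the interrupt path that wakes the CPU out of
 * the low power state, before normal scheduling resumes; the exact call
 * site is architecture and timer-driver specific. In sketch form, with a
 * hypothetical ISR:
 *
 *	void my_wakeup_isr(const void *arg)       // hypothetical ISR
 *	{
 *		z_pm_save_idle_exit(remaining);   // 'remaining' is hypothetical
 *		...
 *	}
 */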
void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't
		 * actually enter an idle state, because they
		 * can't be notified of scheduler changes
		 * (i.e. threads they should run). They just
		 * spin in a yield loop. This is intended as
		 * a fallback configuration for new platform
		 * bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) &&
		    !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			k_busy_wait(100);
			k_yield();
			continue;
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked. It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

		if (IS_ENABLED(CONFIG_PM)) {
			pm_save_idle();
		} else {
			k_cpu_idle();
		}
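
		/* Illustrative sketch only, not part of this file:
		 * k_cpu_idle() typically maps onto an architecture routine
		 * that unmasks interrupts and waits for one without opening
		 * a window in which a wakeup could be missed, e.g. in
		 * pseudo-assembly for a hypothetical core:
		 *
		 *	arch_cpu_idle:
		 *		unmask_and_wait_for_interrupt  ; must be atomic
		 *		ret                            ; interrupts now unmasked
		 */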
#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context. So in that setup we need to
		 * explicitly yield in the idle thread; otherwise
		 * nothing else will run once it starts.
		 */
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif
#endif
	}
}