/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
#define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_

#include <ksched.h>
#include <spinlock.h>
#include <kernel_arch_func.h>

#ifdef CONFIG_STACK_SENTINEL
extern void z_check_stack_sentinel(void);
#else
#define z_check_stack_sentinel() /**/
#endif

extern struct k_spinlock sched_spinlock;

/* In SMP, the irq_lock() is a spinlock which is implicitly released
 * and reacquired on context switch to preserve the existing
 * semantics. This means that whenever we are about to return to a
 * thread (via either z_swap() or interrupt/exception return!) we need
 * to restore the lock state to whatever the thread's counter
 * expects.
 */
void z_smp_release_global_lock(struct k_thread *thread);
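
/* Illustrative sketch of the semantics described above (hypothetical
 * caller, not part of this header): a thread that blocks while
 * holding the irq_lock() observes the lock as held continuously,
 * even though it is dropped for the duration of the swap so that
 * other threads/CPUs can make progress:
 *
 *	unsigned int key = irq_lock();
 *	...
 *	some_blocking_call();	// hypothetical; lock implicitly released
 *				// while swapped out, restored before this
 *				// thread resumes
 *	irq_unlock(key);
 */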

/* context switching and scheduling-related routines */
#ifdef CONFIG_USE_SWITCH

/* There is an unavoidable SMP race when threads swap -- their thread
 * record is in the queue (and visible to other CPUs) before
 * arch_switch() finishes saving state. We must spin for the switch
 * handle before entering a new thread. See docs on arch_switch().
 *
 * Note: future SMP architectures may need a fence/barrier or cache
 * invalidation here. Current ones don't, and sadly Zephyr doesn't
 * have a framework for that yet.
 */
static inline void wait_for_switch(struct k_thread *thread)
{
#ifdef CONFIG_SMP
	volatile void **shp = (void *)&thread->switch_handle;
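
	/* Spin until the CPU that switched away from this thread
	 * finishes saving its state and publishes the switch handle
	 * (see the race description above).
	 */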
	while (*shp == NULL) {
		k_busy_wait(1);
	}
#endif
}

/* New style context switching. arch_switch() is a lower level
 * primitive that doesn't know about the scheduler or return value.
 * Needed for SMP, where the scheduler requires spinlocking that we
 * don't want to have to do in per-architecture assembly.
 *
 * Note that is_spinlock is a compile-time construct which will be
 * optimized out when this function is expanded.
 */
static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
					  struct k_spinlock *lock,
					  int is_spinlock)
{
	ARG_UNUSED(lock);
	struct k_thread *new_thread, *old_thread;

#ifdef CONFIG_SPIN_VALIDATE
	/* Make sure the key acts to unmask interrupts; if it doesn't,
	 * then we are context switching out of a nested lock
	 * (i.e. breaking the lock of someone up the stack) which is
	 * forbidden! The sole exceptions are dummy threads used
	 * during initialization (where we start with interrupts
	 * masked and switch away to begin scheduling) and the case of
	 * a dead current thread that was just aborted (where the
	 * damage was already done by the abort anyway).
	 *
	 * (Note that this is disabled on ARM64, where system calls
	 * can sometimes run with interrupts masked in ways that don't
	 * represent lock state. See #35307)
	 */
# ifndef CONFIG_ARM64
	__ASSERT(arch_irq_unlocked(key) ||
		 _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
		 "Context switching while holding lock!");
# endif
#endif

	old_thread = _current;

	z_check_stack_sentinel();

	/* We always take the scheduler spinlock if we don't already
	 * have it. We "release" other spinlocks here. But we never
	 * drop the interrupt lock.
	 */
	if (is_spinlock && lock != NULL && lock != &sched_spinlock) {
		k_spin_release(lock);
	}
	if (!is_spinlock || lock != &sched_spinlock) {
		(void) k_spin_lock(&sched_spinlock);
	}

	new_thread = z_swap_next_thread();

	if (new_thread != old_thread) {
#ifdef CONFIG_TIMESLICING
		z_reset_time_slice();
#endif
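
		/* Default swap return value for the outgoing thread;
		 * whoever eventually wakes it can overwrite this
		 * before it runs again.
		 */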
		old_thread->swap_retval = -EAGAIN;

#ifdef CONFIG_SMP
		_current_cpu->swap_ok = 0;

		new_thread->base.cpu = arch_curr_cpu()->id;

		if (!is_spinlock) {
			z_smp_release_global_lock(new_thread);
		}
#endif
		z_thread_mark_switched_out();
		wait_for_switch(new_thread);
		_current_cpu->current = new_thread;

#ifdef CONFIG_SPIN_VALIDATE
		z_spin_lock_set_owner(&sched_spinlock);
#endif

		arch_cohere_stacks(old_thread, NULL, new_thread);

#ifdef CONFIG_SMP
		/* Add _current back to the run queue HERE. After
		 * wait_for_switch() we are guaranteed to reach the
		 * context switch in finite time, avoiding a potential
		 * deadlock.
		 */
		z_requeue_current(old_thread);
#endif
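
		/* Capture the incoming thread's switch handle before
		 * clearing it; arch_switch() consumes it and will
		 * publish our own handle through
		 * &old_thread->switch_handle once our state is saved.
		 */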
		void *newsh = new_thread->switch_handle;

		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
		k_spin_release(&sched_spinlock);
		arch_switch(newsh, &old_thread->switch_handle);
	} else {
		k_spin_release(&sched_spinlock);
	}

	if (is_spinlock) {
		arch_irq_unlock(key);
	} else {
		irq_unlock(key);
	}

	return _current->swap_retval;
}

static inline int z_swap_irqlock(unsigned int key)
{
	return do_swap(key, NULL, 0);
}

static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	return do_swap(key.key, lock, 1);
}

static inline void z_swap_unlocked(void)
{
	(void) do_swap(arch_irq_lock(), NULL, 1);
}
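
/* Illustrative usage sketch (hypothetical caller and wait queue, not
 * part of this header): a blocking primitive typically queues the
 * current thread under its own spinlock and then hands that lock to
 * z_swap(), which releases it, switches away, and returns the swap
 * retval (-EAGAIN by default, as set above) once the thread is
 * switched back in:
 *
 *	static struct k_spinlock my_lock;	// hypothetical
 *	static _wait_q_t my_waitq;		// hypothetical
 *
 *	int my_blocking_op(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		// ... pend _current on my_waitq here ...
 *		return z_swap(&my_lock, key);	// returns when woken
 *	}
 */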

#else /* !CONFIG_USE_SWITCH */

extern int arch_swap(unsigned int key);

static inline int z_swap_irqlock(unsigned int key)
{
	int ret;
	z_check_stack_sentinel();
	ret = arch_swap(key);
	return ret;
}

/* If !USE_SWITCH, then spinlocks are guaranteed to be degenerate, as
 * we can't be in SMP. The k_spin_release() call is just for validation
 * handling.
 */
static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
	k_spin_release(lock);
	return z_swap_irqlock(key.key);
}

static inline void z_swap_unlocked(void)
{
	(void) z_swap_irqlock(arch_irq_lock());
}

#endif /* !CONFIG_USE_SWITCH */

/**
 * Set up a "dummy" thread, used at early initialization to launch the
 * first thread on a CPU.
 *
 * Needs to set enough fields such that the context switching code can
 * use it to properly store state, which will just be discarded.
 *
 * The memory of the dummy thread can be completely uninitialized.
 */
static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
{
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#ifdef CONFIG_SCHED_CPU_MASK
	dummy_thread->base.cpu_mask = -1;
#endif
	dummy_thread->base.user_options = K_ESSENTIAL;
#ifdef CONFIG_THREAD_STACK_INFO
	dummy_thread->stack_info.start = 0U;
	dummy_thread->stack_info.size = 0U;
#endif
#ifdef CONFIG_USERSPACE
	dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
#endif
	_current_cpu->current = dummy_thread;
}
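
/* Illustrative sketch (hypothetical early-boot path, not part of this
 * header): a CPU bootstrapping itself gives the context switch code a
 * throwaway place to store the outgoing state, then swaps into the
 * first real thread:
 *
 *	struct k_thread dummy;		// may be uninitialized stack memory
 *
 *	z_dummy_thread_init(&dummy);	// _current now points at the dummy
 *	z_swap_unlocked();		// switch to the first ready thread
 */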

#endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */