spinlock.h

/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock(). Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if !defined(CONFIG_SMP) && !defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
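
/* Declaration sketch (illustrative only, not part of this header):
 * a spinlock needs no runtime initializer; zero-initialized storage
 * is an unlocked lock, whether statically allocated or embedded in
 * another struct. The names `my_lock`, `my_queue` and `my_count`
 * below are hypothetical.
 *
 *	static struct k_spinlock my_lock;
 *
 *	struct my_queue {
 *		struct k_spinlock lock;
 *		int my_count;
 *	};
 */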

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock(). It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time. Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock(). The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time. Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested. It is legal to lock an
 * (unlocked) spin lock while holding a different lock. Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place. Implementations may be free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}
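
/* Usage sketch (illustrative only; `my_lock`, `my_count` and
 * my_increment() are hypothetical names, not part of this API). The
 * key returned by k_spin_lock() carries the saved interrupt state and
 * must be handed back to k_spin_unlock() on the same code path:
 *
 *	static struct k_spinlock my_lock;
 *	static int my_count;
 *
 *	static void my_increment(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		my_count++;
 *		k_spin_unlock(&my_lock, key);
 *	}
 */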

/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock(). After this
 * function is called, any CPU will be able to acquire the lock. If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested. A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned. Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or to
 * pass a key parameter other than the one returned from
 * k_spin_lock(), are illegal. When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}
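
/* Nesting sketch (illustrative only; `my_lock_a`, `my_lock_b` and
 * my_move() are hypothetical). Distinct locks may be nested, but each
 * must be released with its own key, in reverse acquisition order:
 *
 *	static struct k_spinlock my_lock_a;
 *	static struct k_spinlock my_lock_b;
 *
 *	static void my_move(void)
 *	{
 *		k_spinlock_key_t key_a = k_spin_lock(&my_lock_a);
 *		k_spinlock_key_t key_b = k_spin_lock(&my_lock_b);
 *
 *		k_spin_unlock(&my_lock_b, key_b);
 *		k_spin_unlock(&my_lock_a, key_a);
 *	}
 */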

/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}
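
/* Hand-off sketch (illustrative only; my_handoff() and `my_lock` are
 * hypothetical; assumes kernel-internal code, since this function and
 * the key's contents are not application API). After k_spin_release()
 * the lock itself is free, but interrupts stay masked, so the caller
 * still owes an arch_irq_unlock() with the state saved by the
 * matching k_spin_lock():
 *
 *	static struct k_spinlock my_lock;
 *
 *	static void my_handoff(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		k_spin_release(&my_lock);
 *		arch_irq_unlock(key.key);
 *	}
 */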

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */