spinlock.h

/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock(). Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
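
/* Illustrative sketch (not part of the original header): the doc
 * comment above notes that any number of spinlocks may be defined in
 * application code. A common pattern is a statically allocated lock
 * guarding a small piece of shared data. The names below are
 * hypothetical, for illustration only:
 *
 *	static struct k_spinlock my_lock;	(unlocked when zero-initialized)
 *	static int my_shared_counter;		(data guarded by my_lock)
 */
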
/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock(). It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time. Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock(). The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time. Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested. It is legal to lock an
 * (unlocked) spin lock while holding a different lock. Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place. Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 *
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}
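
/* Usage sketch (illustrative, not part of the original header),
 * reusing the hypothetical my_lock and my_shared_counter objects from
 * the sketch above: the returned key must be kept and passed back to
 * k_spin_unlock() on the same lock, and the critical section should be
 * kept short since other CPUs may be spinning:
 *
 *	k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *	my_shared_counter++;		(critical section)
 *
 *	k_spin_unlock(&my_lock, key);
 */
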
/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock(). After this
 * function is called, any CPU will be able to acquire the lock. If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested. A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned. Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or to
 * pass a key parameter other than the one returned from
 * k_spin_lock() are illegal. When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}
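
/* Nesting sketch (illustrative, with hypothetical lock names lock_a
 * and lock_b): separate spinlocks may be nested, but each unlock must
 * use the key returned by the matching lock call, and the most
 * recently taken lock must be released first:
 *
 *	k_spinlock_key_t ka = k_spin_lock(&lock_a);
 *	k_spinlock_key_t kb = k_spin_lock(&lock_b);	(different lock: legal)
 *	...
 *	k_spin_unlock(&lock_b, kb);	(most recently locked, released first)
 *	k_spin_unlock(&lock_a, ka);
 */
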
/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */