atomic_c.c

/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2011-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file Atomic ops in pure C
 *
 * This module provides the atomic operators for processors
 * which do not support native atomic operations.
 *
 * The atomic operations are guaranteed to be atomic with respect
 * to interrupt service routines, and to operations performed by peer
 * processors.
 *
 * (originally from x86's atomic.c)
 */

#include <toolchain.h>
#include <arch/cpu.h>
#include <spinlock.h>
#include <sys/atomic.h>
#include <kernel_structs.h>

/* Single global spinlock for atomic operations. This is fallback
 * code, not performance sensitive. At least by not using irq_lock()
 * in SMP contexts we won't contend with legitimate users of the
 * global lock.
 */
static struct k_spinlock lock;

/* For those rare CPUs which support user mode, but not native atomic
 * operations, the best we can do for them is implement the atomic
 * functions as system calls, since in user mode locking a spinlock is
 * forbidden.
 */
#ifdef CONFIG_USERSPACE
#include <syscall_handler.h>

#define ATOMIC_SYSCALL_HANDLER_TARGET(name) \
	static inline atomic_val_t z_vrfy_##name(atomic_t *target) \
	{ \
		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
		return z_impl_##name((atomic_t *)target); \
	}

#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) \
	static inline atomic_val_t z_vrfy_##name(atomic_t *target, \
						 atomic_val_t value) \
	{ \
		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
		return z_impl_##name((atomic_t *)target, value); \
	}
#else
#define ATOMIC_SYSCALL_HANDLER_TARGET(name)
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
#endif
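
/* Illustrative sketch (not part of the original file): with CONFIG_USERSPACE
 * enabled, ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add) expands to a
 * verification stub roughly equivalent to the following, which checks that
 * the calling thread may write the target word before forwarding to the
 * kernel-side implementation:
 *
 *	static inline atomic_val_t z_vrfy_atomic_add(atomic_t *target,
 *						     atomic_val_t value)
 *	{
 *		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
 *		return z_impl_atomic_add((atomic_t *)target, value);
 *	}
 */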

/**
 *
 * @brief Atomic compare-and-set primitive
 *
 * This routine provides the compare-and-set operator. If the original value at
 * <target> equals <old_value>, then <new_value> is stored at <target> and the
 * function returns true.
 *
 * If the original value at <target> does not equal <old_value>, then the store
 * is not done and the function returns false.
 *
 * The reading of the original value at <target>, the comparison,
 * and the write of the new value (if it occurs) all happen atomically with
 * respect to both interrupts and accesses of other processors to <target>.
 *
 * @param target address to be tested
 * @param old_value value to compare against
 * @param new_value value to write if the comparison succeeds
 * @return Returns true if <new_value> is written, false otherwise.
 */
bool z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	k_spinlock_key_t key;
	bool ret = false;

	key = k_spin_lock(&lock);

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}

	k_spin_unlock(&lock, key);

	return ret;
}

#ifdef CONFIG_USERSPACE
bool z_vrfy_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));

	return z_impl_atomic_cas((atomic_t *)target, old_value, new_value);
}
#include <syscalls/atomic_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */
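
/* Illustrative usage sketch (example only, not part of this file): a
 * lock-free increment built from the public atomic_cas() wrapper declared
 * in <sys/atomic.h>, retrying until no other context modified the word
 * between the read and the compare-and-set.
 */
#if 0 /* example only, compiled out */
static void example_cas_increment(atomic_t *counter)
{
	atomic_val_t old;

	do {
		old = atomic_get(counter);
	} while (!atomic_cas(counter, old, old + 1));
}
#endif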

bool z_impl_atomic_ptr_cas(atomic_ptr_t *target, atomic_ptr_val_t old_value,
			   atomic_ptr_val_t new_value)
{
	k_spinlock_key_t key;
	bool ret = false;

	key = k_spin_lock(&lock);

	if (*target == old_value) {
		*target = new_value;
		ret = true;
	}

	k_spin_unlock(&lock, key);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline bool z_vrfy_atomic_ptr_cas(atomic_ptr_t *target,
					 atomic_ptr_val_t old_value,
					 atomic_ptr_val_t new_value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));

	return z_impl_atomic_ptr_cas(target, old_value, new_value);
}
#include <syscalls/atomic_ptr_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */
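
/* Illustrative usage sketch (example only, not part of this file): using the
 * public atomic_ptr_cas() wrapper from <sys/atomic.h> to publish a lazily
 * created object exactly once; only the context whose CAS from NULL succeeds
 * keeps its own allocation, everyone else reads the published pointer.
 */
#if 0 /* example only, compiled out */
static void *example_publish_once(atomic_ptr_t *slot, void *fresh)
{
	if (atomic_ptr_cas(slot, NULL, fresh)) {
		return fresh;			/* we published our object */
	}

	return atomic_ptr_get(slot);		/* another context won the race */
}
#endif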

/**
 *
 * @brief Atomic addition primitive
 *
 * This routine provides the atomic addition operator. The <value> is
 * atomically added to the value at <target>, placing the result at <target>,
 * and the old value from <target> is returned.
 *
 * @param target memory location to add to
 * @param value the value to add
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target += value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add);
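
/* Illustrative usage sketch (example only, not part of this file): because
 * atomic_add() returns the value held before the addition, it can hand out
 * unique sequence numbers; each caller receives a distinct pre-increment
 * value even when called concurrently.
 */
#if 0 /* example only, compiled out */
static atomic_val_t example_next_ticket(atomic_t *next)
{
	return atomic_add(next, 1);
}
#endif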

/**
 *
 * @brief Atomic subtraction primitive
 *
 * This routine provides the atomic subtraction operator. The <value> is
 * atomically subtracted from the value at <target>, placing the result at
 * <target>, and the old value from <target> is returned.
 *
 * @param target the memory location to subtract from
 * @param value the value to subtract
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_sub(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target -= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_sub);
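
/* Illustrative usage sketch (example only, not part of this file): using the
 * previous value returned by atomic_sub() to detect when a reference count
 * drops to zero; the caller that observed the old value 1 performed the
 * final release and may free the underlying resource.
 */
#if 0 /* example only, compiled out */
static bool example_ref_put(atomic_t *refcount)
{
	return atomic_sub(refcount, 1) == 1;
}
#endif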

/**
 *
 * @brief Atomic get primitive
 *
 * @param target memory location to read from
 *
 * This routine provides the atomic get primitive to atomically read
 * a value from <target>. It simply does an ordinary load. Note that <target>
 * is expected to be aligned to a 4-byte boundary.
 *
 * @return The value read from <target>
 */
atomic_val_t atomic_get(const atomic_t *target)
{
	return *target;
}

atomic_ptr_val_t atomic_ptr_get(const atomic_ptr_t *target)
{
	return *target;
}

/**
 *
 * @brief Atomic get-and-set primitive
 *
 * This routine provides the atomic set operator. The <value> is atomically
 * written at <target> and the previous value at <target> is returned.
 *
 * @param target the memory location to write to
 * @param value the value to write
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_set(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_set);

atomic_ptr_val_t z_impl_atomic_ptr_set(atomic_ptr_t *target,
				       atomic_ptr_val_t value)
{
	k_spinlock_key_t key;
	atomic_ptr_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = value;

	k_spin_unlock(&lock, key);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline atomic_ptr_val_t z_vrfy_atomic_ptr_set(atomic_ptr_t *target,
						     atomic_ptr_val_t value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));

	return z_impl_atomic_ptr_set(target, value);
}
#include <syscalls/atomic_ptr_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
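
/* Illustrative usage sketch (example only, not part of this file): the
 * get-and-set form lets a consumer drain a pending-event word in one step,
 * observing the bits that were posted while simultaneously clearing them.
 */
#if 0 /* example only, compiled out */
static atomic_val_t example_consume_events(atomic_t *pending)
{
	return atomic_set(pending, 0);
}
#endif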

/**
 *
 * @brief Atomic bitwise inclusive OR primitive
 *
 * This routine provides the atomic bitwise inclusive OR operator. The <value>
 * is atomically bitwise OR'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to OR
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_or(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target |= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_or);
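
/* Illustrative usage sketch (example only, not part of this file): setting a
 * flag bit with atomic_or() and using the returned previous value to tell
 * whether this caller was the one that transitioned the bit from 0 to 1.
 */
#if 0 /* example only, compiled out */
static bool example_try_claim(atomic_t *flags)
{
	/* 0x01 is an arbitrary "busy" bit chosen for this example */
	return (atomic_or(flags, 0x01) & 0x01) == 0;
}
#endif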

/**
 *
 * @brief Atomic bitwise exclusive OR (XOR) primitive
 *
 * This routine provides the atomic bitwise exclusive OR operator. The <value>
 * is atomically bitwise XOR'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to XOR
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_xor(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target ^= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_xor);

/**
 *
 * @brief Atomic bitwise AND primitive
 *
 * This routine provides the atomic bitwise AND operator. The <value> is
 * atomically bitwise AND'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to AND
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_and(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target &= value;

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_and);
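
/* Illustrative usage sketch (example only, not part of this file): clearing
 * a flag bit by AND-ing with its complement; the previous value returned by
 * atomic_and() shows whether the bit was actually set beforehand.
 */
#if 0 /* example only, compiled out */
static bool example_release(atomic_t *flags)
{
	/* 0x01 matches the arbitrary "busy" bit used in the OR example */
	return (atomic_and(flags, ~(atomic_val_t)0x01) & 0x01) != 0;
}
#endif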

/**
 *
 * @brief Atomic bitwise NAND primitive
 *
 * This routine provides the atomic bitwise NAND operator. The <value> is
 * atomically bitwise NAND'ed with the value at <target>, placing the result
 * at <target>, and the previous value at <target> is returned.
 *
 * @param target the memory location to be modified
 * @param value the value to NAND
 *
 * @return The previous value from <target>
 */
atomic_val_t z_impl_atomic_nand(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key;
	atomic_val_t ret;

	key = k_spin_lock(&lock);

	ret = *target;
	*target = ~(*target & value);

	k_spin_unlock(&lock, key);

	return ret;
}

ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);

#ifdef CONFIG_USERSPACE
#include <syscalls/atomic_add_mrsh.c>
#include <syscalls/atomic_sub_mrsh.c>
#include <syscalls/atomic_set_mrsh.c>
#include <syscalls/atomic_or_mrsh.c>
#include <syscalls/atomic_xor_mrsh.c>
#include <syscalls/atomic_and_mrsh.c>
#include <syscalls/atomic_nand_mrsh.c>
#endif