123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406 |
- /*
- * Copyright (c) 2016 Intel Corporation
- * Copyright (c) 2011-2014 Wind River Systems, Inc.
- *
- * SPDX-License-Identifier: Apache-2.0
- */
- /**
- * @file Atomic ops in pure C
- *
- * This module provides the atomic operators for processors
- * which do not support native atomic operations.
- *
- * The atomic operations are guaranteed to be atomic with respect
- * to interrupt service routines, and to operations performed by peer
- * processors.
- *
- * (originally from x86's atomic.c)
- */
- #include <toolchain.h>
- #include <arch/cpu.h>
- #include <spinlock.h>
- #include <sys/atomic.h>
- #include <kernel_structs.h>
/* Single global spinlock for atomic operations. This is fallback
 * code, not performance sensitive. At least by not using irq_lock()
 * in SMP contexts we won't contend with legitimate users of the
 * global lock.
 */
- static struct k_spinlock lock;
- /* For those rare CPUs which support user mode, but not native atomic
- * operations, the best we can do for them is implement the atomic
- * functions as system calls, since in user mode locking a spinlock is
- * forbidden.
- */
#ifdef CONFIG_USERSPACE
#include <syscall_handler.h>

/* Generates the userspace verification handler for an atomic operation
 * that takes only a <target> pointer: validates that the calling thread
 * has write access to <target>, then forwards to the z_impl_ version.
 */
#define ATOMIC_SYSCALL_HANDLER_TARGET(name) \
	static inline atomic_val_t z_vrfy_##name(atomic_t *target) \
	{ \
		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
		return z_impl_##name((atomic_t *)target); \
	}

/* Same as above, for atomic operations taking <target> plus a value
 * operand (add, sub, or, etc.).
 */
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) \
	static inline atomic_val_t z_vrfy_##name(atomic_t *target, \
						 atomic_val_t value) \
	{ \
		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
		return z_impl_##name((atomic_t *)target, value); \
	}
#else
/* No userspace: verification handlers are not needed, expand to nothing. */
#define ATOMIC_SYSCALL_HANDLER_TARGET(name)
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
#endif
- /**
- *
- * @brief Atomic compare-and-set primitive
- *
- * This routine provides the compare-and-set operator. If the original value at
- * <target> equals <oldValue>, then <newValue> is stored at <target> and the
- * function returns true.
- *
- * If the original value at <target> does not equal <oldValue>, then the store
- * is not done and the function returns false.
- *
- * The reading of the original value at <target>, the comparison,
- * and the write of the new value (if it occurs) all happen atomically with
- * respect to both interrupts and accesses of other processors to <target>.
- *
- * @param target address to be tested
- * @param old_value value to compare against
- * @param new_value value to compare against
- * @return Returns true if <new_value> is written, false otherwise.
- */
- bool z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
- atomic_val_t new_value)
- {
- k_spinlock_key_t key;
- int ret = false;
- key = k_spin_lock(&lock);
- if (*target == old_value) {
- *target = new_value;
- ret = true;
- }
- k_spin_unlock(&lock, key);
- return ret;
- }
#ifdef CONFIG_USERSPACE
/* Userspace verification handler for atomic_cas(): check that the
 * caller has write access to <target> before invoking the kernel
 * implementation.
 */
bool z_vrfy_atomic_cas(atomic_t *target, atomic_val_t old_value,
		       atomic_val_t new_value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
	return z_impl_atomic_cas((atomic_t *)target, old_value, new_value);
}
/* Generated marshalling stub for the atomic_cas system call */
#include <syscalls/atomic_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */
- bool z_impl_atomic_ptr_cas(atomic_ptr_t *target, atomic_ptr_val_t old_value,
- atomic_ptr_val_t new_value)
- {
- k_spinlock_key_t key;
- int ret = false;
- key = k_spin_lock(&lock);
- if (*target == old_value) {
- *target = new_value;
- ret = true;
- }
- k_spin_unlock(&lock, key);
- return ret;
- }
#ifdef CONFIG_USERSPACE
/* Userspace verification handler for atomic_ptr_cas(): check that the
 * caller has write access to <target> before invoking the kernel
 * implementation.
 */
static inline bool z_vrfy_atomic_ptr_cas(atomic_ptr_t *target,
					 atomic_ptr_val_t old_value,
					 atomic_ptr_val_t new_value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));
	return z_impl_atomic_ptr_cas(target, old_value, new_value);
}
/* Generated marshalling stub for the atomic_ptr_cas system call */
#include <syscalls/atomic_ptr_cas_mrsh.c>
#endif /* CONFIG_USERSPACE */
- /**
- *
- * @brief Atomic addition primitive
- *
- * This routine provides the atomic addition operator. The <value> is
- * atomically added to the value at <target>, placing the result at <target>,
- * and the old value from <target> is returned.
- *
- * @param target memory location to add to
- * @param value the value to add
- *
- * @return The previous value from <target>
- */
- atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
- {
- k_spinlock_key_t key;
- atomic_val_t ret;
- key = k_spin_lock(&lock);
- ret = *target;
- *target += value;
- k_spin_unlock(&lock, key);
- return ret;
- }
- ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add);
- /**
- *
- * @brief Atomic subtraction primitive
- *
- * This routine provides the atomic subtraction operator. The <value> is
- * atomically subtracted from the value at <target>, placing the result at
- * <target>, and the old value from <target> is returned.
- *
- * @param target the memory location to subtract from
- * @param value the value to subtract
- *
- * @return The previous value from <target>
- */
- atomic_val_t z_impl_atomic_sub(atomic_t *target, atomic_val_t value)
- {
- k_spinlock_key_t key;
- atomic_val_t ret;
- key = k_spin_lock(&lock);
- ret = *target;
- *target -= value;
- k_spin_unlock(&lock, key);
- return ret;
- }
- ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_sub);
- /**
- *
- * @brief Atomic get primitive
- *
- * @param target memory location to read from
- *
- * This routine provides the atomic get primitive to atomically read
- * a value from <target>. It simply does an ordinary load. Note that <target>
- * is expected to be aligned to a 4-byte boundary.
- *
- * @return The value read from <target>
- */
- atomic_val_t atomic_get(const atomic_t *target)
- {
- return *target;
- }
/**
 * @brief Atomic get primitive for pointer values
 *
 * Atomically read the pointer stored at <target>; a plain load,
 * mirroring atomic_get() above.
 *
 * @param target memory location to read from
 *
 * @return The pointer value read from <target>
 */
atomic_ptr_val_t atomic_ptr_get(const atomic_ptr_t *target)
{
	return *target;
}
- /**
- *
- * @brief Atomic get-and-set primitive
- *
- * This routine provides the atomic set operator. The <value> is atomically
- * written at <target> and the previous value at <target> is returned.
- *
- * @param target the memory location to write to
- * @param value the value to write
- *
- * @return The previous value from <target>
- */
- atomic_val_t z_impl_atomic_set(atomic_t *target, atomic_val_t value)
- {
- k_spinlock_key_t key;
- atomic_val_t ret;
- key = k_spin_lock(&lock);
- ret = *target;
- *target = value;
- k_spin_unlock(&lock, key);
- return ret;
- }
- ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_set);
- atomic_ptr_val_t z_impl_atomic_ptr_set(atomic_ptr_t *target,
- atomic_ptr_val_t value)
- {
- k_spinlock_key_t key;
- atomic_ptr_val_t ret;
- key = k_spin_lock(&lock);
- ret = *target;
- *target = value;
- k_spin_unlock(&lock, key);
- return ret;
- }
#ifdef CONFIG_USERSPACE
/* Userspace verification handler for atomic_ptr_set(): check that the
 * caller has write access to <target> before invoking the kernel
 * implementation.
 */
static inline atomic_ptr_val_t z_vrfy_atomic_ptr_set(atomic_ptr_t *target,
						     atomic_ptr_val_t value)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_ptr_t)));
	return z_impl_atomic_ptr_set(target, value);
}
/* Generated marshalling stub for the atomic_ptr_set system call */
#include <syscalls/atomic_ptr_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
- /**
- *
- * @brief Atomic bitwise inclusive OR primitive
- *
- * This routine provides the atomic bitwise inclusive OR operator. The <value>
- * is atomically bitwise OR'ed with the value at <target>, placing the result
- * at <target>, and the previous value at <target> is returned.
- *
- * @param target the memory location to be modified
- * @param value the value to OR
- *
- * @return The previous value from <target>
- */
- atomic_val_t z_impl_atomic_or(atomic_t *target, atomic_val_t value)
- {
- k_spinlock_key_t key;
- atomic_val_t ret;
- key = k_spin_lock(&lock);
- ret = *target;
- *target |= value;
- k_spin_unlock(&lock, key);
- return ret;
- }
- ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_or);
- /**
- *
- * @brief Atomic bitwise exclusive OR (XOR) primitive
- *
- * This routine provides the atomic bitwise exclusive OR operator. The <value>
- * is atomically bitwise XOR'ed with the value at <target>, placing the result
- * at <target>, and the previous value at <target> is returned.
- *
- * @param target the memory location to be modified
- * @param value the value to XOR
- *
- * @return The previous value from <target>
- */
- atomic_val_t z_impl_atomic_xor(atomic_t *target, atomic_val_t value)
- {
- k_spinlock_key_t key;
- atomic_val_t ret;
- key = k_spin_lock(&lock);
- ret = *target;
- *target ^= value;
- k_spin_unlock(&lock, key);
- return ret;
- }
- ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_xor);
- /**
- *
- * @brief Atomic bitwise AND primitive
- *
- * This routine provides the atomic bitwise AND operator. The <value> is
- * atomically bitwise AND'ed with the value at <target>, placing the result
- * at <target>, and the previous value at <target> is returned.
- *
- * @param target the memory location to be modified
- * @param value the value to AND
- *
- * @return The previous value from <target>
- */
- atomic_val_t z_impl_atomic_and(atomic_t *target, atomic_val_t value)
- {
- k_spinlock_key_t key;
- atomic_val_t ret;
- key = k_spin_lock(&lock);
- ret = *target;
- *target &= value;
- k_spin_unlock(&lock, key);
- return ret;
- }
- ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_and);
- /**
- *
- * @brief Atomic bitwise NAND primitive
- *
- * This routine provides the atomic bitwise NAND operator. The <value> is
- * atomically bitwise NAND'ed with the value at <target>, placing the result
- * at <target>, and the previous value at <target> is returned.
- *
- * @param target the memory location to be modified
- * @param value the value to NAND
- *
- * @return The previous value from <target>
- */
- atomic_val_t z_impl_atomic_nand(atomic_t *target, atomic_val_t value)
- {
- k_spinlock_key_t key;
- atomic_val_t ret;
- key = k_spin_lock(&lock);
- ret = *target;
- *target = ~(*target & value);
- k_spin_unlock(&lock, key);
- return ret;
- }
- ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);
- #ifdef CONFIG_USERSPACE
- #include <syscalls/atomic_add_mrsh.c>
- #include <syscalls/atomic_sub_mrsh.c>
- #include <syscalls/atomic_set_mrsh.c>
- #include <syscalls/atomic_or_mrsh.c>
- #include <syscalls/atomic_xor_mrsh.c>
- #include <syscalls/atomic_and_mrsh.c>
- #include <syscalls/atomic_nand_mrsh.c>
- #endif
|