/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief mutex kernel services
 *
 * This module contains routines for handling mutex locking and unlocking.
 *
 * Mutexes implement a priority inheritance algorithm that boosts the priority
 * level of the owning thread to match the priority level of the highest
 * priority thread waiting on the mutex.
 *
 * Each mutex that contributes to priority inheritance must be released in the
 * reverse order in which it was acquired. Furthermore, each subsequent mutex
 * that contributes to raising the owning thread's priority level must be
 * acquired at a point after the most recent "bumping" of the priority level.
 *
 * For example, if thread A has two mutexes contributing to the raising of its
 * priority level, the second mutex M2 must be acquired by thread A after
 * thread A's priority level was bumped due to owning the first mutex M1.
 * When releasing the mutexes, thread A must release M2 before it releases M1.
 * Failure to follow this nested model may result in threads running at
 * unexpected priority levels (too high, or too low).
 */
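
/*
 * A minimal sketch of the nested model described above (illustrative
 * only, not part of the kernel): thread A acquires hypothetical
 * mutexes m1 and m2 and must release them in reverse order. Assumes
 * an application thread entry with Zephyr's usual signature:
 *
 *	K_MUTEX_DEFINE(m1);
 *	K_MUTEX_DEFINE(m2);
 *
 *	void thread_a_fn(void *p1, void *p2, void *p3)
 *	{
 *		k_mutex_lock(&m1, K_FOREVER);	// may bump A's priority
 *		k_mutex_lock(&m2, K_FOREVER);	// acquired after the bump
 *
 *		// ... protected work ...
 *
 *		k_mutex_unlock(&m2);		// reverse order: m2 first
 *		k_mutex_unlock(&m1);
 *	}
 */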

#include <kernel.h>
#include <kernel_structs.h>
#include <toolchain.h>
#include <ksched.h>
#include <wait_q.h>
#include <errno.h>
#include <init.h>
#include <syscall_handler.h>
#include <tracing/tracing.h>
#include <sys/check.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* We use a global spinlock here because some of the synchronization
 * is protecting things like owner thread priorities which aren't
 * "part of" a single k_mutex. Should move those bits of the API
 * under the scheduler lock so we can break this up.
 */
static struct k_spinlock lock;

int z_impl_k_mutex_init(struct k_mutex *mutex)
{
	mutex->owner = NULL;
	mutex->lock_count = 0U;

	z_waitq_init(&mutex->wait_q);
	z_object_init(mutex);

	SYS_PORT_TRACING_OBJ_INIT(k_mutex, mutex, 0);

	return 0;
}
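
/*
 * Usage sketch for the public k_mutex_init() wrapper (illustrative;
 * my_mutex is hypothetical). A mutex may also be defined and
 * initialized at build time with K_MUTEX_DEFINE():
 *
 *	struct k_mutex my_mutex;
 *
 *	k_mutex_init(&my_mutex);	// currently always returns 0
 */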

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_mutex_init(struct k_mutex *mutex)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_init(mutex);
}
#include <syscalls/k_mutex_init_mrsh.c>
#endif

static int32_t new_prio_for_inheritance(int32_t target, int32_t limit)
{
	int new_prio = z_is_prio_higher(target, limit) ? target : limit;

	new_prio = z_get_new_prio_with_ceiling(new_prio);

	return new_prio;
}
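
/*
 * Worked example (hedged; relies on Zephyr's convention that
 * numerically lower values mean higher priority): with a waiting
 * thread at prio -2 ("target") and an owner at prio 5 ("limit"),
 * z_is_prio_higher(-2, 5) is true and the owner is boosted to -2;
 * a waiter at prio 7 would leave the owner at 5. Either result is
 * then clamped by z_get_new_prio_with_ceiling().
 */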

static bool adjust_owner_prio(struct k_mutex *mutex, int32_t new_prio)
{
	if (mutex->owner->base.prio != new_prio) {
		LOG_DBG("%p (ready (y/n): %c) prio changed to %d (was %d)",
			mutex->owner, z_is_thread_ready(mutex->owner) ?
			'y' : 'n',
			new_prio, mutex->owner->base.prio);

		return z_set_prio(mutex->owner, new_prio);
	}
	return false;
}

int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
{
	int new_prio;
	k_spinlock_key_t key;
	bool resched = false;

	__ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mutex, lock, mutex, timeout);

	key = k_spin_lock(&lock);

	if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {
		mutex->owner_orig_prio = (mutex->lock_count == 0U) ?
					 _current->base.prio :
					 mutex->owner_orig_prio;

		mutex->lock_count++;
		mutex->owner = _current;

		LOG_DBG("%p took mutex %p, count: %d, orig prio: %d",
			_current, mutex, mutex->lock_count,
			mutex->owner_orig_prio);

		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, 0);

		return 0;
	}

	if (unlikely(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, -EBUSY);

		return -EBUSY;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mutex, lock, mutex, timeout);

	new_prio = new_prio_for_inheritance(_current->base.prio,
					    mutex->owner->base.prio);

	LOG_DBG("adjusting prio up on mutex %p", mutex);

	if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) {
		resched = adjust_owner_prio(mutex, new_prio);
	}

	int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout);

	LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex);

	/* z_pend_curr() returns 0 on success, so 0 means we got the mutex */
	LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex,
		got_mutex ? 'n' : 'y');

	if (got_mutex == 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, 0);
		return 0;
	}

	/* timed out */

	LOG_DBG("%p timeout on mutex %p", _current, mutex);

	key = k_spin_lock(&lock);

	struct k_thread *waiter = z_waitq_head(&mutex->wait_q);

	new_prio = (waiter != NULL) ?
		new_prio_for_inheritance(waiter->base.prio, mutex->owner_orig_prio) :
		mutex->owner_orig_prio;

	LOG_DBG("adjusting prio down on mutex %p", mutex);

	resched = adjust_owner_prio(mutex, new_prio) || resched;

	if (resched) {
		z_reschedule(&lock, key);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, -EAGAIN);

	return -EAGAIN;
}
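
/*
 * Usage sketch for the public k_mutex_lock() wrapper (illustrative;
 * my_mutex is hypothetical). The return value distinguishes the two
 * failure paths above:
 *
 *	if (k_mutex_lock(&my_mutex, K_MSEC(100)) == 0) {
 *		// ... mutex held; do protected work ...
 *		k_mutex_unlock(&my_mutex);
 *	} else {
 *		// -EBUSY: unavailable with K_NO_WAIT
 *		// -EAGAIN: timed out while waiting
 *	}
 */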

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex,
				      k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_lock(mutex, timeout);
}
#include <syscalls/k_mutex_lock_mrsh.c>
#endif

int z_impl_k_mutex_unlock(struct k_mutex *mutex)
{
	struct k_thread *new_owner;

	__ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mutex, unlock, mutex);

	CHECKIF(mutex->owner == NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EINVAL);

		return -EINVAL;
	}

	/*
	 * The current thread does not own the mutex.
	 */
	CHECKIF(mutex->owner != _current) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EPERM);

		return -EPERM;
	}

	/*
	 * Attempt to unlock a mutex which is unlocked. mutex->lock_count
	 * cannot be zero if the current thread is equal to mutex->owner,
	 * therefore no underflow check is required. Use assert to catch
	 * undefined behavior.
	 */
	__ASSERT_NO_MSG(mutex->lock_count > 0U);

	z_sched_lock();

	LOG_DBG("mutex %p lock_count: %d", mutex, mutex->lock_count);

	/*
	 * If we are the owner and the count is greater than 1, then decrement
	 * the count, keep the current thread as the owner, and return.
	 */
	if (mutex->lock_count > 1U) {
		mutex->lock_count--;
		goto k_mutex_unlock_return;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);

	adjust_owner_prio(mutex, mutex->owner_orig_prio);

	/* Get the new owner, if any */
	new_owner = z_unpend_first_thread(&mutex->wait_q);

	mutex->owner = new_owner;

	LOG_DBG("new owner of mutex %p: %p (prio: %d)",
		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);

	if (new_owner != NULL) {
		/*
		 * The new owner is already of higher or equal priority than
		 * the first waiter since the wait queue is priority-based:
		 * no need to adjust its priority.
		 */
		mutex->owner_orig_prio = new_owner->base.prio;
		arch_thread_return_value_set(new_owner, 0);
		z_ready_thread(new_owner);
		z_reschedule(&lock, key);
	} else {
		mutex->lock_count = 0U;
		k_spin_unlock(&lock, key);
	}

k_mutex_unlock_return:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, 0);

	k_sched_unlock();

	return 0;
}
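
/*
 * Recursive locking sketch (illustrative; my_mutex is hypothetical):
 * the owner may re-lock the mutex, and each k_mutex_lock() must be
 * balanced by a k_mutex_unlock(). Ownership is only released when
 * lock_count drops back to zero:
 *
 *	k_mutex_lock(&my_mutex, K_FOREVER);	// lock_count: 1
 *	k_mutex_lock(&my_mutex, K_FOREVER);	// lock_count: 2, same owner
 *	k_mutex_unlock(&my_mutex);		// lock_count: 1, still owner
 *	k_mutex_unlock(&my_mutex);		// released; a waiter may run
 */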

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_mutex_unlock(struct k_mutex *mutex)
{
	Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
	return z_impl_k_mutex_unlock(mutex);
}
#include <syscalls/k_mutex_unlock_mrsh.c>
#endif