sem.c

/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel semaphore object.
 *
 * The semaphores are of the 'counting' type, i.e. each 'give' operation will
 * increment the internal count by 1, if no thread is pending on it. The 'init'
 * call initializes the count to 'initial_count'. Following multiple 'give'
 * operations, the same number of 'take' operations can be performed without
 * the calling thread having to pend on the semaphore, or the calling task
 * having to poll.
 */
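
/*
 * Illustrative usage sketch (hypothetical application code, not part of
 * this file): a statically defined counting semaphore used by a producer
 * to signal a consumer thread. The names my_sem, producer() and consumer()
 * are assumptions made for the example.
 *
 *        K_SEM_DEFINE(my_sem, 0, 1);
 *
 *        void producer(void)
 *        {
 *                k_sem_give(&my_sem);
 *        }
 *
 *        void consumer(void)
 *        {
 *                if (k_sem_take(&my_sem, K_MSEC(50)) != 0) {
 *                        printk("input data not available\n");
 *                }
 *        }
 */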

#include <kernel.h>
#include <kernel_structs.h>
#include <toolchain.h>
#include <wait_q.h>
#include <sys/dlist.h>
#include <ksched.h>
#include <init.h>
#include <syscall_handler.h>
#include <tracing/tracing.h>
#include <sys/check.h>

/* We use a system-wide lock to synchronize semaphores, which has
 * unfortunate performance impact vs. using a per-object lock
 * (semaphores are *very* widely used). But per-object locks require
 * significant extra RAM. A properly spin-aware semaphore
 * implementation would spin on atomic access to the count variable,
 * and not a spinlock per se. Useful optimization for the future...
 */
static struct k_spinlock lock;
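
/*
 * Sketch of the spin-aware fast path hinted at above (an assumption about a
 * possible future optimization, not code in this file): the take path would
 * attempt an atomic compare-and-swap on the count, assuming it were stored
 * as an atomic_t, before falling back to the spinlock/wait-queue slow path.
 * sem_try_take_fast() is a hypothetical name.
 *
 *        static int sem_try_take_fast(atomic_t *count)
 *        {
 *                atomic_val_t old;
 *
 *                do {
 *                        old = atomic_get(count);
 *                        if (old == 0) {
 *                                return -EBUSY;
 *                        }
 *                } while (!atomic_cas(count, old, old - 1));
 *
 *                return 0;
 *        }
 */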

int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
                      unsigned int limit)
{
        /*
         * Limit cannot be zero and count cannot be greater than limit
         */
        CHECKIF(limit == 0U || limit > K_SEM_MAX_LIMIT || initial_count > limit) {
                SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, -EINVAL);

                return -EINVAL;
        }

        sem->count = initial_count;
        sem->limit = limit;

        SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, 0);

        z_waitq_init(&sem->wait_q);
#if defined(CONFIG_POLL)
        sys_dlist_init(&sem->poll_events);
#endif
        z_object_init(sem);

        return 0;
}
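
/*
 * Illustrative run-time initialization (hypothetical application code),
 * showing which argument combinations the check above rejects:
 *
 *        struct k_sem my_sem;
 *
 *        int rc = k_sem_init(&my_sem, 0, 10);      -> 0
 *        rc = k_sem_init(&my_sem, 5, 0);           -> -EINVAL (zero limit)
 *        rc = k_sem_init(&my_sem, 11, 10);         -> -EINVAL (count > limit)
 */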

#ifdef CONFIG_USERSPACE
int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count,
                      unsigned int limit)
{
        Z_OOPS(Z_SYSCALL_OBJ_INIT(sem, K_OBJ_SEM));

        return z_impl_k_sem_init(sem, initial_count, limit);
}
#include <syscalls/k_sem_init_mrsh.c>
#endif

static inline void handle_poll_events(struct k_sem *sem)
{
#ifdef CONFIG_POLL
        z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
#else
        ARG_UNUSED(sem);
#endif
}

void z_impl_k_sem_give(struct k_sem *sem)
{
        k_spinlock_key_t key = k_spin_lock(&lock);
        struct k_thread *thread;

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, give, sem);

        thread = z_unpend_first_thread(&sem->wait_q);

        if (thread != NULL) {
                /* Hand the semaphore directly to the first waiter. */
                arch_thread_return_value_set(thread, 0);
                z_ready_thread(thread);
        } else {
                /* No waiter: bump the count, saturating at the limit. */
                sem->count += (sem->count != sem->limit) ? 1U : 0U;
                handle_poll_events(sem);
        }

        z_reschedule(&lock, key);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, give, sem);
}
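
/*
 * Illustrative ISR usage (hypothetical application code): k_sem_give()
 * never blocks, so it can be called from an interrupt handler to wake a
 * waiting thread. my_isr() and my_sem are assumed names.
 *
 *        void my_isr(const void *arg)
 *        {
 *                ARG_UNUSED(arg);
 *                k_sem_give(&my_sem);
 *        }
 */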

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_sem_give(struct k_sem *sem)
{
        Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
        z_impl_k_sem_give(sem);
}
#include <syscalls/k_sem_give_mrsh.c>
#endif

int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
        int ret = 0;

        /* A blocking take is not allowed from ISR context. */
        __ASSERT(((arch_is_in_isr() == false) ||
                  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

        k_spinlock_key_t key = k_spin_lock(&lock);

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, take, sem, timeout);

        if (likely(sem->count > 0U)) {
                sem->count--;
                k_spin_unlock(&lock, key);
                ret = 0;
                goto out;
        }

        if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                k_spin_unlock(&lock, key);
                ret = -EBUSY;
                goto out;
        }

        SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_sem, take, sem, timeout);

        ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);

out:
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, take, sem, timeout, ret);
        return ret;
}
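
/*
 * Illustrative return-value handling (hypothetical application code):
 * 0 means the semaphore was taken, -EBUSY means K_NO_WAIT was given and
 * the count was zero, and -EAGAIN means the wait timed out or the
 * semaphore was reset while this thread was pended. process_data() and
 * handle_timeout() are placeholder names.
 *
 *        int rc = k_sem_take(&my_sem, K_MSEC(100));
 *
 *        if (rc == 0) {
 *                process_data();
 *        } else {
 *                handle_timeout();
 *        }
 */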

void z_impl_k_sem_reset(struct k_sem *sem)
{
        struct k_thread *thread;
        k_spinlock_key_t key = k_spin_lock(&lock);

        /* Wake every pended thread; each of their k_sem_take() calls
         * returns -EAGAIN.
         */
        while (true) {
                thread = z_unpend_first_thread(&sem->wait_q);
                if (thread == NULL) {
                        break;
                }
                arch_thread_return_value_set(thread, -EAGAIN);
                z_ready_thread(thread);
        }
        sem->count = 0;

        SYS_PORT_TRACING_OBJ_FUNC(k_sem, reset, sem);

        handle_poll_events(sem);

        z_reschedule(&lock, key);
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
        Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
        return z_impl_k_sem_take((struct k_sem *)sem, timeout);
}
#include <syscalls/k_sem_take_mrsh.c>

static inline void z_vrfy_k_sem_reset(struct k_sem *sem)
{
        Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
        z_impl_k_sem_reset(sem);
}
#include <syscalls/k_sem_reset_mrsh.c>

static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem)
{
        Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
        return z_impl_k_sem_count_get(sem);
}
#include <syscalls/k_sem_count_get_mrsh.c>
#endif