timer.c 6.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293
  1. /*
  2. * Copyright (c) 1997-2016 Wind River Systems, Inc.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <kernel.h>
  7. #include <init.h>
  8. #include <ksched.h>
  9. #include <wait_q.h>
  10. #include <syscall_handler.h>
  11. #include <stdbool.h>
  12. #include <spinlock.h>
  13. static struct k_spinlock lock;
  14. /**
  15. * @brief Handle expiration of a kernel timer object.
  16. *
  17. * @param t Timeout used by the timer.
  18. *
  19. * @return N/A
  20. */
/**
 * @brief Handle expiration of a kernel timer object.
 *
 * Runs in timeout (interrupt) context.  Re-arms the timer if it is
 * periodic, increments the expiration count, invokes the user expiry
 * callback, and wakes at most one thread blocked in
 * k_timer_status_sync().
 *
 * @param t Timeout used by the timer.
 *
 * @return N/A
 */
void z_timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
	    !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
			      timer->period);
	}

	/* update timer's status: counts expirations since last read/reset */
	timer->status += 1U;

	/* invoke timer expiry function
	 * NOTE(review): the callback runs with 'lock' held and in timeout
	 * context — the callback must not block or re-acquire 'lock'.
	 */
	if (timer->expiry_fn != NULL) {
		timer->expiry_fn(timer);
	}

	/* without multithreading there is no wait queue to service */
	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_spin_unlock(&lock, key);
		return;
	}

	thread = z_waitq_head(&timer->wait_q);

	if (thread == NULL) {
		k_spin_unlock(&lock, key);
		return;
	}

	z_unpend_thread_no_timeout(thread);

	/* the woken thread's pend operation reports success (0) */
	arch_thread_return_value_set(thread, 0);

	k_spin_unlock(&lock, key);

	/* ready the thread outside the lock before rescheduling */
	z_ready_thread(thread);
}
  55. void k_timer_init(struct k_timer *timer,
  56. k_timer_expiry_t expiry_fn,
  57. k_timer_stop_t stop_fn)
  58. {
  59. timer->expiry_fn = expiry_fn;
  60. timer->stop_fn = stop_fn;
  61. timer->status = 0U;
  62. if (IS_ENABLED(CONFIG_MULTITHREADING)) {
  63. z_waitq_init(&timer->wait_q);
  64. }
  65. z_init_timeout(&timer->timeout);
  66. SYS_PORT_TRACING_OBJ_INIT(k_timer, timer);
  67. timer->user_data = NULL;
  68. z_object_init(timer);
  69. }
/**
 * @brief Start a timer.
 *
 * Any previously pending expiration is aborted and the expiration
 * count is reset to zero.
 *
 * @param timer    Timer to start.
 * @param duration Time until the first expiration; K_FOREVER means
 *                 the timer is never started.
 * @param period   Interval between subsequent expirations; K_NO_WAIT
 *                 or K_FOREVER makes the timer one-shot.
 */
void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			  k_timeout_t period)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, start, timer);

	if (K_TIMEOUT_EQ(duration, K_FOREVER)) {
		return;
	}

	/* z_add_timeout() always adds one to the incoming tick count
	 * to round up to the next tick (by convention it waits for
	 * "at least as long as the specified timeout"), but the
	 * period interval is always guaranteed to be reset from
	 * within the timer ISR, so no round up is desired.  Subtract
	 * one.
	 *
	 * Note that the duration (!) value gets the same treatment
	 * for backwards compatibility.  This is unfortunate
	 * (i.e. k_timer_start() doesn't treat its initial sleep
	 * argument the same way k_sleep() does), but historical.  The
	 * timer_api test relies on this behavior.
	 *
	 * The Z_TICK_ABS() < 0 guard skips the adjustment for
	 * absolute-tick timeouts, which need no rounding correction.
	 */
	if (!K_TIMEOUT_EQ(period, K_FOREVER) && period.ticks != 0 &&
	    Z_TICK_ABS(period.ticks) < 0) {
		/* MAX(..., 1) keeps a 1-tick period from collapsing to 0 */
		period.ticks = MAX(period.ticks - 1, 1);
	}
	if (Z_TICK_ABS(duration.ticks) < 0) {
		duration.ticks = MAX(duration.ticks - 1, 0);
	}

	/* restarting an active timer first cancels the pending timeout */
	(void)z_abort_timeout(&timer->timeout);
	timer->period = period;
	timer->status = 0U;

	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
		      duration);
}
  103. #ifdef CONFIG_USERSPACE
/* Syscall verification shim: validate the caller's access to the timer
 * object, then defer to the kernel-mode implementation.
 */
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
					k_timeout_t duration,
					k_timeout_t period)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_start(timer, duration, period);
}
  111. #include <syscalls/k_timer_start_mrsh.c>
  112. #endif
/**
 * @brief Stop a timer.
 *
 * Aborts the pending expiration (if any), invokes the user stop
 * callback, and wakes one thread blocked in k_timer_status_sync().
 * Stopping an inactive timer is a no-op: the stop callback is NOT run.
 *
 * @param timer Timer to stop.
 */
void z_impl_k_timer_stop(struct k_timer *timer)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, stop, timer);

	/* non-zero abort result means no timeout was pending, i.e. the
	 * timer was already stopped or never started
	 */
	int inactive = z_abort_timeout(&timer->timeout) != 0;

	if (inactive) {
		return;
	}

	if (timer->stop_fn != NULL) {
		timer->stop_fn(timer);
	}

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		/* wake at most one waiting thread */
		struct k_thread *pending_thread = z_unpend1_no_timeout(&timer->wait_q);

		if (pending_thread != NULL) {
			z_ready_thread(pending_thread);
			z_reschedule_unlocked();
		}
	}
	/* NOTE(review): unlike the other entry points in this file, this
	 * function does not take 'lock'; it appears to rely on
	 * z_abort_timeout()'s internal locking — confirm behavior against a
	 * concurrent k_timer_start() or expiry on another CPU.
	 */
}
  131. #ifdef CONFIG_USERSPACE
/* Syscall verification shim: validate object access, then defer to impl. */
static inline void z_vrfy_k_timer_stop(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_stop(timer);
}
  137. #include <syscalls/k_timer_stop_mrsh.c>
  138. #endif
  139. uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
  140. {
  141. k_spinlock_key_t key = k_spin_lock(&lock);
  142. uint32_t result = timer->status;
  143. timer->status = 0U;
  144. k_spin_unlock(&lock, key);
  145. return result;
  146. }
  147. #ifdef CONFIG_USERSPACE
/* Syscall verification shim: validate object access, then defer to impl. */
static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_get(timer);
}
  153. #include <syscalls/k_timer_status_get_mrsh.c>
  154. #endif
/**
 * @brief Synchronize with a timer's expiration.
 *
 * Blocks until the timer has expired at least once or has been
 * stopped, then returns (and resets) the accumulated expiration count.
 * Returns immediately if the count is already non-zero or the timer is
 * inactive.  Must not be called from an ISR.
 *
 * @param timer Timer to synchronize on.
 * @return Number of expirations since the count was last reset
 *         (0 if the timer was stopped before expiring).
 */
uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!arch_is_in_isr(), "");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, status_sync, timer);

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		/* no wait queue without multithreading: poll the status
		 * word until the timer expires or goes inactive
		 */
		uint32_t result;

		do {
			k_spinlock_key_t key = k_spin_lock(&lock);

			if (!z_is_inactive_timeout(&timer->timeout)) {
				/* volatile read: status is written from the
				 * timer expiration handler while we poll
				 */
				result = *(volatile uint32_t *)&timer->status;
				timer->status = 0U;
				k_spin_unlock(&lock, key);
				if (result > 0) {
					break;
				}
			} else {
				/* timer inactive: return the accumulated
				 * count without waiting
				 */
				result = timer->status;
				k_spin_unlock(&lock, key);
				break;
			}
		} while (true);

		return result;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	if (result == 0U) {
		if (!z_is_inactive_timeout(&timer->timeout)) {
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_timer, status_sync, timer, K_FOREVER);

			/* wait for timer to expire or stop;
			 * z_pend_curr() releases the lock ('key' is consumed)
			 */
			(void)z_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);

			/* get updated timer status: re-acquire the lock
			 * dropped by z_pend_curr()
			 */
			key = k_spin_lock(&lock);
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	/**
	 * @note	New tracing hook
	 */
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, status_sync, timer, result);

	return result;
}
  202. #ifdef CONFIG_USERSPACE
/* Syscall verification shim: validate object access, then defer to impl. */
static inline uint32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_sync(timer);
}
  208. #include <syscalls/k_timer_status_sync_mrsh.c>
/* Syscall verification shim: validate object access, then defer to impl. */
static inline k_ticks_t z_vrfy_k_timer_remaining_ticks(
						const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_remaining_ticks(timer);
}
  215. #include <syscalls/k_timer_remaining_ticks_mrsh.c>
/* Syscall verification shim: validate object access, then defer to impl. */
static inline k_ticks_t z_vrfy_k_timer_expires_ticks(
						const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_expires_ticks(timer);
}
  222. #include <syscalls/k_timer_expires_ticks_mrsh.c>
/* Syscall verification shim: validate object access, then defer to impl. */
static inline void *z_vrfy_k_timer_user_data_get(const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_user_data_get(timer);
}
  228. #include <syscalls/k_timer_user_data_get_mrsh.c>
/* Syscall verification shim: validate object access, then defer to impl. */
static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
						void *user_data)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_user_data_set(timer, user_data);
}
  235. #include <syscalls/k_timer_user_data_set_mrsh.c>
  236. #endif