timeout.c 7.9 KB

/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>

static uint64_t curr_tick;

static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_TICKS_FOREVER : INT_MAX)
/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */
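
/* The timeout list keeps struct _timeout nodes in expiry order, with each
 * node's dticks stored relative to the node before it (a delta list).
 * first() and next() recover the containing struct _timeout from the
 * underlying dlist nodes.
 */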
static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}
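
/* Unlink a timeout, folding its remaining delta into its successor so the
 * expiry times of later entries are unchanged.
 */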
static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}
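
/* Ticks elapsed since the last announcement.  While sys_clock_announce()
 * is executing, progress is already tracked through announce_remaining,
 * so report zero.
 */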
static int32_t elapsed(void)
{
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}
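
/* Ticks until the next event the timer driver must wake us for: the head
 * timeout's remaining delta clamped to [0, MAX_WAIT], or MAX_WAIT when the
 * list is empty (and, with timeslicing, no later than the slice end).
 */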
static int32_t next_timeout(void)
{
	struct _timeout *to = first();
	int32_t ticks_elapsed = elapsed();
	int32_t ret = to == NULL ? MAX_WAIT
		: CLAMP(to->dticks - ticks_elapsed, 0, MAX_WAIT);

#ifdef CONFIG_TIMESLICING
	if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
		ret = _current_cpu->slice_ticks;
	}
#endif
	return ret;
}
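
/* Queue a timeout callback to fire after 'timeout' ticks (absolute ticks
 * are honored when CONFIG_TIMEOUT_64BIT is enabled), inserting it at the
 * right spot in the delta list and reprogramming the timer hardware if it
 * became the new head.
 */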
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif

	__ASSERT(!sys_dnode_is_linked(&to->node), "");

	to->fn = fn;

	LOCKED(&timeout_lock) {
		struct _timeout *t;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    Z_TICK_ABS(timeout.ticks) >= 0) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first()) {
#ifdef CONFIG_TIMESLICING
			/*
			 * This is not ideal, since it does not account for
			 * the time elapsed since the last announcement, and
			 * slice_ticks is based on that.  It means the time
			 * remaining until the next announcement can be less
			 * than slice_ticks.
			 */
			int32_t next_time = next_timeout();

			if (next_time == 0 ||
			    _current_cpu->slice_ticks != next_time) {
				sys_clock_set_timeout(next_time, false);
			}
#else
			sys_clock_set_timeout(next_timeout(), false);
#endif /* CONFIG_TIMESLICING */
		}
	}
}
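
/* Cancel a queued timeout; returns 0 on success or -EINVAL if it was not
 * queued.
 */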
int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	LOCKED(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}
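
/* Walk the delta list summing dticks up to and including 'timeout' to get
 * its remaining ticks relative to the last announcement.
 */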
/* must be locked */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	if (z_is_inactive_timeout(timeout)) {
		return 0;
	}

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks - elapsed();
}

k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = timeout_rem(timeout);
	}

	return ticks;
}

k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = curr_tick + timeout_rem(timeout);
	}

	return ticks;
}

int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	LOCKED(&timeout_lock) {
		ret = next_timeout();
	}

	return ret;
}
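
/* Request a timer interrupt at most 'ticks' from now; skipped when the
 * next queued timeout is about to expire, and (when not on SMP) when it
 * is already sooner than 'ticks'.
 */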
void z_set_timeout_expiry(int32_t ticks, bool is_idle)
{
	LOCKED(&timeout_lock) {
		int next_to = next_timeout();
		bool sooner = (next_to == K_TICKS_FOREVER)
			      || (ticks <= next_to);
		bool imminent = next_to <= 1;

		/* Only set new timeouts when they are sooner than
		 * what we have.  Also don't try to set a timeout when
		 * one is about to expire: drivers have internal logic
		 * that will bump the timeout to the "next" tick if
		 * it's not considered to be settable as directed.
		 * SMP can't use this optimization though: we don't
		 * know when context switches happen until interrupt
		 * exit and so can't get the timeslicing clamp folded
		 * in.
		 */
		if (!imminent && (sooner || IS_ENABLED(CONFIG_SMP))) {
			sys_clock_set_timeout(MIN(ticks, next_to), is_idle);
		}
	}
}
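
/* Timer driver entry point: advance the tick count by 'ticks', firing (with
 * the lock dropped) every timeout that has expired, then reprogram the
 * hardware for the next event.
 */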
void sys_clock_announce(int32_t ticks)
{
#ifdef CONFIG_TIMESLICING
	z_time_slice(ticks);
#endif

	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	announce_remaining = ticks;

	while (first() != NULL && first()->dticks <= announce_remaining) {
		struct _timeout *t = first();
		int dt = t->dticks;

		curr_tick += dt;
		announce_remaining -= dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
	}

	if (first() != NULL) {
		first()->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);
}
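
/* Current tick count, including ticks elapsed since the last announcement. */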
int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	LOCKED(&timeout_lock) {
		t = curr_tick + sys_clock_elapsed();
	}

	return t;
}

uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <syscalls/k_uptime_ticks_mrsh.c>
#endif
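
/* Spin for the requested number of microseconds, using the hardware cycle
 * counter unless the architecture supplies its own busy-wait.
 */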
void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
	if (usec_to_wait == 0U) {
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
		return;
	}

#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	uint32_t start_cycles = k_cycle_get_32();

	/* use 64-bit math to prevent overflow when multiplying */
	uint32_t cycles_to_wait = (uint32_t)(
		(uint64_t)usec_to_wait *
		(uint64_t)sys_clock_hw_cycles_per_sec() /
		(uint64_t)USEC_PER_SEC
	);

	for (;;) {
		uint32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
#include <syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Returns the uptime expiration (relative to an unlocked "now"!) of a
 * timeout object.  When used correctly, this should be called once,
 * synchronously with the user passing a new timeout value.  It should
 * not be used iteratively to adjust a timeout.
 */
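/*
 * Illustrative use only (not code from this file): compute the absolute
 * deadline once, then compare it against the current tick count while
 * polling, rather than re-deriving the timeout on every iteration:
 *
 *	uint64_t end = sys_clock_timeout_end_calc(K_MSEC(100));
 *
 *	while ((uint64_t)sys_clock_tick_get() < end) {
 *		... poll the condition ...
 *	}
 */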
uint64_t sys_clock_timeout_end_calc(k_timeout_t timeout)
{
	k_ticks_t dt;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		return sys_clock_tick_get();
	} else {
		dt = timeout.ticks;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
			return Z_TICK_ABS(dt);
		}
		return sys_clock_tick_get() + MAX(1, dt);
	}
}