work.c

/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * Second generation work queue implementation
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <wait_q.h>
#include <spinlock.h>
#include <errno.h>
#include <ksched.h>
#include <sys/printk.h>

static inline void flag_clear(uint32_t *flagp,
                              uint32_t bit)
{
        *flagp &= ~BIT(bit);
}

static inline void flag_set(uint32_t *flagp,
                            uint32_t bit)
{
        *flagp |= BIT(bit);
}

static inline bool flag_test(const uint32_t *flagp,
                             uint32_t bit)
{
        return (*flagp & BIT(bit)) != 0U;
}

static inline bool flag_test_and_clear(uint32_t *flagp,
                                       int bit)
{
        bool ret = flag_test(flagp, bit);

        flag_clear(flagp, bit);

        return ret;
}

static inline void flags_set(uint32_t *flagp,
                             uint32_t flags)
{
        *flagp = flags;
}

static inline uint32_t flags_get(const uint32_t *flagp)
{
        return *flagp;
}

/* Lock to protect the internal state of all work items, work queues,
 * and pending_cancels.
 */
static struct k_spinlock lock;

/* Invoked by work thread */
static void handle_flush(struct k_work *work)
{
        struct z_work_flusher *flusher
                = CONTAINER_OF(work, struct z_work_flusher, work);

        k_sem_give(&flusher->sem);
}

static inline void init_flusher(struct z_work_flusher *flusher)
{
        k_sem_init(&flusher->sem, 0, 1);
        k_work_init(&flusher->work, handle_flush);
}

/* List of pending cancellations. */
static sys_slist_t pending_cancels;

/* Initialize a canceler record and add it to the list of pending
 * cancels.
 *
 * Invoked with work lock held.
 *
 * @param canceler the structure used to notify a waiting process.
 * @param work the work structure that is to be canceled
 */
static inline void init_work_cancel(struct z_work_canceller *canceler,
                                    struct k_work *work)
{
        k_sem_init(&canceler->sem, 0, 1);
        canceler->work = work;
        sys_slist_append(&pending_cancels, &canceler->node);
}

/* Complete cancellation of a work item.
 *
 * Invoked with work lock held.
 *
 * Invoked from a work queue thread.
 *
 * Reschedules.
 *
 * @param work the work structure that has completed cancellation
 */
static void finalize_cancel_locked(struct k_work *work)
{
        struct z_work_canceller *wc, *tmp;
        sys_snode_t *prev = NULL;

        /* Clear this first, so released high-priority threads don't
         * see it when doing things.
         */
        flag_clear(&work->flags, K_WORK_CANCELING_BIT);

        /* Search for and remove the matching container, and release
         * what's waiting for the completion.  The same work item can
         * appear multiple times in the list if multiple threads
         * attempt to cancel it.
         */
        SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
                if (wc->work == work) {
                        sys_slist_remove(&pending_cancels, prev, &wc->node);
                        k_sem_give(&wc->sem);
                } else {
                        prev = &wc->node;
                }
        }
}

void k_work_init(struct k_work *work,
                 k_work_handler_t handler)
{
        __ASSERT_NO_MSG(work != NULL);
        __ASSERT_NO_MSG(handler != NULL);

        *work = (struct k_work)Z_WORK_INITIALIZER(handler);

        SYS_PORT_TRACING_OBJ_INIT(k_work, work);
}

static inline int work_busy_get_locked(const struct k_work *work)
{
        return flags_get(&work->flags) & K_WORK_MASK;
}

int k_work_busy_get(const struct k_work *work)
{
        k_spinlock_key_t key = k_spin_lock(&lock);
        int ret = work_busy_get_locked(work);

        k_spin_unlock(&lock, key);

        return ret;
}
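
/* Illustrative usage (sketch, not part of the original sources): the value
 * returned by k_work_busy_get() is a mask built from the K_WORK_* state
 * flags used throughout this file, so a caller can test individual states.
 * my_work below is a hypothetical caller-owned struct k_work.
 *
 *      int busy = k_work_busy_get(&my_work);
 *
 *      if (busy & K_WORK_RUNNING) {
 *              // the handler is currently executing on its queue
 *      }
 *      if (busy == 0) {
 *              // idle: not queued, running, delayed, or canceling
 *      }
 */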

/* Add a flusher work item to the queue.
 *
 * Invoked with work lock held.
 *
 * Caller must notify queue of pending work.
 *
 * @param queue queue on which a work item may appear.
 * @param work the work item that is either queued or running on @p
 * queue
 * @param flusher an uninitialized/unused flusher object
 */
static void queue_flusher_locked(struct k_work_q *queue,
                                 struct k_work *work,
                                 struct z_work_flusher *flusher)
{
        bool in_list = false;
        struct k_work *wn;

        /* Determine whether the work item is still queued. */
        SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
                if (wn == work) {
                        in_list = true;
                        break;
                }
        }

        init_flusher(flusher);
        if (in_list) {
                sys_slist_insert(&queue->pending, &work->node,
                                 &flusher->work.node);
        } else {
                sys_slist_prepend(&queue->pending, &flusher->work.node);
        }
}

/* Try to remove a work item from the given queue.
 *
 * Invoked with work lock held.
 *
 * @param queue the queue from which the work should be removed
 * @param work work that may be on the queue
 */
static inline void queue_remove_locked(struct k_work_q *queue,
                                       struct k_work *work)
{
        if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
                (void)sys_slist_find_and_remove(&queue->pending, &work->node);
        }
}

/* Potentially notify a queue that it needs to look for pending work.
 *
 * This may make the work queue thread ready, but as the lock is held it
 * will not be a reschedule point.  Callers should yield after the lock is
 * released where appropriate (generally if this returns true).
 *
 * @param queue to be notified.  If this is null no notification is required.
 *
 * @return true if and only if the queue was notified and woken, i.e. a
 * reschedule is pending.
 */
static inline bool notify_queue_locked(struct k_work_q *queue)
{
        bool rv = false;

        if (queue != NULL) {
                rv = z_sched_wake(&queue->notifyq, 0, NULL);
        }

        return rv;
}

/* Submit a work item to a queue if queue state allows new work.
 *
 * Submission is rejected if no queue is provided, or if the queue is
 * draining and the work isn't being submitted from the queue's
 * thread (chained submission).
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param queue the queue to which work should be submitted.  This may
 * be null, in which case the submission will fail.
 *
 * @param work to be submitted
 *
 * @retval 1 if successfully queued
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 * @retval -EBUSY if the submission was rejected (draining, plugged)
 */
static inline int queue_submit_locked(struct k_work_q *queue,
                                      struct k_work *work)
{
        if (queue == NULL) {
                return -EINVAL;
        }

        int ret = -EBUSY;
        bool chained = (_current == &queue->thread) && !k_is_in_isr();
        bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
        bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

        /* Test for acceptability, in priority order:
         *
         * * -ENODEV if the queue isn't running.
         * * -EBUSY if draining and not chained
         * * -EBUSY if plugged and not draining
         * * otherwise OK
         */
        if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
                ret = -ENODEV;
        } else if (draining && !chained) {
                ret = -EBUSY;
        } else if (plugged && !draining) {
                ret = -EBUSY;
        } else {
                sys_slist_append(&queue->pending, &work->node);
                ret = 1;
                (void)notify_queue_locked(queue);
        }

        return ret;
}

/* Attempt to submit work to a queue.
 *
 * The submission can fail if:
 * * the work is cancelling,
 * * no candidate queue can be identified, or
 * * the candidate queue rejects the submission.
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param work the work structure to be submitted
 * @param queuep pointer to a queue reference.  On input this should
 * dereference to the proposed queue (which may be null); after completion it
 * will be null if the work was not submitted or if submitted will reference
 * the queue it was submitted to.  That may or may not be the queue provided
 * on input.
 *
 * @retval 0 if work was already submitted to a queue
 * @retval 1 if work was not submitted and has been queued to @p queue
 * @retval 2 if work was running and has been queued to the queue that was
 * running it
 * @retval -EBUSY if canceling or submission was rejected by queue
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 */
static int submit_to_queue_locked(struct k_work *work,
                                  struct k_work_q **queuep)
{
        int ret = 0;

        if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
                /* Disallowed */
                ret = -EBUSY;
        } else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
                /* Not currently queued */
                ret = 1;

                /* If no queue specified resubmit to last queue. */
                if (*queuep == NULL) {
                        *queuep = work->queue;
                }

                /* If the work is currently running we have to use the
                 * queue it's running on to prevent handler
                 * re-entrancy.
                 */
                if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
                        __ASSERT_NO_MSG(work->queue != NULL);
                        *queuep = work->queue;
                        ret = 2;
                }

                int rc = queue_submit_locked(*queuep, work);

                if (rc < 0) {
                        ret = rc;
                } else {
                        flag_set(&work->flags, K_WORK_QUEUED_BIT);
                        work->queue = *queuep;
                }
        } else {
                /* Already queued, do nothing. */
        }

        if (ret <= 0) {
                *queuep = NULL;
        }

        return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
                           struct k_work *work)
{
        __ASSERT_NO_MSG(work != NULL);

        k_spinlock_key_t key = k_spin_lock(&lock);

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

        int ret = submit_to_queue_locked(work, &queue);

        k_spin_unlock(&lock, key);

        /* If we changed the queue contents (as indicated by a positive ret)
         * the queue thread may now be ready, but we missed the reschedule
         * point because the lock was held.  If this is being invoked by a
         * preemptible thread then yield.
         */
        if ((ret > 0) && (k_is_preempt_thread() != 0)) {
                k_yield();
        }

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);

        return ret;
}

int k_work_submit(struct k_work *work)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);

        int ret = k_work_submit_to_queue(&k_sys_work_q, work);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);

        return ret;
}
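
/* Example (sketch, not part of the original sources): a typical caller
 * embeds a struct k_work in its own context structure, initializes it once,
 * and submits it to the system work queue; the handler recovers the context
 * with CONTAINER_OF.  The names my_driver/my_handler are hypothetical.
 *
 *      struct my_driver {
 *              struct k_work work;
 *              int event_count;
 *      };
 *
 *      static void my_handler(struct k_work *work)
 *      {
 *              struct my_driver *drv =
 *                      CONTAINER_OF(work, struct my_driver, work);
 *
 *              drv->event_count++;
 *      }
 *
 *      // at init time:         k_work_init(&drv->work, my_handler);
 *      // from thread or ISR:   (void)k_work_submit(&drv->work);
 */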

/* Flush the work item if necessary.
 *
 * Flushing is necessary only if the work is either queued or running.
 *
 * Invoked with work lock held by key.
 * Sleeps.
 *
 * @param work the work item that is to be flushed
 * @param flusher state used to synchronize the flush
 *
 * @retval true if work is queued or running.  If this happens the
 * caller must take the flusher semaphore after releasing the lock.
 *
 * @retval false otherwise.  No wait required.
 */
static bool work_flush_locked(struct k_work *work,
                              struct z_work_flusher *flusher)
{
        bool need_flush = (flags_get(&work->flags)
                           & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;

        if (need_flush) {
                struct k_work_q *queue = work->queue;

                __ASSERT_NO_MSG(queue != NULL);

                queue_flusher_locked(queue, work, flusher);
                notify_queue_locked(queue);
        }

        return need_flush;
}

bool k_work_flush(struct k_work *work,
                  struct k_work_sync *sync)
{
        __ASSERT_NO_MSG(work != NULL);
        __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
        __ASSERT_NO_MSG(!k_is_in_isr());
        __ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);

        struct z_work_flusher *flusher = &sync->flusher;
        k_spinlock_key_t key = k_spin_lock(&lock);

        bool need_flush = work_flush_locked(work, flusher);

        k_spin_unlock(&lock, key);

        /* If necessary wait until the flusher item completes */
        if (need_flush) {
                SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);

                k_sem_take(&flusher->sem, K_FOREVER);
        }

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);

        return need_flush;
}
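
/* Example (sketch, not part of the original sources): a caller that must be
 * sure a previously submitted item has finished before releasing resources
 * it uses can flush it.  my_work and my_sync are hypothetical caller-owned
 * objects; struct k_work_sync provides the flusher storage used above.
 *
 *      struct k_work_sync my_sync;
 *
 *      (void)k_work_submit(&my_work);
 *      // ... later, before tearing down state the handler touches:
 *      if (k_work_flush(&my_work, &my_sync)) {
 *              // we actually waited for the handler to complete
 *      }
 */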

/* Execute the non-waiting steps necessary to cancel a work item.
 *
 * Invoked with work lock held.
 *
 * @param work the work item to be canceled.
 *
 * @retval true if we need to wait for the work item to finish canceling
 * @retval false if the work item is idle
 *
 * @return the k_work_busy_get() state captured under lock
 */
static int cancel_async_locked(struct k_work *work)
{
        /* If we haven't already started canceling, do it now. */
        if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
                /* Remove it from the queue, if it's queued. */
                queue_remove_locked(work->queue, work);
        }

        /* If it's still busy after it's been dequeued, then flag it
         * as canceling.
         */
        int ret = work_busy_get_locked(work);

        if (ret != 0) {
                flag_set(&work->flags, K_WORK_CANCELING_BIT);
                ret = work_busy_get_locked(work);
        }

        return ret;
}

/* Determine whether the caller must wait for cancellation of an in-progress
 * work item to complete, and if so register the canceller that will be
 * signaled when cancellation finishes.
 *
 * Invoked with work lock held.
 *
 * @param work work that is being canceled
 * @param canceller state used to synchronize the cancellation
 *
 * @retval true if and only if the work was still active on entry.  The caller
 * must wait on the canceller semaphore after releasing the lock.
 *
 * @retval false if work was idle on entry.  The caller need not wait.
 */
static bool cancel_sync_locked(struct k_work *work,
                               struct z_work_canceller *canceller)
{
        bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);

        /* If something's still running then we have to wait for
         * completion, which is indicated when finalize_cancel_locked()
         * gets invoked.
         */
        if (ret) {
                init_work_cancel(canceller, work);
        }

        return ret;
}

int k_work_cancel(struct k_work *work)
{
        __ASSERT_NO_MSG(work != NULL);
        __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);

        k_spinlock_key_t key = k_spin_lock(&lock);
        int ret = cancel_async_locked(work);

        k_spin_unlock(&lock, key);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);

        return ret;
}

bool k_work_cancel_sync(struct k_work *work,
                        struct k_work_sync *sync)
{
        __ASSERT_NO_MSG(work != NULL);
        __ASSERT_NO_MSG(sync != NULL);
        __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
        __ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);

        struct z_work_canceller *canceller = &sync->canceller;
        k_spinlock_key_t key = k_spin_lock(&lock);
        bool pending = (work_busy_get_locked(work) != 0U);
        bool need_wait = false;

        if (pending) {
                (void)cancel_async_locked(work);
                need_wait = cancel_sync_locked(work, canceller);
        }

        k_spin_unlock(&lock, key);

        if (need_wait) {
                SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);

                k_sem_take(&canceller->sem, K_FOREVER);
        }

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);

        return pending;
}
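
/* Example (sketch, not part of the original sources): the difference between
 * the two cancel APIs is whether the caller waits.  k_work_cancel() only
 * starts cancellation and reports the remaining busy state, while
 * k_work_cancel_sync() blocks until any in-flight handler has finished.
 * my_work and my_sync are hypothetical caller-owned objects.
 *
 *      struct k_work_sync my_sync;
 *
 *      if (k_work_cancel(&my_work) != 0) {
 *              // still queued, running, or being canceled somewhere
 *      }
 *
 *      // safe teardown path (not callable from ISRs):
 *      (void)k_work_cancel_sync(&my_work, &my_sync);
 *      // the handler is guaranteed not to be running here
 */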

/* Loop executed by a work queue thread.
 *
 * @param workq_ptr pointer to the work queue structure
 */
static void work_queue_main(void *workq_ptr, void *p2, void *p3)
{
        struct k_work_q *queue = (struct k_work_q *)workq_ptr;

        while (true) {
                sys_snode_t *node;
                struct k_work *work = NULL;
                k_work_handler_t handler = NULL;
                k_spinlock_key_t key = k_spin_lock(&lock);

                /* Check for and prepare any new work. */
                node = sys_slist_get(&queue->pending);
                if (node != NULL) {
                        /* Mark that there's some work active that's
                         * not on the pending list.
                         */
                        flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
                        work = CONTAINER_OF(node, struct k_work, node);
                        flag_set(&work->flags, K_WORK_RUNNING_BIT);
                        flag_clear(&work->flags, K_WORK_QUEUED_BIT);

                        /* Static code analysis tools can raise a false-positive
                         * violation on the line below, claiming that 'work' is
                         * checked for null after being dereferenced.
                         *
                         * 'work' is derived from 'node' by CONTAINER_OF, as the
                         * enclosing struct k_work that contains the node.  The
                         * only way for it to be NULL is if the node were a member
                         * of a struct k_work object placed at address NULL, which
                         * should never happen; the later 'if (work != NULL)' check
                         * also ensures that.  So if node is not NULL, work is not
                         * NULL either.
                         */
                        handler = work->handler;
                } else if (flag_test_and_clear(&queue->flags,
                                               K_WORK_QUEUE_DRAIN_BIT)) {
                        /* Not busy and draining: move threads waiting for
                         * drain to ready state.  The held spinlock inhibits
                         * immediate reschedule; released threads get their
                         * chance when this invokes z_sched_wait() below.
                         *
                         * We don't touch K_WORK_QUEUE_PLUGGED, so getting
                         * here doesn't mean that the queue will allow new
                         * submissions.
                         */
                        (void)z_sched_wake_all(&queue->drainq, 1, NULL);
                } else {
                        /* No work is available and no queue state requires
                         * special handling.
                         */
                        ;
                }

                if (work == NULL) {
                        /* Nothing's had a chance to add work since we took
                         * the lock, and we didn't find work nor got asked to
                         * stop.  Just go to sleep: when something happens the
                         * work thread will be woken and we can check again.
                         */
                        (void)z_sched_wait(&lock, key, &queue->notifyq,
                                           K_FOREVER, NULL);
                        continue;
                }

                k_spin_unlock(&lock, key);

                if (work != NULL) {
                        bool yield;

                        __ASSERT_NO_MSG(handler != NULL);
                        handler(work);

                        /* Mark the work item as no longer running and deal
                         * with any cancellation issued while it was running.
                         * Clear the BUSY flag and optionally yield to prevent
                         * starving other threads.
                         */
                        key = k_spin_lock(&lock);

                        flag_clear(&work->flags, K_WORK_RUNNING_BIT);
                        if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
                                finalize_cancel_locked(work);
                        }

                        flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
                        yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
                        k_spin_unlock(&lock, key);

                        /* Optionally yield to prevent the work queue from
                         * starving other threads.
                         */
                        if (yield) {
                                k_yield();
                        }
                }
        }
}

void k_work_queue_init(struct k_work_q *queue)
{
        __ASSERT_NO_MSG(queue != NULL);

        *queue = (struct k_work_q) {
                .flags = 0,
        };

        SYS_PORT_TRACING_OBJ_INIT(k_work_queue, queue);
}

void k_work_queue_start(struct k_work_q *queue,
                        k_thread_stack_t *stack,
                        size_t stack_size,
                        int prio,
                        const struct k_work_queue_config *cfg)
{
        __ASSERT_NO_MSG(queue);
        __ASSERT_NO_MSG(stack);
        __ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));
        uint32_t flags = K_WORK_QUEUE_STARTED;

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);

        sys_slist_init(&queue->pending);
        z_waitq_init(&queue->notifyq);
        z_waitq_init(&queue->drainq);

        if ((cfg != NULL) && cfg->no_yield) {
                flags |= K_WORK_QUEUE_NO_YIELD;
        }

        /* It hasn't actually been started yet, but all the state is in place
         * so we can submit things and once the thread gets control it's ready
         * to roll.
         */
        flags_set(&queue->flags, flags);

        (void)k_thread_create(&queue->thread, stack, stack_size,
                              work_queue_main, queue, NULL, NULL,
                              prio, 0, K_FOREVER);

        if ((cfg != NULL) && (cfg->name != NULL)) {
                k_thread_name_set(&queue->thread, cfg->name);
        }

        k_thread_start(&queue->thread);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
}
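
/* Example (sketch, not part of the original sources): starting a dedicated
 * work queue.  The stack object, size, priority, and name below are
 * hypothetical application choices; K_THREAD_STACK_DEFINE and
 * K_THREAD_STACK_SIZEOF are the usual Zephyr helpers for the stack argument.
 *
 *      K_THREAD_STACK_DEFINE(my_stack, 1024);
 *      static struct k_work_q my_queue;
 *
 *      static const struct k_work_queue_config my_cfg = {
 *              .name = "my_wq",
 *              .no_yield = false,
 *      };
 *
 *      k_work_queue_init(&my_queue);
 *      k_work_queue_start(&my_queue, my_stack,
 *                         K_THREAD_STACK_SIZEOF(my_stack),
 *                         5, &my_cfg);
 *
 *      // items can then target it explicitly:
 *      (void)k_work_submit_to_queue(&my_queue, &my_work);
 */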

int k_work_queue_drain(struct k_work_q *queue,
                       bool plug)
{
        __ASSERT_NO_MSG(queue);
        __ASSERT_NO_MSG(!k_is_in_isr());

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);

        int ret = 0;
        k_spinlock_key_t key = k_spin_lock(&lock);

        if (((flags_get(&queue->flags)
              & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
            || plug
            || !sys_slist_is_empty(&queue->pending)) {
                flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
                if (plug) {
                        flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
                }

                notify_queue_locked(queue);
                ret = z_sched_wait(&lock, key, &queue->drainq,
                                   K_FOREVER, NULL);
        } else {
                k_spin_unlock(&lock, key);
        }

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);

        return ret;
}

int k_work_queue_unplug(struct k_work_q *queue)
{
        __ASSERT_NO_MSG(queue);

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);

        int ret = -EALREADY;
        k_spinlock_key_t key = k_spin_lock(&lock);

        if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
                ret = 0;
        }

        k_spin_unlock(&lock, key);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);

        return ret;
}
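
/* Example (sketch, not part of the original sources): draining with
 * plug=true lets an application quiesce a queue, e.g. before a controlled
 * shutdown, and then re-open it later.  my_queue is the hypothetical queue
 * from the previous example.
 *
 *      // block until everything already submitted has run, rejecting
 *      // new submissions from other threads while doing so:
 *      (void)k_work_queue_drain(&my_queue, true);
 *
 *      // ... queue is now idle and plugged ...
 *
 *      // accept submissions again:
 *      (void)k_work_queue_unplug(&my_queue);
 */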

#ifdef CONFIG_SYS_CLOCK_EXISTS

/* Timeout handler for delayable work.
 *
 * Invoked by timeout infrastructure.
 * Takes and releases work lock.
 * Conditionally reschedules.
 */
static void work_timeout(struct _timeout *to)
{
        struct k_work_delayable *dw
                = CONTAINER_OF(to, struct k_work_delayable, timeout);
        struct k_work *wp = &dw->work;
        k_spinlock_key_t key = k_spin_lock(&lock);
        struct k_work_q *queue = NULL;

        /* If the work is still marked delayed (should be) then clear that
         * state and submit it to the queue.  If successful the queue will be
         * notified of new work at the next reschedule point.
         *
         * If not successful there is no notification that the work has been
         * abandoned.  Sorry.
         */
        if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
                queue = dw->queue;
                (void)submit_to_queue_locked(wp, &queue);
        }

        k_spin_unlock(&lock, key);
}

void k_work_init_delayable(struct k_work_delayable *dwork,
                           k_work_handler_t handler)
{
        __ASSERT_NO_MSG(dwork != NULL);
        __ASSERT_NO_MSG(handler != NULL);

        *dwork = (struct k_work_delayable){
                .work = {
                        .handler = handler,
                        .flags = K_WORK_DELAYABLE,
                },
        };
        z_init_timeout(&dwork->timeout);

        SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
}

static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
{
        /* Read via the same helper used for non-delayable items; the caller
         * holds the work lock, so a plain flags read is sufficient here.
         */
        return flags_get(&dwork->work.flags) & K_WORK_MASK;
}

int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
{
        k_spinlock_key_t key = k_spin_lock(&lock);
        int ret = work_delayable_busy_get_locked(dwork);

        k_spin_unlock(&lock, key);

        return ret;
}

/* Attempt to schedule a work item for future (maybe immediate)
 * submission.
 *
 * Invoked with work lock held.
 *
 * See also submit_to_queue_locked(), which implements this for a no-wait
 * delay.
 *
 * @param queuep pointer to a pointer to a queue.  On input this
 * should dereference to the proposed queue (which may be null); after
 * completion it will be null if the work was not submitted or if
 * submitted will reference the queue it was submitted to.  That may
 * or may not be the queue provided on input.
 *
 * @param dwork the delayed work structure
 *
 * @param delay the delay to use before scheduling.
 *
 * @retval from submit_to_queue_locked() if delay is K_NO_WAIT; otherwise
 * @retval 1 to indicate successfully scheduled.
 */
static int schedule_for_queue_locked(struct k_work_q **queuep,
                                     struct k_work_delayable *dwork,
                                     k_timeout_t delay)
{
        int ret = 1;
        struct k_work *work = &dwork->work;

        if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
                return submit_to_queue_locked(work, queuep);
        }

        flag_set(&work->flags, K_WORK_DELAYED_BIT);
        dwork->queue = *queuep;

        /* Add timeout */
        z_add_timeout(&dwork->timeout, work_timeout, delay);

        return ret;
}

/* Unschedule delayable work.
 *
 * If the work is delayed, cancel the timeout and clear the delayed
 * flag.
 *
 * Invoked with work lock held.
 *
 * @param dwork pointer to delayable work structure.
 *
 * @return true if and only if work had been delayed so the timeout
 * was cancelled.
 */
static inline bool unschedule_locked(struct k_work_delayable *dwork)
{
        bool ret = false;
        struct k_work *work = &dwork->work;

        /* If scheduled, try to cancel. */
        if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
                z_abort_timeout(&dwork->timeout);
                ret = true;
        }

        return ret;
}

/* Full cancellation of a delayable work item.
 *
 * Unschedules the delayed part then delegates to standard work
 * cancellation.
 *
 * Invoked with work lock held.
 *
 * @param dwork delayable work item
 *
 * @return k_work_busy_get() flags
 */
static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
{
        (void)unschedule_locked(dwork);

        return cancel_async_locked(&dwork->work);
}

int k_work_schedule_for_queue(struct k_work_q *queue,
                              struct k_work_delayable *dwork,
                              k_timeout_t delay)
{
        __ASSERT_NO_MSG(dwork != NULL);

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);

        struct k_work *work = &dwork->work;
        int ret = 0;
        k_spinlock_key_t key = k_spin_lock(&lock);

        /* Schedule the work item if it's idle or running. */
        if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
                ret = schedule_for_queue_locked(&queue, dwork, delay);
        }

        k_spin_unlock(&lock, key);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);

        return ret;
}

int k_work_schedule(struct k_work_delayable *dwork,
                    k_timeout_t delay)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);

        int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);

        return ret;
}
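
/* Example (sketch, not part of the original sources): scheduling delayable
 * work.  k_work_schedule() leaves an existing countdown alone (it only acts
 * on idle or running items), while k_work_reschedule() below always restarts
 * the countdown with the new delay.  my_dwork/my_handler are hypothetical.
 *
 *      static struct k_work_delayable my_dwork;
 *
 *      k_work_init_delayable(&my_dwork, my_handler);
 *
 *      // run my_handler roughly 100 ms from now on the system queue:
 *      (void)k_work_schedule(&my_dwork, K_MSEC(100));
 *
 *      // debounce-style usage: push the deadline out on every event:
 *      (void)k_work_reschedule(&my_dwork, K_MSEC(100));
 */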

int k_work_reschedule_for_queue(struct k_work_q *queue,
                                struct k_work_delayable *dwork,
                                k_timeout_t delay)
{
        __ASSERT_NO_MSG(dwork != NULL);

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);

        int ret = 0;
        k_spinlock_key_t key = k_spin_lock(&lock);

        /* Remove any active scheduling. */
        (void)unschedule_locked(dwork);

        /* Schedule the work item with the new parameters. */
        ret = schedule_for_queue_locked(&queue, dwork, delay);

        k_spin_unlock(&lock, key);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);

        return ret;
}

int k_work_reschedule(struct k_work_delayable *dwork,
                      k_timeout_t delay)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);

        int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);

        return ret;
}

int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
        __ASSERT_NO_MSG(dwork != NULL);

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);

        k_spinlock_key_t key = k_spin_lock(&lock);
        int ret = cancel_delayable_async_locked(dwork);

        k_spin_unlock(&lock, key);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);

        return ret;
}

bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
                                  struct k_work_sync *sync)
{
        __ASSERT_NO_MSG(dwork != NULL);
        __ASSERT_NO_MSG(sync != NULL);
        __ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);

        struct z_work_canceller *canceller = &sync->canceller;
        k_spinlock_key_t key = k_spin_lock(&lock);
        bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
        bool need_wait = false;

        if (pending) {
                (void)cancel_delayable_async_locked(dwork);
                need_wait = cancel_sync_locked(&dwork->work, canceller);
        }

        k_spin_unlock(&lock, key);

        if (need_wait) {
                k_sem_take(&canceller->sem, K_FOREVER);
        }

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);

        return pending;
}
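
/* Example (sketch, not part of the original sources): synchronous
 * cancellation of delayable work covers both the pending countdown and any
 * handler that is already running.  my_dwork and my_sync are hypothetical.
 *
 *      struct k_work_sync my_sync;
 *
 *      if (k_work_cancel_delayable_sync(&my_dwork, &my_sync)) {
 *              // the item was scheduled, queued, or running when we asked
 *      }
 *      // from here the handler will not run again until resubmitted
 */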

bool k_work_flush_delayable(struct k_work_delayable *dwork,
                            struct k_work_sync *sync)
{
        __ASSERT_NO_MSG(dwork != NULL);
        __ASSERT_NO_MSG(sync != NULL);
        __ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);

        struct k_work *work = &dwork->work;
        struct z_work_flusher *flusher = &sync->flusher;
        k_spinlock_key_t key = k_spin_lock(&lock);

        /* If it's idle release the lock and return immediately. */
        if (work_busy_get_locked(work) == 0U) {
                k_spin_unlock(&lock, key);

                SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);

                return false;
        }

        /* If unscheduling did something then submit it.  Ignore a
         * failed submission (e.g. when cancelling).
         */
        if (unschedule_locked(dwork)) {
                struct k_work_q *queue = dwork->queue;

                (void)submit_to_queue_locked(work, &queue);
        }

        /* Wait for it to finish */
        bool need_flush = work_flush_locked(work, flusher);

        k_spin_unlock(&lock, key);

        /* If necessary wait until the flusher item completes */
        if (need_flush) {
                k_sem_take(&flusher->sem, K_FOREVER);
        }

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);

        return need_flush;
}

int k_delayed_work_cancel(struct k_delayed_work *work)
{
        bool pending = k_work_delayable_is_pending(&work->work);
        int rc = k_work_cancel_delayable(&work->work);

        /* Old return value rules:
         *
         * 0 if:
         * * Work item countdown cancelled before the item was submitted to
         *   its queue; or
         * * Work item was removed from its queue before it was processed.
         *
         * -EINVAL if:
         * * Work item has never been submitted; or
         * * Work item has been successfully cancelled; or
         * * Timeout handler is in the process of submitting the work item to
         *   its queue; or
         * * Work queue thread has removed the work item from the queue but
         *   has not called its handler.
         *
         * -EALREADY if:
         * * Work queue thread has removed the work item from the queue and
         *   cleared its pending flag; or
         * * Work queue thread is invoking the item handler; or
         * * Work item handler has completed.
         *
         * We can't reconstruct those states, so call it successful only when
         * a pending item is no longer pending, -EINVAL if it was pending and
         * still is, and -EALREADY if it wasn't pending (so presumably
         * cancellation should have had no effect, assuming we didn't hit a
         * race condition).
         */
        if (pending) {
                return (rc == 0) ? 0 : -EINVAL;
        }

        return -EALREADY;
}

void k_delayed_work_init(struct k_delayed_work *work,
                         k_work_handler_t handler)
{
        k_work_init_delayable(&work->work, handler);
}

int k_delayed_work_submit(struct k_delayed_work *work,
                          k_timeout_t delay)
{
        int rc = k_work_reschedule(&work->work, delay);

        /* Legacy API doesn't distinguish success cases. */
        return (rc >= 0) ? 0 : rc;
}

int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
                                   struct k_delayed_work *work,
                                   k_timeout_t delay)
{
        int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);

        /* Legacy API doesn't distinguish success cases. */
        return (rc >= 0) ? 0 : rc;
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */