/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * Second generation work queue implementation
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <wait_q.h>
#include <spinlock.h>
#include <errno.h>
#include <ksched.h>
#include <sys/printk.h>

static inline void flag_clear(uint32_t *flagp,
			      uint32_t bit)
{
	*flagp &= ~BIT(bit);
}

static inline void flag_set(uint32_t *flagp,
			    uint32_t bit)
{
	*flagp |= BIT(bit);
}

static inline bool flag_test(const uint32_t *flagp,
			     uint32_t bit)
{
	return (*flagp & BIT(bit)) != 0U;
}

static inline bool flag_test_and_clear(uint32_t *flagp,
				       int bit)
{
	bool ret = flag_test(flagp, bit);

	flag_clear(flagp, bit);

	return ret;
}

static inline void flags_set(uint32_t *flagp,
			     uint32_t flags)
{
	*flagp = flags;
}

static inline uint32_t flags_get(const uint32_t *flagp)
{
	return *flagp;
}

/* Lock to protect the internal state of all work items, work queues,
 * and pending_cancels.
 */
static struct k_spinlock lock;

/* Invoked by work thread */
static void handle_flush(struct k_work *work)
{
	struct z_work_flusher *flusher
		= CONTAINER_OF(work, struct z_work_flusher, work);

	k_sem_give(&flusher->sem);
}

static inline void init_flusher(struct z_work_flusher *flusher)
{
	k_sem_init(&flusher->sem, 0, 1);
	k_work_init(&flusher->work, handle_flush);
}

/* List of pending cancellations. */
static sys_slist_t pending_cancels;

/* Initialize a canceler record and add it to the list of pending
 * cancels.
 *
 * Invoked with work lock held.
 *
 * @param canceler the structure used to notify a waiting process.
 * @param work the work structure that is to be canceled
 */
static inline void init_work_cancel(struct z_work_canceller *canceler,
				    struct k_work *work)
{
	k_sem_init(&canceler->sem, 0, 1);
	canceler->work = work;
	sys_slist_append(&pending_cancels, &canceler->node);
}

/* Complete cancellation of a work item and release everything that is
 * waiting for the cancellation to finish.
 *
 * Invoked with work lock held.
 *
 * Invoked from a work queue thread.
 *
 * @param work the work structure that has completed cancellation
 */
static void finalize_cancel_locked(struct k_work *work)
{
	struct z_work_canceller *wc, *tmp;
	sys_snode_t *prev = NULL;

	/* Clear this first, so released high-priority threads don't
	 * see it when doing things.
	 */
	flag_clear(&work->flags, K_WORK_CANCELING_BIT);

	/* Search for and remove the matching container, and release
	 * what's waiting for the completion. The same work item can
	 * appear multiple times in the list if multiple threads
	 * attempt to cancel it.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
		if (wc->work == work) {
			sys_slist_remove(&pending_cancels, prev, &wc->node);
			k_sem_give(&wc->sem);
		} else {
			prev = &wc->node;
		}
	}
}

void k_work_init(struct k_work *work,
		 k_work_handler_t handler)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*work = (struct k_work)Z_WORK_INITIALIZER(handler);

	SYS_PORT_TRACING_OBJ_INIT(k_work, work);
}

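/* Example (illustrative sketch, not part of this file): defining a handler
 * and submitting a plain work item, assuming the usual <kernel.h>
 * declarations. The names print_handler and print_work are hypothetical.
 *
 *   static void print_handler(struct k_work *work)
 *   {
 *       printk("work item %p ran\n", work);
 *   }
 *
 *   static struct k_work print_work;
 *
 *   void app_init(void)
 *   {
 *       k_work_init(&print_work, print_handler);
 *       (void)k_work_submit(&print_work);
 *   }
 */
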
static inline int work_busy_get_locked(const struct k_work *work)
{
	return flags_get(&work->flags) & K_WORK_MASK;
}

int k_work_busy_get(const struct k_work *work)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_busy_get_locked(work);

	k_spin_unlock(&lock, key);

	return ret;
}

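/* Example (illustrative sketch, not part of this file): k_work_busy_get()
 * returns a snapshot of the K_WORK_* state bits; zero means the item is
 * idle. This assumes the standard flag masks from <kernel.h> and the
 * hypothetical print_work from the sketch above.
 *
 *   int busy = k_work_busy_get(&print_work);
 *
 *   if (busy == 0) {
 *       printk("idle\n");
 *   } else if ((busy & K_WORK_RUNNING) != 0) {
 *       printk("handler is executing\n");
 *   } else if ((busy & (K_WORK_QUEUED | K_WORK_DELAYED)) != 0) {
 *       printk("waiting to run\n");
 *   }
 *
 * The snapshot can be stale as soon as the lock is released, so treat it as
 * advisory unless the caller otherwise prevents state changes.
 */
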
/* Add a flusher work item to the queue.
 *
 * Invoked with work lock held.
 *
 * Caller must notify queue of pending work.
 *
 * @param queue queue on which a work item may appear.
 * @param work the work item that is either queued or running on @p
 * queue
 * @param flusher an uninitialized/unused flusher object
 */
static void queue_flusher_locked(struct k_work_q *queue,
				 struct k_work *work,
				 struct z_work_flusher *flusher)
{
	bool in_list = false;
	struct k_work *wn;

	/* Determine whether the work item is still queued. */
	SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
		if (wn == work) {
			in_list = true;
			break;
		}
	}

	init_flusher(flusher);
	if (in_list) {
		sys_slist_insert(&queue->pending, &work->node,
				 &flusher->work.node);
	} else {
		sys_slist_prepend(&queue->pending, &flusher->work.node);
	}
}

/* Try to remove a work item from the given queue.
 *
 * Invoked with work lock held.
 *
 * @param queue the queue from which the work should be removed
 * @param work work that may be on the queue
 */
static inline void queue_remove_locked(struct k_work_q *queue,
				       struct k_work *work)
{
	if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
		(void)sys_slist_find_and_remove(&queue->pending, &work->node);
	}
}

/* Potentially notify a queue that it needs to look for pending work.
 *
 * This may make the work queue thread ready, but as the lock is held it
 * will not be a reschedule point. Callers should yield after the lock is
 * released where appropriate (generally if this returns true).
 *
 * @param queue to be notified. If this is null no notification is required.
 *
 * @return true if and only if the queue was notified and woken, i.e. a
 * reschedule is pending.
 */
static inline bool notify_queue_locked(struct k_work_q *queue)
{
	bool rv = false;

	if (queue != NULL) {
		rv = z_sched_wake(&queue->notifyq, 0, NULL);
	}

	return rv;
}

/* Submit a work item to a queue if queue state allows new work.
 *
 * Submission is rejected if no queue is provided, or if the queue is
 * draining and the work isn't being submitted from the queue's
 * thread (chained submission).
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param queue the queue to which work should be submitted. This may
 * be null, in which case the submission will fail.
 *
 * @param work to be submitted
 *
 * @retval 1 if successfully queued
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 * @retval -EBUSY if the submission was rejected (draining, plugged)
 */
static inline int queue_submit_locked(struct k_work_q *queue,
				      struct k_work *work)
{
	if (queue == NULL) {
		return -EINVAL;
	}

	int ret = -EBUSY;
	bool chained = (_current == &queue->thread) && !k_is_in_isr();
	bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
	bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

	/* Test for acceptability, in priority order:
	 *
	 * * -ENODEV if the queue isn't running.
	 * * -EBUSY if draining and not chained
	 * * -EBUSY if plugged and not draining
	 * * otherwise OK
	 */
	if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
		ret = -ENODEV;
	} else if (draining && !chained) {
		ret = -EBUSY;
	} else if (plugged && !draining) {
		ret = -EBUSY;
	} else {
		sys_slist_append(&queue->pending, &work->node);
		ret = 1;
		(void)notify_queue_locked(queue);
	}

	return ret;
}

/* Attempt to submit work to a queue.
 *
 * The submission can fail if:
 * * the work is canceling;
 * * no candidate queue can be identified; or
 * * the candidate queue rejects the submission.
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param work the work structure to be submitted
 * @param queuep pointer to a queue reference. On input this should
 * dereference to the proposed queue (which may be null); after completion it
 * will be null if the work was not submitted or if submitted will reference
 * the queue it was submitted to. That may or may not be the queue provided
 * on input.
 *
 * @retval 0 if work was already submitted to a queue
 * @retval 1 if work was not submitted and has been queued to @p queue
 * @retval 2 if work was running and has been queued to the queue that was
 * running it
 * @retval -EBUSY if canceling or submission was rejected by queue
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 */
static int submit_to_queue_locked(struct k_work *work,
				  struct k_work_q **queuep)
{
	int ret = 0;

	if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Disallowed */
		ret = -EBUSY;
	} else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
		/* Not currently queued */
		ret = 1;

		/* If no queue specified resubmit to last queue. */
		if (*queuep == NULL) {
			*queuep = work->queue;
		}

		/* If the work is currently running we have to use the
		 * queue it's running on to prevent handler
		 * re-entrancy.
		 */
		if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
			__ASSERT_NO_MSG(work->queue != NULL);
			*queuep = work->queue;
			ret = 2;
		}

		int rc = queue_submit_locked(*queuep, work);

		if (rc < 0) {
			ret = rc;
		} else {
			flag_set(&work->flags, K_WORK_QUEUED_BIT);
			work->queue = *queuep;
		}
	} else {
		/* Already queued, do nothing. */
	}

	if (ret <= 0) {
		*queuep = NULL;
	}

	return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
			   struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);

	k_spinlock_key_t key = k_spin_lock(&lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

	int ret = submit_to_queue_locked(work, &queue);

	k_spin_unlock(&lock, key);

	/* If we changed the queue contents (as indicated by a positive ret)
	 * the queue thread may now be ready, but we missed the reschedule
	 * point because the lock was held. If this is being invoked by a
	 * preemptible thread then yield.
	 */
	if ((ret > 0) && (k_is_preempt_thread() != 0)) {
		k_yield();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);

	return ret;
}

int k_work_submit(struct k_work *work)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);

	int ret = k_work_submit_to_queue(&k_sys_work_q, work);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);

	return ret;
}

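/* Example (illustrative sketch, not part of this file): submission return
 * values distinguish "already queued" (0), "newly queued" (1), and "queued
 * to the queue already running it" (2); negative values are errors. The
 * queue my_q and item print_work are hypothetical.
 *
 *   int rc = k_work_submit_to_queue(&my_q, &print_work);
 *
 *   if (rc < 0) {
 *       printk("submit failed: %d\n", rc);
 *   } else if (rc == 0) {
 *       printk("already pending, nothing to do\n");
 *   }
 *
 * k_work_submit() is the same operation targeted at the system work queue
 * k_sys_work_q.
 */
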
/* Flush the work item if necessary.
 *
 * Flushing is necessary only if the work is either queued or running.
 *
 * Invoked with work lock held by key.
 * Sleeps.
 *
 * @param work the work item that is to be flushed
 * @param flusher state used to synchronize the flush
 *
 * @retval true if work is queued or running. If this happens the
 * caller must take the flusher semaphore after releasing the lock.
 *
 * @retval false otherwise. No wait required.
 */
static bool work_flush_locked(struct k_work *work,
			      struct z_work_flusher *flusher)
{
	bool need_flush = (flags_get(&work->flags)
			   & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;

	if (need_flush) {
		struct k_work_q *queue = work->queue;

		__ASSERT_NO_MSG(queue != NULL);

		queue_flusher_locked(queue, work, flusher);
		notify_queue_locked(queue);
	}

	return need_flush;
}

bool k_work_flush(struct k_work *work,
		  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
	__ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);

	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary wait until the flusher item completes */
	if (need_flush) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);

		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);

	return need_flush;
}

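/* Example (illustrative sketch, not part of this file): flushing blocks the
 * caller until any queued or in-progress invocation of the handler has
 * completed, so it must not be called from an ISR or from the handler
 * itself. The names are hypothetical.
 *
 *   void wait_for_print_work(void)
 *   {
 *       struct k_work_sync sync;
 *
 *       if (k_work_flush(&print_work, &sync)) {
 *           printk("had to wait for the handler\n");
 *       }
 *   }
 */
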
/* Execute the non-waiting steps necessary to cancel a work item.
 *
 * Invoked with work lock held.
 *
 * @param work the work item to be canceled.
 *
 * @retval true if we need to wait for the work item to finish canceling
 * @retval false if the work item is idle
 *
 * @return the k_work_busy_get() flags captured under lock
 */
static int cancel_async_locked(struct k_work *work)
{
	/* If we haven't already started canceling, do it now. */
	if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Remove it from the queue, if it's queued. */
		queue_remove_locked(work->queue, work);
	}

	/* If it's still busy after it's been dequeued, then flag it
	 * as canceling.
	 */
	int ret = work_busy_get_locked(work);

	if (ret != 0) {
		flag_set(&work->flags, K_WORK_CANCELING_BIT);
		ret = work_busy_get_locked(work);
	}

	return ret;
}

/* Complete the synchronous part of cancellation: decide whether the caller
 * must wait for cancellation to finish, and if so register a canceller
 * record.
 *
 * Invoked with work lock held.
 *
 * @param work work that is being canceled
 * @param canceller state used to synchronize the cancellation
 *
 * @retval true if and only if the work was still active on entry. The caller
 * must wait on the canceller semaphore after releasing the lock.
 *
 * @retval false if work was idle on entry. The caller need not wait.
 */
static bool cancel_sync_locked(struct k_work *work,
			       struct z_work_canceller *canceller)
{
	bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);

	/* If something's still running then we have to wait for
	 * completion, which is indicated when finalize_cancel_locked()
	 * gets invoked.
	 */
	if (ret) {
		init_work_cancel(canceller, work);
	}

	return ret;
}

int k_work_cancel(struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_async_locked(work);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);

	return ret;
}

bool k_work_cancel_sync(struct k_work *work,
			struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_busy_get_locked(work) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_async_locked(work);
		need_wait = cancel_sync_locked(work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);

		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);

	return pending;
}

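/* Example (illustrative sketch, not part of this file): k_work_cancel() only
 * starts cancellation, while k_work_cancel_sync() additionally waits until
 * the item is idle, which is what most teardown paths want. The names are
 * hypothetical.
 *
 *   void stop_print_work(void)
 *   {
 *       struct k_work_sync sync;
 *
 *       if (k_work_cancel_sync(&print_work, &sync)) {
 *           printk("work was still pending or running\n");
 *       }
 *   }
 *
 * On return the handler is neither queued nor running, unless something
 * resubmitted the item after the cancellation completed.
 */
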
/* Loop executed by a work queue thread.
 *
 * @param workq_ptr pointer to the work queue structure
 */
static void work_queue_main(void *workq_ptr, void *p2, void *p3)
{
	struct k_work_q *queue = (struct k_work_q *)workq_ptr;
	uint32_t start_time, stop_time, cost_time;

	while (true) {
		sys_snode_t *node;
		struct k_work *work = NULL;
		k_work_handler_t handler = NULL;
		k_spinlock_key_t key = k_spin_lock(&lock);

		/* Check for and prepare any new work. */
		node = sys_slist_get(&queue->pending);
		if (node != NULL) {
			/* Mark that there's some work active that's
			 * not on the pending list.
			 */
			flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			work = CONTAINER_OF(node, struct k_work, node);
			flag_set(&work->flags, K_WORK_RUNNING_BIT);
			flag_clear(&work->flags, K_WORK_QUEUED_BIT);

			/* Static code analysis tools can raise a false-positive
			 * violation here, claiming that 'work' is checked for
			 * null after being dereferenced.
			 *
			 * 'work' is derived from 'node' by CONTAINER_OF, as the
			 * struct k_work that contains the node. The only way it
			 * could be NULL is if the node were a member of a
			 * struct k_work object placed at address NULL, which
			 * can never happen; the later 'if (work != NULL)' check
			 * also guards against it. So if node is not NULL, work
			 * is not NULL either.
			 */
			handler = work->handler;
		} else if (flag_test_and_clear(&queue->flags,
					       K_WORK_QUEUE_DRAIN_BIT)) {
			/* Not busy and draining: move threads waiting for
			 * drain to ready state. The held spinlock inhibits
			 * immediate reschedule; released threads get their
			 * chance when this invokes z_sched_wait() below.
			 *
			 * We don't touch K_WORK_QUEUE_PLUGGED_BIT, so getting
			 * here doesn't mean that the queue will allow new
			 * submissions.
			 */
			(void)z_sched_wake_all(&queue->drainq, 1, NULL);
		} else {
			/* No work is available and no queue state requires
			 * special handling.
			 */
			;
		}

		if (work == NULL) {
			/* Nothing's had a chance to add work since we took
			 * the lock, and we didn't find work nor got asked to
			 * stop. Just go to sleep: when something happens the
			 * work thread will be woken and we can check again.
			 */
			(void)z_sched_wait(&lock, key, &queue->notifyq,
					   K_FOREVER, NULL);
			continue;
		}

		k_spin_unlock(&lock, key);

		if (work != NULL) {
			bool yield;

			__ASSERT_NO_MSG(handler != NULL);

			start_time = k_cycle_get_32();
			handler(work);
			stop_time = k_cycle_get_32();
			cost_time = k_cyc_to_us_floor32(stop_time - start_time);

			/* Warn when a handler on a cooperative-priority queue
			 * runs for more than 10 ms, since the queue thread
			 * cannot be preempted while the handler executes.
			 */
			if ((k_thread_priority_get(&queue->thread) < 0) && (cost_time > 10000)) {
#if defined(CONFIG_THREAD_NAME)
				printk("work_q %s %p handler %p run %u us!!!\n",
				       queue->thread.name, queue, handler, cost_time);
#else
				printk("work_q %p handler %p run %u us!!!\n",
				       queue, handler, cost_time);
#endif
			}

			/* Mark the work item as no longer running and deal
			 * with any cancellation issued while it was running.
			 * Clear the BUSY flag and optionally yield to prevent
			 * starving other threads.
			 */
			key = k_spin_lock(&lock);

			flag_clear(&work->flags, K_WORK_RUNNING_BIT);
			if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
				finalize_cancel_locked(work);
			}
			flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
			k_spin_unlock(&lock, key);

			/* Optionally yield to prevent the work queue from
			 * starving other threads.
			 */
			if (yield) {
				k_yield();
			}
		}
	}
}

void k_work_queue_init(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue != NULL);

	*queue = (struct k_work_q) {
		.flags = 0,
	};

	SYS_PORT_TRACING_OBJ_INIT(k_work_queue, queue);
}

void k_work_queue_start(struct k_work_q *queue,
			k_thread_stack_t *stack,
			size_t stack_size,
			int prio,
			const struct k_work_queue_config *cfg)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(stack);
	__ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));

	uint32_t flags = K_WORK_QUEUE_STARTED;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);

	sys_slist_init(&queue->pending);
	z_waitq_init(&queue->notifyq);
	z_waitq_init(&queue->drainq);

	if ((cfg != NULL) && cfg->no_yield) {
		flags |= K_WORK_QUEUE_NO_YIELD;
	}

	/* It hasn't actually been started yet, but all the state is in place
	 * so we can submit things and once the thread gets control it's ready
	 * to roll.
	 */
	flags_set(&queue->flags, flags);

	(void)k_thread_create(&queue->thread, stack, stack_size,
			      work_queue_main, queue, NULL, NULL,
			      prio, 0, K_FOREVER);

	if ((cfg != NULL) && (cfg->name != NULL)) {
		k_thread_name_set(&queue->thread, cfg->name);
	}

	k_thread_start(&queue->thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
}

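/* Example (illustrative sketch, not part of this file): a dedicated queue
 * needs a statically defined stack and is normally configured before start.
 * This assumes the K_THREAD_STACK_DEFINE() helper from <kernel.h>; the names
 * my_stack_area and my_work_q are hypothetical.
 *
 *   K_THREAD_STACK_DEFINE(my_stack_area, 1024);
 *   static struct k_work_q my_work_q;
 *
 *   void start_my_queue(void)
 *   {
 *       struct k_work_queue_config cfg = {
 *           .name = "my_work_q",
 *           .no_yield = false,
 *       };
 *
 *       k_work_queue_init(&my_work_q);
 *       k_work_queue_start(&my_work_q, my_stack_area,
 *                          K_THREAD_STACK_SIZEOF(my_stack_area),
 *                          5, &cfg);
 *   }
 */
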
int k_work_queue_drain(struct k_work_q *queue,
		       bool plug)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(!k_is_in_isr());

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (((flags_get(&queue->flags)
	      & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
	    || plug
	    || !sys_slist_is_empty(&queue->pending)) {
		flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
		if (plug) {
			flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
		}

		notify_queue_locked(queue);
		ret = z_sched_wait(&lock, key, &queue->drainq,
				   K_FOREVER, NULL);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);

	return ret;
}

int k_work_queue_unplug(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);

	int ret = -EALREADY;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
		ret = 0;
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);

	return ret;
}

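/* Example (illustrative sketch, not part of this file): draining with
 * plug=true lets already-submitted work finish while rejecting new
 * submissions (other than chained ones), which is useful before suspending
 * a subsystem; unplug re-opens the queue. Uses the hypothetical my_work_q.
 *
 *   void quiesce_my_queue(void)
 *   {
 *       (void)k_work_queue_drain(&my_work_q, true);
 *       // ... do whatever required the queue to be idle ...
 *       (void)k_work_queue_unplug(&my_work_q);
 *   }
 */
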
#ifdef CONFIG_SYS_CLOCK_EXISTS

/* Timeout handler for delayable work.
 *
 * Invoked by timeout infrastructure.
 * Takes and releases work lock.
 * Conditionally reschedules.
 */
static void work_timeout(struct _timeout *to)
{
	struct k_work_delayable *dw
		= CONTAINER_OF(to, struct k_work_delayable, timeout);
	struct k_work *wp = &dw->work;
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_work_q *queue = NULL;

	/* If the work is still marked delayed (should be) then clear that
	 * state and submit it to the queue. If successful the queue will be
	 * notified of new work at the next reschedule point.
	 *
	 * If not successful there is no notification that the work has been
	 * abandoned. Sorry.
	 */
	if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
		queue = dw->queue;
		(void)submit_to_queue_locked(wp, &queue);
	}

	k_spin_unlock(&lock, key);
}

void k_work_init_delayable(struct k_work_delayable *dwork,
			   k_work_handler_t handler)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*dwork = (struct k_work_delayable){
		.work = {
			.handler = handler,
			.flags = K_WORK_DELAYABLE,
		},
	};
	z_init_timeout(&dwork->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
}

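/* Example (illustrative sketch, not part of this file): a delayable item
 * embeds a plain k_work, so the handler receives a struct k_work pointer and
 * can recover the container with k_work_delayable_from_work(). The names
 * blink_handler and blink_work are hypothetical.
 *
 *   static void blink_handler(struct k_work *work)
 *   {
 *       struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *
 *       // ... do the periodic thing, then re-arm ...
 *       (void)k_work_schedule(dwork, K_MSEC(500));
 *   }
 *
 *   static struct k_work_delayable blink_work;
 *
 *   void blink_init(void)
 *   {
 *       k_work_init_delayable(&blink_work, blink_handler);
 *       (void)k_work_schedule(&blink_work, K_MSEC(500));
 *   }
 */
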
static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
{
	/* Use the same accessor as the non-delayable path: flags is a plain
	 * uint32_t protected by the work lock, not an atomic_t.
	 */
	return flags_get(&dwork->work.flags) & K_WORK_MASK;
}

int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_delayable_busy_get_locked(dwork);

	k_spin_unlock(&lock, key);

	return ret;
}

/* Attempt to schedule a work item for future (maybe immediate)
 * submission.
 *
 * Invoked with work lock held.
 *
 * See also submit_to_queue_locked(), which implements this for a no-wait
 * delay.
 *
 * @param queuep pointer to a pointer to a queue. On input this
 * should dereference to the proposed queue (which may be null); after
 * completion it will be null if the work was not submitted or if
 * submitted will reference the queue it was submitted to. That may
 * or may not be the queue provided on input.
 *
 * @param dwork the delayed work structure
 *
 * @param delay the delay to use before scheduling.
 *
 * @retval from submit_to_queue_locked() if delay is K_NO_WAIT; otherwise
 * @retval 1 to indicate successfully scheduled.
 */
static int schedule_for_queue_locked(struct k_work_q **queuep,
				     struct k_work_delayable *dwork,
				     k_timeout_t delay)
{
	int ret = 1;
	struct k_work *work = &dwork->work;

	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		return submit_to_queue_locked(work, queuep);
	}

	flag_set(&work->flags, K_WORK_DELAYED_BIT);
	dwork->queue = *queuep;

	/* Add timeout */
	z_add_timeout(&dwork->timeout, work_timeout, delay);

	return ret;
}

/* Unschedule delayable work.
 *
 * If the work is delayed, cancel the timeout and clear the delayed
 * flag.
 *
 * Invoked with work lock held.
 *
 * @param dwork pointer to delayable work structure.
 *
 * @return true if and only if work had been delayed so the timeout
 * was cancelled.
 */
static inline bool unschedule_locked(struct k_work_delayable *dwork)
{
	bool ret = false;
	struct k_work *work = &dwork->work;

	/* If scheduled, try to cancel. */
	if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
		z_abort_timeout(&dwork->timeout);
		ret = true;
	}

	return ret;
}

/* Full cancellation of a delayable work item.
 *
 * Unschedules the delayed part then delegates to standard work
 * cancellation.
 *
 * Invoked with work lock held.
 *
 * @param dwork delayable work item
 *
 * @return k_work_busy_get() flags
 */
static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
{
	(void)unschedule_locked(dwork);

	return cancel_async_locked(&dwork->work);
}

int k_work_schedule_for_queue(struct k_work_q *queue,
			      struct k_work_delayable *dwork,
			      k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);

	struct k_work *work = &dwork->work;
	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Schedule the work item if it's idle or running. */
	if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
		ret = schedule_for_queue_locked(&queue, dwork, delay);
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_schedule(struct k_work_delayable *dwork,
		    k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);

	int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);

	return ret;
}

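/* Example (illustrative sketch, not part of this file): k_work_schedule()
 * leaves an already scheduled or queued item untouched (returning 0), so it
 * suits "start a timeout unless one is already running" patterns. Uses the
 * hypothetical blink_work from the sketch above.
 *
 *   int rc = k_work_schedule(&blink_work, K_SECONDS(1));
 *
 *   if (rc == 0) {
 *       printk("already armed; existing deadline kept\n");
 *   } else if (rc == 1) {
 *       printk("newly scheduled\n");
 *   }
 */
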
int k_work_reschedule_for_queue(struct k_work_q *queue,
				struct k_work_delayable *dwork,
				k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Remove any active scheduling. */
	(void)unschedule_locked(dwork);

	/* Schedule the work item with the new parameters. */
	ret = schedule_for_queue_locked(&queue, dwork, delay);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_reschedule(struct k_work_delayable *dwork,
		      k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);

	int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);

	return ret;
}

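/* Example (illustrative sketch, not part of this file): unlike schedule,
 * reschedule unconditionally replaces any existing timeout, implementing
 * "restart the countdown" semantics such as an inactivity timer. Uses the
 * hypothetical blink_work.
 *
 *   void note_activity(void)
 *   {
 *       // Each call pushes the deadline out to one second from now.
 *       (void)k_work_reschedule(&blink_work, K_SECONDS(1));
 *   }
 */
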
int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_delayable_async_locked(dwork);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);

	return ret;
}

bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
				  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_delayable_async_locked(dwork);
		need_wait = cancel_sync_locked(&dwork->work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);

	return pending;
}

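/* Example (illustrative sketch, not part of this file): synchronous
 * cancellation of a delayable item both stops a pending countdown and waits
 * out a handler that is already running. Uses the hypothetical blink_work.
 *
 *   void blink_stop(void)
 *   {
 *       struct k_work_sync sync;
 *
 *       if (k_work_cancel_delayable_sync(&blink_work, &sync)) {
 *           printk("blink work was still active\n");
 *       }
 *   }
 */
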
bool k_work_flush_delayable(struct k_work_delayable *dwork,
			    struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);

	struct k_work *work = &dwork->work;
	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* If it's idle release the lock and return immediately. */
	if (work_busy_get_locked(work) == 0U) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);

		return false;
	}

	/* If unscheduling did something then submit it. Ignore a
	 * failed submission (e.g. when cancelling).
	 */
	if (unschedule_locked(dwork)) {
		struct k_work_q *queue = dwork->queue;

		(void)submit_to_queue_locked(work, &queue);
	}

	/* Wait for it to finish */
	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary wait until the flusher item completes */
	if (need_flush) {
		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);

	return need_flush;
}

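/* Example (illustrative sketch, not part of this file): flushing a delayable
 * item promotes a pending countdown to an immediate submission and then
 * waits for the handler to finish, which is handy when shutting down while
 * still wanting the final invocation to happen. Uses the hypothetical
 * blink_work.
 *
 *   void blink_finish_now(void)
 *   {
 *       struct k_work_sync sync;
 *
 *       (void)k_work_flush_delayable(&blink_work, &sync);
 *   }
 */
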
int k_delayed_work_cancel(struct k_delayed_work *work)
{
	bool pending = k_work_delayable_is_pending(&work->work);
	int rc = k_work_cancel_delayable(&work->work);

	/* Old return value rules:
	 *
	 * 0 if:
	 * * Work item countdown cancelled before the item was submitted to
	 *   its queue; or
	 * * Work item was removed from its queue before it was processed.
	 *
	 * -EINVAL if:
	 * * Work item has never been submitted; or
	 * * Work item has been successfully cancelled; or
	 * * Timeout handler is in the process of submitting the work item to
	 *   its queue; or
	 * * Work queue thread has removed the work item from the queue but
	 *   has not called its handler.
	 *
	 * -EALREADY if:
	 * * Work queue thread has removed the work item from the queue and
	 *   cleared its pending flag; or
	 * * Work queue thread is invoking the item handler; or
	 * * Work item handler has completed.
	 *
	 * We can't reconstruct those states, so call it successful only when
	 * a pending item is no longer pending, -EINVAL if it was pending and
	 * still is, and -EALREADY if it wasn't pending (so presumably
	 * cancellation should have had no effect, assuming we didn't hit a
	 * race condition).
	 */
	if (pending) {
		return (rc == 0) ? 0 : -EINVAL;
	}

	return -EALREADY;
}

void k_delayed_work_init(struct k_delayed_work *work,
			 k_work_handler_t handler)
{
	k_work_init_delayable(&work->work, handler);
}

int k_delayed_work_submit(struct k_delayed_work *work,
			  k_timeout_t delay)
{
	int rc = k_work_reschedule(&work->work, delay);

	/* Legacy API doesn't distinguish success cases. */
	return (rc >= 0) ? 0 : rc;
}

int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   k_timeout_t delay)
{
	int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);

	/* Legacy API doesn't distinguish success cases. */
	return (rc >= 0) ? 0 : rc;
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */