- /*
- * Copyright (c) 2020 Nordic Semiconductor ASA
- *
- * SPDX-License-Identifier: Apache-2.0
- */
- /**
- * @file
- *
- * Second generation work queue implementation
- */
- #include <kernel.h>
- #include <kernel_structs.h>
- #include <wait_q.h>
- #include <spinlock.h>
- #include <errno.h>
- #include <ksched.h>
- #include <sys/printk.h>
- static inline void flag_clear(uint32_t *flagp,
- uint32_t bit)
- {
- *flagp &= ~BIT(bit);
- }
- static inline void flag_set(uint32_t *flagp,
- uint32_t bit)
- {
- *flagp |= BIT(bit);
- }
- static inline bool flag_test(const uint32_t *flagp,
- uint32_t bit)
- {
- return (*flagp & BIT(bit)) != 0U;
- }
- static inline bool flag_test_and_clear(uint32_t *flagp,
- uint32_t bit)
- {
- bool ret = flag_test(flagp, bit);
- flag_clear(flagp, bit);
- return ret;
- }
- static inline void flags_set(uint32_t *flagp,
- uint32_t flags)
- {
- *flagp = flags;
- }
- static inline uint32_t flags_get(const uint32_t *flagp)
- {
- return *flagp;
- }
- /* Lock to protect the internal state of all work items, work queues,
- * and pending_cancels.
- */
- static struct k_spinlock lock;
- /* Invoked by work thread */
- static void handle_flush(struct k_work *work)
- {
- struct z_work_flusher *flusher
- = CONTAINER_OF(work, struct z_work_flusher, work);
- k_sem_give(&flusher->sem);
- }
- static inline void init_flusher(struct z_work_flusher *flusher)
- {
- k_sem_init(&flusher->sem, 0, 1);
- k_work_init(&flusher->work, handle_flush);
- }
- /* List of pending cancellations. */
- static sys_slist_t pending_cancels;
- /* Initialize a canceler record and add it to the list of pending
- * cancels.
- *
- * Invoked with work lock held.
- *
- * @param canceler the structure used to notify a waiting process.
- * @param work the work structure that is to be canceled
- */
- static inline void init_work_cancel(struct z_work_canceller *canceler,
- struct k_work *work)
- {
- k_sem_init(&canceler->sem, 0, 1);
- canceler->work = work;
- sys_slist_append(&pending_cancels, &canceler->node);
- }
- /* Complete cancellation of a work item and notify anything waiting for
- * that cancellation to finish.
- *
- * Invoked with work lock held.
- *
- * Invoked from a work queue thread.
- *
- * Reschedules.
- *
- * @param work the work structure that has completed cancellation
- */
- static void finalize_cancel_locked(struct k_work *work)
- {
- struct z_work_canceller *wc, *tmp;
- sys_snode_t *prev = NULL;
- /* Clear this first, so released high-priority threads don't
- * see it when doing things.
- */
- flag_clear(&work->flags, K_WORK_CANCELING_BIT);
- /* Search for and remove the matching container, and release
- * what's waiting for the completion. The same work item can
- * appear multiple times in the list if multiple threads
- * attempt to cancel it.
- */
- SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
- if (wc->work == work) {
- sys_slist_remove(&pending_cancels, prev, &wc->node);
- k_sem_give(&wc->sem);
- } else {
- prev = &wc->node;
- }
- }
- }
- void k_work_init(struct k_work *work,
- k_work_handler_t handler)
- {
- __ASSERT_NO_MSG(work != NULL);
- __ASSERT_NO_MSG(handler != NULL);
- *work = (struct k_work)Z_WORK_INITIALIZER(handler);
- SYS_PORT_TRACING_OBJ_INIT(k_work, work);
- }
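- /* Illustrative sketch (not part of the original implementation): the
- * minimal pattern for k_work_init() is to define a handler, initialize
- * the item once, and submit it whenever the deferred event occurs.
- * Names such as my_work and my_handler are placeholders.
- */
- #if 0 /* usage sketch */
- static struct k_work my_work;
- static void my_handler(struct k_work *work)
- {
- printk("deferred processing runs in a work queue thread\n");
- }
- void app_init(void)
- {
- k_work_init(&my_work, my_handler);
- }
- #endif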
- static inline int work_busy_get_locked(const struct k_work *work)
- {
- return flags_get(&work->flags) & K_WORK_MASK;
- }
- int k_work_busy_get(const struct k_work *work)
- {
- k_spinlock_key_t key = k_spin_lock(&lock);
- int ret = work_busy_get_locked(work);
- k_spin_unlock(&lock, key);
- return ret;
- }
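- /* Illustrative sketch (not part of the original implementation): the
- * value returned by k_work_busy_get() is a bit mask, so callers test
- * individual K_WORK_* bits rather than comparing against one state.
- */
- #if 0 /* usage sketch */
- void report_state(struct k_work *work)
- {
- int busy = k_work_busy_get(work);
- if (busy == 0) {
- printk("idle\n");
- } else if ((busy & K_WORK_RUNNING) != 0) {
- printk("handler is executing\n");
- } else if ((busy & (K_WORK_QUEUED | K_WORK_DELAYED)) != 0) {
- printk("queued or counting down\n");
- }
- }
- #endif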
- /* Add a flusher work item to the queue.
- *
- * Invoked with work lock held.
- *
- * Caller must notify queue of pending work.
- *
- * @param queue queue on which a work item may appear.
- * @param work the work item that is either queued or running on @p
- * queue
- * @param flusher an uninitialized/unused flusher object
- */
- static void queue_flusher_locked(struct k_work_q *queue,
- struct k_work *work,
- struct z_work_flusher *flusher)
- {
- bool in_list = false;
- struct k_work *wn;
- /* Determine whether the work item is still queued. */
- SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
- if (wn == work) {
- in_list = true;
- break;
- }
- }
- init_flusher(flusher);
- if (in_list) {
- sys_slist_insert(&queue->pending, &work->node,
- &flusher->work.node);
- } else {
- sys_slist_prepend(&queue->pending, &flusher->work.node);
- }
- }
- /* Try to remove a work item from the given queue.
- *
- * Invoked with work lock held.
- *
- * @param queue the queue from which the work should be removed
- * @param work work that may be on the queue
- */
- static inline void queue_remove_locked(struct k_work_q *queue,
- struct k_work *work)
- {
- if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
- (void)sys_slist_find_and_remove(&queue->pending, &work->node);
- }
- }
- /* Potentially notify a queue that it needs to look for pending work.
- *
- * This may make the work queue thread ready, but as the lock is held it
- * will not be a reschedule point. Callers should yield after the lock is
- * released where appropriate (generally if this returns true).
- *
- * @param queue to be notified. If this is null no notification is required.
- *
- * @return true if and only if the queue was notified and woken, i.e. a
- * reschedule is pending.
- */
- static inline bool notify_queue_locked(struct k_work_q *queue)
- {
- bool rv = false;
- if (queue != NULL) {
- rv = z_sched_wake(&queue->notifyq, 0, NULL);
- }
- return rv;
- }
- /* Submit a work item to a queue if queue state allows new work.
- *
- * Submission is rejected if no queue is provided, or if the queue is
- * draining and the work isn't being submitted from the queue's
- * thread (chained submission).
- *
- * Invoked with work lock held.
- * Conditionally notifies queue.
- *
- * @param queue the queue to which work should be submitted. This may
- * be null, in which case the submission will fail.
- *
- * @param work to be submitted
- *
- * @retval 1 if successfully queued
- * @retval -EINVAL if no queue is provided
- * @retval -ENODEV if the queue is not started
- * @retval -EBUSY if the submission was rejected (draining, plugged)
- */
- static inline int queue_submit_locked(struct k_work_q *queue,
- struct k_work *work)
- {
- if (queue == NULL) {
- return -EINVAL;
- }
- int ret = -EBUSY;
- bool chained = (_current == &queue->thread) && !k_is_in_isr();
- bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
- bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
- /* Test for acceptability, in priority order:
- *
- * * -ENODEV if the queue isn't running.
- * * -EBUSY if draining and not chained
- * * -EBUSY if plugged and not draining
- * * otherwise OK
- */
- if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
- ret = -ENODEV;
- } else if (draining && !chained) {
- ret = -EBUSY;
- } else if (plugged && !draining) {
- ret = -EBUSY;
- } else {
- sys_slist_append(&queue->pending, &work->node);
- ret = 1;
- (void)notify_queue_locked(queue);
- }
- return ret;
- }
- /* Attempt to submit work to a queue.
- *
- * The submission can fail if:
- * * the work is canceling;
- * * no candidate queue can be identified;
- * * the candidate queue rejects the submission.
- *
- * Invoked with work lock held.
- * Conditionally notifies queue.
- *
- * @param work the work structure to be submitted
- * @param queuep pointer to a queue reference. On input this should
- * dereference to the proposed queue (which may be null); after completion it
- * will be null if the work was not submitted or if submitted will reference
- * the queue it was submitted to. That may or may not be the queue provided
- * on input.
- *
- * @retval 0 if work was already submitted to a queue
- * @retval 1 if work was not submitted and has been queued to @p queue
- * @retval 2 if work was running and has been queued to the queue that was
- * running it
- * @retval -EBUSY if canceling or submission was rejected by queue
- * @retval -EINVAL if no queue is provided
- * @retval -ENODEV if the queue is not started
- */
- static int submit_to_queue_locked(struct k_work *work,
- struct k_work_q **queuep)
- {
- int ret = 0;
- if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
- /* Disallowed */
- ret = -EBUSY;
- } else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
- /* Not currently queued */
- ret = 1;
- /* If no queue specified resubmit to last queue.
- */
- if (*queuep == NULL) {
- *queuep = work->queue;
- }
- /* If the work is currently running we have to use the
- * queue it's running on to prevent handler
- * re-entrancy.
- */
- if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
- __ASSERT_NO_MSG(work->queue != NULL);
- *queuep = work->queue;
- ret = 2;
- }
- int rc = queue_submit_locked(*queuep, work);
- if (rc < 0) {
- ret = rc;
- } else {
- flag_set(&work->flags, K_WORK_QUEUED_BIT);
- work->queue = *queuep;
- }
- } else {
- /* Already queued, do nothing. */
- }
- if (ret <= 0) {
- *queuep = NULL;
- }
- return ret;
- }
- int k_work_submit_to_queue(struct k_work_q *queue,
- struct k_work *work)
- {
- __ASSERT_NO_MSG(work != NULL);
- k_spinlock_key_t key = k_spin_lock(&lock);
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);
- int ret = submit_to_queue_locked(work, &queue);
- k_spin_unlock(&lock, key);
- /* If we changed the queue contents (as indicated by a positive ret)
- * the queue thread may now be ready, but we missed the reschedule
- * point because the lock was held. If this is being invoked by a
- * preemptible thread then yield.
- */
- if ((ret > 0) && (k_is_preempt_thread() != 0)) {
- k_yield();
- }
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);
- return ret;
- }
- int k_work_submit(struct k_work *work)
- {
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);
- int ret = k_work_submit_to_queue(&k_sys_work_q, work);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);
- return ret;
- }
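- /* Illustrative sketch (not part of the original implementation):
- * submitting to the system work queue from interrupt context.
- * K_WORK_DEFINE() statically initializes the item; rx_work and
- * rx_work_handler are placeholder names.
- */
- #if 0 /* usage sketch */
- static void rx_work_handler(struct k_work *work)
- {
- /* runs later in the system work queue thread */
- }
- K_WORK_DEFINE(rx_work, rx_work_handler);
- void rx_isr(const void *arg)
- {
- int rc = k_work_submit(&rx_work);
- /* 0: already queued, 1: newly queued, 2: queued onto the queue
- * that is currently running it, negative: rejected
- */
- (void)rc;
- }
- #endif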
- /* Flush the work item if necessary.
- *
- * Flushing is necessary only if the work is either queued or running.
- *
- * Invoked with work lock held by key.
- * Sleeps.
- *
- * @param work the work item that is to be flushed
- * @param flusher state used to synchronize the flush
- *
- * @retval true if work is queued or running. If this happens the
- * caller must take the flusher semaphore after releasing the lock.
- *
- * @retval false otherwise. No wait required.
- */
- static bool work_flush_locked(struct k_work *work,
- struct z_work_flusher *flusher)
- {
- bool need_flush = (flags_get(&work->flags)
- & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;
- if (need_flush) {
- struct k_work_q *queue = work->queue;
- __ASSERT_NO_MSG(queue != NULL);
- queue_flusher_locked(queue, work, flusher);
- notify_queue_locked(queue);
- }
- return need_flush;
- }
- bool k_work_flush(struct k_work *work,
- struct k_work_sync *sync)
- {
- __ASSERT_NO_MSG(work != NULL);
- __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
- __ASSERT_NO_MSG(!k_is_in_isr());
- __ASSERT_NO_MSG(sync != NULL);
- #ifdef CONFIG_KERNEL_COHERENCE
- __ASSERT_NO_MSG(arch_mem_coherent(sync));
- #endif
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);
- struct z_work_flusher *flusher = &sync->flusher;
- k_spinlock_key_t key = k_spin_lock(&lock);
- bool need_flush = work_flush_locked(work, flusher);
- k_spin_unlock(&lock, key);
- /* If necessary wait until the flusher item completes */
- if (need_flush) {
- SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);
- k_sem_take(&flusher->sem, K_FOREVER);
- }
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);
- return need_flush;
- }
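- /* Illustrative sketch (not part of the original implementation):
- * waiting for any queued or in-progress invocation to complete before
- * touching resources the handler uses. The k_work_sync object may live
- * on the caller's stack unless CONFIG_KERNEL_COHERENCE requires
- * coherent memory.
- */
- #if 0 /* usage sketch */
- void sync_before_reuse(struct k_work *work)
- {
- struct k_work_sync sync;
- bool was_busy = k_work_flush(work, &sync);
- /* any queued or running invocation has now completed */
- (void)was_busy;
- }
- #endif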
- /* Execute the non-waiting steps necessary to cancel a work item.
- *
- * Invoked with work lock held.
- *
- * @param work the work item to be canceled.
- *
- * @retval true if we need to wait for the work item to finish canceling
- * @retval false if the work item is idle
- *
- * @return k_work_busy_get() captured under lock
- */
- static int cancel_async_locked(struct k_work *work)
- {
- /* If we haven't already started canceling, do it now. */
- if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
- /* Remove it from the queue, if it's queued. */
- queue_remove_locked(work->queue, work);
- }
- /* If it's still busy after it's been dequeued, then flag it
- * as canceling.
- */
- int ret = work_busy_get_locked(work);
- if (ret != 0) {
- flag_set(&work->flags, K_WORK_CANCELING_BIT);
- ret = work_busy_get_locked(work);
- }
- return ret;
- }
- /* Prepare to wait for an in-progress cancellation to complete, if one
- * is in progress.
- *
- * Invoked with work lock held.
- *
- * @param work work that is being canceled
- * @param canceller state used to synchronize the cancellation
- *
- * @retval true if and only if the work was still active on entry. The caller
- * must wait on the canceller semaphore after releasing the lock.
- *
- * @retval false if work was idle on entry. The caller need not wait.
- */
- static bool cancel_sync_locked(struct k_work *work,
- struct z_work_canceller *canceller)
- {
- bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);
- /* If something's still running then we have to wait for
- * completion, which is indicated when finish_cancel() gets
- * invoked.
- */
- if (ret) {
- init_work_cancel(canceller, work);
- }
- return ret;
- }
- int k_work_cancel(struct k_work *work)
- {
- __ASSERT_NO_MSG(work != NULL);
- __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);
- k_spinlock_key_t key = k_spin_lock(&lock);
- int ret = cancel_async_locked(work);
- k_spin_unlock(&lock, key);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);
- return ret;
- }
- bool k_work_cancel_sync(struct k_work *work,
- struct k_work_sync *sync)
- {
- __ASSERT_NO_MSG(work != NULL);
- __ASSERT_NO_MSG(sync != NULL);
- __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
- __ASSERT_NO_MSG(!k_is_in_isr());
- #ifdef CONFIG_KERNEL_COHERENCE
- __ASSERT_NO_MSG(arch_mem_coherent(sync));
- #endif
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);
- struct z_work_canceller *canceller = &sync->canceller;
- k_spinlock_key_t key = k_spin_lock(&lock);
- bool pending = (work_busy_get_locked(work) != 0U);
- bool need_wait = false;
- if (pending) {
- (void)cancel_async_locked(work);
- need_wait = cancel_sync_locked(work, canceller);
- }
- k_spin_unlock(&lock, key);
- if (need_wait) {
- SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);
- k_sem_take(&canceller->sem, K_FOREVER);
- }
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);
- return pending;
- }
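- /* Illustrative sketch (not part of the original implementation): a
- * shutdown path that must guarantee the handler is no longer running
- * when it returns uses the synchronous cancel rather than
- * k_work_cancel().
- */
- #if 0 /* usage sketch */
- void my_driver_shutdown(struct k_work *work)
- {
- struct k_work_sync sync;
- bool was_pending = k_work_cancel_sync(work, &sync);
- if (was_pending) {
- printk("work was queued or running; now fully stopped\n");
- }
- }
- #endif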
- /* Loop executed by a work queue thread.
- *
- * @param workq_ptr pointer to the work queue structure
- */
- static void work_queue_main(void *workq_ptr, void *p2, void *p3)
- {
- struct k_work_q *queue = (struct k_work_q *)workq_ptr;
- uint32_t start_time, stop_time, cost_time;
- while (true) {
- sys_snode_t *node;
- struct k_work *work = NULL;
- k_work_handler_t handler = NULL;
- k_spinlock_key_t key = k_spin_lock(&lock);
- /* Check for and prepare any new work. */
- node = sys_slist_get(&queue->pending);
- if (node != NULL) {
- /* Mark that there's some work active that's
- * not on the pending list.
- */
- flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
- work = CONTAINER_OF(node, struct k_work, node);
- flag_set(&work->flags, K_WORK_RUNNING_BIT);
- flag_clear(&work->flags, K_WORK_QUEUED_BIT);
- /* Static code analysis tools can raise a false-positive violation
- * here, claiming that 'work' is checked for NULL after being
- * dereferenced.
- *
- * 'work' is obtained via CONTAINER_OF as the struct k_work that
- * contains 'node'. It could only be NULL if 'node' were a member of
- * a struct k_work object placed at address NULL, which never
- * happens; the later 'if (work != NULL)' check is therefore
- * redundant but harmless. If 'node' is not NULL, 'work' is not NULL.
- */
- handler = work->handler;
- } else if (flag_test_and_clear(&queue->flags,
- K_WORK_QUEUE_DRAIN_BIT)) {
- /* Not busy and draining: move threads waiting for
- * drain to ready state. The held spinlock inhibits
- * immediate reschedule; released threads get their
- * chance when this invokes z_sched_wait() below.
- *
- * We don't touch K_WORK_QUEUE_PLUGGED_BIT, so getting
- * here doesn't mean that the queue will allow new
- * submissions.
- */
- (void)z_sched_wake_all(&queue->drainq, 1, NULL);
- } else {
- /* No work is available and no queue state requires
- * special handling.
- */
- ;
- }
- if (work == NULL) {
- /* Nothing's had a chance to add work since we took
- * the lock, and we didn't find work nor got asked to
- * stop. Just go to sleep: when something happens the
- * work thread will be woken and we can check again.
- */
- (void)z_sched_wait(&lock, key, &queue->notifyq,
- K_FOREVER, NULL);
- continue;
- }
- k_spin_unlock(&lock, key);
- if (work != NULL) {
- bool yield;
- __ASSERT_NO_MSG(handler != NULL);
- start_time = k_cycle_get_32();
- handler(work);
- stop_time = k_cycle_get_32();
- cost_time = k_cyc_to_us_floor32(stop_time - start_time);
- if ((k_thread_priority_get(&queue->thread) < 0) && (cost_time > 10000)) {
- #if defined(CONFIG_THREAD_NAME)
- printk("work_q %s %p work %p run %d us!!!\n", queue->thread.name, queue, handler, cost_time);
- #else
- printk("work_q %p work %p run %d us!!!\n", queue, handler, cost_time);
- #endif
- }
- /* Mark the work item as no longer running and deal
- * with any cancellation issued while it was running.
- * Clear the BUSY flag and optionally yield to prevent
- * starving other threads.
- */
- key = k_spin_lock(&lock);
- flag_clear(&work->flags, K_WORK_RUNNING_BIT);
- if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
- finalize_cancel_locked(work);
- }
- flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
- yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
- k_spin_unlock(&lock, key);
- /* Optionally yield to prevent the work queue from
- * starving other threads.
- */
- if (yield) {
- k_yield();
- }
- }
- }
- }
- void k_work_queue_init(struct k_work_q *queue)
- {
- __ASSERT_NO_MSG(queue != NULL);
- *queue = (struct k_work_q) {
- .flags = 0,
- };
- SYS_PORT_TRACING_OBJ_INIT(k_work_queue, queue);
- }
- void k_work_queue_start(struct k_work_q *queue,
- k_thread_stack_t *stack,
- size_t stack_size,
- int prio,
- const struct k_work_queue_config *cfg)
- {
- __ASSERT_NO_MSG(queue);
- __ASSERT_NO_MSG(stack);
- __ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));
- uint32_t flags = K_WORK_QUEUE_STARTED;
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);
- sys_slist_init(&queue->pending);
- z_waitq_init(&queue->notifyq);
- z_waitq_init(&queue->drainq);
- if ((cfg != NULL) && cfg->no_yield) {
- flags |= K_WORK_QUEUE_NO_YIELD;
- }
- /* It hasn't actually been started yet, but all the state is in place
- * so we can submit things and once the thread gets control it's ready
- * to roll.
- */
- flags_set(&queue->flags, flags);
- (void)k_thread_create(&queue->thread, stack, stack_size,
- work_queue_main, queue, NULL, NULL,
- prio, 0, K_FOREVER);
- if ((cfg != NULL) && (cfg->name != NULL)) {
- k_thread_name_set(&queue->thread, cfg->name);
- }
- k_thread_start(&queue->thread);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
- }
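- /* Illustrative sketch (not part of the original implementation):
- * creating a dedicated work queue with a named thread. Stack size and
- * priority are placeholder values chosen for illustration.
- */
- #if 0 /* usage sketch */
- K_THREAD_STACK_DEFINE(my_workq_stack, 1024);
- static struct k_work_q my_workq;
- void start_my_workq(void)
- {
- static const struct k_work_queue_config cfg = {
- .name = "my_workq",
- .no_yield = false,
- };
- k_work_queue_init(&my_workq);
- k_work_queue_start(&my_workq, my_workq_stack,
- K_THREAD_STACK_SIZEOF(my_workq_stack),
- K_PRIO_COOP(4), &cfg);
- }
- #endif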
- int k_work_queue_drain(struct k_work_q *queue,
- bool plug)
- {
- __ASSERT_NO_MSG(queue);
- __ASSERT_NO_MSG(!k_is_in_isr());
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);
- int ret = 0;
- k_spinlock_key_t key = k_spin_lock(&lock);
- if (((flags_get(&queue->flags)
- & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
- || plug
- || !sys_slist_is_empty(&queue->pending)) {
- flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
- if (plug) {
- flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
- }
- notify_queue_locked(queue);
- ret = z_sched_wait(&lock, key, &queue->drainq,
- K_FOREVER, NULL);
- } else {
- k_spin_unlock(&lock, key);
- }
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);
- return ret;
- }
- int k_work_queue_unplug(struct k_work_q *queue)
- {
- __ASSERT_NO_MSG(queue);
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);
- int ret = -EALREADY;
- k_spinlock_key_t key = k_spin_lock(&lock);
- if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
- ret = 0;
- }
- k_spin_unlock(&lock, key);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);
- return ret;
- }
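- /* Illustrative sketch (not part of the original implementation):
- * draining a queue before suspend. Passing plug=true keeps the queue
- * rejecting new submissions after the drain completes, until
- * k_work_queue_unplug() is called.
- */
- #if 0 /* usage sketch */
- void quiesce_queue(struct k_work_q *queue)
- {
- /* returns once all previously submitted work has run; the plug
- * makes later submissions fail with -EBUSY
- */
- (void)k_work_queue_drain(queue, true);
- }
- void resume_queue(struct k_work_q *queue)
- {
- (void)k_work_queue_unplug(queue);
- }
- #endif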
- #ifdef CONFIG_SYS_CLOCK_EXISTS
- /* Timeout handler for delayable work.
- *
- * Invoked by timeout infrastructure.
- * Takes and releases work lock.
- * Conditionally reschedules.
- */
- static void work_timeout(struct _timeout *to)
- {
- struct k_work_delayable *dw
- = CONTAINER_OF(to, struct k_work_delayable, timeout);
- struct k_work *wp = &dw->work;
- k_spinlock_key_t key = k_spin_lock(&lock);
- struct k_work_q *queue = NULL;
- /* If the work is still marked delayed (should be) then clear that
- * state and submit it to the queue. If successful the queue will be
- * notified of new work at the next reschedule point.
- *
- * If not successful there is no notification that the work has been
- * abandoned. Sorry.
- */
- if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
- queue = dw->queue;
- (void)submit_to_queue_locked(wp, &queue);
- }
- k_spin_unlock(&lock, key);
- }
- void k_work_init_delayable(struct k_work_delayable *dwork,
- k_work_handler_t handler)
- {
- __ASSERT_NO_MSG(dwork != NULL);
- __ASSERT_NO_MSG(handler != NULL);
- *dwork = (struct k_work_delayable){
- .work = {
- .handler = handler,
- .flags = K_WORK_DELAYABLE,
- },
- };
- z_init_timeout(&dwork->timeout);
- SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
- }
- static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
- {
- return flags_get(&dwork->work.flags) & K_WORK_MASK;
- }
- int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
- {
- k_spinlock_key_t key = k_spin_lock(&lock);
- int ret = work_delayable_busy_get_locked(dwork);
- k_spin_unlock(&lock, key);
- return ret;
- }
- /* Attempt to schedule a work item for future (maybe immediate)
- * submission.
- *
- * Invoked with work lock held.
- *
- * See also submit_to_queue_locked(), which implements this for a no-wait
- * delay.
- *
- * @param queuep pointer to a pointer to a queue. On input this
- * should dereference to the proposed queue (which may be null); after
- * completion it will be null if the work was not submitted or if
- * submitted will reference the queue it was submitted to. That may
- * or may not be the queue provided on input.
- *
- * @param dwork the delayed work structure
- *
- * @param delay the delay to use before scheduling.
- *
- * @retval from submit_to_queue_locked() if delay is K_NO_WAIT; otherwise
- * @retval 1 to indicate successfully scheduled.
- */
- static int schedule_for_queue_locked(struct k_work_q **queuep,
- struct k_work_delayable *dwork,
- k_timeout_t delay)
- {
- int ret = 1;
- struct k_work *work = &dwork->work;
- if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
- return submit_to_queue_locked(work, queuep);
- }
- flag_set(&work->flags, K_WORK_DELAYED_BIT);
- dwork->queue = *queuep;
- /* Add timeout */
- z_add_timeout(&dwork->timeout, work_timeout, delay);
- return ret;
- }
- /* Unschedule delayable work.
- *
- * If the work is delayed, cancel the timeout and clear the delayed
- * flag.
- *
- * Invoked with work lock held.
- *
- * @param dwork pointer to delayable work structure.
- *
- * @return true if and only if work had been delayed so the timeout
- * was cancelled.
- */
- static inline bool unschedule_locked(struct k_work_delayable *dwork)
- {
- bool ret = false;
- struct k_work *work = &dwork->work;
- /* If scheduled, try to cancel. */
- if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
- z_abort_timeout(&dwork->timeout);
- ret = true;
- }
- return ret;
- }
- /* Full cancellation of a delayable work item.
- *
- * Unschedules the delayed part then delegates to standard work
- * cancellation.
- *
- * Invoked with work lock held.
- *
- * @param dwork delayable work item
- *
- * @return k_work_busy_get() flags
- */
- static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
- {
- (void)unschedule_locked(dwork);
- return cancel_async_locked(&dwork->work);
- }
- int k_work_schedule_for_queue(struct k_work_q *queue,
- struct k_work_delayable *dwork,
- k_timeout_t delay)
- {
- __ASSERT_NO_MSG(dwork != NULL);
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);
- struct k_work *work = &dwork->work;
- int ret = 0;
- k_spinlock_key_t key = k_spin_lock(&lock);
- /* Schedule the work item if it's idle or running. */
- if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
- ret = schedule_for_queue_locked(&queue, dwork, delay);
- }
- k_spin_unlock(&lock, key);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);
- return ret;
- }
- int k_work_schedule(struct k_work_delayable *dwork,
- k_timeout_t delay)
- {
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);
- int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);
- return ret;
- }
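- /* Illustrative sketch (not part of the original implementation):
- * k_work_schedule() starts a countdown only when the item is idle, so
- * repeated calls while a countdown is in flight do not push the
- * deadline out. timeout_work and its handler are placeholder names.
- */
- #if 0 /* usage sketch */
- static void timeout_handler(struct k_work *work)
- {
- /* runs on the system work queue after the delay elapses */
- }
- K_WORK_DELAYABLE_DEFINE(timeout_work, timeout_handler);
- void arm_timeout_once(void)
- {
- /* returns 0 (rather than 1) if a countdown is already pending */
- (void)k_work_schedule(&timeout_work, K_MSEC(100));
- }
- #endif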
- int k_work_reschedule_for_queue(struct k_work_q *queue,
- struct k_work_delayable *dwork,
- k_timeout_t delay)
- {
- __ASSERT_NO_MSG(dwork != NULL);
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);
- int ret = 0;
- k_spinlock_key_t key = k_spin_lock(&lock);
- /* Remove any active scheduling. */
- (void)unschedule_locked(dwork);
- /* Schedule the work item with the new parameters. */
- ret = schedule_for_queue_locked(&queue, dwork, delay);
- k_spin_unlock(&lock, key);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);
- return ret;
- }
- int k_work_reschedule(struct k_work_delayable *dwork,
- k_timeout_t delay)
- {
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);
- int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);
- return ret;
- }
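- /* Illustrative sketch (not part of the original implementation):
- * because k_work_reschedule() first unschedules any pending countdown,
- * it is the natural primitive for debouncing bursts of events into a
- * single deferred handler invocation.
- */
- #if 0 /* usage sketch */
- void on_button_edge(struct k_work_delayable *debounce_work)
- {
- /* each edge restarts the 50 ms countdown; the handler runs once,
- * 50 ms after the last edge
- */
- (void)k_work_reschedule(debounce_work, K_MSEC(50));
- }
- #endif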
- int k_work_cancel_delayable(struct k_work_delayable *dwork)
- {
- __ASSERT_NO_MSG(dwork != NULL);
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);
- k_spinlock_key_t key = k_spin_lock(&lock);
- int ret = cancel_delayable_async_locked(dwork);
- k_spin_unlock(&lock, key);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);
- return ret;
- }
- bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
- struct k_work_sync *sync)
- {
- __ASSERT_NO_MSG(dwork != NULL);
- __ASSERT_NO_MSG(sync != NULL);
- __ASSERT_NO_MSG(!k_is_in_isr());
- #ifdef CONFIG_KERNEL_COHERENCE
- __ASSERT_NO_MSG(arch_mem_coherent(sync));
- #endif
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);
- struct z_work_canceller *canceller = &sync->canceller;
- k_spinlock_key_t key = k_spin_lock(&lock);
- bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
- bool need_wait = false;
- if (pending) {
- (void)cancel_delayable_async_locked(dwork);
- need_wait = cancel_sync_locked(&dwork->work, canceller);
- }
- k_spin_unlock(&lock, key);
- if (need_wait) {
- k_sem_take(&canceller->sem, K_FOREVER);
- }
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);
- return pending;
- }
- bool k_work_flush_delayable(struct k_work_delayable *dwork,
- struct k_work_sync *sync)
- {
- __ASSERT_NO_MSG(dwork != NULL);
- __ASSERT_NO_MSG(sync != NULL);
- __ASSERT_NO_MSG(!k_is_in_isr());
- #ifdef CONFIG_KERNEL_COHERENCE
- __ASSERT_NO_MSG(arch_mem_coherent(sync));
- #endif
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);
- struct k_work *work = &dwork->work;
- struct z_work_flusher *flusher = &sync->flusher;
- k_spinlock_key_t key = k_spin_lock(&lock);
- /* If it's idle release the lock and return immediately. */
- if (work_busy_get_locked(work) == 0U) {
- k_spin_unlock(&lock, key);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);
- return false;
- }
- /* If unscheduling did something then submit it. Ignore a
- * failed submission (e.g. when cancelling).
- */
- if (unschedule_locked(dwork)) {
- struct k_work_q *queue = dwork->queue;
- (void)submit_to_queue_locked(work, &queue);
- }
- /* Wait for it to finish */
- bool need_flush = work_flush_locked(work, flusher);
- k_spin_unlock(&lock, key);
- /* If necessary wait until the flusher item completes */
- if (need_flush) {
- k_sem_take(&flusher->sem, K_FOREVER);
- }
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);
- return need_flush;
- }
- int k_delayed_work_cancel(struct k_delayed_work *work)
- {
- bool pending = k_work_delayable_is_pending(&work->work);
- int rc = k_work_cancel_delayable(&work->work);
- /* Old return value rules:
- *
- * 0 if:
- * * Work item countdown cancelled before the item was submitted to
- * its queue; or
- * * Work item was removed from its queue before it was processed.
- *
- * -EINVAL if:
- * * Work item has never been submitted; or
- * * Work item has been successfully cancelled; or
- * * Timeout handler is in the process of submitting the work item to
- * its queue; or
- * * Work queue thread has removed the work item from the queue but
- * has not called its handler.
- *
- * -EALREADY if:
- * * Work queue thread has removed the work item from the queue and
- * cleared its pending flag; or
- * * Work queue thread is invoking the item handler; or
- * * Work item handler has completed.
- *
- * We can't reconstruct those states, so call it successful only when
- * a pending item is no longer pending, -EINVAL if it was pending and
- * still is, and -EALREADY if it wasn't pending (so
- * presumably cancellation should have had no effect, assuming we
- * didn't hit a race condition).
- */
- if (pending) {
- return (rc == 0) ? 0 : -EINVAL;
- }
- return -EALREADY;
- }
- void k_delayed_work_init(struct k_delayed_work *work,
- k_work_handler_t handler)
- {
- k_work_init_delayable(&work->work, handler);
- }
- int k_delayed_work_submit(struct k_delayed_work *work,
- k_timeout_t delay)
- {
- int rc = k_work_reschedule(&work->work, delay);
- /* Legacy API doesn't distinguish success cases. */
- return (rc >= 0) ? 0 : rc;
- }
- int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
- struct k_delayed_work *work,
- k_timeout_t delay)
- {
- int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);
- /* Legacy API doesn't distinguish success cases. */
- return (rc >= 0) ? 0 : rc;
- }
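- /* Illustrative sketch (not part of the original implementation): the
- * legacy wrappers above map directly onto the delayable API, so
- * migration is mostly mechanical; only the return-value conventions
- * differ.
- */
- #if 0 /* usage sketch */
- void legacy_vs_new(struct k_delayed_work *old_item,
- struct k_work_delayable *new_item)
- {
- (void)k_delayed_work_submit(old_item, K_MSEC(10)); /* 0 on success */
- (void)k_work_reschedule(new_item, K_MSEC(10)); /* 0, 1, or 2 on success */
- }
- #endif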
- #endif /* CONFIG_SYS_CLOCK_EXISTS */