#include <kernel.h>
#include <kernel_structs.h>
#include <wait_q.h>
#include <spinlock.h>
#include <errno.h>
#include <ksched.h>
#include <sys/printk.h>

/* Bit-manipulation helpers for the work and queue flag words. Callers
 * are expected to hold the module spinlock while mutating shared flags.
 */
static inline void flag_clear(uint32_t *flagp,
			      uint32_t bit)
{
	*flagp &= ~BIT(bit);
}

static inline void flag_set(uint32_t *flagp,
			    uint32_t bit)
{
	*flagp |= BIT(bit);
}

static inline bool flag_test(const uint32_t *flagp,
			     uint32_t bit)
{
	return (*flagp & BIT(bit)) != 0U;
}

static inline bool flag_test_and_clear(uint32_t *flagp,
				       uint32_t bit)
{
	bool ret = flag_test(flagp, bit);

	flag_clear(flagp, bit);

	return ret;
}

static inline void flags_set(uint32_t *flagp,
			     uint32_t flags)
{
	*flagp = flags;
}

static inline uint32_t flags_get(const uint32_t *flagp)
{
	return *flagp;
}

/* Lock protecting the internal state of all work items, work queues,
 * and the pending_cancels list.
 */
static struct k_spinlock lock;

/* Invoked by the work queue thread when a flusher work item is
 * processed: releases the thread waiting on the flusher's semaphore.
 */
static void handle_flush(struct k_work *work)
{
	struct z_work_flusher *flusher
		= CONTAINER_OF(work, struct z_work_flusher, work);

	k_sem_give(&flusher->sem);
}

static inline void init_flusher(struct z_work_flusher *flusher)
{
	k_sem_init(&flusher->sem, 0, 1);
	k_work_init(&flusher->work, handle_flush);
}

/* List of pending synchronous cancellations. */
static sys_slist_t pending_cancels;

/* Initialize a canceler record and add it to the list of pending
 * cancels. Invoked with work lock held.
 */
static inline void init_work_cancel(struct z_work_canceller *canceler,
				    struct k_work *work)
{
	k_sem_init(&canceler->sem, 0, 1);
	canceler->work = work;
	sys_slist_append(&pending_cancels, &canceler->node);
}

/* Complete cancellation of a work item: clear the CANCELING flag, then
 * remove every registered canceller that matches this item and release
 * its waiter. Invoked with work lock held.
 */
static void finalize_cancel_locked(struct k_work *work)
{
	struct z_work_canceller *wc, *tmp;
	sys_snode_t *prev = NULL;

	/* Clear this first, so released threads don't observe the item
	 * still marked as canceling.
	 */
	flag_clear(&work->flags, K_WORK_CANCELING_BIT);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
		if (wc->work == work) {
			sys_slist_remove(&pending_cancels, prev, &wc->node);
			k_sem_give(&wc->sem);
		} else {
			prev = &wc->node;
		}
	}
}

void k_work_init(struct k_work *work,
		 k_work_handler_t handler)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*work = (struct k_work)Z_WORK_INITIALIZER(handler);

	SYS_PORT_TRACING_OBJ_INIT(k_work, work);
}

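/* Usage sketch (illustrative only; my_dev and my_handler are
 * hypothetical names, not part of this file): a work item is normally
 * embedded in a larger context object recovered via CONTAINER_OF.
 *
 *	struct my_dev {
 *		struct k_work work;
 *		int counter;
 *	};
 *
 *	static void my_handler(struct k_work *item)
 *	{
 *		struct my_dev *dev = CONTAINER_OF(item, struct my_dev, work);
 *
 *		dev->counter++;
 *	}
 *
 *	k_work_init(&dev->work, my_handler);
 */
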
static inline int work_busy_get_locked(const struct k_work *work)
{
	return flags_get(&work->flags) & K_WORK_MASK;
}

int k_work_busy_get(const struct k_work *work)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_busy_get_locked(work);

	k_spin_unlock(&lock, key);

	return ret;
}

/* Add a flusher work item to the queue.
 *
 * If the work item being flushed is still pending, the flusher is
 * inserted immediately after it; otherwise it is prepended so it runs
 * as soon as whatever is currently executing completes.
 *
 * Invoked with work lock held.
 */
static void queue_flusher_locked(struct k_work_q *queue,
				 struct k_work *work,
				 struct z_work_flusher *flusher)
{
	bool in_list = false;
	struct k_work *wn;

	/* Determine whether the work item is still in the pending list. */
	SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
		if (wn == work) {
			in_list = true;
			break;
		}
	}

	init_flusher(flusher);
	if (in_list) {
		sys_slist_insert(&queue->pending, &work->node,
				 &flusher->work.node);
	} else {
		sys_slist_prepend(&queue->pending, &flusher->work.node);
	}
}

/* Remove a work item from the queue's pending list, if it is queued
 * there. Invoked with work lock held.
 */
static inline void queue_remove_locked(struct k_work_q *queue,
				       struct k_work *work)
{
	if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
		(void)sys_slist_find_and_remove(&queue->pending, &work->node);
	}
}

/* Wake the work queue thread if it may be sleeping. Returns true if a
 * thread was awakened. Invoked with work lock held.
 */
static inline bool notify_queue_locked(struct k_work_q *queue)
{
	bool rv = false;

	if (queue != NULL) {
		rv = z_sched_wake(&queue->notifyq, 0, NULL);
	}

	return rv;
}

/* Append a work item to a queue's pending list and notify the queue
 * thread.
 *
 * Returns 1 on success, -EINVAL if the queue is null, -ENODEV if the
 * queue has not been started, or -EBUSY if the queue is draining (and
 * this is not a chained submission from the queue thread itself) or
 * has been plugged.
 *
 * Invoked with work lock held.
 */
static inline int queue_submit_locked(struct k_work_q *queue,
				      struct k_work *work)
{
	if (queue == NULL) {
		return -EINVAL;
	}

	int ret = -EBUSY;
	bool chained = (_current == &queue->thread) && !k_is_in_isr();
	bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
	bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

	if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
		ret = -ENODEV;
	} else if (draining && !chained) {
		ret = -EBUSY;
	} else if (plugged && !draining) {
		ret = -EBUSY;
	} else {
		sys_slist_append(&queue->pending, &work->node);
		ret = 1;
		(void)notify_queue_locked(queue);
	}

	return ret;
}

/* Submit a work item to a queue, resolving the effective target queue.
 *
 * Returns 1 if the item was newly queued, 2 if it was requeued to the
 * queue on which it is already running, 0 if it was already queued, or
 * a negative error from queue_submit_locked().
 *
 * Invoked with work lock held.
 */
static int submit_to_queue_locked(struct k_work *work,
				  struct k_work_q **queuep)
{
	int ret = 0;

	if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Disallowed while cancellation is in progress. */
		ret = -EBUSY;
	} else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
		/* Not currently queued. */
		ret = 1;

		/* If no queue was specified, resubmit to the last queue. */
		if (*queuep == NULL) {
			*queuep = work->queue;
		}

		/* If the work is currently running we have to use the
		 * queue it's running on to prevent handler re-entrancy.
		 */
		if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
			__ASSERT_NO_MSG(work->queue != NULL);
			*queuep = work->queue;
			ret = 2;
		}

		int rc = queue_submit_locked(*queuep, work);

		if (rc < 0) {
			ret = rc;
		} else {
			flag_set(&work->flags, K_WORK_QUEUED_BIT);
			work->queue = *queuep;
		}
	} else {
		/* Already queued; nothing to do. */
	}

	if (ret <= 0) {
		*queuep = NULL;
	}

	return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
			   struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);

	k_spinlock_key_t key = k_spin_lock(&lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

	int ret = submit_to_queue_locked(work, &queue);

	k_spin_unlock(&lock, key);

	/* A positive return means the queue contents changed and its
	 * thread may now be ready, but the reschedule point was missed
	 * while the lock was held; yield if we are preemptible.
	 */
	if ((ret > 0) && (k_is_preempt_thread() != 0)) {
		k_yield();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);

	return ret;
}

int k_work_submit(struct k_work *work)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);

	int ret = k_work_submit_to_queue(&k_sys_work_q, work);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);

	return ret;
}

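/* Usage sketch (dev is a hypothetical name): submission is ISR-safe.
 * Per submit_to_queue_locked() above, 0 means the item was already
 * queued, 1 means newly queued, 2 means requeued to the queue it is
 * running on, and negative values are errors (e.g. -EBUSY while a
 * cancellation is in progress).
 *
 *	int rc = k_work_submit(&dev->work);
 *
 *	if (rc < 0) {
 *		// handle rejected submission
 *	}
 */
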
/* Flush the work item if necessary.
 *
 * Flushing is necessary only if the work is either queued or running;
 * in that case a flusher is enlisted on the queue and the caller must
 * wait on the flusher's semaphore after releasing the lock.
 *
 * Returns true if the caller must wait. Invoked with work lock held.
 */
static bool work_flush_locked(struct k_work *work,
			      struct z_work_flusher *flusher)
{
	bool need_flush = (flags_get(&work->flags)
			   & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;

	if (need_flush) {
		struct k_work_q *queue = work->queue;

		__ASSERT_NO_MSG(queue != NULL);

		queue_flusher_locked(queue, work, flusher);
		notify_queue_locked(queue);
	}

	return need_flush;
}

bool k_work_flush(struct k_work *work,
		  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
	__ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);

	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary, wait until the flusher item completes. */
	if (need_flush) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);

		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);

	return need_flush;
}

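/* Usage sketch (dev is a hypothetical name): flushing needs a
 * k_work_sync scratch area and must run in thread context, per the
 * assertions above.
 *
 *	struct k_work_sync sync;
 *
 *	if (k_work_flush(&dev->work, &sync)) {
 *		// we blocked until the handler finished
 *	}
 */
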
/* Initiate cancellation of a pending or running work item.
 *
 * The item is removed from its queue if still pending. If it remains
 * busy afterwards (e.g. its handler is running), the CANCELING flag is
 * set so completion can be detected.
 *
 * Returns the busy flags still in effect. Invoked with work lock held.
 */
static int cancel_async_locked(struct k_work *work)
{
	/* If we haven't already started canceling, do it now. */
	if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Remove it from the queue, if it's queued. */
		queue_remove_locked(work->queue, work);
	}

	/* If it's still busy after being dequeued, flag it as canceling. */
	int ret = work_busy_get_locked(work);

	if (ret != 0) {
		flag_set(&work->flags, K_WORK_CANCELING_BIT);
		ret = work_busy_get_locked(work);
	}

	return ret;
}

/* If cancellation is in progress, register the canceller so the caller
 * can wait for it to finish.
 *
 * Returns true if the caller must wait on the canceller's semaphore.
 * Invoked with work lock held.
 */
static bool cancel_sync_locked(struct k_work *work,
			       struct z_work_canceller *canceller)
{
	bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);

	if (ret) {
		init_work_cancel(canceller, work);
	}

	return ret;
}

int k_work_cancel(struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_async_locked(work);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);

	return ret;
}

bool k_work_cancel_sync(struct k_work *work,
			struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_busy_get_locked(work) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_async_locked(work);
		need_wait = cancel_sync_locked(work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);

		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);

	return pending;
}

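/* Usage sketch (dev is a hypothetical name): unlike k_work_cancel(),
 * the sync variant blocks until any in-flight handler returns, so the
 * item is guaranteed idle afterwards.
 *
 *	struct k_work_sync sync;
 *	bool was_busy = k_work_cancel_sync(&dev->work, &sync);
 */
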
/* Loop executed by a work queue thread.
 *
 * @param workq_ptr pointer to the work queue structure
 */
static void work_queue_main(void *workq_ptr, void *p2, void *p3)
{
	struct k_work_q *queue = (struct k_work_q *)workq_ptr;
	uint32_t start_time, stop_time, cost_time;

	while (true) {
		sys_snode_t *node;
		struct k_work *work = NULL;
		k_work_handler_t handler = NULL;
		k_spinlock_key_t key = k_spin_lock(&lock);

		/* Check for and prepare any new work. */
		node = sys_slist_get(&queue->pending);
		if (node != NULL) {
			/* Mark that there's some work active that's
			 * not on the pending list.
			 */
			flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			work = CONTAINER_OF(node, struct k_work, node);
			flag_set(&work->flags, K_WORK_RUNNING_BIT);
			flag_clear(&work->flags, K_WORK_QUEUED_BIT);

			handler = work->handler;
		} else if (flag_test_and_clear(&queue->flags,
					       K_WORK_QUEUE_DRAIN_BIT)) {
			/* Not busy and draining: move threads waiting
			 * for drain to the ready state.
			 */
			(void)z_sched_wake_all(&queue->drainq, 1, NULL);
		} else {
			/* No work is available and no queue state
			 * requires special handling.
			 */
			;
		}

		if (work == NULL) {
			/* Nothing to do. Go to sleep; z_sched_wait()
			 * releases the lock, and the thread is woken
			 * when new work is submitted.
			 */
			(void)z_sched_wait(&lock, key, &queue->notifyq,
					   K_FOREVER, NULL);
			continue;
		}

		k_spin_unlock(&lock, key);

		if (work != NULL) {
			bool yield;

			__ASSERT_NO_MSG(handler != NULL);
			start_time = k_cycle_get_32();
			handler(work);
			stop_time = k_cycle_get_32();
			cost_time = k_cyc_to_us_floor32(stop_time - start_time);
			/* Flag handlers that monopolize a cooperative-priority
			 * queue thread for more than 10 ms.
			 */
			if ((k_thread_priority_get(&queue->thread) < 0) && (cost_time > 10000)) {
#if defined(CONFIG_THREAD_NAME)
				printk("work_q %s %p handler %p ran %u us!!!\n",
				       queue->thread.name, queue,
				       (void *)handler, cost_time);
#else
				printk("work_q %p handler %p ran %u us!!!\n",
				       queue, (void *)handler, cost_time);
#endif
			}

			/* Mark the work item as no longer running and deal
			 * with any cancellation issued while it ran.
			 */
			key = k_spin_lock(&lock);

			flag_clear(&work->flags, K_WORK_RUNNING_BIT);
			if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
				finalize_cancel_locked(work);
			}

			flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
			k_spin_unlock(&lock, key);

			/* Optionally yield to prevent the work queue from
			 * starving other threads.
			 */
			if (yield) {
				k_yield();
			}
		}
	}
}

void k_work_queue_init(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue != NULL);

	*queue = (struct k_work_q) {
		.flags = 0,
	};

	SYS_PORT_TRACING_OBJ_INIT(k_work_queue, queue);
}

void k_work_queue_start(struct k_work_q *queue,
			k_thread_stack_t *stack,
			size_t stack_size,
			int prio,
			const struct k_work_queue_config *cfg)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(stack);
	__ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));

	uint32_t flags = K_WORK_QUEUE_STARTED;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);

	sys_slist_init(&queue->pending);
	z_waitq_init(&queue->notifyq);
	z_waitq_init(&queue->drainq);

	if ((cfg != NULL) && cfg->no_yield) {
		flags |= K_WORK_QUEUE_NO_YIELD;
	}

	/* The thread hasn't actually been started yet, but all the state
	 * is in place, so items may be submitted and the thread is ready
	 * to roll once it gets control.
	 */
	flags_set(&queue->flags, flags);

	(void)k_thread_create(&queue->thread, stack, stack_size,
			      work_queue_main, queue, NULL, NULL,
			      prio, 0, K_FOREVER);

	if ((cfg != NULL) && (cfg->name != NULL)) {
		k_thread_name_set(&queue->thread, cfg->name);
	}

	k_thread_start(&queue->thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
}

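/* Usage sketch (my_stack and my_q are hypothetical names): a dedicated
 * queue needs statically defined stack space for its thread.
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_work_q my_q;
 *
 *	k_work_queue_init(&my_q);
 *	k_work_queue_start(&my_q, my_stack,
 *			   K_THREAD_STACK_SIZEOF(my_stack),
 *			   K_PRIO_PREEMPT(4), NULL);
 */
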
int k_work_queue_drain(struct k_work_q *queue,
		       bool plug)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(!k_is_in_isr());

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Waiting is required if the queue is busy or has pending items;
	 * plugging always enters the drain state so that subsequent
	 * submissions are rejected.
	 */
	if (((flags_get(&queue->flags)
	      & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
	    || plug
	    || !sys_slist_is_empty(&queue->pending)) {
		flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
		if (plug) {
			flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
		}

		notify_queue_locked(queue);
		ret = z_sched_wait(&lock, key, &queue->drainq,
				   K_FOREVER, NULL);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);

	return ret;
}

int k_work_queue_unplug(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);

	int ret = -EALREADY;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
		ret = 0;
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);

	return ret;
}

#ifdef CONFIG_SYS_CLOCK_EXISTS

/* Timeout handler for delayable work.
 *
 * Invoked in timeout (interrupt) context when a work item's delay
 * expires; submits the underlying work item to its recorded queue.
 */
static void work_timeout(struct _timeout *to)
{
	struct k_work_delayable *dw
		= CONTAINER_OF(to, struct k_work_delayable, timeout);
	struct k_work *wp = &dw->work;
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_work_q *queue = NULL;

	/* If the work is still marked delayed (it should be), clear that
	 * state and submit it to the queue. If it is being cancelled,
	 * the cancellation completes elsewhere.
	 */
	if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
		queue = dw->queue;
		(void)submit_to_queue_locked(wp, &queue);
	}

	k_spin_unlock(&lock, key);
}

void k_work_init_delayable(struct k_work_delayable *dwork,
			   k_work_handler_t handler)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*dwork = (struct k_work_delayable){
		.work = {
			.handler = handler,
			.flags = K_WORK_DELAYABLE,
		},
	};
	z_init_timeout(&dwork->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
}

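/* Usage sketch (my_dev, dwork, and my_expiry are hypothetical names):
 * a delayable item must be initialized here, not via k_work_init();
 * the handler can recover the containing object through the upstream
 * k_work_delayable_from_work() helper.
 *
 *	static void my_expiry(struct k_work *item)
 *	{
 *		struct k_work_delayable *dw =
 *			k_work_delayable_from_work(item);
 *		struct my_dev *dev =
 *			CONTAINER_OF(dw, struct my_dev, dwork);
 *		...
 *	}
 *
 *	k_work_init_delayable(&dev->dwork, my_expiry);
 */
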
static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
{
	/* Use the same accessor as work_busy_get_locked(): the flags word
	 * is a plain uint32_t protected by the module spinlock, not an
	 * atomic_t, so flags_get() rather than atomic_get() applies here.
	 */
	return flags_get(&dwork->work.flags) & K_WORK_MASK;
}

int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_delayable_busy_get_locked(dwork);

	k_spin_unlock(&lock, key);

	return ret;
}

/* Attempt to schedule a work item for future (maybe immediate)
 * submission.
 *
 * Returns 1 if the timeout was started, or the result of an immediate
 * submission when the delay is K_NO_WAIT. Invoked with work lock held.
 */
static int schedule_for_queue_locked(struct k_work_q **queuep,
				     struct k_work_delayable *dwork,
				     k_timeout_t delay)
{
	int ret = 1;
	struct k_work *work = &dwork->work;

	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		return submit_to_queue_locked(work, queuep);
	}

	flag_set(&work->flags, K_WORK_DELAYED_BIT);
	dwork->queue = *queuep;

	/* Add the timeout. */
	z_add_timeout(&dwork->timeout, work_timeout, delay);

	return ret;
}

/* Unschedule delayable work if it is currently scheduled.
 *
 * Returns true if the timeout was aborted, in which case the caller may
 * need to resubmit the item or finish a cancellation. Invoked with work
 * lock held.
 */
static inline bool unschedule_locked(struct k_work_delayable *dwork)
{
	bool ret = false;
	struct k_work *work = &dwork->work;

	/* If scheduled, abort the timeout. */
	if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
		z_abort_timeout(&dwork->timeout);
		ret = true;
	}

	return ret;
}

/* Full cancellation of a delayable work item: unschedule any pending
 * timeout, then cancel the underlying work item. Invoked with work lock
 * held.
 */
static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
{
	(void)unschedule_locked(dwork);

	return cancel_async_locked(&dwork->work);
}

int k_work_schedule_for_queue(struct k_work_q *queue,
			      struct k_work_delayable *dwork,
			      k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);

	struct k_work *work = &dwork->work;
	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Schedule the work item only if it is idle or running. */
	if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
		ret = schedule_for_queue_locked(&queue, dwork, delay);
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_schedule(struct k_work_delayable *dwork,
		    k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);

	int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);

	return ret;
}

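/* Usage sketch: schedule vs. reschedule. Per the busy check above,
 * k_work_schedule() returns 0 without effect if the item is already
 * scheduled or queued, while k_work_reschedule() (below) replaces any
 * pending delay. The debounce example and dev name are hypothetical.
 *
 *	// arm only if no countdown is already running
 *	k_work_schedule(&dev->dwork, K_MSEC(50));
 *
 *	// restart the countdown unconditionally
 *	k_work_reschedule(&dev->dwork, K_MSEC(50));
 */
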
int k_work_reschedule_for_queue(struct k_work_q *queue,
				struct k_work_delayable *dwork,
				k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Remove any active scheduling. */
	(void)unschedule_locked(dwork);

	/* Schedule the work item with the new parameters. */
	ret = schedule_for_queue_locked(&queue, dwork, delay);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_reschedule(struct k_work_delayable *dwork,
		      k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);

	int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);

	return ret;
}

int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_delayable_async_locked(dwork);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);

	return ret;
}

bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
				  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_delayable_async_locked(dwork);
		need_wait = cancel_sync_locked(&dwork->work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);

	return pending;
}

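/* Usage sketch (dev is a hypothetical name): the common teardown
 * pattern is a synchronous cancel so the handler cannot run against
 * freed state.
 *
 *	struct k_work_sync sync;
 *
 *	(void)k_work_cancel_delayable_sync(&dev->dwork, &sync);
 *	// dev resources may now be released safely
 */
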
bool k_work_flush_delayable(struct k_work_delayable *dwork,
			    struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);

	struct k_work *work = &dwork->work;
	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* If the item is idle, release the lock and return immediately. */
	if (work_busy_get_locked(work) == 0U) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);

		return false;
	}

	/* If unscheduling did something, submit the item now so it can be
	 * flushed. A failed submission (e.g. while canceling) is ignored.
	 */
	if (unschedule_locked(dwork)) {
		struct k_work_q *queue = dwork->queue;

		(void)submit_to_queue_locked(work, &queue);
	}

	/* Wait for the handler to finish, if necessary. */
	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	if (need_flush) {
		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);

	return need_flush;
}

/* Legacy k_delayed_work API, implemented as thin wrappers around the
 * k_work_delayable functions above.
 */
int k_delayed_work_cancel(struct k_delayed_work *work)
{
	bool pending = k_work_delayable_is_pending(&work->work);
	int rc = k_work_cancel_delayable(&work->work);

	/* Map the new return codes onto the legacy semantics: success
	 * only if the item was pending and is now fully cancelled.
	 */
	if (pending) {
		return (rc == 0) ? 0 : -EINVAL;
	}

	return -EALREADY;
}

void k_delayed_work_init(struct k_delayed_work *work,
			 k_work_handler_t handler)
{
	k_work_init_delayable(&work->work, handler);
}

int k_delayed_work_submit(struct k_delayed_work *work,
			  k_timeout_t delay)
{
	int rc = k_work_reschedule(&work->work, delay);

	/* The legacy API returns 0 on success; positive codes collapse. */
	return (rc >= 0) ? 0 : rc;
}

int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   k_timeout_t delay)
{
	int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);

	return (rc >= 0) ? 0 : rc;
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */