#include <kernel.h>
#include <kernel_structs.h>
#include <wait_q.h>
#include <spinlock.h>
#include <errno.h>
#include <ksched.h>
#include <sys/printk.h>

/* Bit-level helpers for the flags word in work items and work queues.
 * All mutation is done while holding the global work lock.
 */
static inline void flag_clear(uint32_t *flagp,
			      uint32_t bit)
{
	*flagp &= ~BIT(bit);
}

static inline void flag_set(uint32_t *flagp,
			    uint32_t bit)
{
	*flagp |= BIT(bit);
}

static inline bool flag_test(const uint32_t *flagp,
			     uint32_t bit)
{
	return (*flagp & BIT(bit)) != 0U;
}

static inline bool flag_test_and_clear(uint32_t *flagp,
				       uint32_t bit)
{
	bool ret = flag_test(flagp, bit);

	flag_clear(flagp, bit);

	return ret;
}

static inline void flags_set(uint32_t *flagp,
			     uint32_t flags)
{
	*flagp = flags;
}

static inline uint32_t flags_get(const uint32_t *flagp)
{
	return *flagp;
}

/* Lock protecting the state of all work items, work queues, and the
 * pending-cancellation list.
 */
static struct k_spinlock lock;

/* Handler for the internal flusher work item; runs on the work queue
 * thread and releases whoever is waiting for the flush.
 */
static void handle_flush(struct k_work *work)
{
	struct z_work_flusher *flusher
		= CONTAINER_OF(work, struct z_work_flusher, work);

	k_sem_give(&flusher->sem);
}

static inline void init_flusher(struct z_work_flusher *flusher)
{
	k_sem_init(&flusher->sem, 0, 1);
	k_work_init(&flusher->work, handle_flush);
}

/* List of pending cancellations. */
static sys_slist_t pending_cancels;

/* Initialize a canceller record and add it to the list of pending
 * cancels.  Invoked with work lock held.
 */
static inline void init_work_cancel(struct z_work_canceller *canceler,
				    struct k_work *work)
{
	k_sem_init(&canceler->sem, 0, 1);
	canceler->work = work;
	sys_slist_append(&pending_cancels, &canceler->node);
}

/* Complete cancellation of a work item: clear the canceling state and
 * release every waiter registered for this item.  Invoked from a work
 * queue thread with the work lock held.
 */
static void finalize_cancel_locked(struct k_work *work)
{
	struct z_work_canceller *wc, *tmp;
	sys_snode_t *prev = NULL;

	/* Clear this first, so released higher-priority threads see
	 * the item as no longer canceling.
	 */
	flag_clear(&work->flags, K_WORK_CANCELING_BIT);

	/* Remove all matching waiters and release them; the same item
	 * can appear more than once if multiple threads cancel it.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
		if (wc->work == work) {
			sys_slist_remove(&pending_cancels, prev, &wc->node);
			k_sem_give(&wc->sem);
		} else {
			prev = &wc->node;
		}
	}
}

void k_work_init(struct k_work *work,
		 k_work_handler_t handler)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*work = (struct k_work)Z_WORK_INITIALIZER(handler);

	SYS_PORT_TRACING_OBJ_INIT(k_work, work);
}
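
/* A minimal usage sketch for k_work_init() (hypothetical application
 * code; print_handler and print_work are illustrative names, not part
 * of this file):
 *
 *	static void print_handler(struct k_work *work)
 *	{
 *		printk("work item %p ran\n", work);
 *	}
 *
 *	static struct k_work print_work;
 *
 *	void app_init(void)
 *	{
 *		k_work_init(&print_work, print_handler);
 *	}
 *
 * A statically allocated item can instead be initialized at build time
 * with the K_WORK_DEFINE() macro.
 */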

/* Busy flags for the work item, captured while the lock is held. */
static inline int work_busy_get_locked(const struct k_work *work)
{
	return flags_get(&work->flags) & K_WORK_MASK;
}

int k_work_busy_get(const struct k_work *work)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_busy_get_locked(work);

	k_spin_unlock(&lock, key);

	return ret;
}
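
/* Usage sketch (hypothetical caller): the return value is a bit mask,
 * so individual states can be tested directly:
 *
 *	int busy = k_work_busy_get(&print_work);
 *
 *	if (busy & K_WORK_QUEUED) {
 *		// still on a queue's pending list
 *	}
 *	if (busy & (K_WORK_RUNNING | K_WORK_CANCELING)) {
 *		// handler active, or a cancellation is in progress
 *	}
 *
 * A zero result means the item is idle.  Note the snapshot may be
 * stale as soon as the lock is released.
 */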

/* Add a flusher work item to the queue.  Invoked with work lock held;
 * the caller must notify the queue of pending work.
 */
static void queue_flusher_locked(struct k_work_q *queue,
				 struct k_work *work,
				 struct z_work_flusher *flusher)
{
	bool in_list = false;
	struct k_work *wn;

	/* Determine whether the work item is still queued. */
	SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
		if (wn == work) {
			in_list = true;
			break;
		}
	}

	init_flusher(flusher);
	if (in_list) {
		/* Queued: the flusher goes right after the item, so it
		 * runs as soon as the item's handler returns.
		 */
		sys_slist_insert(&queue->pending, &work->node,
				 &flusher->work.node);
	} else {
		/* Running: the flusher goes to the front, so it runs
		 * right after the current handler completes.
		 */
		sys_slist_prepend(&queue->pending, &flusher->work.node);
	}
}

/* Try to remove a work item from the given queue.  Invoked with work
 * lock held.
 */
static inline void queue_remove_locked(struct k_work_q *queue,
				       struct k_work *work)
{
	if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
		(void)sys_slist_find_and_remove(&queue->pending, &work->node);
	}
}

/* Potentially notify a queue that it needs to look for pending work.
 * This may ready the queue thread, but because the lock is held it is
 * not a reschedule point; callers should yield after releasing the
 * lock where appropriate.  Returns true if and only if the queue was
 * woken.  A null @p queue requires no notification.
 */
static inline bool notify_queue_locked(struct k_work_q *queue)
{
	bool rv = false;

	if (queue != NULL) {
		rv = z_sched_wake(&queue->notifyq, 0, NULL);
	}

	return rv;
}

/* Submit an unqueued work item to a queue.  Invoked with work lock
 * held.
 *
 * @retval 1 if the work was enqueued
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue has not been started
 * @retval -EBUSY if the queue is draining (and the submission is not
 * chained from the queue's own thread) or is plugged
 */
static inline int queue_submit_locked(struct k_work_q *queue,
				      struct k_work *work)
{
	if (queue == NULL) {
		return -EINVAL;
	}

	int ret = -EBUSY;
	bool chained = (_current == &queue->thread) && !k_is_in_isr();
	bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
	bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

	/* Test for acceptability, in priority order. */
	if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
		ret = -ENODEV;
	} else if (draining && !chained) {
		ret = -EBUSY;
	} else if (plugged && !draining) {
		ret = -EBUSY;
	} else {
		sys_slist_append(&queue->pending, &work->node);
		ret = 1;
		(void)notify_queue_locked(queue);
	}

	return ret;
}

/* Submit a work item to a queue, resolving which queue to use.
 * Invoked with work lock held.
 *
 * On input *queuep is the proposed queue (may be null to reuse the
 * item's last queue); on return it names the queue actually used, or
 * is null if the work was not newly submitted.
 *
 * @retval 0 if the work was already queued
 * @retval 1 if the work was idle and has been queued
 * @retval 2 if the work was running and has been re-queued to the
 * queue that is running it
 * @retval negative errno propagated from queue_submit_locked()
 */
static int submit_to_queue_locked(struct k_work *work,
				  struct k_work_q **queuep)
{
	int ret = 0;

	if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Disallowed while cancellation is in progress. */
		ret = -EBUSY;
	} else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
		/* Not currently queued */
		ret = 1;

		/* If no queue specified, resubmit to the last queue. */
		if (*queuep == NULL) {
			*queuep = work->queue;
		}

		/* If the work is currently running we have to use the
		 * queue it's running on, to prevent handler
		 * re-entrancy.
		 */
		if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
			__ASSERT_NO_MSG(work->queue != NULL);
			*queuep = work->queue;
			ret = 2;
		}

		int rc = queue_submit_locked(*queuep, work);

		if (rc < 0) {
			ret = rc;
		} else {
			flag_set(&work->flags, K_WORK_QUEUED_BIT);
			work->queue = *queuep;
		}
	} else {
		/* Already queued, do nothing. */
	}

	if (ret <= 0) {
		*queuep = NULL;
	}

	return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
			   struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);

	k_spinlock_key_t key = k_spin_lock(&lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

	int ret = submit_to_queue_locked(work, &queue);

	k_spin_unlock(&lock, key);

	/* If we changed the queue contents (positive ret) the queue
	 * thread may now be ready, but we missed the reschedule point
	 * because the lock was held.  If invoked from a preemptible
	 * thread, yield now.
	 */
	if ((ret > 0) && (k_is_preempt_thread() != 0)) {
		k_yield();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);

	return ret;
}

int k_work_submit(struct k_work *work)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);

	int ret = k_work_submit_to_queue(&k_sys_work_q, work);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);

	return ret;
}
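
/* A minimal submission sketch (hypothetical application code, reusing
 * the illustrative print_work item from above).  Submission is
 * ISR-safe:
 *
 *	void some_isr(const void *arg)
 *	{
 *		// Defer the real processing to the system work queue.
 *		k_work_submit(&print_work);
 *	}
 *
 * A return of 0 or 1 means the item will run (or was already pending);
 * 2 means it was running and has been re-queued; negative values are
 * errors such as -EBUSY while a cancellation is in progress.
 */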

/* Flush the work item if necessary.  Flushing is needed only if the
 * work is either queued or running.  Invoked with work lock held.
 * Returns true if and only if the caller must wait on the flusher's
 * semaphore once the lock is released.
 */
static bool work_flush_locked(struct k_work *work,
			      struct z_work_flusher *flusher)
{
	bool need_flush = (flags_get(&work->flags)
			   & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;

	if (need_flush) {
		struct k_work_q *queue = work->queue;

		__ASSERT_NO_MSG(queue != NULL);

		queue_flusher_locked(queue, work, flusher);
		notify_queue_locked(queue);
	}

	return need_flush;
}

bool k_work_flush(struct k_work *work,
		  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
	__ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);

	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary, wait until the flusher item completes. */
	if (need_flush) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);

		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);

	return need_flush;
}
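
/* Usage sketch for k_work_flush() (hypothetical caller; must run in a
 * thread, not an ISR).  The sync object can live on the stack unless
 * CONFIG_KERNEL_COHERENCE requires it to be in coherent memory:
 *
 *	void wait_for_print(void)
 *	{
 *		struct k_work_sync sync;
 *
 *		if (k_work_flush(&print_work, &sync)) {
 *			// We blocked until the pending handler finished.
 *		}
 *	}
 */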

/* Execute the non-waiting steps necessary to cancel a work item.
 * Invoked with work lock held.  Returns the busy state of the item
 * after initiating cancellation; zero means it was already idle.
 */
static int cancel_async_locked(struct k_work *work)
{
	/* If we haven't already started canceling, do it now. */
	if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Remove it from the queue, if it's queued. */
		queue_remove_locked(work->queue, work);
	}

	/* If it's still busy after being dequeued, mark it canceling. */
	int ret = work_busy_get_locked(work);

	if (ret != 0) {
		flag_set(&work->flags, K_WORK_CANCELING_BIT);
		ret = work_busy_get_locked(work);
	}

	return ret;
}

/* Register for notification of cancellation completion if needed.
 * Invoked with work lock held.  Returns true if and only if the caller
 * must wait on the canceller's semaphore after releasing the lock;
 * completion is signalled by finalize_cancel_locked().
 */
static bool cancel_sync_locked(struct k_work *work,
			       struct z_work_canceller *canceller)
{
	bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);

	if (ret) {
		init_work_cancel(canceller, work);
	}

	return ret;
}

int k_work_cancel(struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_async_locked(work);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);

	return ret;
}

bool k_work_cancel_sync(struct k_work *work,
			struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_busy_get_locked(work) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_async_locked(work);
		need_wait = cancel_sync_locked(work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);

		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);
	return pending;
}
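
/* Cancellation sketch (hypothetical caller).  k_work_cancel() is the
 * non-blocking form; k_work_cancel_sync() additionally waits for a
 * running handler to return:
 *
 *	void stop_printing(void)
 *	{
 *		struct k_work_sync sync;
 *
 *		if (k_work_cancel_sync(&print_work, &sync)) {
 *			// The item was queued, running, or canceling,
 *			// and is now guaranteed idle.
 *		}
 *	}
 */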

/* Loop executed by a work queue thread. */
static void work_queue_main(void *workq_ptr, void *p2, void *p3)
{
	struct k_work_q *queue = (struct k_work_q *)workq_ptr;

	while (true) {
		sys_snode_t *node;
		struct k_work *work = NULL;
		k_work_handler_t handler = NULL;
		k_spinlock_key_t key = k_spin_lock(&lock);

		/* Check for and prepare any new work. */
		node = sys_slist_get(&queue->pending);
		if (node != NULL) {
			/* Mark that there's some work active that's
			 * not on the pending list.
			 */
			flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			work = CONTAINER_OF(node, struct k_work, node);
			flag_set(&work->flags, K_WORK_RUNNING_BIT);
			flag_clear(&work->flags, K_WORK_QUEUED_BIT);

			handler = work->handler;
		} else if (flag_test_and_clear(&queue->flags,
					       K_WORK_QUEUE_DRAIN_BIT)) {
			/* Not busy and draining: move threads waiting
			 * for the drain to complete to the ready queue.
			 * They get scheduled when the lock is released.
			 */
			(void)z_sched_wake_all(&queue->drainq, 1, NULL);
		} else {
			/* No work is available and no queue state
			 * requires special handling.
			 */
			;
		}

		if (work == NULL) {
			/* Nothing's had a chance to add work since we
			 * took the lock; go to sleep until the queue is
			 * notified, then check again.
			 */
			(void)z_sched_wait(&lock, key, &queue->notifyq,
					   K_FOREVER, NULL);
			continue;
		}

		k_spin_unlock(&lock, key);

		if (work != NULL) {
			bool yield;

			__ASSERT_NO_MSG(handler != NULL);
			handler(work);

			/* Mark the work item as no longer running and
			 * deal with any cancellation issued while it
			 * was running.
			 */
			key = k_spin_lock(&lock);

			flag_clear(&work->flags, K_WORK_RUNNING_BIT);
			if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
				finalize_cancel_locked(work);
			}

			flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
			k_spin_unlock(&lock, key);

			/* Optionally yield to prevent the work queue
			 * from starving other threads.
			 */
			if (yield) {
				k_yield();
			}
		}
	}
}

void k_work_queue_init(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue != NULL);

	*queue = (struct k_work_q) {
		.flags = 0,
	};

	SYS_PORT_TRACING_OBJ_INIT(k_work_queue, queue);
}

void k_work_queue_start(struct k_work_q *queue,
			k_thread_stack_t *stack,
			size_t stack_size,
			int prio,
			const struct k_work_queue_config *cfg)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(stack);
	__ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));

	uint32_t flags = K_WORK_QUEUE_STARTED;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);

	sys_slist_init(&queue->pending);
	z_waitq_init(&queue->notifyq);
	z_waitq_init(&queue->drainq);

	if ((cfg != NULL) && cfg->no_yield) {
		flags |= K_WORK_QUEUE_NO_YIELD;
	}

	/* The thread hasn't actually started yet, but all the state is
	 * in place, so items can be submitted and the thread is ready
	 * to process them as soon as it gets control.
	 */
	flags_set(&queue->flags, flags);

	(void)k_thread_create(&queue->thread, stack, stack_size,
			      work_queue_main, queue, NULL, NULL,
			      prio, 0, K_FOREVER);

	if ((cfg != NULL) && (cfg->name != NULL)) {
		k_thread_name_set(&queue->thread, cfg->name);
	}

	k_thread_start(&queue->thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
}
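
/* Queue creation sketch (hypothetical application code; the stack
 * size, priority, and names are illustrative):
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_work_q my_queue;
 *
 *	void start_my_queue(void)
 *	{
 *		struct k_work_queue_config cfg = {
 *			.name = "myq",
 *			.no_yield = false,
 *		};
 *
 *		k_work_queue_init(&my_queue);
 *		k_work_queue_start(&my_queue, my_stack,
 *				   K_THREAD_STACK_SIZEOF(my_stack),
 *				   K_PRIO_PREEMPT(5), &cfg);
 *	}
 */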

int k_work_queue_drain(struct k_work_q *queue,
		       bool plug)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(!k_is_in_isr());

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* If the queue is busy, already draining, being plugged, or
	 * still has pending work, mark it draining and wait until the
	 * queue thread signals the drain queue.
	 */
	if (((flags_get(&queue->flags)
	      & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
	    || plug
	    || !sys_slist_is_empty(&queue->pending)) {
		flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
		if (plug) {
			flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
		}

		notify_queue_locked(queue);
		ret = z_sched_wait(&lock, key, &queue->drainq,
				   K_FOREVER, NULL);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);

	return ret;
}
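
/* Drain/plug sketch (hypothetical caller, thread context only).
 * Passing plug=true keeps the queue closed to new submissions after
 * the drain completes, until k_work_queue_unplug() reopens it:
 *
 *	void quiesce_my_queue(void)
 *	{
 *		(void)k_work_queue_drain(&my_queue, true);
 *		// Queue is now empty; new submissions get -EBUSY.
 *		(void)k_work_queue_unplug(&my_queue);
 *	}
 */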

int k_work_queue_unplug(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);

	int ret = -EALREADY;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
		ret = 0;
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);

	return ret;
}

#ifdef CONFIG_SYS_CLOCK_EXISTS

/* Timeout handler for delayable work.  Invoked by the timeout
 * infrastructure; takes and releases the work lock.
 */
static void work_timeout(struct _timeout *to)
{
	struct k_work_delayable *dw
		= CONTAINER_OF(to, struct k_work_delayable, timeout);
	struct k_work *wp = &dw->work;
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_work_q *queue = NULL;

	/* If the work is still marked delayed (it should be), clear
	 * that state and submit it to the recorded queue.
	 */
	if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
		queue = dw->queue;
		(void)submit_to_queue_locked(wp, &queue);
	}

	k_spin_unlock(&lock, key);
}

void k_work_init_delayable(struct k_work_delayable *dwork,
			   k_work_handler_t handler)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*dwork = (struct k_work_delayable){
		.work = {
			.handler = handler,
			.flags = K_WORK_DELAYABLE,
		},
	};
	z_init_timeout(&dwork->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
}
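
/* Delayable work sketch (hypothetical application code; poll_handler
 * and poll_work are illustrative names):
 *
 *	static void poll_handler(struct k_work *work)
 *	{
 *		struct k_work_delayable *dwork =
 *			k_work_delayable_from_work(work);
 *
 *		// ...do periodic work, then re-arm...
 *		k_work_schedule(dwork, K_SECONDS(1));
 *	}
 *
 *	static struct k_work_delayable poll_work;
 *
 *	void start_polling(void)
 *	{
 *		k_work_init_delayable(&poll_work, poll_handler);
 *		k_work_schedule(&poll_work, K_SECONDS(1));
 *	}
 */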

static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
{
	/* flags is a plain uint32_t guarded by the work lock, so read
	 * it with flags_get() rather than atomic_get().
	 */
	return flags_get(&dwork->work.flags) & K_WORK_MASK;
}

int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_delayable_busy_get_locked(dwork);

	k_spin_unlock(&lock, key);

	return ret;
}

/* Attempt to schedule a work item for future (maybe immediate)
 * submission.  Invoked with work lock held.  @p queuep is updated as
 * for submit_to_queue_locked().  Returns what submit_to_queue_locked()
 * returns when @p delay is K_NO_WAIT, otherwise 1.
 */
static int schedule_for_queue_locked(struct k_work_q **queuep,
				     struct k_work_delayable *dwork,
				     k_timeout_t delay)
{
	int ret = 1;
	struct k_work *work = &dwork->work;

	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		return submit_to_queue_locked(work, queuep);
	}

	flag_set(&work->flags, K_WORK_DELAYED_BIT);
	dwork->queue = *queuep;

	/* Add timeout */
	z_add_timeout(&dwork->timeout, work_timeout, delay);

	return ret;
}

/* Unschedule delayable work.  Invoked with work lock held.  Returns
 * true if and only if the work had been delayed and its timeout was
 * aborted.
 */
static inline bool unschedule_locked(struct k_work_delayable *dwork)
{
	bool ret = false;
	struct k_work *work = &dwork->work;

	/* If scheduled, try to cancel the timeout. */
	if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
		z_abort_timeout(&dwork->timeout);
		ret = true;
	}

	return ret;
}

/* Full cancellation of a delayable work item: unschedule the delayed
 * part, then initiate cancellation of the work part.  Invoked with
 * work lock held.
 */
static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
{
	(void)unschedule_locked(dwork);

	return cancel_async_locked(&dwork->work);
}

int k_work_schedule_for_queue(struct k_work_q *queue,
			      struct k_work_delayable *dwork,
			      k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);

	struct k_work *work = &dwork->work;
	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Schedule the work item if it's idle or running. */
	if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
		ret = schedule_for_queue_locked(&queue, dwork, delay);
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_schedule(struct k_work_delayable *dwork,
		    k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);

	int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);

	return ret;
}

int k_work_reschedule_for_queue(struct k_work_q *queue,
				struct k_work_delayable *dwork,
				k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Remove any active scheduling. */
	(void)unschedule_locked(dwork);

	/* Schedule the work item with the new parameters. */
	ret = schedule_for_queue_locked(&queue, dwork, delay);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_reschedule(struct k_work_delayable *dwork,
		      k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);

	int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);

	return ret;
}
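
/* Scheduling sketch: k_work_schedule() is a no-op (returns 0) if the
 * item is already scheduled or queued, keeping the earlier deadline,
 * while k_work_reschedule() always restarts the countdown with the new
 * delay (hypothetical caller, reusing poll_work from above):
 *
 *	void on_activity(void)
 *	{
 *		// Inactivity timeout: push the deadline back each time.
 *		k_work_reschedule(&poll_work, K_SECONDS(10));
 *	}
 */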

int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_delayable_async_locked(dwork);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);

	return ret;
}

bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
				  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_delayable_async_locked(dwork);
		need_wait = cancel_sync_locked(&dwork->work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);
	return pending;
}

bool k_work_flush_delayable(struct k_work_delayable *dwork,
			    struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);

	struct k_work *work = &dwork->work;
	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* If it's idle, release the lock and return immediately. */
	if (work_busy_get_locked(work) == 0U) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);

		return false;
	}

	/* If unscheduling did something, promote the work to its queue
	 * so flushing it makes progress.
	 */
	if (unschedule_locked(dwork)) {
		struct k_work_q *queue = dwork->queue;

		(void)submit_to_queue_locked(work, &queue);
	}

	/* Wait for the work item to complete, if necessary. */
	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	if (need_flush) {
		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);

	return need_flush;
}
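
/* Flush sketch for delayable work (hypothetical caller, thread
 * context): if the item is still counting down, it is promoted to its
 * queue immediately and then waited on:
 *
 *	void run_poll_now(void)
 *	{
 *		struct k_work_sync sync;
 *
 *		(void)k_work_flush_delayable(&poll_work, &sync);
 *	}
 */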

/* Legacy k_delayed_work wrappers: thin shims that map the old API onto
 * the k_work_delayable operations, approximating the old return-value
 * conventions.
 */
int k_delayed_work_cancel(struct k_delayed_work *work)
{
	bool pending = k_work_delayable_is_pending(&work->work);
	int rc = k_work_cancel_delayable(&work->work);

	/* The old state distinctions can't be reconstructed exactly:
	 * report success only when a pending item was cancelled, and
	 * -EALREADY when there was nothing to cancel.
	 */
	if (pending) {
		return (rc == 0) ? 0 : -EINVAL;
	}

	return -EALREADY;
}

void k_delayed_work_init(struct k_delayed_work *work,
			 k_work_handler_t handler)
{
	k_work_init_delayable(&work->work, handler);
}

int k_delayed_work_submit(struct k_delayed_work *work,
			  k_timeout_t delay)
{
	int rc = k_work_reschedule(&work->work, delay);

	/* Legacy API returns 0 on success. */
	return (rc >= 0) ? 0 : rc;
}

int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   k_timeout_t delay)
{
	int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);

	return (rc >= 0) ? 0 : rc;
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */