123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524 |
- #include <kernel.h>
- #include <kernel_structs.h>
- #include <toolchain.h>
- #include <linker/sections.h>
- #include <string.h>
- #include <ksched.h>
- #include <wait_q.h>
- #include <sys/dlist.h>
- #include <init.h>
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)

/* asynchronous message descriptor type */
struct k_mbox_async {
	struct _thread_base thread;	/* dummy thread object */
	struct k_mbox_msg tx_msg;	/* transmit message descriptor */
};

/* stack of unused asynchronous message descriptors */
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);

/* allocate an asynchronous message descriptor (blocks until one is free) */
static inline void mbox_async_alloc(struct k_mbox_async **async)
{
	(void)k_stack_pop(&async_msg_free, (stack_data_t *)async, K_FOREVER);
}

/* free an asynchronous message descriptor by returning it to the stack */
static inline void mbox_async_free(struct k_mbox_async *async)
{
	k_stack_push(&async_msg_free, (stack_data_t)async);
}

#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)

/*
 * Do run-time initialization of mailbox object subsystem.
 *
 * Populates the free stack with CONFIG_NUM_MBOX_ASYNC_MSGS asynchronous
 * message descriptors, each containing a minimally-initialized dummy
 * thread object.
 *
 * @param dev Unused device pointer (required by the SYS_INIT signature).
 * @return 0 on success.
 */
static int init_mbox_module(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* array of asynchronous message descriptors */
	static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];

	/*
	 * Complete initialization of asynchronous message descriptors.
	 * A dummy thread requires minimal initialization, since it never
	 * actually gets to execute. The _THREAD_DUMMY flag is sufficient
	 * to distinguish a dummy thread from a real one.
	 *
	 * Note: the redundant inner #if guard duplicating the enclosing
	 * CONFIG_NUM_MBOX_ASYNC_MSGS check has been removed.
	 */
	for (int i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
		z_init_thread_base(&async_msg[i].thread, 0, _THREAD_DUMMY, 0);
		k_stack_push(&async_msg_free, (stack_data_t)&async_msg[i]);
	}

	return 0;
}

SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
#ifdef CONFIG_POLL
/* Signal registered poll events on this mailbox with the given state,
 * targeting the specified thread (wrapper over the generic helper).
 */
static inline void handle_poll_thread_events(struct k_mbox *mbox, uint32_t state, k_tid_t tid)
{
	z_handle_obj_poll_thread_events(&mbox->poll_events, state, tid);
}
#endif /* CONFIG_POLL */
- void k_mbox_init(struct k_mbox *mbox)
- {
- z_waitq_init(&mbox->tx_msg_queue);
- z_waitq_init(&mbox->rx_msg_queue);
- mbox->lock = (struct k_spinlock) {};
- #ifdef CONFIG_POLL
- sys_dlist_init(&mbox->poll_events);
- #endif
- SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
- }
/**
 * @brief Determine if receiver and sender are compatible.
 *
 * Compares the sender's target-thread field against the receiver's
 * identity, and the receiver's source-thread field against the sender's
 * identity; K_ANY acts as a wildcard on either side. On a match, both
 * descriptors are updated in place (thread ids, info values swapped,
 * receiver's size capped to the sender's, data location and syncing
 * thread propagated to the receiver).
 *
 * @param tx_msg Pointer to transmit message descriptor.
 * @param rx_msg Pointer to receive message descriptor.
 *
 * @return 0 if successfully matched, otherwise -1.
 */
static int mbox_message_match(struct k_mbox_msg *tx_msg,
			      struct k_mbox_msg *rx_msg)
{
	uint32_t temp_info;

	if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
	     (tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
	    ((rx_msg->rx_source_thread == (k_tid_t)K_ANY) ||
	     (rx_msg->rx_source_thread == tx_msg->rx_source_thread))) {

		/* update thread identifier fields for both descriptors */
		rx_msg->rx_source_thread = tx_msg->rx_source_thread;
		tx_msg->tx_target_thread = rx_msg->tx_target_thread;

		/* exchange application info fields between descriptors */
		temp_info = rx_msg->info;
		rx_msg->info = tx_msg->info;
		tx_msg->info = temp_info;

		/* update data size field for receiver only (cap to sender's) */
		if (rx_msg->size > tx_msg->size) {
			rx_msg->size = tx_msg->size;
		}

		/* update data location fields for receiver only */
		rx_msg->tx_data = tx_msg->tx_data;
		rx_msg->tx_block = tx_msg->tx_block;
		if (rx_msg->tx_data != NULL) {
			/* direct data pointer takes precedence over block */
			rx_msg->tx_block.data = NULL;
		} else if (rx_msg->tx_block.data != NULL) {
			rx_msg->tx_data = rx_msg->tx_block.data;
		} else {
			/* no data */
		}

		/* update syncing thread field for receiver only */
		rx_msg->_syncing_thread = tx_msg->_syncing_thread;

		return 0;
	}

	return -1;
}
/**
 * @brief Dispose of received message.
 *
 * Releases the sender associated with a received message. For an
 * asynchronous send the dummy descriptor is returned to the free stack
 * and the optional semaphore is given; for a synchronous send the
 * sending thread is made ready again.
 *
 * @param rx_msg Pointer to receive message descriptor.
 */
static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
{
	struct k_thread *sending_thread;
	struct k_mbox_msg *tx_msg;

	/* do nothing if message was disposed of when it was received */
	if (rx_msg->_syncing_thread == NULL) {
		return;
	}

	/* release sender's memory block (ownership already transferred) */
	if (rx_msg->tx_block.data != NULL) {
		rx_msg->tx_block.data = NULL;
	}

	/* recover sender info and mark the message as disposed */
	sending_thread = rx_msg->_syncing_thread;
	rx_msg->_syncing_thread = NULL;
	tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

	/* update data size field for sender only */
	tx_msg->size = rx_msg->size;

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/*
	 * asynchronous send: free asynchronous message descriptor +
	 * dummy thread pair, then give semaphore (if needed)
	 */
	if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
		struct k_sem *async_sem = tx_msg->_async_sem;

		/* read _async_sem before freeing: descriptor may be reused */
		mbox_async_free((struct k_mbox_async *)sending_thread);
		if (async_sem != NULL) {
			k_sem_give(async_sem);
		}
		return;
	}
#endif

	/* synchronous send: wake up sending thread */
	arch_thread_return_value_set(sending_thread, 0);
	z_mark_thread_as_not_pending(sending_thread);
	z_ready_thread(sending_thread);
	z_reschedule_unlocked();
}
/**
 * @brief Send a mailbox message.
 *
 * Tries to hand the message to a compatible waiting receiver; if none
 * is found, pends the sender (or, for an asynchronous send, the dummy
 * descriptor thread) on the mailbox's transmit queue.
 *
 * @param mbox Pointer to the mailbox object.
 * @param tx_msg Pointer to transmit message descriptor
 *               (with _syncing_thread already set by the caller).
 * @param timeout Maximum time (ticks) to wait for the message to be received.
 *
 * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out.
 */
static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
			    k_timeout_t timeout)
{
	struct k_thread *sending_thread;
	struct k_thread *receiving_thread;
	struct k_mbox_msg *rx_msg;
	k_spinlock_key_t key;

	/* save sender id so it can be used during message matching */
	tx_msg->rx_source_thread = _current;

	/* finish readying sending thread (actual or dummy) for send */
	sending_thread = tx_msg->_syncing_thread;
	sending_thread->base.swap_data = tx_msg;

	/* search mailbox's rx queue for a compatible receiver */
	key = k_spin_lock(&mbox->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, message_put, mbox, timeout);

#ifdef CONFIG_POLL
	/* notify pollers that data is available for the target thread */
	handle_poll_thread_events(mbox, K_POLL_STATE_MBOX_DATA_AVAILABLE, tx_msg->tx_target_thread);
#endif

	_WAIT_Q_FOR_EACH(&mbox->rx_msg_queue, receiving_thread) {
		rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;

		if (mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take receiver out of rx queue */
			z_unpend_thread(receiving_thread);

			/* ready receiver for execution */
			arch_thread_return_value_set(receiving_thread, 0);
			z_ready_thread(receiving_thread);

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
			/*
			 * asynchronous send: swap out if receiver is higher
			 * priority; the message gets disposed of later
			 */
			if ((sending_thread->base.thread_state & _THREAD_DUMMY)
			    != 0U) {
				z_reschedule(&mbox->lock, key);
				return 0;
			}
#endif
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

			/*
			 * synchronous send: pend current thread (unqueued)
			 * until the message is either received or rejected
			 */
			int ret = z_pend_curr(&mbox->lock, key, NULL, K_FOREVER);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);
			return ret;
		}
	}

	/* didn't find a matching receiver: don't wait for one */
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, -ENOMSG);

		k_spin_unlock(&mbox->lock, key);
		return -ENOMSG;
	}

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/* asynchronous send: dummy thread waits on tx queue for receiver */
	if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
		z_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
		k_spin_unlock(&mbox->lock, key);
		return 0;
	}
#endif
	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

	/* synchronous send: sender waits on tx queue for receiver or timeout */
	int ret = z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);
	return ret;
}
- int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
- k_timeout_t timeout)
- {
-
- tx_msg->_syncing_thread = _current;
- SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout);
- int ret = mbox_message_put(mbox, tx_msg, timeout);
- SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, put, mbox, timeout, ret);
- return ret;
- }
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/* Send a mailbox message asynchronously: the message descriptor is
 * copied into a dummy-thread async descriptor so the caller does not
 * block; the optional semaphore is given once the message is disposed of.
 */
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
		      struct k_sem *sem)
{
	struct k_mbox_async *async;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, async_put, mbox, sem);

	/*
	 * allocate an asynchronous message descriptor, configure both parts,
	 * then send the message asynchronously
	 */
	mbox_async_alloc(&async);

	/* dummy thread inherits the sender's priority for queue ordering */
	async->thread.prio = _current->base.prio;

	async->tx_msg = *tx_msg;
	async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
	async->tx_msg._async_sem = sem;

	(void)mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, async_put, mbox, sem);
}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
- void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
- {
-
- if (buffer == NULL) {
- rx_msg->size = 0;
- mbox_message_dispose(rx_msg);
- return;
- }
-
- if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0U)) {
- (void)memcpy(buffer, rx_msg->tx_data, rx_msg->size);
- }
- mbox_message_dispose(rx_msg);
- }
- static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
- {
- if (buffer != NULL) {
-
- k_mbox_data_get(rx_msg, buffer);
- } else if (rx_msg->size == 0U) {
-
- mbox_message_dispose(rx_msg);
- } else {
-
- }
- return 0;
- }
/**
 * @brief Receive a mailbox message.
 *
 * Searches the mailbox's transmit queue for a compatible sender; if one
 * is found its data is (optionally) copied into @a buffer, otherwise the
 * caller pends on the receive queue until a sender arrives or the
 * timeout expires.
 *
 * @param mbox Pointer to the mailbox object.
 * @param rx_msg Pointer to receive message descriptor.
 * @param buffer Destination buffer for the data, or NULL to defer/discard.
 * @param timeout Maximum time to wait for a message.
 *
 * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out.
 */
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
	       k_timeout_t timeout)
{
	struct k_thread *sending_thread;
	struct k_mbox_msg *tx_msg;
	k_spinlock_key_t key;
	int result;

	/* save receiver id so it can be used during message matching */
	rx_msg->tx_target_thread = _current;

	/* search mailbox's tx queue for a compatible sender */
	key = k_spin_lock(&mbox->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, get, mbox, timeout);

	_WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
		tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

		if (mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take sender out of mailbox's tx queue */
			z_unpend_thread(sending_thread);

			k_spin_unlock(&mbox->lock, key);

			/* consume message data immediately, if needed */
			result = mbox_message_data_check(rx_msg, buffer);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
			return result;
		}
	}

	/* didn't find a matching sender */
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, -ENOMSG);

		/* don't wait for a matching sender to appear */
		k_spin_unlock(&mbox->lock, key);
		return -ENOMSG;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout);

	/* wait until a matching sender appears or a timeout occurs */
	_current->base.swap_data = rx_msg;
	result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);

	/* consume message data immediately, if needed */
	if (result == 0) {
		result = mbox_message_data_check(rx_msg, buffer);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
	return result;
}
- void k_mbox_clear_msg(struct k_mbox *mbox)
- {
- struct k_thread *sending_thread;
- struct k_mbox_msg *tx_msg;
- struct k_mbox_msg rx_msg;
- k_spinlock_key_t key;
-
- key = k_spin_lock(&mbox->lock);
- _WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
- memset(&rx_msg, 0 ,sizeof(struct k_mbox_msg));
- tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
- rx_msg.info = tx_msg->size;
- rx_msg.size = tx_msg->size;
- rx_msg.rx_source_thread = (k_tid_t)K_ANY;
- tx_msg->tx_target_thread = (k_tid_t)K_ANY;
- if (mbox_message_match(tx_msg, &rx_msg) == 0) {
- printk("_mbox_message_data_check \n");
-
- z_unpend_thread(sending_thread);
- z_abort_thread_timeout(sending_thread);
- rx_msg.size = 0;
- mbox_message_data_check(&rx_msg, NULL);
- }
- }
- k_spin_unlock(&mbox->lock, key);
- return;
- }
- int k_mbox_get_pending_msg_cnt(struct k_mbox *mbox,k_tid_t target_thread)
- {
- struct k_thread *sending_thread;
- struct k_mbox_msg *tx_msg;
- k_spinlock_key_t key;
- int result = 0;
-
- key = k_spin_lock(&mbox->lock);
- _WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
- tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
- if (tx_msg->tx_target_thread == target_thread) {
- result ++;
- }
- }
- k_spin_unlock(&mbox->lock, key);
- return result;
- }
|