mailbox.c

/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Mailboxes.
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <string.h>
#include <ksched.h>
#include <wait_q.h>
#include <sys/dlist.h>
#include <init.h>

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/* asynchronous message descriptor type */
struct k_mbox_async {
    struct _thread_base thread; /* dummy thread object */
    struct k_mbox_msg tx_msg;   /* transmit message descriptor */
};

/* stack of unused asynchronous message descriptors */
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);

/* allocate an asynchronous message descriptor */
static inline void mbox_async_alloc(struct k_mbox_async **async)
{
    (void)k_stack_pop(&async_msg_free, (stack_data_t *)async, K_FOREVER);
}

/* free an asynchronous message descriptor */
static inline void mbox_async_free(struct k_mbox_async *async)
{
    k_stack_push(&async_msg_free, (stack_data_t)async);
}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/*
 * Do run-time initialization of mailbox object subsystem.
 */
static int init_mbox_module(const struct device *dev)
{
    ARG_UNUSED(dev);

    /* array of asynchronous message descriptors */
    static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];

    /*
     * Create pool of asynchronous message descriptors.
     *
     * A dummy thread requires minimal initialization, since it never gets
     * to execute. The _THREAD_DUMMY flag is sufficient to distinguish a
     * dummy thread from a real one. The threads are *not* added to the
     * kernel's list of known threads.
     *
     * Once initialized, the address of each descriptor is added to a stack
     * that governs access to them.
     */
    int i;

    for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
        z_init_thread_base(&async_msg[i].thread, 0, _THREAD_DUMMY, 0);
        k_stack_push(&async_msg_free, (stack_data_t)&async_msg[i]);
    }

    return 0;
}

SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
#ifdef CONFIG_POLL
static inline void handle_poll_thread_events(struct k_mbox *mbox, uint32_t state,
                                             k_tid_t tid)
{
    z_handle_obj_poll_thread_events(&mbox->poll_events, state, tid);
}
#endif /* CONFIG_POLL */

void k_mbox_init(struct k_mbox *mbox)
{
    z_waitq_init(&mbox->tx_msg_queue);
    z_waitq_init(&mbox->rx_msg_queue);
    mbox->lock = (struct k_spinlock) {};

#ifdef CONFIG_POLL
    sys_dlist_init(&mbox->poll_events);
#endif /* CONFIG_POLL */

    SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
}
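#if 0
/*
 * Usage sketch (not part of this file, compiled out): defining and
 * initializing a mailbox from application code. K_MBOX_DEFINE() creates
 * and initializes a mailbox at build time; k_mbox_init() does the same
 * at run time. The names my_mailbox and my_runtime_mailbox are
 * illustrative assumptions, not kernel symbols.
 */
K_MBOX_DEFINE(my_mailbox);          /* statically defined and initialized */

struct k_mbox my_runtime_mailbox;   /* initialized at run time */

void app_init(void)
{
    k_mbox_init(&my_runtime_mailbox);
}
#endif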
/**
 * @brief Check compatibility of sender's and receiver's message descriptors.
 *
 * Compares sender's and receiver's message descriptors to see if they are
 * compatible. If so, the descriptor fields are updated to reflect that a
 * match has occurred.
 *
 * @param tx_msg Pointer to transmit message descriptor.
 * @param rx_msg Pointer to receive message descriptor.
 *
 * @return 0 if successfully matched, otherwise -1.
 */
static int mbox_message_match(struct k_mbox_msg *tx_msg,
                              struct k_mbox_msg *rx_msg)
{
    uint32_t temp_info;

    if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
         (tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
        ((rx_msg->rx_source_thread == (k_tid_t)K_ANY) ||
         (rx_msg->rx_source_thread == tx_msg->rx_source_thread))) {

        /* update thread identifier fields for both descriptors */
        rx_msg->rx_source_thread = tx_msg->rx_source_thread;
        tx_msg->tx_target_thread = rx_msg->tx_target_thread;

        /* update application info fields for both descriptors */
        temp_info = rx_msg->info;
        rx_msg->info = tx_msg->info;
        tx_msg->info = temp_info;

        /* update data size field for receiver only */
        if (rx_msg->size > tx_msg->size) {
            rx_msg->size = tx_msg->size;
        }

        /* update data location fields for receiver only */
        rx_msg->tx_data = tx_msg->tx_data;
        rx_msg->tx_block = tx_msg->tx_block;
        if (rx_msg->tx_data != NULL) {
            rx_msg->tx_block.data = NULL;
        } else if (rx_msg->tx_block.data != NULL) {
            rx_msg->tx_data = rx_msg->tx_block.data;
        } else {
            /* no data */
        }

        /* update syncing thread field for receiver only */
        rx_msg->_syncing_thread = tx_msg->_syncing_thread;

        return 0;
    }

    return -1;
}
/**
 * @brief Dispose of received message.
 *
 * Releases any memory pool block still associated with the message,
 * then notifies the sender that message processing is complete.
 *
 * @param rx_msg Pointer to receive message descriptor.
 *
 * @return N/A
 */
static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
{
    struct k_thread *sending_thread;
    struct k_mbox_msg *tx_msg;

    /* do nothing if message was disposed of when it was received */
    if (rx_msg->_syncing_thread == NULL) {
        return;
    }

    if (rx_msg->tx_block.data != NULL) {
        rx_msg->tx_block.data = NULL;
    }

    /* recover sender info */
    sending_thread = rx_msg->_syncing_thread;
    rx_msg->_syncing_thread = NULL;
    tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

    /* update data size field for sender */
    tx_msg->size = rx_msg->size;

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
    /*
     * asynchronous send: free asynchronous message descriptor +
     * dummy thread pair, then give semaphore (if needed)
     */
    if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
        struct k_sem *async_sem = tx_msg->_async_sem;

        mbox_async_free((struct k_mbox_async *)sending_thread);
        if (async_sem != NULL) {
            k_sem_give(async_sem);
        }
        return;
    }
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */

    /* synchronous send: wake up sending thread */
    arch_thread_return_value_set(sending_thread, 0);
    z_mark_thread_as_not_pending(sending_thread);
    z_ready_thread(sending_thread);
    z_reschedule_unlocked();
}
/**
 * @brief Send a mailbox message.
 *
 * Helper routine that handles both synchronous and asynchronous sends.
 *
 * @param mbox Pointer to the mailbox object.
 * @param tx_msg Pointer to transmit message descriptor.
 * @param timeout Maximum time to wait for the message to be received
 *                (although not necessarily completely processed).
 *                Use K_NO_WAIT to return immediately, or K_FOREVER to wait
 *                as long as necessary.
 *
 * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
 */
static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
                            k_timeout_t timeout)
{
    struct k_thread *sending_thread;
    struct k_thread *receiving_thread;
    struct k_mbox_msg *rx_msg;
    k_spinlock_key_t key;

    /* save sender id so it can be used during message matching */
    tx_msg->rx_source_thread = _current;

    /* finish readying sending thread (actual or dummy) for send */
    sending_thread = tx_msg->_syncing_thread;
    sending_thread->base.swap_data = tx_msg;

    /* search mailbox's rx queue for a compatible receiver */
    key = k_spin_lock(&mbox->lock);

    SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, message_put, mbox, timeout);

#ifdef CONFIG_POLL
    handle_poll_thread_events(mbox, K_POLL_STATE_MBOX_DATA_AVAILABLE,
                              tx_msg->tx_target_thread);
#endif /* CONFIG_POLL */

    _WAIT_Q_FOR_EACH(&mbox->rx_msg_queue, receiving_thread) {
        rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;

        if (mbox_message_match(tx_msg, rx_msg) == 0) {
            /* take receiver out of rx queue */
            z_unpend_thread(receiving_thread);

            /* ready receiver for execution */
            arch_thread_return_value_set(receiving_thread, 0);
            z_ready_thread(receiving_thread);

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
            /*
             * asynchronous send: swap out current thread
             * if receiver has priority, otherwise let it continue
             *
             * note: dummy sending thread sits (unqueued)
             * until the receiver consumes the message
             */
            if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
                z_reschedule(&mbox->lock, key);
                return 0;
            }
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */

            SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

            /*
             * synchronous send: pend current thread (unqueued)
             * until the receiver consumes the message
             */
            int ret = z_pend_curr(&mbox->lock, key, NULL, K_FOREVER);

            SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);
            return ret;
        }
    }

    /* didn't find a matching receiver: don't wait for one */
    if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, -ENOMSG);

        k_spin_unlock(&mbox->lock, key);
        return -ENOMSG;
    }

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
    /* asynchronous send: dummy thread waits on tx queue for receiver */
    if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
        z_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
        k_spin_unlock(&mbox->lock, key);
        return 0;
    }
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */

    SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

    /* synchronous send: sender waits on tx queue for receiver or timeout */
    int ret = z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);

    SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);
    return ret;
}
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
               k_timeout_t timeout)
{
    /* configure things for a synchronous send, then send the message */
    tx_msg->_syncing_thread = _current;

    SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout);

    int ret = mbox_message_put(mbox, tx_msg, timeout);

    SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, put, mbox, timeout, ret);

    return ret;
}
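#if 0
/*
 * Usage sketch (not part of this file, compiled out): a producer thread
 * performing a synchronous send. The sender blocks in k_mbox_put() until
 * a receiver accepts the message. my_mailbox and the buffer contents are
 * illustrative assumptions.
 */
void producer_thread(void)
{
    static char buffer[100];
    struct k_mbox_msg send_msg;

    /* prepare the transmit descriptor */
    send_msg.info = 42;                     /* application-defined value */
    send_msg.size = sizeof(buffer);
    send_msg.tx_data = buffer;
    send_msg.tx_block.data = NULL;
    send_msg.tx_target_thread = K_ANY;      /* any receiver may take it */

    /* send message and wait until a receiver picks it up */
    k_mbox_put(&my_mailbox, &send_msg, K_FOREVER);

    /* send_msg.size now reports how many bytes the receiver accepted */
}
#endif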
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
                      struct k_sem *sem)
{
    struct k_mbox_async *async;

    SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, async_put, mbox, sem);

    /*
     * allocate an asynchronous message descriptor, configure both parts,
     * then send the message asynchronously
     */
    mbox_async_alloc(&async);

    async->thread.prio = _current->base.prio;

    async->tx_msg = *tx_msg;
    async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
    async->tx_msg._async_sem = sem;

    (void)mbox_message_put(mbox, &async->tx_msg, K_FOREVER);

    SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, async_put, mbox, sem);
}
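#if 0
/*
 * Usage sketch (not part of this file, compiled out): an asynchronous
 * send. The caller does not block; the (optional) semaphore is given once
 * a receiver has consumed the message, so the data buffer must stay valid
 * until then. my_mailbox and my_sem are illustrative assumptions.
 */
K_SEM_DEFINE(my_sem, 0, 1);

void async_producer(void)
{
    static char buffer[100];    /* must persist until delivery */
    struct k_mbox_msg send_msg;

    send_msg.info = 0;
    send_msg.size = sizeof(buffer);
    send_msg.tx_data = buffer;
    send_msg.tx_block.data = NULL;
    send_msg.tx_target_thread = K_ANY;

    /* returns immediately; my_sem is given when the message is consumed */
    k_mbox_async_put(&my_mailbox, &send_msg, &my_sem);

    /* ... do other work, then wait for delivery if required ... */
    k_sem_take(&my_sem, K_FOREVER);
}
#endif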
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
{
    /* handle case where data is to be discarded */
    if (buffer == NULL) {
        rx_msg->size = 0;
        mbox_message_dispose(rx_msg);
        return;
    }

    /* copy message data to buffer, then dispose of message */
    if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0U)) {
        (void)memcpy(buffer, rx_msg->tx_data, rx_msg->size);
    }
    mbox_message_dispose(rx_msg);
}
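#if 0
/*
 * Usage sketch (not part of this file, compiled out): deferred data
 * retrieval. The receiver first gets the message with a NULL buffer
 * (leaving the data with the sender), inspects the descriptor, then
 * retrieves or discards the data with k_mbox_data_get(). my_mailbox and
 * the info value checked are illustrative assumptions.
 */
void deferred_consumer(void)
{
    struct k_mbox_msg recv_msg;
    char buffer[100];

    recv_msg.size = sizeof(buffer);
    recv_msg.rx_source_thread = K_ANY;

    /* get the message, but not its data */
    k_mbox_get(&my_mailbox, &recv_msg, NULL, K_FOREVER);

    if (recv_msg.info == 42) {
        /* retrieve message data and dispose of the message */
        k_mbox_data_get(&recv_msg, buffer);
    } else {
        /* discard message data and dispose of the message */
        k_mbox_data_get(&recv_msg, NULL);
    }
}
#endif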
/**
 * @brief Handle immediate consumption of received mailbox message data.
 *
 * Checks to see if received message data should be kept for later retrieval,
 * or if the data should be consumed immediately and the message disposed of.
 *
 * The data is consumed immediately in either of the following cases:
 * 1) The receiver requested immediate retrieval by supplying a buffer
 *    to receive the data.
 * 2) There is no data to be retrieved. (i.e. Data size is 0 bytes.)
 *
 * @param rx_msg Pointer to receive message descriptor.
 * @param buffer Pointer to buffer to receive data.
 *
 * @return 0
 */
static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
{
    if (buffer != NULL) {
        /* retrieve data now, then dispose of message */
        k_mbox_data_get(rx_msg, buffer);
    } else if (rx_msg->size == 0U) {
        /* there is no data to get, so just dispose of message */
        mbox_message_dispose(rx_msg);
    } else {
        /* keep message around for later data retrieval */
    }

    return 0;
}
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
               k_timeout_t timeout)
{
    struct k_thread *sending_thread;
    struct k_mbox_msg *tx_msg;
    k_spinlock_key_t key;
    int result;

    /* save receiver id so it can be used during message matching */
    rx_msg->tx_target_thread = _current;

    /* search mailbox's tx queue for a compatible sender */
    key = k_spin_lock(&mbox->lock);

    SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, get, mbox, timeout);

    _WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
        tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

        if (mbox_message_match(tx_msg, rx_msg) == 0) {
            /* take sender out of mailbox's tx queue */
            z_unpend_thread(sending_thread);

            k_spin_unlock(&mbox->lock, key);

            /* consume message data immediately, if needed */
            result = mbox_message_data_check(rx_msg, buffer);

            SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
            return result;
        }
    }

    /* didn't find a matching sender */
    if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, -ENOMSG);

        /* don't wait for a matching sender to appear */
        k_spin_unlock(&mbox->lock, key);
        return -ENOMSG;
    }

    SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout);

    /* wait until a matching sender appears or a timeout occurs */
    _current->base.swap_data = rx_msg;
    result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);

    /* consume message data immediately, if needed */
    if (result == 0) {
        result = mbox_message_data_check(rx_msg, buffer);
    }

    SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
    return result;
}
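#if 0
/*
 * Usage sketch (not part of this file, compiled out): a consumer thread
 * receiving message data directly into a buffer. my_mailbox is an
 * illustrative assumption.
 */
void consumer_thread(void)
{
    struct k_mbox_msg recv_msg;
    char buffer[100];

    while (1) {
        /* prepare the receive descriptor */
        recv_msg.size = sizeof(buffer);     /* max data to accept */
        recv_msg.rx_source_thread = K_ANY;  /* accept any sender */

        /* receive a message and copy its data into buffer */
        k_mbox_get(&my_mailbox, &recv_msg, buffer, K_FOREVER);

        /* recv_msg.info and recv_msg.size describe what was received */
    }
}
#endif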
/**
 * @brief Discard all messages pending in a mailbox's tx queue.
 *
 * Matches each queued sender against a wildcard receive descriptor and
 * disposes of its message without retrieving any data.
 *
 * @param mbox Pointer to the mailbox object.
 */
void k_mbox_clear_msg(struct k_mbox *mbox)
{
    struct k_thread *sending_thread;
    struct k_mbox_msg *tx_msg;
    struct k_mbox_msg rx_msg;
    k_spinlock_key_t key;

    /* search mailbox's tx queue for queued senders */
    key = k_spin_lock(&mbox->lock);

    _WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
        memset(&rx_msg, 0, sizeof(struct k_mbox_msg));
        tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

        rx_msg.info = tx_msg->size;
        rx_msg.size = tx_msg->size;
        rx_msg.rx_source_thread = (k_tid_t)K_ANY;
        tx_msg->tx_target_thread = (k_tid_t)K_ANY;

        if (mbox_message_match(tx_msg, &rx_msg) == 0) {
            /* take sender out of mailbox's tx queue */
            z_unpend_thread(sending_thread);
            z_abort_thread_timeout(sending_thread);

            /* discard the data and dispose of the message */
            rx_msg.size = 0;
            mbox_message_data_check(&rx_msg, NULL);
        }
    }

    k_spin_unlock(&mbox->lock, key);
}
/**
 * @brief Count messages pending in a mailbox's tx queue for a given thread.
 *
 * @param mbox Pointer to the mailbox object.
 * @param target_thread Thread the queued messages are addressed to.
 *
 * @return Number of pending messages targeting @a target_thread.
 */
int k_mbox_get_pending_msg_cnt(struct k_mbox *mbox, k_tid_t target_thread)
{
    struct k_thread *sending_thread;
    struct k_mbox_msg *tx_msg;
    k_spinlock_key_t key;
    int result = 0;

    /* search mailbox's tx queue for senders targeting the given thread */
    key = k_spin_lock(&mbox->lock);

    _WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
        tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

        if (tx_msg->tx_target_thread == target_thread) {
            result++;
        }
    }

    k_spin_unlock(&mbox->lock, key);
    return result;
}
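#if 0
/*
 * Usage sketch (not part of this file, compiled out): draining every
 * message queued for the current thread without blocking. my_mailbox is
 * an illustrative assumption.
 */
void drain_own_messages(void)
{
    struct k_mbox_msg recv_msg;
    char buffer[100];

    while (k_mbox_get_pending_msg_cnt(&my_mailbox, k_current_get()) > 0) {
        recv_msg.size = sizeof(buffer);
        recv_msg.rx_source_thread = K_ANY;
        k_mbox_get(&my_mailbox, &recv_msg, buffer, K_NO_WAIT);
    }
}
#endif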