  1. /*
  2. * Copyright (c) 2016 Wind River Systems, Inc.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. /**
  7. * @brief Mailboxes.
  8. */
  9. #include <kernel.h>
  10. #include <kernel_structs.h>
  11. #include <toolchain.h>
  12. #include <linker/sections.h>
  13. #include <string.h>
  14. #include <ksched.h>
  15. #include <wait_q.h>
  16. #include <sys/dlist.h>
  17. #include <init.h>
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/*
 * Asynchronous message descriptor: pairs a dummy thread object (which
 * stands in for the sender on the mailbox wait queues) with a private
 * copy of the transmit message descriptor.
 */
struct k_mbox_async {
	struct _thread_base thread; /* dummy thread object */
	struct k_mbox_msg tx_msg; /* transmit message descriptor */
};

/* stack of unused asynchronous message descriptors */
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);

/*
 * Allocate an asynchronous message descriptor.
 *
 * Pops a free descriptor off the stack; blocks (K_FOREVER) until one
 * becomes available, so the pop result can safely be ignored.
 */
static inline void mbox_async_alloc(struct k_mbox_async **async)
{
	(void)k_stack_pop(&async_msg_free, (stack_data_t *)async, K_FOREVER);
}

/* Free an asynchronous message descriptor by returning it to the stack. */
static inline void mbox_async_free(struct k_mbox_async *async)
{
	k_stack_push(&async_msg_free, (stack_data_t)async);
}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
  37. #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
  38. /*
  39. * Do run-time initialization of mailbox object subsystem.
  40. */
  41. static int init_mbox_module(const struct device *dev)
  42. {
  43. ARG_UNUSED(dev);
  44. /* array of asynchronous message descriptors */
  45. static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];
  46. #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
  47. /*
  48. * Create pool of asynchronous message descriptors.
  49. *
  50. * A dummy thread requires minimal initialization, since it never gets
  51. * to execute. The _THREAD_DUMMY flag is sufficient to distinguish a
  52. * dummy thread from a real one. The threads are *not* added to the
  53. * kernel's list of known threads.
  54. *
  55. * Once initialized, the address of each descriptor is added to a stack
  56. * that governs access to them.
  57. */
  58. int i;
  59. for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
  60. z_init_thread_base(&async_msg[i].thread, 0, _THREAD_DUMMY, 0);
  61. k_stack_push(&async_msg_free, (stack_data_t)&async_msg[i]);
  62. }
  63. #endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
  64. /* Complete initialization of statically defined mailboxes. */
  65. return 0;
  66. }
  67. SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
  68. #endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
  69. void k_mbox_init(struct k_mbox *mbox)
  70. {
  71. z_waitq_init(&mbox->tx_msg_queue);
  72. z_waitq_init(&mbox->rx_msg_queue);
  73. mbox->lock = (struct k_spinlock) {};
  74. SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
  75. }
  76. /**
  77. * @brief Check compatibility of sender's and receiver's message descriptors.
  78. *
  79. * Compares sender's and receiver's message descriptors to see if they are
  80. * compatible. If so, the descriptor fields are updated to reflect that a
  81. * match has occurred.
  82. *
  83. * @param tx_msg Pointer to transmit message descriptor.
  84. * @param rx_msg Pointer to receive message descriptor.
  85. *
  86. * @return 0 if successfully matched, otherwise -1.
  87. */
  88. static int mbox_message_match(struct k_mbox_msg *tx_msg,
  89. struct k_mbox_msg *rx_msg)
  90. {
  91. uint32_t temp_info;
  92. if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
  93. (tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
  94. ((rx_msg->rx_source_thread == (k_tid_t)K_ANY) ||
  95. (rx_msg->rx_source_thread == tx_msg->rx_source_thread))) {
  96. /* update thread identifier fields for both descriptors */
  97. rx_msg->rx_source_thread = tx_msg->rx_source_thread;
  98. tx_msg->tx_target_thread = rx_msg->tx_target_thread;
  99. /* update application info fields for both descriptors */
  100. temp_info = rx_msg->info;
  101. rx_msg->info = tx_msg->info;
  102. tx_msg->info = temp_info;
  103. /* update data size field for receiver only */
  104. if (rx_msg->size > tx_msg->size) {
  105. rx_msg->size = tx_msg->size;
  106. }
  107. /* update data location fields for receiver only */
  108. rx_msg->tx_data = tx_msg->tx_data;
  109. rx_msg->tx_block = tx_msg->tx_block;
  110. if (rx_msg->tx_data != NULL) {
  111. rx_msg->tx_block.data = NULL;
  112. } else if (rx_msg->tx_block.data != NULL) {
  113. rx_msg->tx_data = rx_msg->tx_block.data;
  114. } else {
  115. /* no data */
  116. }
  117. /* update syncing thread field for receiver only */
  118. rx_msg->_syncing_thread = tx_msg->_syncing_thread;
  119. return 0;
  120. }
  121. return -1;
  122. }
  123. /**
  124. * @brief Dispose of received message.
  125. *
  126. * Releases any memory pool block still associated with the message,
  127. * then notifies the sender that message processing is complete.
  128. *
  129. * @param rx_msg Pointer to receive message descriptor.
  130. *
  131. * @return N/A
  132. */
  133. static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
  134. {
  135. struct k_thread *sending_thread;
  136. struct k_mbox_msg *tx_msg;
  137. /* do nothing if message was disposed of when it was received */
  138. if (rx_msg->_syncing_thread == NULL) {
  139. return;
  140. }
  141. if (rx_msg->tx_block.data != NULL) {
  142. rx_msg->tx_block.data = NULL;
  143. }
  144. /* recover sender info */
  145. sending_thread = rx_msg->_syncing_thread;
  146. rx_msg->_syncing_thread = NULL;
  147. tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
  148. /* update data size field for sender */
  149. tx_msg->size = rx_msg->size;
  150. #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
  151. /*
  152. * asynchronous send: free asynchronous message descriptor +
  153. * dummy thread pair, then give semaphore (if needed)
  154. */
  155. if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
  156. struct k_sem *async_sem = tx_msg->_async_sem;
  157. mbox_async_free((struct k_mbox_async *)sending_thread);
  158. if (async_sem != NULL) {
  159. k_sem_give(async_sem);
  160. }
  161. return;
  162. }
  163. #endif
  164. /* synchronous send: wake up sending thread */
  165. arch_thread_return_value_set(sending_thread, 0);
  166. z_mark_thread_as_not_pending(sending_thread);
  167. z_ready_thread(sending_thread);
  168. z_reschedule_unlocked();
  169. }
/**
 * @brief Send a mailbox message.
 *
 * Helper routine that handles both synchronous and asynchronous sends.
 *
 * @param mbox Pointer to the mailbox object.
 * @param tx_msg Pointer to transmit message descriptor.
 * @param timeout Maximum time (milliseconds) to wait for the message to be
 * received (although not necessarily completely processed).
 * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long
 * as necessary.
 *
 * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
 */
static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
			    k_timeout_t timeout)
{
	struct k_thread *sending_thread;
	struct k_thread *receiving_thread;
	struct k_mbox_msg *rx_msg;
	k_spinlock_key_t key;

	/* save sender id so it can be used during message matching */
	tx_msg->rx_source_thread = _current;

	/* finish readying sending thread (actual or dummy) for send */
	sending_thread = tx_msg->_syncing_thread;
	sending_thread->base.swap_data = tx_msg;

	/* search mailbox's rx queue for a compatible receiver */
	key = k_spin_lock(&mbox->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, message_put, mbox, timeout);

	_WAIT_Q_FOR_EACH(&mbox->rx_msg_queue, receiving_thread) {
		rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;

		if (mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take receiver out of rx queue */
			z_unpend_thread(receiving_thread);

			/* ready receiver for execution */
			arch_thread_return_value_set(receiving_thread, 0);
			z_ready_thread(receiving_thread);

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
			/*
			 * asynchronous send: swap out current thread
			 * if receiver has priority, otherwise let it continue
			 *
			 * note: dummy sending thread sits (unqueued)
			 * until the receiver consumes the message
			 */
			if ((sending_thread->base.thread_state & _THREAD_DUMMY)
			    != 0U) {
				/* z_reschedule() releases the spinlock */
				z_reschedule(&mbox->lock, key);
				return 0;
			}
#endif
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

			/*
			 * synchronous send: pend current thread (unqueued)
			 * until the receiver consumes the message
			 *
			 * note: NULL wait queue — the sender is not placed on
			 * any queue; it is woken directly by
			 * mbox_message_dispose(). z_pend_curr() releases the
			 * spinlock.
			 */
			int ret = z_pend_curr(&mbox->lock, key, NULL, K_FOREVER);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);
			return ret;
		}
	}

	/* didn't find a matching receiver: don't wait for one */
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, -ENOMSG);

		k_spin_unlock(&mbox->lock, key);
		return -ENOMSG;
	}

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/* asynchronous send: dummy thread waits on tx queue for receiver */
	if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
		z_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
		k_spin_unlock(&mbox->lock, key);
		return 0;
	}
#endif
	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

	/* synchronous send: sender waits on tx queue for receiver or timeout */
	int ret = z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);

	return ret;
}
  251. int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
  252. k_timeout_t timeout)
  253. {
  254. /* configure things for a synchronous send, then send the message */
  255. tx_msg->_syncing_thread = _current;
  256. SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout);
  257. int ret = mbox_message_put(mbox, tx_msg, timeout);
  258. SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, put, mbox, timeout, ret);
  259. return ret;
  260. }
  261. #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
  262. void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
  263. struct k_sem *sem)
  264. {
  265. struct k_mbox_async *async;
  266. SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, async_put, mbox, sem);
  267. /*
  268. * allocate an asynchronous message descriptor, configure both parts,
  269. * then send the message asynchronously
  270. */
  271. mbox_async_alloc(&async);
  272. async->thread.prio = _current->base.prio;
  273. async->tx_msg = *tx_msg;
  274. async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
  275. async->tx_msg._async_sem = sem;
  276. (void)mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
  277. SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, async_put, mbox, sem);
  278. }
  279. #endif
  280. void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
  281. {
  282. /* handle case where data is to be discarded */
  283. if (buffer == NULL) {
  284. rx_msg->size = 0;
  285. mbox_message_dispose(rx_msg);
  286. return;
  287. }
  288. /* copy message data to buffer, then dispose of message */
  289. if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0U)) {
  290. (void)memcpy(buffer, rx_msg->tx_data, rx_msg->size);
  291. }
  292. mbox_message_dispose(rx_msg);
  293. }
  294. /**
  295. * @brief Handle immediate consumption of received mailbox message data.
  296. *
  297. * Checks to see if received message data should be kept for later retrieval,
  298. * or if the data should consumed immediately and the message disposed of.
  299. *
  300. * The data is consumed immediately in either of the following cases:
  301. * 1) The receiver requested immediate retrieval by suppling a buffer
  302. * to receive the data.
  303. * 2) There is no data to be retrieved. (i.e. Data size is 0 bytes.)
  304. *
  305. * @param rx_msg Pointer to receive message descriptor.
  306. * @param buffer Pointer to buffer to receive data.
  307. *
  308. * @return 0
  309. */
  310. static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
  311. {
  312. if (buffer != NULL) {
  313. /* retrieve data now, then dispose of message */
  314. k_mbox_data_get(rx_msg, buffer);
  315. } else if (rx_msg->size == 0U) {
  316. /* there is no data to get, so just dispose of message */
  317. mbox_message_dispose(rx_msg);
  318. } else {
  319. /* keep message around for later data retrieval */
  320. }
  321. return 0;
  322. }
/*
 * Receive a mailbox message, optionally retrieving its data into buffer.
 * Returns 0 on success, -ENOMSG if no matching sender and K_NO_WAIT,
 * or the z_pend_curr() result (e.g. -EAGAIN on timeout) otherwise.
 */
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
	       k_timeout_t timeout)
{
	struct k_thread *sending_thread;
	struct k_mbox_msg *tx_msg;
	k_spinlock_key_t key;
	int result;

	/* save receiver id so it can be used during message matching */
	rx_msg->tx_target_thread = _current;

	/* search mailbox's tx queue for a compatible sender */
	key = k_spin_lock(&mbox->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, get, mbox, timeout);

	_WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
		tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

		if (mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take sender out of mailbox's tx queue */
			z_unpend_thread(sending_thread);

			/* unlock before data copy/dispose, which may block */
			k_spin_unlock(&mbox->lock, key);

			/* consume message data immediately, if needed */
			result = mbox_message_data_check(rx_msg, buffer);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
			return result;
		}
	}

	/* didn't find a matching sender */

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, -ENOMSG);

		/* don't wait for a matching sender to appear */
		k_spin_unlock(&mbox->lock, key);
		return -ENOMSG;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout);

	/* wait until a matching sender appears or a timeout occurs */
	_current->base.swap_data = rx_msg;
	/* z_pend_curr() releases the spinlock */
	result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);

	/* consume message data immediately, if needed */
	if (result == 0) {
		result = mbox_message_data_check(rx_msg, buffer);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);

	return result;
}
  365. void k_mbox_clear_msg(struct k_mbox *mbox)
  366. {
  367. struct k_thread *sending_thread;
  368. struct k_mbox_msg *tx_msg;
  369. struct k_mbox_msg rx_msg;
  370. k_spinlock_key_t key;
  371. /* search mailbox's tx queue for a compatible sender */
  372. key = k_spin_lock(&mbox->lock);
  373. _WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
  374. memset(&rx_msg, 0 ,sizeof(struct k_mbox_msg));
  375. tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
  376. rx_msg.info = tx_msg->size;
  377. rx_msg.size = tx_msg->size;
  378. rx_msg.rx_source_thread = (k_tid_t)K_ANY;
  379. tx_msg->tx_target_thread = (k_tid_t)K_ANY;
  380. if (mbox_message_match(tx_msg, &rx_msg) == 0) {
  381. printk("_mbox_message_data_check \n");
  382. /* take sender out of mailbox's tx queue */
  383. z_unpend_thread(sending_thread);
  384. z_abort_thread_timeout(sending_thread);
  385. rx_msg.size = 0;
  386. mbox_message_data_check(&rx_msg, NULL);
  387. }
  388. }
  389. k_spin_unlock(&mbox->lock, key);
  390. return;
  391. }
  392. int k_mbox_get_pending_msg_cnt(struct k_mbox *mbox,k_tid_t target_thread)
  393. {
  394. struct k_thread *sending_thread;
  395. struct k_mbox_msg *tx_msg;
  396. k_spinlock_key_t key;
  397. int result = 0;
  398. /* search mailbox's tx queue for a compatible sender */
  399. key = k_spin_lock(&mbox->lock);
  400. _WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
  401. tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
  402. if (tx_msg->tx_target_thread == target_thread) {
  403. result ++;
  404. }
  405. }
  406. k_spin_unlock(&mbox->lock, key);
  407. return result;
  408. }