/* os_message.c — async message pool built on the Zephyr mailbox API. */
  1. /*
  2. * Copyright (c) 2017 Actions Semi Co., Ltd.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. * Author: wh<wanghui@actions-semi.com>
  16. *
  17. * Change log:
  18. * 2023/5/11: Created by wh.
  19. */
  20. #include "os_common_api.h"
  21. #include "string.h"
  22. #include <zephyr.h>
  23. #include <sys/atomic.h>
  24. #include <assert.h>
/****************************************************************************
 * Private Macro Prototypes
 ****************************************************************************/
#define HIGH_PRIORITY_MSG 1      /* mbox 'info' tag: discardable high-priority message */
#define NORMAL_PRIORITY_MSG 0    /* mbox 'info' tag: ordinary message */
#define HIGH_PRIORITY_MSG_NUM 5  /* number of high-priority dedup cache slots */
  31. /****************************************************************************
  32. * Private Types
  33. ****************************************************************************/
/** One slot of the async message pool.
 *  msg_sem doubles as the slot's free/busy gate: it is taken while the
 *  slot carries an in-flight mailbox message and handed to
 *  os_mbox_async_put(), which gives it back when delivery completes.
 */
struct msg_info
{
	os_sem msg_sem;          /* free/busy gate for this slot */
#ifdef CONFIG_MESSAGE_DEBUG
	os_tid_t sender;         /* thread that queued the message */
	os_tid_t receiver;       /* destination thread */
#endif
	char msg[MSG_MEM_SIZE];  /* raw message payload */
};
/** Dedup record for a high-priority message: keeps a copy of the payload
 *  while the message is pending so identical sends can be dropped
 *  (see os_send_async_msg_discardable / os_receive_msg).
 */
struct msg_cache_item {
	sys_snode_t node;           /* used for delay_msg list */
	uint8_t busy_flag;          /* 1 while this record is allocated */
	uint8_t high_priority_flag; /* 1 once linked into high_priority_msg_list */
	os_tid_t sender;            /* thread that queued the message */
	os_tid_t receiver;          /* destination thread */
	char msg[MSG_MEM_SIZE];     /* payload copy used for memcmp dedup */
};
/** Global message pool: fixed slots for in-flight mailbox messages plus
 *  a small cache of dedup records for high-priority messages.
 */
struct msg_pool
{
	atomic_t free_size;              /* count of currently free pool slots */
	uint8_t pool_size;               /* total slots (CONFIG_NUM_MBOX_ASYNC_MSGS) */
	uint8_t free_high_msg_cache_num; /* free entries in high_msg_cache[] */
	sys_slist_t high_priority_msg_list; /* pending high-priority dedup records */
	struct msg_info pool[CONFIG_NUM_MBOX_ASYNC_MSGS];
	struct msg_cache_item high_msg_cache[HIGH_PRIORITY_MSG_NUM];
};
/* Map a list node back to its enclosing msg_cache_item. */
#define MSG_INFO(_node) CONTAINER_OF(_node, struct msg_cache_item, node)
/****************************************************************************
 * Private Data
 ****************************************************************************/
/** single mailbox shared by all threads for message passing */
K_MBOX_DEFINE(global_mailbox);
/** mutex for msg pool */
OS_MUTEX_DEFINE(msg_pool_mutex);
#ifdef CONFIG_SOC_NO_PSRAM
__in_section_unique(osal.noinit.msg_pool)
#endif
static struct msg_pool globle_msg_pool;
/* Debug-only counter referenced from commented-out logging below. */
static int high_priority_msg_cnt = 0;
  74. /****************************************************************************
  75. * Private Functions
  76. ****************************************************************************/
  77. static struct msg_cache_item *_msg_pool_get_free_msg_cache(void)
  78. {
  79. struct msg_pool *pool = &globle_msg_pool;
  80. for (uint8_t i = 0 ; i < HIGH_PRIORITY_MSG_NUM; i++) {
  81. struct msg_cache_item *msg_cache = &pool->high_msg_cache[i];
  82. if (!msg_cache->busy_flag) {
  83. msg_cache->busy_flag = 1;
  84. msg_cache->high_priority_flag = 0;
  85. pool->free_high_msg_cache_num--;
  86. //SYS_LOG_INF("get free_high_msg_cache_num %d \n", pool->free_high_msg_cache_num);
  87. return msg_cache;
  88. }
  89. }
  90. //SYS_LOG_INF("get free_high_msg_cache_num %d \n", pool->free_high_msg_cache_num);
  91. return NULL;
  92. }
  93. static void _msg_pool_release_msg_cache(struct msg_cache_item *msg_cache)
  94. {
  95. struct msg_pool *pool = &globle_msg_pool;
  96. if (msg_cache) {
  97. msg_cache->busy_flag = 0;
  98. pool->free_high_msg_cache_num++;
  99. //SYS_LOG_INF("release free_high_msg_cache_num %d \n", pool->free_high_msg_cache_num);
  100. }
  101. }
  102. static struct msg_info *_msg_pool_get_free_msg_info(void)
  103. {
  104. struct msg_pool *pool = &globle_msg_pool;
  105. struct msg_info *result = NULL;
  106. for (uint8_t i = 0 ; i < pool->pool_size; i++) {
  107. struct msg_info * msg_content = &pool->pool[i];
  108. if (k_sem_take(&msg_content->msg_sem, SYS_TIMEOUT_MS(OS_NO_WAIT)) == 0) {
  109. memset(&msg_content->msg, 0, MSG_MEM_SIZE);
  110. result = msg_content;
  111. break;
  112. } else {
  113. //SYS_LOG_WRN("msg %d is busy\n", i);
  114. }
  115. }
  116. return result;
  117. }
  118. static int _msg_pool_send_async_msg_inner(void *receiver, void *msg, int msg_size, int high_priority)
  119. {
  120. os_mbox_msg send_msg;
  121. struct msg_info *msg_content;
  122. __ASSERT(!k_is_in_isr(),"send messag in isr");
  123. msg_content = _msg_pool_get_free_msg_info();
  124. if (!msg_content) {
  125. SYS_LOG_ERR("msg_content is NULL ... ");
  126. return -ENOMEM;
  127. }
  128. atomic_dec(&globle_msg_pool.free_size);
  129. memcpy(&msg_content->msg, msg, msg_size);
  130. #ifdef CONFIG_MESSAGE_DEBUG
  131. msg_content->receiver = (os_tid_t)receiver;
  132. msg_content->sender = os_current_get();
  133. #endif
  134. /* prepare to send message */
  135. send_msg.info = high_priority;
  136. send_msg.size = msg_size;
  137. send_msg.tx_data = &msg_content->msg;
  138. send_msg.tx_target_thread = (os_tid_t)receiver;
  139. /* send message containing most current data and loop around */
  140. os_mbox_async_put(&global_mailbox, &send_msg, &msg_content->msg_sem);
  141. return 0;
  142. }
  143. /****************************************************************************
  144. * Public Functions
  145. ****************************************************************************/
  146. int msg_pool_get_free_msg_num(void)
  147. {
  148. return atomic_get(&globle_msg_pool.free_size);
  149. }
  150. void msg_pool_dump(void(*dump_fn)(os_tid_t sender, os_tid_t receiver,
  151. const char *content, int max_size))
  152. {
  153. struct msg_pool *pool = &globle_msg_pool;
  154. printk("mbox free msg cnt %d/%d\n", msg_pool_get_free_msg_num(), pool->pool_size);
  155. for (uint8_t i = 0 ; i < pool->pool_size; i++) {
  156. struct msg_info * msg_content = &pool->pool[i];
  157. if (os_sem_take(&msg_content->msg_sem, OS_NO_WAIT) != 0) {
  158. printk("busy msg %d:\n", i);
  159. #ifdef CONFIG_MESSAGE_DEBUG
  160. dump_fn(msg_content->sender, msg_content->receiver, msg_content->msg, MSG_MEM_SIZE);
  161. #else
  162. dump_fn(OS_ANY, OS_ANY, msg_content->msg, MSG_MEM_SIZE);
  163. #endif
  164. } else {
  165. os_sem_give(&msg_content->msg_sem);
  166. }
  167. }
  168. printk("high priority msg free %d used %d total %d \n",
  169. pool->free_high_msg_cache_num,
  170. high_priority_msg_cnt,
  171. HIGH_PRIORITY_MSG_NUM);
  172. for (uint8_t i = 0 ; i < HIGH_PRIORITY_MSG_NUM; i++) {
  173. struct msg_cache_item *msg_content = &pool->high_msg_cache[i];
  174. if (msg_content->busy_flag) {
  175. #ifdef CONFIG_MESSAGE_DEBUG
  176. dump_fn(msg_content->sender, msg_content->receiver, msg_content->msg, MSG_MEM_SIZE);
  177. #else
  178. dump_fn(OS_ANY, OS_ANY, msg_content->msg, MSG_MEM_SIZE);
  179. #endif
  180. }
  181. }
  182. }
/* Send a high-priority message, silently dropping it when an identical
 * payload (same first msg_size bytes) is already pending delivery.
 * On a successful send, the payload is recorded in the dedup cache; the
 * record is removed by os_receive_msg() once the message is consumed.
 * Returns 0 on success or duplicate-drop, negative errno on failure. */
int os_send_async_msg_discardable(void *receiver, void *msg, int msg_size)
{
	sys_snode_t *node, *tmp;
	struct msg_cache_item *msg_info;
	int ret;

	os_mutex_lock(&msg_pool_mutex, OS_FOREVER);
	/* Dedup scan: same payload already queued -> drop this send. */
	SYS_SLIST_FOR_EACH_NODE_SAFE(&globle_msg_pool.high_priority_msg_list, node, tmp) {
		msg_info = MSG_INFO(node);
		if (!memcmp(msg_info->msg, msg, msg_size)) {
			//SYS_LOG_INF("drop high priority msg \n");
			os_mutex_unlock(&msg_pool_mutex);
			return 0;
		}
	}
	ret = _msg_pool_send_async_msg_inner(receiver, msg, msg_size, HIGH_PRIORITY_MSG);
	if (!ret) {
		/* Sent: remember the payload so later duplicates can be dropped. */
		msg_info = _msg_pool_get_free_msg_cache();
		if (msg_info){
			/* NOTE(review): assumes msg_size <= MSG_MEM_SIZE — confirm callers */
			memcpy(&msg_info->msg, msg, msg_size);
			msg_info->receiver = (os_tid_t)receiver;
			msg_info->sender = os_current_get();
			msg_info->high_priority_flag = 1;
			sys_slist_append(&globle_msg_pool.high_priority_msg_list, (sys_snode_t *)msg_info);
			//SYS_LOG_INF("add high_priority_msg_cnt %d \n",++high_priority_msg_cnt);
		} else {
			/* Message was still sent; only duplicate detection is lost. */
			SYS_LOG_WRN("no memory for immediately msg record \n");
		}
	}
	os_mutex_unlock(&msg_pool_mutex);
	return ret;
}
  214. int os_send_async_msg(void *receiver, void *msg, int msg_size)
  215. {
  216. int ret = 0;
  217. int try_cnt = 500;
  218. try_again:
  219. os_mutex_lock(&msg_pool_mutex, OS_FOREVER);
  220. ret = _msg_pool_send_async_msg_inner(receiver, msg, msg_size, NORMAL_PRIORITY_MSG);
  221. os_mutex_unlock(&msg_pool_mutex);
  222. if (ret) {
  223. SYS_LOG_INF("wait msg_content again %d",try_cnt);
  224. os_sleep(2);
  225. if(try_cnt-- <= 0 || receiver == os_current_get()) {
  226. SYS_LOG_ERR("msg_content is NULL try_cnt %d receiver %p current %p\n",try_cnt,receiver,os_current_get());
  227. return ret;
  228. } else {
  229. goto try_again;
  230. }
  231. }
  232. return ret;
  233. }
  234. int os_receive_msg(void *msg, int msg_size,int timeout)
  235. {
  236. os_mbox_msg recv_msg;
  237. char buffer[MSG_MEM_SIZE];
  238. memset(buffer, 0, msg_size);
  239. /* prepare to receive message */
  240. recv_msg.info = msg_size;
  241. recv_msg.size = msg_size;
  242. recv_msg.rx_source_thread = OS_ANY;
  243. /* get a data item, waiting as long as needed */
  244. if (os_mbox_get(&global_mailbox, &recv_msg, buffer, SYS_TIMEOUT_MS(timeout))) {
  245. //SYS_LOG_INF("no message");
  246. return -ETIMEDOUT;
  247. }
  248. atomic_inc(&globle_msg_pool.free_size);
  249. /* copy msg from recvied buffer */
  250. memcpy(msg, buffer, msg_size);
  251. os_mutex_lock(&msg_pool_mutex, OS_FOREVER);
  252. /* remove high priority msg */
  253. if (recv_msg.info == HIGH_PRIORITY_MSG) {
  254. sys_snode_t *node, *tmp;
  255. int cnt = 0;
  256. SYS_SLIST_FOR_EACH_NODE_SAFE(&globle_msg_pool.high_priority_msg_list, node, tmp) {
  257. struct msg_cache_item *msg_info = MSG_INFO(node);
  258. if (!memcmp(msg_info->msg, msg, msg_size)) {
  259. sys_slist_find_and_remove(&globle_msg_pool.high_priority_msg_list, (sys_snode_t *)node);
  260. _msg_pool_release_msg_cache(msg_info);
  261. //SYS_LOG_INF("release high_priority_msg_cnt %d \n",--high_priority_msg_cnt);
  262. break;
  263. } else {
  264. }
  265. cnt++;
  266. }
  267. //SYS_LOG_INF("high_priority cnt %d\n",cnt);
  268. }
  269. os_mutex_unlock(&msg_pool_mutex);
  270. return 0;
  271. }
/* Wait for either a mailbox message or the given semaphore, whichever
 * fires first.
 *
 * Returns OS_POLL_MSG when a message was received into msg,
 * OS_POLL_SEM when the semaphore was taken, OS_POLL_TIMEOUT otherwise.
 * With sem == NULL (or without CONFIG_POLL) this degenerates to a plain
 * os_receive_msg() wait. */
int os_poll_msg(void *msg, int msg_size, os_sem *sem, int timeout)
{
#ifdef CONFIG_POLL
struct k_poll_event events[2];
int ret = OS_POLL_TIMEOUT;
/* No semaphore to watch: just block on the mailbox. */
if (sem == NULL) {
if (!os_receive_msg(msg, msg_size, timeout)) {
ret = OS_POLL_MSG;
}
return ret;
}
k_poll_event_init(&events[0], K_POLL_TYPE_MBOX_DATA_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY, &global_mailbox);
k_poll_event_init(&events[1], K_POLL_TYPE_SEM_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY, sem);
int rc = k_poll(events, ARRAY_SIZE(events), SYS_TIMEOUT_MS(timeout));
if (rc == 0) {
/* Mailbox readiness wins when both events fired in the same wake-up. */
if (events[0].state == K_POLL_STATE_MBOX_DATA_AVAILABLE) {
/* Data was signalled, so a zero-timeout receive should succeed. */
if (!os_receive_msg(msg, msg_size, 0)) {
ret = OS_POLL_MSG;
} else {
SYS_LOG_WRN("mbox (0x%x) poll err", (uint32_t)&global_mailbox);
}
} else if (events[1].state == K_POLL_STATE_SEM_AVAILABLE) {
/* Another thread may have consumed the sem first; verify the take. */
if (!k_sem_take(events[1].sem, K_NO_WAIT)) {
ret = OS_POLL_SEM;
} else {
SYS_LOG_WRN("sem (0x%x) poll err", (uint32_t)sem);
}
}
}
return ret;
#else
return os_receive_msg(msg, msg_size, timeout);
#endif
}
  308. void os_msg_clean(void)
  309. {
  310. unsigned int key = irq_lock();
  311. os_mbox_clear_msg(&global_mailbox);
  312. atomic_set(&globle_msg_pool.free_size, CONFIG_NUM_MBOX_ASYNC_MSGS);
  313. irq_unlock(key);
  314. }
  315. int os_get_pending_msg_cnt(void)
  316. {
  317. return k_mbox_get_pending_msg_cnt(&global_mailbox, os_current_get());
  318. }
  319. void os_msg_init(void)
  320. {
  321. struct msg_pool *pool = &globle_msg_pool;
  322. pool->free_size = CONFIG_NUM_MBOX_ASYNC_MSGS;
  323. pool->pool_size = CONFIG_NUM_MBOX_ASYNC_MSGS;
  324. pool->free_high_msg_cache_num = HIGH_PRIORITY_MSG_NUM;
  325. for (uint8_t i = 0 ; i < pool->pool_size; i++) {
  326. struct msg_info *msg_content = &pool->pool[i];
  327. os_sem_init(&msg_content->msg_sem, 1, 1);
  328. }
  329. for (uint8_t i = 0 ; i < HIGH_PRIORITY_MSG_NUM; i++) {
  330. struct msg_cache_item *msg_cache = &pool->high_msg_cache[i];
  331. memset(msg_cache, 0, sizeof(struct msg_cache_item));
  332. }
  333. sys_slist_init(&globle_msg_pool.high_priority_msg_list);
  334. }