/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief dynamic-size QUEUE object.
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <toolchain.h>
#include <wait_q.h>
#include <ksched.h>
#include <init.h>
#include <syscall_handler.h>
#include <kernel_internal.h>
#include <sys/check.h>
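
/* Wrapper node used by the k_queue_alloc_*() APIs: the queue links in the
 * alloc_node itself (tagged with a flag bit) while the user's pointer rides
 * along in @a data. See z_queue_node_peek() for the matching unwrap logic.
 */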
struct alloc_node {
	sys_sfnode_t node;
	void *data;
};

void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
	void *ret;

	if ((node != NULL) && (sys_sfnode_flags_get(node) != (uint8_t)0)) {
		/* If the flag is set, then the enqueue operation for this item
		 * did a behind-the-scenes memory allocation of an alloc_node
		 * struct, which is what got put in the queue. Free it and pass
		 * back the data pointer.
		 */
		struct alloc_node *anode;

		anode = CONTAINER_OF(node, struct alloc_node, node);
		ret = anode->data;
		if (needs_free) {
			k_free(anode);
		}
	} else {
		/* Data was directly placed in the queue, with the first word
		 * reserved for the linked list. User mode isn't allowed to
		 * do this, although it can get data sent this way.
		 */
		ret = (void *)node;
	}

	return ret;
}
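
/* For reference, a minimal sketch of a directly-enqueued item (the type and
 * field names here are illustrative, not part of this API): the first word
 * of the item must be left untouched by the caller, since the kernel uses
 * it as the sflist link.
 *
 *	struct my_item {
 *		void *reserved;		first word, used as the list link
 *		uint32_t payload;	user data follows
 *	};
 *
 *	static struct my_item item = { .payload = 42 };
 *	k_queue_append(&my_queue, &item);
 */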

void z_impl_k_queue_init(struct k_queue *queue)
{
	sys_sflist_init(&queue->data_q);
	queue->lock = (struct k_spinlock) {};
	z_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&queue->poll_events);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_queue, queue);

	z_object_init(queue);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_queue_init(struct k_queue *queue)
{
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
	z_impl_k_queue_init(queue);
}
#include <syscalls/k_queue_init_mrsh.c>
#endif
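
/* Make @a thread runnable and hand it @a data as the return value of the
 * k_queue_get() call it was blocked in.
 */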
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	z_thread_return_value_set_with_data(thread, 0, data);
	z_ready_thread(thread);
}

static inline void handle_poll_events(struct k_queue *queue, uint32_t state)
{
#ifdef CONFIG_POLL
	z_handle_obj_poll_events(&queue->poll_events, state);
#endif
}

void z_impl_k_queue_cancel_wait(struct k_queue *queue)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_queue, cancel_wait, queue);

	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	struct k_thread *first_pending_thread;

	first_pending_thread = z_unpend_first_thread(&queue->wait_q);

	if (first_pending_thread != NULL) {
		prepare_thread_to_run(first_pending_thread, NULL);
	}

	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
	z_reschedule(&queue->lock, key);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	z_impl_k_queue_cancel_wait(queue);
}
#include <syscalls/k_queue_cancel_wait_mrsh.c>
#endif
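
/* Common insert path. If a thread is already waiting in k_queue_get(), the
 * data is handed to it directly and never touches the list. Otherwise the
 * item is linked in: either the caller's memory is used (first word
 * reserved), or, when @a alloc is true, a heap-allocated alloc_node wrapper
 * is used and tagged with flag 0x1 so the get side knows to free it.
 */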
static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
			    bool alloc, bool is_append)
{
	struct k_thread *first_pending_thread;
	k_spinlock_key_t key = k_spin_lock(&queue->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, queue_insert, queue, alloc);

	if (is_append) {
		prev = sys_sflist_peek_tail(&queue->data_q);
	}
	first_pending_thread = z_unpend_first_thread(&queue->wait_q);

	if (first_pending_thread != NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

		prepare_thread_to_run(first_pending_thread, data);
		z_reschedule(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);

		return 0;
	}

	/* Only need to actually allocate if no threads are pending */
	if (alloc) {
		struct alloc_node *anode;

		anode = z_thread_malloc(sizeof(*anode));
		if (anode == NULL) {
			k_spin_unlock(&queue->lock, key);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc,
						       -ENOMEM);

			return -ENOMEM;
		}
		anode->data = data;
		sys_sfnode_init(&anode->node, 0x1);
		data = anode;
	} else {
		sys_sfnode_init(data, 0x0);
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

	sys_sflist_insert(&queue->data_q, prev, data);
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
	z_reschedule(&queue->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);

	return 0;
}

void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, insert, queue);

	(void)queue_insert(queue, prev, data, false, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, insert, queue);
}

void k_queue_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append, queue);

	(void)queue_insert(queue, NULL, data, false, true);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append, queue);
}

void k_queue_prepend(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, prepend, queue);

	(void)queue_insert(queue, NULL, data, false, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, prepend, queue);
}

int32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_append, queue);

	int32_t ret = queue_insert(queue, NULL, data, true, true);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_append, queue, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue,
						  void *data)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_alloc_append(queue, data);
}
#include <syscalls/k_queue_alloc_append_mrsh.c>
#endif

int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_prepend, queue);

	int32_t ret = queue_insert(queue, NULL, data, true, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_prepend, queue, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue,
						   void *data)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_alloc_prepend(queue, data);
}
#include <syscalls/k_queue_alloc_prepend_mrsh.c>
#endif
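
/* Append a caller-built singly linked list of items in one operation. Each
 * node's first word is its next pointer, matching the reserved-first-word
 * convention above; waiting threads are served from the head of the list
 * before any remainder is spliced onto the queue.
 */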
int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append_list, queue);

	/* invalid head or tail of list */
	CHECKIF(head == NULL || tail == NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, -EINVAL);

		return -EINVAL;
	}

	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	struct k_thread *thread = NULL;

	if (head != NULL) {
		thread = z_unpend_first_thread(&queue->wait_q);
	}

	while ((head != NULL) && (thread != NULL)) {
		prepare_thread_to_run(thread, head);
		head = *(void **)head;
		thread = z_unpend_first_thread(&queue->wait_q);
	}

	if (head != NULL) {
		sys_sflist_append_list(&queue->data_q, head, tail);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, 0);

	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
	z_reschedule(&queue->lock, key);

	return 0;
}

int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
	int ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, merge_slist, queue);

	/* list must not be empty */
	CHECKIF(sys_slist_is_empty(list)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, -EINVAL);

		return -EINVAL;
	}

	/*
	 * note: this works as long as:
	 * - the slist implementation keeps the next pointer as the first
	 *   field of the node object type
	 * - list->tail->next = NULL.
	 * - the sflist implementation only differs from slist by stuffing
	 *   flag bits into the lower order bits of the data pointer
	 * - source list is really an slist and not an sflist with flags set
	 */
	ret = k_queue_append_list(queue, list->head, list->tail);
	CHECKIF(ret != 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, ret);

		return ret;
	}
	sys_slist_init(list);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, 0);

	return 0;
}
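
/* Remove and return the head element, blocking for up to @a timeout if the
 * queue is empty. If the element was enqueued via an alloc_* call, the
 * wrapper node is freed here and the original data pointer is returned.
 * Returns NULL if the wait times out or is cancelled.
 */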
void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	void *data;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, get, queue, timeout);

	if (likely(!sys_sflist_is_empty(&queue->data_q))) {
		sys_sfnode_t *node;

		node = sys_sflist_get_not_empty(&queue->data_q);
		data = z_queue_node_peek(node, true);
		k_spin_unlock(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, data);

		return data;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, get, queue, timeout);

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, NULL);

		return NULL;
	}

	int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout,
				       (ret != 0) ? NULL : _current->base.swap_data);

	return (ret != 0) ? NULL : _current->base.swap_data;
}
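
/* Note that the two helpers below compare list node addresses, so they are
 * only meaningful for items that were enqueued directly (k_queue_append()
 * and friends): an item added via an alloc_* call is represented in the
 * list by its heap-allocated wrapper, whose address differs from the
 * user's data pointer.
 */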
bool k_queue_remove(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, remove, queue);

	bool ret = sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, remove, queue, ret);

	return ret;
}

bool k_queue_unique_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, unique_append, queue);

	sys_sfnode_t *test;

	SYS_SFLIST_FOR_EACH_NODE(&queue->data_q, test) {
		if (test == (sys_sfnode_t *)data) {
			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, false);

			return false;
		}
	}

	k_queue_append(queue, data);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, true);

	return true;
}
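
/* The peek variants pass needs_free = false so that an allocated wrapper
 * node stays on the queue: peeking is non-destructive, and a later
 * k_queue_get() still owns the free.
 */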
void *z_impl_k_queue_peek_head(struct k_queue *queue)
{
	void *ret = z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);

	SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_head, queue, ret);

	return ret;
}

void *z_impl_k_queue_peek_tail(struct k_queue *queue)
{
	void *ret = z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);

	SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_tail, queue, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_queue_get(struct k_queue *queue,
				       k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_get(queue, timeout);
}
#include <syscalls/k_queue_get_mrsh.c>

static inline int z_vrfy_k_queue_is_empty(struct k_queue *queue)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_is_empty(queue);
}
#include <syscalls/k_queue_is_empty_mrsh.c>

static inline void *z_vrfy_k_queue_peek_head(struct k_queue *queue)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_head(queue);
}
#include <syscalls/k_queue_peek_head_mrsh.c>

static inline void *z_vrfy_k_queue_peek_tail(struct k_queue *queue)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_tail(queue);
}
#include <syscalls/k_queue_peek_tail_mrsh.c>
#endif /* CONFIG_USERSPACE */