poll.c

/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * either events triggered directly, or from kernel objects or other kernel
 * constructs.
 */

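/*
 * Usage sketch (illustrative only, not part of this file): a thread can wait
 * on a semaphore and a poll signal at the same time. The names my_sem and
 * my_signal below are hypothetical.
 *
 *	struct k_poll_event events[2] = {
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_sem, 0),
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_signal, 0),
 *	};
 *
 *	int rc = k_poll(events, 2, K_FOREVER);
 *
 *	if (rc == 0) {
 *		if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *			k_sem_take(&my_sem, K_NO_WAIT);
 *		}
 *		if (events[1].state == K_POLL_STATE_SIGNALED) {
 *			... handle the raised signal ...
 *		}
 *		events[0].state = K_POLL_STATE_NOT_READY;
 *		events[1].state = K_POLL_STATE_NOT_READY;
 *	}
 */
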
#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <syscall_handler.h>
#include <sys/dlist.h>
#include <sys/util.h>
#include <sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock. Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section). Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;

	SYS_PORT_TRACING_FUNC(k_poll_api, event_init, event);
}

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (event->msgq->used_msgs > 0) {
			*state = K_POLL_STATE_MSGQ_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

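/* Recover the k_thread that embeds the given poller; returns NULL if there
 * is no poller.
 */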
static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

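/* Insert an event into a kernel object's poll_events list, keeping the list
 * ordered by the priority of the polling threads (highest-priority poller
 * first) so that it is signaled before lower-priority ones.
 */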
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		add_event(&event->msgq->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

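/* Clear the registration of every event in the array. The subsystem lock is
 * released and reacquired between events to keep the time spent with
 * interrupts locked bounded (see the note on the single lock above).
 */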
/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

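/* Check every event in the array and register those that are not yet ready
 * with their kernel object. When just_check is true (the K_NO_WAIT case)
 * nothing is registered; conditions are only sampled. Returns the number of
 * events that were registered.
 */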
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		} else {
			/* The event's condition is not met and we are either
			 * only checking (just_check) or the poller already
			 * stopped polling. No registration is needed.
			 */
			;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

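/* MODE_POLL callback: wake up the thread blocked in k_poll() when one of its
 * registered events becomes ready (or the wait is cancelled).
 */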
static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	if (z_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
				     state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_poll_api, poll, events);

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition was met, either while looping through the events above or
	 * because one of the registered events has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, 0);

		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, -EAGAIN);

		return -EAGAIN;
	}

	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, swap_rc);

	return swap_rc;
}

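/* The user-mode wrapper below validates the caller-supplied event array: it
 * copies the events into a kernel-side buffer (so the kernel never operates
 * on user-writable memory), checks object permissions for each referenced
 * kernel object, runs the poll, and copies the resulting event states back
 * to the user buffer.
 */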
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (Z_SYSCALL_VERIFY(num_events >= 0U)) {
		ret = -EINVAL;
		goto out;
	}
	if (Z_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (Z_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->msgq, K_OBJ_MSGQ));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	Z_OOPS(1);
}
#include <syscalls/k_poll_mrsh.c>
#endif

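/* Dispatch a ready event to its poller (if any): wake the polling thread in
 * MODE_POLL, submit the triggered work item in MODE_TRIGGERED, and in all
 * cases mark the event as ready.
 */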
/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		} else {
			/* Poller is in neither poll nor triggered mode;
			 * no action needed.
			 */
			;
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

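/* Called by kernel objects (semaphores, queues, message queues, ...) when
 * they become available: signal the highest-priority event, if any, that is
 * registered on the object's poll_events list.
 */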
void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* sig->result is left uninitialized */
	z_object_init(sig);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_init, sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <syscalls/k_poll_signal_init_mrsh.c>
#endif

void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{
	sig->signaled = 0U;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_reset, sig);
}

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_check, sig);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <syscalls/k_poll_signal_check_mrsh.c>
#endif

int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, 0);

		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, rc);

	z_reschedule(&lock, key);
	return rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <syscalls/k_poll_signal_reset_mrsh.c>
#endif

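/*
 * Triggered work ("k_work_poll"): instead of blocking a thread in k_poll(),
 * a work item is submitted to a workqueue as soon as one of the registered
 * events becomes ready (or the timeout expires). The helpers below implement
 * that mechanism.
 *
 * Usage sketch (illustrative only; my_work, my_handler and my_sem are
 * hypothetical, and my_handler has the k_work_handler_t signature):
 *
 *	static struct k_work_poll my_work;
 *	static struct k_poll_event events[1] = {
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_sem, 0),
 *	};
 *
 *	k_work_poll_init(&my_work, my_handler);
 *	k_work_poll_submit(&my_work, events, 1, K_FOREVER);
 *
 * my_handler() then runs from the system workqueue once the semaphore
 * becomes available.
 */
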
static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
			CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If the poller mode was not reset to MODE_NONE, the event
	 * registrations have not been cleared yet, so clear them here;
	 * otherwise k_work_poll_submit_to_queue() already did it.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

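/* MODE_TRIGGERED callback: an event became ready, so cancel the timeout and
 * submit the associated work item to its workqueue.
 */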
static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		k_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is waiting for events. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove the timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent the work from being executed if an event arrives
		 * while we are clearing the registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;

		return 0;
	}

	/*
	 * If we reach here, the work is either being registered by
	 * k_work_poll_submit_to_queue(), is executing, or is pending on a
	 * workqueue. Only in the last case would we have a chance to cancel
	 * it, but unfortunately there is no public API performing this task.
	 */
	return -EINVAL;
}

void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_poll, init, work);

	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_poll, init, work);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit_to_queue, work_q, work, timeout);

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);

				SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
							   work, timeout, retval);

				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);

			SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
						   work, timeout, -EADDRINUSE);

			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now on, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

		return 0;
	}

	/*
	 * Either the K_NO_WAIT timeout was specified, or at least one event
	 * was ready at registration time or has changed state since
	 * registration. The poller mode was never set to MODE_TRIGGERED, so
	 * the work has not been submitted to the workqueue yet.
	 */

	/*
	 * If the poller is still polling, no watched event occurred. This
	 * means we reached here due to the K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

	return 0;
}

int k_work_poll_submit(struct k_work_poll *work,
		       struct k_poll_event *events,
		       int num_events,
		       k_timeout_t timeout)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit, work, timeout);

	int ret = k_work_poll_submit_to_queue(&k_sys_work_q, work,
					      events, num_events, timeout);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit, work, timeout, ret);

	return ret;
}

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, cancel, work);

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, -EINVAL);

		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, retval);

	return retval;
}