  1. /*
  2. * Copyright (c) 2020 Tobias Svehagen
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <zephyr.h>
  7. #include <wait_q.h>
  8. #include <posix/sys/eventfd.h>
  9. #include <net/socket.h>
  10. #include <ksched.h>
/* Per-instance state backing one eventfd file descriptor. */
struct eventfd {
	struct k_poll_signal read_sig;  /* raised when cnt > 0 (fd readable) */
	struct k_poll_signal write_sig; /* raised when counter can still grow (fd writable) */
	struct k_spinlock lock;         /* guards cnt and both poll signals */
	_wait_q_t wait_q;               /* blocked readers/writers pend here */
	eventfd_t cnt;                  /* the 64-bit event counter */
	int flags;                      /* EFD_* flags; EFD_IN_USE marks the slot allocated */
};
/* Serializes slot allocation in eventfd(). */
K_MUTEX_DEFINE(eventfd_mtx);
/* Static pool of instances; a slot is free when EFD_IN_USE is clear. */
static struct eventfd efds[CONFIG_EVENTFD_MAX];
  21. static int eventfd_poll_prepare(struct eventfd *efd,
  22. struct zsock_pollfd *pfd,
  23. struct k_poll_event **pev,
  24. struct k_poll_event *pev_end)
  25. {
  26. if (pfd->events & ZSOCK_POLLIN) {
  27. if (*pev == pev_end) {
  28. errno = ENOMEM;
  29. return -1;
  30. }
  31. (*pev)->obj = &efd->read_sig;
  32. (*pev)->type = K_POLL_TYPE_SIGNAL;
  33. (*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
  34. (*pev)->state = K_POLL_STATE_NOT_READY;
  35. (*pev)++;
  36. }
  37. if (pfd->events & ZSOCK_POLLOUT) {
  38. if (*pev == pev_end) {
  39. errno = ENOMEM;
  40. return -1;
  41. }
  42. (*pev)->obj = &efd->write_sig;
  43. (*pev)->type = K_POLL_TYPE_SIGNAL;
  44. (*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
  45. (*pev)->state = K_POLL_STATE_NOT_READY;
  46. (*pev)++;
  47. }
  48. return 0;
  49. }
  50. static int eventfd_poll_update(struct eventfd *efd,
  51. struct zsock_pollfd *pfd,
  52. struct k_poll_event **pev)
  53. {
  54. ARG_UNUSED(efd);
  55. if (pfd->events & ZSOCK_POLLIN) {
  56. if ((*pev)->state != K_POLL_STATE_NOT_READY) {
  57. pfd->revents |= ZSOCK_POLLIN;
  58. }
  59. (*pev)++;
  60. }
  61. if (pfd->events & ZSOCK_POLLOUT) {
  62. if ((*pev)->state != K_POLL_STATE_NOT_READY) {
  63. pfd->revents |= ZSOCK_POLLOUT;
  64. }
  65. (*pev)++;
  66. }
  67. return 0;
  68. }
/*
 * read() backend for an eventfd.
 *
 * Semaphore mode (EFD_SEMAPHORE) returns 1 and decrements the counter;
 * otherwise the whole counter value is returned and it is reset to 0.
 * Blocks while the counter is 0 unless EFD_NONBLOCK is set, in which
 * case it fails with EAGAIN.
 *
 * Returns sizeof(eventfd_t) on success, -1 with errno set on error
 * (EINVAL if buf is smaller than eventfd_t, EAGAIN as above).
 */
static ssize_t eventfd_read_op(void *obj, void *buf, size_t sz)
{
	struct eventfd *efd = obj;
	int result = 0;
	eventfd_t count = 0;
	k_spinlock_key_t key;

	if (sz < sizeof(eventfd_t)) {
		errno = EINVAL;
		return -1;
	}

	for (;;) {
		key = k_spin_lock(&efd->lock);
		if ((efd->flags & EFD_NONBLOCK) && efd->cnt == 0) {
			result = EAGAIN;
			break;
		} else if (efd->cnt == 0) {
			/* Sleep until a writer raises the counter; the lock is
			 * released while pended and we re-lock on the next
			 * loop iteration to re-check the counter.
			 */
			z_pend_curr(&efd->lock, key, &efd->wait_q, K_FOREVER);
		} else {
			count = (efd->flags & EFD_SEMAPHORE) ? 1 : efd->cnt;
			efd->cnt -= count;
			if (efd->cnt == 0) {
				/* No longer readable */
				k_poll_signal_reset(&efd->read_sig);
			}
			/* Counter shrank, so writers can always proceed now */
			k_poll_signal_raise(&efd->write_sig, 0);
			break;
		}
	}

	/* Loop exits via break with the spinlock held. Wake any writers
	 * blocked on overflow; reschedule only if someone was actually
	 * pended, otherwise just drop the lock.
	 */
	if (z_unpend_all(&efd->wait_q) != 0) {
		z_reschedule(&efd->lock, key);
	} else {
		k_spin_unlock(&efd->lock, key);
	}

	if (result != 0) {
		errno = result;
		return -1;
	}

	*(eventfd_t *)buf = count;

	return sizeof(eventfd_t);
}
/*
 * write() backend for an eventfd.
 *
 * Adds the 8-byte value in buf to the counter. The counter's maximum
 * is UINT64_MAX - 1; writing UINT64_MAX is rejected with EINVAL and a
 * write that would overflow blocks (or fails with EAGAIN when
 * EFD_NONBLOCK is set) until a reader makes room.
 *
 * Returns sizeof(eventfd_t) on success, -1 with errno set on error.
 */
static ssize_t eventfd_write_op(void *obj, const void *buf, size_t sz)
{
	struct eventfd *efd = obj;
	int result = 0;
	eventfd_t count;
	bool overflow;
	k_spinlock_key_t key;

	if (sz < sizeof(eventfd_t)) {
		errno = EINVAL;
		return -1;
	}

	count = *((eventfd_t *)buf);

	if (count == UINT64_MAX) {
		errno = EINVAL;
		return -1;
	}

	if (count == 0) {
		/* Nothing to add; succeed without touching the counter */
		return sizeof(eventfd_t);
	}

	for (;;) {
		key = k_spin_lock(&efd->lock);
		/* True when cnt + count would exceed UINT64_MAX - 1 */
		overflow = UINT64_MAX - count <= efd->cnt;
		if ((efd->flags & EFD_NONBLOCK) && overflow) {
			result = EAGAIN;
			break;
		} else if (overflow) {
			/* Sleep until a reader drains the counter; lock is
			 * dropped while pended and re-taken next iteration.
			 */
			z_pend_curr(&efd->lock, key, &efd->wait_q, K_FOREVER);
		} else {
			efd->cnt += count;
			if (efd->cnt == (UINT64_MAX - 1)) {
				/* Counter is full: no longer writable */
				k_poll_signal_reset(&efd->write_sig);
			}
			/* Counter is non-zero now, so the fd is readable */
			k_poll_signal_raise(&efd->read_sig, 0);
			break;
		}
	}

	/* Loop exits via break with the spinlock held. Wake any readers
	 * blocked on an empty counter; reschedule only if someone was
	 * pended, otherwise just drop the lock.
	 */
	if (z_unpend_all(&efd->wait_q) != 0) {
		z_reschedule(&efd->lock, key);
	} else {
		k_spin_unlock(&efd->lock, key);
	}

	if (result != 0) {
		errno = result;
		return -1;
	}

	return sizeof(eventfd_t);
}
  155. static int eventfd_close_op(void *obj)
  156. {
  157. struct eventfd *efd = (struct eventfd *)obj;
  158. efd->flags = 0;
  159. return 0;
  160. }
  161. static int eventfd_ioctl_op(void *obj, unsigned int request, va_list args)
  162. {
  163. struct eventfd *efd = (struct eventfd *)obj;
  164. switch (request) {
  165. case F_GETFL:
  166. return efd->flags & EFD_FLAGS_SET;
  167. case F_SETFL: {
  168. int flags;
  169. flags = va_arg(args, int);
  170. if (flags & ~EFD_FLAGS_SET) {
  171. errno = EINVAL;
  172. return -1;
  173. }
  174. efd->flags = flags;
  175. return 0;
  176. }
  177. case ZFD_IOCTL_POLL_PREPARE: {
  178. struct zsock_pollfd *pfd;
  179. struct k_poll_event **pev;
  180. struct k_poll_event *pev_end;
  181. pfd = va_arg(args, struct zsock_pollfd *);
  182. pev = va_arg(args, struct k_poll_event **);
  183. pev_end = va_arg(args, struct k_poll_event *);
  184. return eventfd_poll_prepare(obj, pfd, pev, pev_end);
  185. }
  186. case ZFD_IOCTL_POLL_UPDATE: {
  187. struct zsock_pollfd *pfd;
  188. struct k_poll_event **pev;
  189. pfd = va_arg(args, struct zsock_pollfd *);
  190. pev = va_arg(args, struct k_poll_event **);
  191. return eventfd_poll_update(obj, pfd, pev);
  192. }
  193. default:
  194. errno = EOPNOTSUPP;
  195. return -1;
  196. }
  197. }
/* File-descriptor operations table registered via z_finalize_fd(). */
static const struct fd_op_vtable eventfd_fd_vtable = {
	.read = eventfd_read_op,
	.write = eventfd_write_op,
	.close = eventfd_close_op,
	.ioctl = eventfd_ioctl_op,
};
  204. int eventfd(unsigned int initval, int flags)
  205. {
  206. struct eventfd *efd = NULL;
  207. int fd = -1;
  208. int i;
  209. if (flags & ~EFD_FLAGS_SET) {
  210. errno = EINVAL;
  211. return -1;
  212. }
  213. k_mutex_lock(&eventfd_mtx, K_FOREVER);
  214. for (i = 0; i < ARRAY_SIZE(efds); ++i) {
  215. if (!(efds[i].flags & EFD_IN_USE)) {
  216. efd = &efds[i];
  217. break;
  218. }
  219. }
  220. if (efd == NULL) {
  221. errno = ENOMEM;
  222. goto exit_mtx;
  223. }
  224. fd = z_reserve_fd();
  225. if (fd < 0) {
  226. goto exit_mtx;
  227. }
  228. efd->flags = EFD_IN_USE | flags;
  229. efd->cnt = initval;
  230. k_poll_signal_init(&efd->write_sig);
  231. k_poll_signal_init(&efd->read_sig);
  232. z_waitq_init(&efd->wait_q);
  233. if (initval != 0) {
  234. k_poll_signal_raise(&efd->read_sig, 0);
  235. }
  236. k_poll_signal_raise(&efd->write_sig, 0);
  237. z_finalize_fd(fd, efd, &eventfd_fd_vtable);
  238. exit_mtx:
  239. k_mutex_unlock(&eventfd_mtx);
  240. return fd;
  241. }