/* ring_buffer.c: Simple ring buffer API */

/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <sys/ring_buffer.h>
#include <string.h>

/* LCOV_EXCL_START */
/* Weak function so that tests can override it and trigger rewinding
 * earlier.
 */
uint32_t __weak ring_buf_get_rewind_threshold(void)
{
	return RING_BUFFER_MAX_SIZE;
}
/* LCOV_EXCL_STOP */

/**
 * Internal data structure for a buffer header.
 *
 * We want all of this to fit in a single uint32_t. Every item stored in the
 * ring buffer will be one of these headers plus any extra data supplied.
 */
struct ring_element {
	uint32_t type   :16; /**< Application-specific */
	uint32_t length :8;  /**< Length in 32-bit chunks */
	uint32_t value  :8;  /**< Room for small integral values */
};

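/* Illustrative addition (not part of the original file): the bit-fields
 * above are 16 + 8 + 8 = 32 bits wide, so one header occupies exactly one
 * 32-bit word of the buffer. A compile-time check can document that
 * assumption; BUILD_ASSERT is assumed to be visible through the headers
 * already included here.
 */
BUILD_ASSERT(sizeof(struct ring_element) == sizeof(uint32_t));
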
static uint32_t mod(struct ring_buf *buf, uint32_t val)
{
	return likely(buf->mask) ? val & buf->mask : val % buf->size;
}

static uint32_t get_rewind_value(uint32_t buf_size, uint32_t threshold)
{
	/* The rewind value is rounded down to a multiple of the buffer size
	 * and decreased by one buffer size. This ensures that the later
	 * subtraction cannot produce a negative index: the tail is rewound
	 * first, and the head (which trails the tail) is only rewound on a
	 * subsequent get.
	 */
	return buf_size * (threshold / buf_size - 1);
}

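/* Worked example (illustrative, not from the original file): with
 * buf_size = 100 and threshold = 1000, the rewind value is
 * 100 * (1000 / 100 - 1) = 900. Subtracting a multiple of the buffer size
 * preserves each index modulo the buffer size, and keeping one extra
 * buffer length (the "- 1") guarantees that neither the tail nor the
 * not-yet-rewound head can go negative when the same value is later
 * subtracted from each.
 */
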
int ring_buf_is_empty(struct ring_buf *buf)
{
	uint32_t tail = buf->tail;
	uint32_t head = buf->head;

	if (tail < head) {
		tail += get_rewind_value(buf->size,
					 ring_buf_get_rewind_threshold());
	}

	return (head == tail);
}

uint32_t ring_buf_size_get(struct ring_buf *buf)
{
	uint32_t tail = buf->tail;
	uint32_t head = buf->head;

	if (tail < head) {
		tail += get_rewind_value(buf->size,
					 ring_buf_get_rewind_threshold());
	}

	return tail - head;
}

uint32_t ring_buf_space_get(struct ring_buf *buf)
{
	return buf->size - ring_buf_size_get(buf);
}

int ring_buf_item_put(struct ring_buf *buf, uint16_t type, uint8_t value,
		      uint32_t *data, uint8_t size32)
{
	uint32_t i, space, index, rc;
	uint32_t threshold = ring_buf_get_rewind_threshold();
	uint32_t rew;

	space = ring_buf_space_get(buf);
	if (space >= (size32 + 1)) {
		struct ring_element *header =
			(struct ring_element *)&buf->buf.buf32[mod(buf, buf->tail)];

		header->type = type;
		header->length = size32;
		header->value = value;

		if (likely(buf->mask)) {
			for (i = 0U; i < size32; ++i) {
				index = (i + buf->tail + 1) & buf->mask;
				buf->buf.buf32[index] = data[i];
			}
		} else {
			for (i = 0U; i < size32; ++i) {
				index = (i + buf->tail + 1) % buf->size;
				buf->buf.buf32[index] = data[i];
			}
		}

		/* Check if indexes shall be rewound. */
		if (buf->tail > threshold) {
			rew = get_rewind_value(buf->size, threshold);
		} else {
			rew = 0;
		}

		buf->tail = buf->tail + (size32 + 1 - rew);
		rc = 0U;
	} else {
		buf->misc.item_mode.dropped_put_count++;
		rc = -EMSGSIZE;
	}

	return rc;
}

int ring_buf_item_get(struct ring_buf *buf, uint16_t *type, uint8_t *value,
		      uint32_t *data, uint8_t *size32)
{
	struct ring_element *header;
	uint32_t i, index;
	uint32_t tail = buf->tail;
	uint32_t rew;

	/* Tail is always ahead; if it is not, it is only because it got
	 * rewound.
	 */
	if (tail < buf->head) {
		/* Locally undo the rewind to get tail aligned with head. */
		rew = get_rewind_value(buf->size,
				       ring_buf_get_rewind_threshold());
		tail += rew;
	} else if (ring_buf_is_empty(buf)) {
		return -EAGAIN;
	} else {
		rew = 0;
	}

	header = (struct ring_element *) &buf->buf.buf32[mod(buf, buf->head)];

	if (data && (header->length > *size32)) {
		*size32 = header->length;
		return -EMSGSIZE;
	}

	*size32 = header->length;
	*type = header->type;
	*value = header->value;

	if (data) {
		if (likely(buf->mask)) {
			for (i = 0U; i < header->length; ++i) {
				index = (i + buf->head + 1) & buf->mask;
				data[i] = buf->buf.buf32[index];
			}
		} else {
			for (i = 0U; i < header->length; ++i) {
				index = (i + buf->head + 1) % buf->size;
				data[i] = buf->buf.buf32[index];
			}
		}
	}

	/* Include potential rewinding. */
	buf->head = buf->head + header->length + 1 - rew;

	return 0;
}

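/* Usage sketch (illustrative only, not part of the original file). It
 * assumes the RING_BUF_ITEM_DECLARE_SIZE() macro from the public
 * <sys/ring_buffer.h> API; names such as my_ring_buf and my_data are
 * hypothetical.
 *
 *   RING_BUF_ITEM_DECLARE_SIZE(my_ring_buf, 64);
 *
 *   uint32_t my_data[2] = { 0xAAAA5555, 0x12345678 };
 *   uint16_t type;
 *   uint8_t value;
 *   uint8_t size32 = 2;
 *
 *   // Producer: one header word plus two data words are queued.
 *   if (ring_buf_item_put(&my_ring_buf, 0x10, 0, my_data, 2) != 0) {
 *           // Not enough space: the item was dropped and counted.
 *   }
 *
 *   // Consumer: size32 is the capacity of my_data on input and the
 *   // actual item length on output.
 *   if (ring_buf_item_get(&my_ring_buf, &type, &value, my_data, &size32) == 0) {
 *           // type, value and my_data[0..size32-1] now hold the item.
 *   }
 */
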
/** @brief Wrap an index if it exceeds the limit.
 *
 * @param val Value to wrap.
 * @param max Upper limit.
 *
 * @return val % max (valid for val < 2 * max).
 */
__unused static inline uint32_t wrap(uint32_t val, uint32_t max)
{
	return val >= max ? (val - max) : val;
}

uint32_t ring_buf_put_claim(struct ring_buf *buf, uint8_t **data, uint32_t size)
{
	uint32_t space, trail_size, allocated, tmp_trail_mod;
	uint32_t head = buf->head;
	uint32_t tmp_tail = buf->misc.byte_mode.tmp_tail;

	if (buf->misc.byte_mode.tmp_tail < head) {
		/* Head is already rewound but tail is not. */
		tmp_tail += get_rewind_value(buf->size,
					     ring_buf_get_rewind_threshold());
	}

	tmp_trail_mod = mod(buf, buf->misc.byte_mode.tmp_tail);
	space = (head + buf->size) - tmp_tail;
	trail_size = buf->size - tmp_trail_mod;

	/* Limit requested size to available size. */
	size = MIN(size, space);

	/* Limit allocated size to trail size. */
	allocated = MIN(trail_size, size);

	*data = &buf->buf.buf8[tmp_trail_mod];
	buf->misc.byte_mode.tmp_tail =
		buf->misc.byte_mode.tmp_tail + allocated;

	return allocated;
}

int ring_buf_put_finish(struct ring_buf *buf, uint32_t size)
{
	uint32_t rew;
	uint32_t threshold = ring_buf_get_rewind_threshold();

	if ((buf->tail + size) > (buf->head + buf->size)) {
		return -EINVAL;
	}

	/* Check if indexes shall be rewound. */
	if (buf->tail > threshold) {
		rew = get_rewind_value(buf->size, threshold);
	} else {
		rew = 0;
	}

	buf->tail += (size - rew);
	buf->misc.byte_mode.tmp_tail = buf->tail;

	return 0;
}

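/* Usage sketch (illustrative only, not part of the original file): the
 * claim/finish pair lets a producer write into the buffer memory in place,
 * without an intermediate copy. RING_BUF_DECLARE() is assumed from the
 * public API; my_ring_buf and fill_from_uart() are hypothetical.
 *
 *   RING_BUF_DECLARE(my_ring_buf, 128);
 *
 *   uint8_t *dst;
 *   uint32_t claimed = ring_buf_put_claim(&my_ring_buf, &dst, 32);
 *
 *   // Write up to 'claimed' bytes directly into the buffer memory...
 *   uint32_t written = fill_from_uart(dst, claimed);
 *
 *   // ...then commit only what was actually produced.
 *   ring_buf_put_finish(&my_ring_buf, written);
 */
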
uint32_t ring_buf_put(struct ring_buf *buf, const uint8_t *data, uint32_t size)
{
	uint8_t *dst;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	do {
		partial_size = ring_buf_put_claim(buf, &dst, size);
		memcpy(dst, data, partial_size);
		total_size += partial_size;
		size -= partial_size;
		data += partial_size;
	} while (size && partial_size);

	err = ring_buf_put_finish(buf, total_size);
	__ASSERT_NO_MSG(err == 0);

	return total_size;
}

uint32_t ring_buf_get_claim(struct ring_buf *buf, uint8_t **data, uint32_t size)
{
	uint32_t space, granted_size, trail_size, tmp_head_mod;
	uint32_t tail = buf->tail;

	/* Tail is always ahead; if it is not, it is only because it got
	 * rewound.
	 */
	if (tail < buf->misc.byte_mode.tmp_head) {
		/* Locally, increment it to its pre-rewind value. */
		tail += get_rewind_value(buf->size,
					 ring_buf_get_rewind_threshold());
	}

	tmp_head_mod = mod(buf, buf->misc.byte_mode.tmp_head);
	space = tail - buf->misc.byte_mode.tmp_head;
	trail_size = buf->size - tmp_head_mod;

	/* Limit requested size to available size. */
	granted_size = MIN(size, space);

	/* Limit allocated size to trail size. */
	granted_size = MIN(trail_size, granted_size);

	*data = &buf->buf.buf8[tmp_head_mod];
	buf->misc.byte_mode.tmp_head += granted_size;

	return granted_size;
}

int ring_buf_get_finish(struct ring_buf *buf, uint32_t size)
{
	uint32_t tail = buf->tail;
	uint32_t rew;

	/* Tail is always ahead; if it is not, it is only because it got
	 * rewound.
	 */
	if (tail < buf->misc.byte_mode.tmp_head) {
		/* Tail was rewound. Locally, increment it to its pre-rewind
		 * value.
		 */
		rew = get_rewind_value(buf->size,
				       ring_buf_get_rewind_threshold());
		tail += rew;
	} else {
		rew = 0;
	}

	if ((buf->head + size) > tail) {
		return -EINVAL;
	}

	/* Include potential rewinding. */
	buf->head += (size - rew);
	buf->misc.byte_mode.tmp_head = buf->head;

	return 0;
}

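/* Usage sketch (illustrative only, not part of the original file): the
 * consumer-side mirror of the put claim/finish pair. A claim that wraps
 * around the end of the buffer is granted in pieces, so callers typically
 * loop until nothing more is granted. my_ring_buf and process() are
 * hypothetical.
 *
 *   uint8_t *src;
 *   uint32_t granted;
 *
 *   while ((granted = ring_buf_get_claim(&my_ring_buf, &src, 16)) > 0) {
 *           process(src, granted);
 *           ring_buf_get_finish(&my_ring_buf, granted);
 *   }
 */
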
uint32_t ring_buf_get(struct ring_buf *buf, uint8_t *data, uint32_t size)
{
	uint8_t *src;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	do {
		partial_size = ring_buf_get_claim(buf, &src, size);
		if (data) {
			memcpy(data, src, partial_size);
			data += partial_size;
		}
		total_size += partial_size;
		size -= partial_size;
	} while (size && partial_size);

	err = ring_buf_get_finish(buf, total_size);
	__ASSERT_NO_MSG(err == 0);

	return total_size;
}

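/* Usage sketch (illustrative only, not part of the original file):
 * ring_buf_put() and ring_buf_get() wrap the claim/finish pairs and copy
 * through caller-provided arrays; both return the number of bytes actually
 * transferred, which may be less than requested. my_ring_buf is
 * hypothetical.
 *
 *   uint8_t tx[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
 *   uint8_t rx[8];
 *
 *   uint32_t put = ring_buf_put(&my_ring_buf, tx, sizeof(tx));
 *   uint32_t got = ring_buf_get(&my_ring_buf, rx, sizeof(rx));
 *   // got <= put; passing NULL data to ring_buf_get() just discards bytes.
 */
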
uint32_t ring_buf_peek(struct ring_buf *buf, uint8_t *data, uint32_t size)
{
	uint8_t *src;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	size = MIN(size, ring_buf_size_get(buf));

	do {
		partial_size = ring_buf_get_claim(buf, &src, size);
		__ASSERT_NO_MSG(data != NULL);
		memcpy(data, src, partial_size);
		data += partial_size;
		total_size += partial_size;
		size -= partial_size;
	} while (size && partial_size);

	/* Effectively unclaim total_size bytes. */
	err = ring_buf_get_finish(buf, 0);
	__ASSERT_NO_MSG(err == 0);

	return total_size;
}

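/* Usage sketch (illustrative only, not part of the original file):
 * ring_buf_peek() copies like ring_buf_get() but finishes with size 0, so
 * the head index is left untouched and the same bytes can be read again
 * later. my_ring_buf is hypothetical.
 *
 *   uint8_t preview[4];
 *   uint32_t seen = ring_buf_peek(&my_ring_buf, preview, sizeof(preview));
 *   // The 'seen' bytes are still in the buffer for a later ring_buf_get().
 */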