/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_LIB_OS_HEAP_H_
#define ZEPHYR_INCLUDE_LIB_OS_HEAP_H_

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Internal heap APIs
 */

/* These validation checks are non-trivially expensive, so enable
 * them only when debugging the heap code. They shouldn't be routine
 * assertions.
 */
#ifdef CONFIG_SYS_HEAP_VALIDATE
#define CHECK(x) __ASSERT(x, "")
#else
#define CHECK(x) /**/
#endif

/* Chunks are identified by their offset in 8 byte units from the
 * first address in the buffer (a zero-valued chunkid_t is used as a
 * null; that chunk would always point into the metadata at the start
 * of the heap and cannot be allocated). They are prefixed by a
 * variable size header that depends on the size of the heap. Heaps
 * with fewer than 2^15 units (256kB) of storage use 16-bit fields,
 * otherwise the fields are 32-bit integers covering a 16GB heap
 * space (larger spaces really aren't in scope for this code, but
 * could be handled similarly I suppose). Because of that design
 * there's a certain amount of boilerplate API needed to expose the
 * field accessors, since we can't use natural syntax.
 *
 * The fields are:
 *   LEFT_SIZE: The size of the left (next lower chunk in memory)
 *              neighbor chunk.
 *   SIZE_AND_USED: The total size (including header) of the chunk in
 *                  8-byte units. The bottom bit stores a "used" flag.
 *   FREE_PREV: Chunk ID of the previous node in a free list.
 *   FREE_NEXT: Chunk ID of the next node in a free list.
 *
 * The free lists are circular lists, one for each power-of-two size
 * category. The free list pointers exist only for free chunks,
 * obviously. This memory is part of the user's buffer when
 * allocated.
 *
 * The field order is chosen so that an allocated buffer is immediately
 * bounded by SIZE_AND_USED of its own chunk at the bottom and LEFT_SIZE
 * of the following chunk at the top. This ordering allows for quick
 * buffer overflow detection by testing left_chunk(c + chunk_size(c)) == c.
 */
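
/*
 * Example layout of a small-heap (16-bit field) chunk, as described above;
 * offsets are in bytes from the start of the chunk and follow the
 * chunk_field() accessors below:
 *
 *   +0  LEFT_SIZE      size of the left neighbor, in 8-byte units
 *   +2  SIZE_AND_USED  (this chunk's size in units << 1) | used flag
 *   +4  FREE_PREV      previous free chunk (present only while free)
 *   +6  FREE_NEXT      next free chunk (present only while free)
 *   +4  ...user data begins here once the chunk is allocated...
 *
 * Big heaps use the same field order with 32-bit fields, so the header
 * grows to 8 bytes and user data begins at offset +8.
 */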

enum chunk_fields { LEFT_SIZE, SIZE_AND_USED, FREE_PREV, FREE_NEXT };

#define CHUNK_UNIT 8U

typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;

/* big_heap needs uint32_t, small_heap needs uint16_t */
typedef uint32_t chunkid_t;
typedef uint32_t chunksz_t;

struct z_heap_bucket {
	chunkid_t next;
};

struct z_heap {
	chunkid_t chunk0_hdr[2];
	chunkid_t end_chunk;
	uint32_t avail_buckets;
	struct z_heap_bucket buckets[0];
};

static inline bool big_heap_chunks(chunksz_t chunks)
{
	return sizeof(void *) > 4U || chunks > 0x7fffU;
}

static inline bool big_heap_bytes(size_t bytes)
{
	return big_heap_chunks(bytes / CHUNK_UNIT);
}

static inline bool big_heap(struct z_heap *h)
{
	return big_heap_chunks(h->end_chunk);
}

static inline chunk_unit_t *chunk_buf(struct z_heap *h)
{
	/* the struct z_heap matches with the first chunk */
	return (chunk_unit_t *)h;
}

static inline chunkid_t chunk_field(struct z_heap *h, chunkid_t c,
				    enum chunk_fields f)
{
	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		return ((uint32_t *)cmem)[f];
	} else {
		return ((uint16_t *)cmem)[f];
	}
}

static inline void chunk_set(struct z_heap *h, chunkid_t c,
			     enum chunk_fields f, chunkid_t val)
{
	CHECK(c <= h->end_chunk);

	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		CHECK(val == (uint32_t)val);
		((uint32_t *)cmem)[f] = val;
	} else {
		CHECK(val == (uint16_t)val);
		((uint16_t *)cmem)[f] = val;
	}
}

static inline bool chunk_used(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, SIZE_AND_USED) & 1U;
}

static inline chunksz_t chunk_size(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, SIZE_AND_USED) >> 1;
}

static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)
{
	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		if (used) {
			((uint32_t *)cmem)[SIZE_AND_USED] |= 1U;
		} else {
			((uint32_t *)cmem)[SIZE_AND_USED] &= ~1U;
		}
	} else {
		if (used) {
			((uint16_t *)cmem)[SIZE_AND_USED] |= 1U;
		} else {
			((uint16_t *)cmem)[SIZE_AND_USED] &= ~1U;
		}
	}
}

/*
 * Note: no need to preserve the used bit here, as the chunk is never in use
 * when its size is modified, and a potential set_chunk_used() is always
 * invoked after set_chunk_size().
 */
static inline void set_chunk_size(struct z_heap *h, chunkid_t c, chunksz_t size)
{
	chunk_set(h, c, SIZE_AND_USED, size << 1);
}

static inline chunkid_t prev_free_chunk(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, FREE_PREV);
}

static inline chunkid_t next_free_chunk(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, FREE_NEXT);
}

static inline void set_prev_free_chunk(struct z_heap *h, chunkid_t c,
				       chunkid_t prev)
{
	chunk_set(h, c, FREE_PREV, prev);
}

static inline void set_next_free_chunk(struct z_heap *h, chunkid_t c,
				       chunkid_t next)
{
	chunk_set(h, c, FREE_NEXT, next);
}

static inline chunkid_t left_chunk(struct z_heap *h, chunkid_t c)
{
	return c - chunk_field(h, c, LEFT_SIZE);
}

static inline chunkid_t right_chunk(struct z_heap *h, chunkid_t c)
{
	return c + chunk_size(h, c);
}
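
/*
 * Illustrative sketch, not part of the upstream API: the field ordering
 * described at the top of this file allows a cheap overflow check on an
 * allocated chunk, since the chunk to its right must still record it as
 * its left neighbor. The helper name here is hypothetical.
 */
static inline bool chunk_self_consistent(struct z_heap *h, chunkid_t c)
{
	return left_chunk(h, right_chunk(h, c)) == c;
}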

static inline void set_left_chunk_size(struct z_heap *h, chunkid_t c,
				       chunksz_t size)
{
	chunk_set(h, c, LEFT_SIZE, size);
}
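
/*
 * On a big heap the chunk header is a full 8-byte unit, so a 1-unit free
 * chunk has no room left for the FREE_PREV/FREE_NEXT fields and cannot be
 * linked into a free list; such a chunk is a "solo free header".
 */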
static inline bool solo_free_header(struct z_heap *h, chunkid_t c)
{
	return big_heap(h) && chunk_size(h, c) == 1U;
}

static inline size_t chunk_header_bytes(struct z_heap *h)
{
	return big_heap(h) ? 8 : 4;
}

static inline size_t heap_footer_bytes(size_t size)
{
	return big_heap_bytes(size) ? 8 : 4;
}

static inline chunksz_t chunksz(size_t bytes)
{
	return (bytes + CHUNK_UNIT - 1U) / CHUNK_UNIT;
}

static inline chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes)
{
	return chunksz(chunk_header_bytes(h) + bytes);
}

static inline chunksz_t min_chunk_size(struct z_heap *h)
{
	return bytes_to_chunksz(h, 1);
}

static inline size_t chunksz_to_bytes(struct z_heap *h, chunksz_t chunksz_in)
{
	return chunksz_in * CHUNK_UNIT - chunk_header_bytes(h);
}
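
/*
 * Worked example, assuming a small heap (4-byte chunk header): a 20-byte
 * request needs bytes_to_chunksz(h, 20) = chunksz(4 + 20) = 3 chunk units
 * (24 bytes), of which chunksz_to_bytes(h, 3) = 24 - 4 = 20 bytes are
 * usable by the caller.
 */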

static inline int bucket_idx(struct z_heap *h, chunksz_t sz)
{
	unsigned int usable_sz = sz - min_chunk_size(h) + 1;

	return 31 - __builtin_clz(usable_sz);
}
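
/*
 * Illustrative sketch, not part of the upstream API: free chunks of size
 * sz live on bucket bucket_idx(h, sz), and the allocator keeps the
 * matching bit of h->avail_buckets set while that list is non-empty
 * (that bookkeeping lives in heap.c, not here). The helper name is
 * hypothetical.
 */
static inline bool bucket_nonempty(struct z_heap *h, chunksz_t sz)
{
	return (h->avail_buckets & (1U << bucket_idx(h, sz))) != 0U;
}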

static inline bool size_too_big(struct z_heap *h, size_t bytes)
{
	/*
	 * Quick check to bail out early if size is too big.
	 * Also guards against potential arithmetic overflows elsewhere.
	 */
	return (bytes / CHUNK_UNIT) >= h->end_chunk;
}

/* For debugging */
void heap_print_info(struct z_heap *h, bool dump_chunks);

#endif /* ZEPHYR_INCLUDE_LIB_OS_HEAP_H_ */