/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_LIB_OS_HEAP_H_
#define ZEPHYR_INCLUDE_LIB_OS_HEAP_H_

/*
 * Internal heap APIs
 */

/* These validation checks are non-trivially expensive, so enable
 * them only when debugging the heap code. They shouldn't be routine
 * assertions.
 */
#ifdef CONFIG_SYS_HEAP_VALIDATE
/* #define CHECK(x) __ASSERT(x, "") */
#define CHECK(x) \
	do { \
		if (!(x)) { \
			printk("CHECK FAIL @ %s:%d\n", __FILE__, __LINE__); \
			k_panic(); \
		} \
	} while (false)
#else
#define CHECK(x) /**/
#endif
/* Chunks are identified by their offset in 8 byte units from the
 * first address in the buffer (a zero-valued chunkid_t is used as a
 * null; that chunk would always point into the metadata at the start
 * of the heap and cannot be allocated). They are prefixed by a
 * variable size header that depends on the size of the heap. Heaps
 * with fewer than 2^15 units (256 kB) of storage use 16 bit shorts
 * to store the fields; otherwise the fields are 32 bit integers,
 * allowing a 16 GB heap space (larger spaces really aren't in scope
 * for this code, but could be handled similarly I suppose). Because
 * of that design there's a certain amount of boilerplate API needed
 * to expose the field accessors since we can't use natural syntax.
 *
 * The fields are:
 *   LEFT_SIZE: The size of the left (next lower chunk in memory)
 *              neighbor chunk.
 *   SIZE_AND_USED: The total size (including header) of the chunk in
 *                  8-byte units. The bottom bit stores a "used" flag.
 *   FREE_PREV: Chunk ID of the previous node in a free list.
 *   FREE_NEXT: Chunk ID of the next node in a free list.
 *
 * The free lists are circular lists, one for each power-of-two size
 * category. The free list pointers exist only for free chunks,
 * obviously. This memory is part of the user's buffer when
 * allocated.
 *
 * The field order is so that allocated buffers are immediately bounded
 * by SIZE_AND_USED of the current chunk at the bottom, and LEFT_SIZE of
 * the following chunk at the top. This ordering allows for quick buffer
 * overflow detection by testing left_chunk(c + chunk_size(c)) == c.
 */
enum chunk_fields { LEFT_SIZE, SIZE_AND_USED, FREE_PREV, FREE_NEXT };

#define CHUNK_UNIT 8U

typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;
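
/*
 * Worked example (illustrative, not normative): on a small heap, a
 * used chunk at id c == 10 with a size of 4 units occupies
 * buf[10..13]. Its header is two uint16_t values at the start of
 * buf[10], indexed by the enum above:
 *
 *   buf[10]: [LEFT_SIZE][SIZE_AND_USED == (4 << 1) | 1][user data...]
 *   buf[11..13]: user data
 *   buf[14]: the right neighbor; its LEFT_SIZE field must be 4
 *
 * FREE_PREV/FREE_NEXT would overlay the first bytes of user data,
 * but exist only while the chunk sits on a free list.
 */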
/* In-memory header fields are uint32_t for a big heap and uint16_t
 * for a small heap; chunkid_t must be wide enough for either.
 */
typedef uint32_t chunkid_t;
typedef uint32_t chunksz_t;

struct z_heap_bucket {
	chunkid_t next;
};

struct z_heap {
	chunkid_t chunk0_hdr[2];
	chunkid_t end_chunk;
	uint32_t avail_buckets;
	struct z_heap_bucket buckets[0];
};

static inline bool big_heap_chunks(chunksz_t chunks)
{
	return sizeof(void *) > 4U || chunks > 0x7fffU;
}

static inline bool big_heap_bytes(size_t bytes)
{
	return big_heap_chunks(bytes / CHUNK_UNIT);
}

static inline bool big_heap(struct z_heap *h)
{
	return big_heap_chunks(h->end_chunk);
}
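
/*
 * Sketch of the boundary (illustrative, assuming a 32 bit target
 * where sizeof(void *) == 4): 0x7fff units of 8 bytes is 256 kB - 8,
 * so
 *
 *   big_heap_bytes(0x7fff * 8)     -> false (still a "small" heap)
 *   big_heap_bytes(0x7fff * 8 + 8) -> true  (needs 32 bit fields)
 *
 * On 64 bit targets every heap is "big".
 */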
static inline chunk_unit_t *chunk_buf(struct z_heap *h)
{
	/* the struct z_heap matches with the first chunk */
	return (chunk_unit_t *)h;
}

static inline chunkid_t chunk_field(struct z_heap *h, chunkid_t c,
				    enum chunk_fields f)
{
	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		return ((uint32_t *)cmem)[f];
	} else {
		return ((uint16_t *)cmem)[f];
	}
}

static inline void chunk_set(struct z_heap *h, chunkid_t c,
			     enum chunk_fields f, chunkid_t val)
{
	CHECK(c <= h->end_chunk);

	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		CHECK(val == (uint32_t)val);
		((uint32_t *)cmem)[f] = val;
	} else {
		CHECK(val == (uint16_t)val);
		((uint16_t *)cmem)[f] = val;
	}
}
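
/*
 * Usage sketch (illustrative only): the typed accessors below are the
 * "natural syntax" replacements built on chunk_field()/chunk_set(),
 * e.g. storing and then reading back a free-list link:
 *
 *   chunk_set(h, c, FREE_NEXT, n);
 *   CHECK(chunk_field(h, c, FREE_NEXT) == n);
 */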
static inline bool chunk_used(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, SIZE_AND_USED) & 1U;
}

static inline chunksz_t chunk_size(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, SIZE_AND_USED) >> 1;
}

static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)
{
	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		if (used) {
			((uint32_t *)cmem)[SIZE_AND_USED] |= 1U;
		} else {
			((uint32_t *)cmem)[SIZE_AND_USED] &= ~1U;
		}
	} else {
		if (used) {
			((uint16_t *)cmem)[SIZE_AND_USED] |= 1U;
		} else {
			((uint16_t *)cmem)[SIZE_AND_USED] &= ~1U;
		}
	}
}

/*
 * Note: there is no need to preserve the used bit here, as a chunk is
 * never in use when its size is modified, and any set_chunk_used()
 * call is always made after set_chunk_size().
 */
static inline void set_chunk_size(struct z_heap *h, chunkid_t c, chunksz_t size)
{
	chunk_set(h, c, SIZE_AND_USED, size << 1);
}
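
/*
 * Encoding example (illustrative only): for a chunk of 5 units that
 * is in use, the stored SIZE_AND_USED word is (5 << 1) | 1 == 11:
 *
 *   set_chunk_size(h, c, 5);     stores 10 (used bit cleared)
 *   set_chunk_used(h, c, true);  ORs in the bottom bit -> 11
 *   chunk_size(h, c);            11 >> 1 == 5
 *   chunk_used(h, c);            11 & 1 == true
 */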
static inline chunkid_t prev_free_chunk(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, FREE_PREV);
}

static inline chunkid_t next_free_chunk(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, FREE_NEXT);
}

static inline void set_prev_free_chunk(struct z_heap *h, chunkid_t c,
				       chunkid_t prev)
{
	chunk_set(h, c, FREE_PREV, prev);
}

static inline void set_next_free_chunk(struct z_heap *h, chunkid_t c,
				       chunkid_t next)
{
	chunk_set(h, c, FREE_NEXT, next);
}

static inline chunkid_t left_chunk(struct z_heap *h, chunkid_t c)
{
	return c - chunk_field(h, c, LEFT_SIZE);
}

static inline chunkid_t right_chunk(struct z_heap *h, chunkid_t c)
{
	return c + chunk_size(h, c);
}

static inline void set_left_chunk_size(struct z_heap *h, chunkid_t c,
				       chunksz_t size)
{
	chunk_set(h, c, LEFT_SIZE, size);
}
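
/*
 * A minimal sketch (hypothetical helper, not part of the heap API) of
 * the overflow check described in the layout comment at the top of
 * this file: a chunk whose right neighbor's LEFT_SIZE no longer
 * points back at it has been corrupted.
 */
static inline bool chunk_neighbors_consistent(struct z_heap *h, chunkid_t c)
{
	/* equivalent to left_chunk(c + chunk_size(c)) == c */
	return left_chunk(h, right_chunk(h, c)) == c;
}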
static inline bool solo_free_header(struct z_heap *h, chunkid_t c)
{
	return big_heap(h) && chunk_size(h, c) == 1U;
}

static inline size_t chunk_header_bytes(struct z_heap *h)
{
	return big_heap(h) ? 8 : 4;
}

static inline size_t heap_footer_bytes(size_t size)
{
	return big_heap_bytes(size) ? 8 : 4;
}

static inline chunksz_t chunksz(size_t bytes)
{
	return (bytes + CHUNK_UNIT - 1U) / CHUNK_UNIT;
}

static inline chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes)
{
	return chunksz(chunk_header_bytes(h) + bytes);
}

static inline chunksz_t min_chunk_size(struct z_heap *h)
{
	return bytes_to_chunksz(h, 1);
}

static inline size_t chunksz_to_bytes(struct z_heap *h, chunksz_t chunksz_in)
{
	return chunksz_in * CHUNK_UNIT - chunk_header_bytes(h);
}
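
/*
 * Worked example (illustrative): on a small heap the chunk header is
 * 4 bytes, so a 1 byte allocation needs chunksz(4 + 1) == 1 unit and
 * min_chunk_size(h) == 1. The largest payload still fitting in one
 * unit is chunksz_to_bytes(h, 1) == 8 - 4 == 4 bytes.
 */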
static inline int bucket_idx(struct z_heap *h, chunksz_t sz)
{
	unsigned int usable_sz = sz - min_chunk_size(h) + 1;

	return 31 - __builtin_clz(usable_sz);
}
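
/*
 * Bucket mapping example (illustrative, assuming min_chunk_size(h)
 * == 1, so usable_sz == sz):
 *
 *   bucket_idx(h, 1)    -> 0
 *   bucket_idx(h, 2..3) -> 1
 *   bucket_idx(h, 4..7) -> 2
 *
 * i.e. one circular free list per power-of-two size category, as
 * described at the top of this file.
 */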
static inline bool size_too_big(struct z_heap *h, size_t bytes)
{
	/*
	 * Quick check to bail out early if size is too big.
	 * Also guards against potential arithmetic overflows elsewhere.
	 */
	return (bytes / CHUNK_UNIT) >= h->end_chunk;
}

/* For debugging */
void heap_print_info(struct z_heap *h, bool dump_chunks);

#endif /* ZEPHYR_INCLUDE_LIB_OS_HEAP_H_ */