/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <sys/sys_heap.h>
#include <kernel.h>
#include <string.h>
#include "heap.h"

static void *chunk_mem(struct z_heap *h, chunkid_t c)
{
	chunk_unit_t *buf = chunk_buf(h);
	uint8_t *ret = ((uint8_t *)&buf[c]) + chunk_header_bytes(h);

	CHECK(!(((uintptr_t)ret) & (big_heap(h) ? 7 : 3)));

	return ret;
}

static void free_list_remove_bidx(struct z_heap *h, chunkid_t c, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	CHECK(!chunk_used(h, c));
	CHECK(b->next != 0);
	CHECK(h->avail_buckets & (1 << bidx));

	if (next_free_chunk(h, c) == c) {
		/* this is the last chunk */
		h->avail_buckets &= ~(1 << bidx);
		b->next = 0;
	} else {
		chunkid_t first = prev_free_chunk(h, c),
			  second = next_free_chunk(h, c);

		b->next = second;
		set_next_free_chunk(h, first, second);
		set_prev_free_chunk(h, second, first);
	}
}

static void free_list_remove(struct z_heap *h, chunkid_t c)
{
	if (!solo_free_header(h, c)) {
		int bidx = bucket_idx(h, chunk_size(h, c));

		free_list_remove_bidx(h, c, bidx);
	}
}

static void free_list_add_bidx(struct z_heap *h, chunkid_t c, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	if (b->next == 0U) {
		CHECK((h->avail_buckets & (1 << bidx)) == 0);

		/* Empty list, first item */
		h->avail_buckets |= (1 << bidx);
		b->next = c;
		set_prev_free_chunk(h, c, c);
		set_next_free_chunk(h, c, c);
	} else {
		CHECK(h->avail_buckets & (1 << bidx));

		/* Insert before (!) the "next" pointer */
		chunkid_t second = b->next;
		chunkid_t first = prev_free_chunk(h, second);

		set_prev_free_chunk(h, c, first);
		set_next_free_chunk(h, c, second);
		set_next_free_chunk(h, first, c);
		set_prev_free_chunk(h, second, c);
	}
}

static void free_list_add(struct z_heap *h, chunkid_t c)
{
	if (!solo_free_header(h, c)) {
		int bidx = bucket_idx(h, chunk_size(h, c));

		free_list_add_bidx(h, c, bidx);
	}
}

/* Splits a chunk "lc" into a left chunk and a right chunk at "rc".
 * Leaves both chunks marked "free"
 */
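/*
 * Illustration, with sizes expressed in CHUNK_UNIT and lsz == rc - lc:
 *
 *   before:   lc [------------- sz0 -------------]
 *   after:    lc [--- lsz ---]  rc [---- rsz ----]
 */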
static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
{
	CHECK(rc > lc);
	CHECK(rc - lc < chunk_size(h, lc));

	chunksz_t sz0 = chunk_size(h, lc);
	chunksz_t lsz = rc - lc;
	chunksz_t rsz = sz0 - lsz;

	set_chunk_size(h, lc, lsz);
	set_chunk_size(h, rc, rsz);
	set_left_chunk_size(h, rc, lsz);
	set_left_chunk_size(h, right_chunk(h, rc), rsz);
}

/* Does not modify free list */
static void merge_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
{
	chunksz_t newsz = chunk_size(h, lc) + chunk_size(h, rc);

	set_chunk_size(h, lc, newsz);
	set_left_chunk_size(h, right_chunk(h, rc), newsz);
}

static void free_chunk(struct z_heap *h, chunkid_t c)
{
	/* Merge with free right chunk? */
	if (!chunk_used(h, right_chunk(h, c))) {
		free_list_remove(h, right_chunk(h, c));
		merge_chunks(h, c, right_chunk(h, c));
	}

	/* Merge with free left chunk? */
	if (!chunk_used(h, left_chunk(h, c))) {
		free_list_remove(h, left_chunk(h, c));
		merge_chunks(h, left_chunk(h, c), c);
		c = left_chunk(h, c);
	}

	free_list_add(h, c);
}
/*
 * Return the closest chunk ID corresponding to a given memory pointer.
 * Here "closest" is only meaningful in the context of sys_heap_aligned_alloc()
 * where the requested alignment might not always correspond to a chunk header
 * boundary.
 */
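/*
 * Concretely, for a pointer p the returned ID c satisfies
 *   chunk_mem(h, c) <= p < chunk_mem(h, c) + CHUNK_UNIT
 * which is what sys_heap_aligned_alloc() relies on when it converts an
 * aligned pointer back into the chunk that must be split off and used.
 */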
static chunkid_t mem_to_chunkid(struct z_heap *h, void *p)
{
	uint8_t *mem = p, *base = (uint8_t *)chunk_buf(h);

	return (mem - chunk_header_bytes(h) - base) / CHUNK_UNIT;
}
void sys_heap_free(struct sys_heap *heap, void *mem)
{
	if (mem == NULL) {
		return; /* ISO C free() semantics */
	}
	struct z_heap *h = heap->heap;
	chunkid_t c = mem_to_chunkid(h, mem);

	/*
	 * This should catch many double-free cases.
	 * It is cheap enough that we do it unconditionally.
	 */
	__ASSERT(chunk_used(h, c),
		 "unexpected heap state (double-free?) for memory at %p", mem);

	/*
	 * It is easy to catch many common memory overflow cases with
	 * a quick check on the chunk header fields immediately before
	 * and after the freed memory.
	 */
	__ASSERT(left_chunk(h, right_chunk(h, c)) == c,
		 "corrupted heap bounds (buffer overflow?) for memory at %p",
		 mem);

	set_chunk_used(h, c, false);
	free_chunk(h, c);
}
static chunkid_t alloc_chunk(struct z_heap *h, chunksz_t sz)
{
	int bi = bucket_idx(h, sz);
	struct z_heap_bucket *b = &h->buckets[bi];

	CHECK(bi <= bucket_idx(h, h->end_chunk));

	/* First try a bounded count of items from the minimal bucket
	 * size.  These may not fit; trying (e.g.) three of them means
	 * that (assuming chunk sizes are evenly distributed[1]) we
	 * have a 7/8 chance of finding a match, thus keeping the
	 * number of such blocks consumed by allocation higher than
	 * the number of smaller blocks created by fragmenting larger
	 * ones.
	 *
	 * [1] In practice they are never evenly distributed, of
	 * course.  But even in pathological situations we still
	 * maintain our constant-time performance and at worst see
	 * fragmentation waste on the order of the allocated block
	 * only.
	 */
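	/* The 7/8 figure follows from a rough model: a chunk from this
	 * bucket fits a request from the same bucket about half the
	 * time, so three independent tries all miss with probability
	 * (1/2)^3 = 1/8, i.e. succeed with probability 7/8.
	 */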
	if (b->next) {
		chunkid_t first = b->next;
		int i = CONFIG_SYS_HEAP_ALLOC_LOOPS;
		do {
			chunkid_t c = b->next;

			if (chunk_size(h, c) >= sz) {
				free_list_remove_bidx(h, c, bi);
				return c;
			}
			b->next = next_free_chunk(h, c);
			CHECK(b->next != 0);
		} while (--i && b->next != first);
	}

	/* Otherwise pick the smallest non-empty bucket guaranteed to
	 * fit and use that unconditionally.
	 */
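	/* For instance, with bi == 2 the mask below clears bits 0..2,
	 * so only buckets 3 and up (whose smallest member is already
	 * large enough for sz) remain as candidates.
	 */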
	uint32_t bmask = h->avail_buckets & ~((1 << (bi + 1)) - 1);

	if (bmask != 0U) {
		int minbucket = __builtin_ctz(bmask);
		chunkid_t c = h->buckets[minbucket].next;

		free_list_remove_bidx(h, c, minbucket);
		CHECK(chunk_size(h, c) >= sz);
		return c;
	}

	return 0;
}
void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
{
	struct z_heap *h = heap->heap;

	if (bytes == 0U || size_too_big(h, bytes)) {
		return NULL;
	}

	chunksz_t chunk_sz = bytes_to_chunksz(h, bytes);
	chunkid_t c = alloc_chunk(h, chunk_sz);

	if (c == 0U) {
		return NULL;
	}

	/* Split off remainder if any */
	if (chunk_size(h, c) > chunk_sz) {
		split_chunks(h, c, c + chunk_sz);
		free_list_add(h, c + chunk_sz);
	}

	set_chunk_used(h, c, true);
	return chunk_mem(h, c);
}
void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
{
	struct z_heap *h = heap->heap;
	size_t gap, rew;

	/*
	 * Split align and rewind values (if any).
	 * We allow for one bit of rewind in addition to the alignment
	 * value to efficiently accommodate z_heap_aligned_alloc().
	 * So if e.g. align = 0x28 (32 | 8) this means we align to a 32-byte
	 * boundary and then rewind 8 bytes.
	 */
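	/*
	 * Worked example for align = 0x28: rew = align & -align = 8 (the
	 * lowest set bit), the remaining align becomes 0x20 = 32, and the
	 * returned pointer ends up 8 bytes below a 32-byte boundary.
	 */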
	rew = align & -align;
	if (align != rew) {
		align -= rew;
		gap = MIN(rew, chunk_header_bytes(h));
	} else {
		if (align <= chunk_header_bytes(h)) {
			return sys_heap_alloc(heap, bytes);
		}
		rew = 0;
		gap = chunk_header_bytes(h);
	}
	__ASSERT((align & (align - 1)) == 0, "align must be a power of 2");

	if (bytes == 0 || size_too_big(h, bytes)) {
		return NULL;
	}
	/*
	 * Find a free block that is guaranteed to fit.
	 * We over-allocate to account for alignment and then free
	 * the unused prefix and suffix afterwards.
	 */
	chunksz_t padded_sz = bytes_to_chunksz(h, bytes + align - gap);
	chunkid_t c0 = alloc_chunk(h, padded_sz);

	if (c0 == 0) {
		return NULL;
	}
	uint8_t *mem = chunk_mem(h, c0);

	/* Align allocated memory */
	mem = (uint8_t *) ROUND_UP(mem + rew, align) - rew;
	chunk_unit_t *end = (chunk_unit_t *) ROUND_UP(mem + bytes, CHUNK_UNIT);

	/* Get corresponding chunks */
	chunkid_t c = mem_to_chunkid(h, mem);
	chunkid_t c_end = end - chunk_buf(h);

	CHECK(c >= c0 && c < c_end && c_end <= c0 + padded_sz);

	/* Split and free unused prefix */
	if (c > c0) {
		split_chunks(h, c0, c);
		free_list_add(h, c0);
	}

	/* Split and free unused suffix */
	if (right_chunk(h, c) > c_end) {
		split_chunks(h, c, c_end);
		free_list_add(h, c_end);
	}

	set_chunk_used(h, c, true);
	return mem;
}
void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
			       size_t align, size_t bytes)
{
	struct z_heap *h = heap->heap;

	/* special realloc semantics */
	if (ptr == NULL) {
		return sys_heap_aligned_alloc(heap, align, bytes);
	}
	if (bytes == 0) {
		sys_heap_free(heap, ptr);
		return NULL;
	}

	__ASSERT((align & (align - 1)) == 0, "align must be a power of 2");

	if (size_too_big(h, bytes)) {
		return NULL;
	}

	chunkid_t c = mem_to_chunkid(h, ptr);
	chunkid_t rc = right_chunk(h, c);
	size_t align_gap = (uint8_t *)ptr - (uint8_t *)chunk_mem(h, c);
	chunksz_t chunks_need = bytes_to_chunksz(h, bytes + align_gap);

	if (align && ((uintptr_t)ptr & (align - 1))) {
		/* ptr is not sufficiently aligned */
	} else if (chunk_size(h, c) == chunks_need) {
		/* We're good already */
		return ptr;
	} else if (chunk_size(h, c) > chunks_need) {
		/* Shrink in place, split off and free unused suffix */
		split_chunks(h, c, c + chunks_need);
		set_chunk_used(h, c, true);
		free_chunk(h, c + chunks_need);
		return ptr;
	} else if (!chunk_used(h, rc) &&
		   (chunk_size(h, c) + chunk_size(h, rc) >= chunks_need)) {
		/* Expand: split the right chunk and append */
		chunkid_t split_size = chunks_need - chunk_size(h, c);

		free_list_remove(h, rc);

		if (split_size < chunk_size(h, rc)) {
			split_chunks(h, rc, rc + split_size);
			free_list_add(h, rc + split_size);
		}

		merge_chunks(h, c, rc);
		set_chunk_used(h, c, true);
		return ptr;
	} else {
		;
	}

	/* Fallback: allocate and copy */
	void *ptr2 = sys_heap_aligned_alloc(heap, align, bytes);

	if (ptr2 != NULL) {
		size_t prev_size = chunksz_to_bytes(h, chunk_size(h, c)) - align_gap;

		memcpy(ptr2, ptr, MIN(prev_size, bytes));
		sys_heap_free(heap, ptr);
	}
	return ptr2;
}
void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
{
	/* Must fit in a 31-bit count of CHUNK_UNIT */
	__ASSERT(bytes / CHUNK_UNIT <= 0x7fffffffU, "heap size is too big");

	/* Reserve the end marker chunk's header */
	__ASSERT(bytes > heap_footer_bytes(bytes), "heap size is too small");
	bytes -= heap_footer_bytes(bytes);

	/* Round the start up, the end down */
	uintptr_t addr = ROUND_UP(mem, CHUNK_UNIT);
	uintptr_t end = ROUND_DOWN((uint8_t *)mem + bytes, CHUNK_UNIT);
	chunksz_t heap_sz = (end - addr) / CHUNK_UNIT;

	CHECK(end > addr);
	__ASSERT(heap_sz > chunksz(sizeof(struct z_heap)), "heap size is too small");

	struct z_heap *h = (struct z_heap *)addr;
	heap->heap = h;
	h->end_chunk = heap_sz;
	h->avail_buckets = 0;

	int nb_buckets = bucket_idx(h, heap_sz) + 1;
	chunksz_t chunk0_size = chunksz(sizeof(struct z_heap) +
					nb_buckets * sizeof(struct z_heap_bucket));

	__ASSERT(chunk0_size + min_chunk_size(h) <= heap_sz, "heap size is too small");

	for (int i = 0; i < nb_buckets; i++) {
		h->buckets[i].next = 0;
	}

	/* chunk containing our struct z_heap */
	set_chunk_size(h, 0, chunk0_size);
	set_left_chunk_size(h, 0, 0);
	set_chunk_used(h, 0, true);

	/* chunk containing the free heap */
	set_chunk_size(h, chunk0_size, heap_sz - chunk0_size);
	set_left_chunk_size(h, chunk0_size, chunk0_size);

	/* the end marker chunk */
	set_chunk_size(h, heap_sz, 0);
	set_left_chunk_size(h, heap_sz, heap_sz - chunk0_size);
	set_chunk_used(h, heap_sz, true);

	free_list_add(h, chunk0_size);
}
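
/*
 * Minimal usage sketch, kept here for illustration only: it exercises the
 * public sys_heap API implemented above. The buffer size, variable names
 * and the example function itself are invented for this sketch and are not
 * part of the heap implementation.
 */
static inline void heap_usage_example(void)
{
	static uint8_t heap_mem[2048];	/* arbitrary backing storage */
	struct sys_heap heap;

	/* Carve a heap out of the static buffer */
	sys_heap_init(&heap, heap_mem, sizeof(heap_mem));

	/* Plain allocation, then resize it (in place when possible) */
	void *p = sys_heap_alloc(&heap, 100);

	p = sys_heap_aligned_realloc(&heap, p, 0, 200);

	/* Allocation returned on a 64-byte boundary */
	void *q = sys_heap_aligned_alloc(&heap, 64, 100);

	sys_heap_free(&heap, p);
	sys_heap_free(&heap, q);
}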