/* mspace.h — declarations for dlmalloc's independent allocation spaces (mspaces). */
/* Guard renamed from __MSPACE__H__: identifiers containing a double
   underscore are reserved for the implementation (C11 7.1.3). */
#ifndef MSPACE_H
#define MSPACE_H

#include <stddef.h> /* size_t */

#ifdef __cplusplus
extern "C" {
#endif

/*
  Per-space usage statistics filled in by mspace_malloc_stats().
  Field meanings presumed from the footprint queries below —
  TODO(review): confirm against the implementation.
*/
typedef struct
{
  size_t max_size;  /* peak bytes obtained from the system (cf. mspace_max_footprint) */
  size_t sys_size;  /* current bytes obtained from the system (cf. mspace_footprint) */
  size_t used_size; /* bytes currently allocated from this space */
} mspaceinfo_t;

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size. It
  returns null if there is no system memory available to create the
  space. If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests. You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
mspace create_mspace(size_t capacity, int locked);

/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
size_t destroy_mspace(mspace msp);

/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.) When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
mspace create_mspace_with_base(void* base, size_t capacity, int locked);

/*
  mspace_track_large_chunks controls whether requests for large chunks
  are allocated in their own untracked mmapped regions, separate from
  others in this mspace. By default large chunks are not tracked,
  which reduces fragmentation. However, such chunks are not
  necessarily released to the system upon destroy_mspace. Enabling
  tracking by setting to true may increase fragmentation, but avoids
  leakage when relying on destroy_mspace to release all memory
  allocated using this space. The function returns the previous
  setting.
*/
int mspace_track_large_chunks(mspace msp, int enable);

/*
  mspace_malloc behaves as malloc, but operates within
  the given space.
*/
void* mspace_malloc(mspace msp, size_t bytes);

/*
  mspace_free behaves as free, but operates within
  the given space.
  If compiled with FOOTERS==1, mspace_free is not actually needed.
  free may be called instead of mspace_free because freed chunks from
  any space are handled by their originating spaces.
*/
void mspace_free(mspace msp, void* mem);

/*
  mspace_realloc behaves as realloc, but operates within
  the given space.
  If compiled with FOOTERS==1, mspace_realloc is not actually
  needed. realloc may be called instead of mspace_realloc because
  realloced chunks from any space are handled by their originating
  spaces.
*/
void* mspace_realloc(mspace msp, void* mem, size_t newsize);

/*
  mspace_calloc behaves as calloc, but operates within
  the given space.
*/
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

/*
  mspace_memalign behaves as memalign, but operates within
  the given space.
*/
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);

/*
  mspace_independent_calloc behaves as independent_calloc, but
  operates within the given space.
*/
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]);

/*
  mspace_independent_comalloc behaves as independent_comalloc, but
  operates within the given space.
*/
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]);

/*
  mspace_malloc2 behaves as mspace_malloc, but provides additional
  functionality. Setting alignment to a non-zero value is
  identical to using mspace_memalign(). Flags may be set to:
   * M2_ZERO_MEMORY:     Sets the contents of the allocated chunk to
                         zero.
   * M2_ALWAYS_MMAP:     Always allocate as though mmap_threshold
                         were being exceeded. This is useful for large
                         arrays which frequently extend.
   * M2_RESERVE_MULT(n): Reserve n times as much address space such
                         that mmapped realloc() is much faster.
   * M2_RESERVE_SHIFT(n): Reserve (1<<n) bytes of address space such
                         that mmapped realloc() is much faster.
  Note when setting RESERVE sizes that on some platforms (e.g. Windows)
  page tables are constructed for the reservation size. On x86/x64
  Windows this costs 2Kb of kernel memory per Mb reserved, and as on
  x86 kernel memory is not abundant you should not be excessive.
*/
void* mspace_malloc2(mspace msp, size_t bytes, size_t alignment, unsigned flags);

/*
  mspace_realloc2 behaves as mspace_realloc, but provides additional
  functionality. Setting alignment to a non-zero value is
  identical to using mspace_memalign(). Flags may be set to:
   * M2_ZERO_MEMORY:     Sets any increase in the allocated chunk to
                         zero. Note that this zeroes only the increase
                         from what dlmalloc thinks the chunk's size is,
                         so if you didn't use this flag when allocating
                         with malloc2 (which zeroes up to chunk size)
                         then you may have garbage just before the new
                         space.
   * M2_PREVENT_MOVE:    Prevent moves in realloc2() which is very
                         useful for C++ container objects.
   * M2_ALWAYS_MMAP:     Always allocate as though mmap_threshold
                         were being exceeded. Note that setting this
                         bit will not necessarily mmap a chunk which
                         isn't already mmapped, but it will force a
                         mmapped chunk if new memory needs allocating.
   * M2_RESERVE_MULT(n): Reserve n times as much address space such
                         that mmapped realloc() is much faster.
   * M2_RESERVE_SHIFT(n): Reserve (1<<n) bytes of address space such
                         that mmapped realloc() is much faster.
  Note when setting RESERVE sizes that on some platforms (e.g. Windows)
  page tables are constructed for the reservation size. On x86/x64
  Windows this costs 2Kb of kernel memory per Mb reserved, and as on
  x86 kernel memory is not abundant you should not be excessive.
  With regard to M2_RESERVE_*, these only take effect when the
  mmapped chunk has exceeded its reservation space and a new
  reservation space needs to be created.
*/
void* mspace_realloc2(mspace msp, void* mem, size_t newsize, size_t alignment, unsigned flags);

/*
  mspace_footprint() returns the number of bytes obtained from the
  system for this space.
*/
size_t mspace_footprint(mspace msp);

/*
  mspace_max_footprint() returns the peak number of bytes obtained from the
  system for this space.
*/
size_t mspace_max_footprint(mspace msp);

/*
  mspace_mallinfo behaves as mallinfo, but reports properties of
  the given space.
  NOTE(review): struct mallinfo is not declared in this header; a
  prior declaration (normally from the dlmalloc/malloc header) must
  be visible before this prototype is used — confirm include order
  at the call sites.
*/
struct mallinfo mspace_mallinfo(mspace msp);

/*
  mspace_usable_size behaves the same as malloc_usable_size, but for
  memory obtained from an mspace.
*/
size_t mspace_usable_size(void* mem);

/*
  mspace_malloc_stats behaves as malloc_stats, but reports
  properties of the given space by filling in *info.
*/
void mspace_malloc_stats(mspace msp, mspaceinfo_t *info);

/*
  mspace_trim behaves as malloc_trim, but
  operates within the given space.
*/
int mspace_trim(mspace msp, size_t pad);

/*
  An alias for mallopt.
*/
int mspace_mallopt(int param_number, int value);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* MSPACE_H */