/*
  mspace.h — public API for dlmalloc-style independent allocation spaces
  (mspaces). Each mspace is a self-contained heap supporting malloc/free/
  realloc/calloc/memalign equivalents that operate only within that space.
*/
  1. #ifndef __MSPACE__H__
  2. #define __MSPACE__H__
  3. typedef struct
  4. {
  5. size_t max_size;
  6. size_t sys_size;
  7. size_t used_size;
  8. } mspaceinfo_t;
  9. /*
  10. mspace is an opaque type representing an independent
  11. region of space that supports mspace_malloc, etc.
  12. */
  13. typedef void* mspace;
  14. /*
  15. create_mspace creates and returns a new independent space with the
  16. given initial capacity, or, if 0, the default granularity size. It
  17. returns null if there is no system memory available to create the
  18. space. If argument locked is non-zero, the space uses a separate
  19. lock to control access. The capacity of the space will grow
  20. dynamically as needed to service mspace_malloc requests. You can
  21. control the sizes of incremental increases of this space by
  22. compiling with a different DEFAULT_GRANULARITY or dynamically
  23. setting with mallopt(M_GRANULARITY, value).
  24. */
  25. mspace create_mspace(size_t capacity, int locked);
  26. /*
  27. destroy_mspace destroys the given space, and attempts to return all
  28. of its memory back to the system, returning the total number of
  29. bytes freed. After destruction, the results of access to all memory
  30. used by the space become undefined.
  31. */
  32. size_t destroy_mspace(mspace msp);
  33. /*
  34. create_mspace_with_base uses the memory supplied as the initial base
  35. of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  36. space is used for bookkeeping, so the capacity must be at least this
  37. large. (Otherwise 0 is returned.) When this initial space is
  38. exhausted, additional memory will be obtained from the system.
  39. Destroying this space will deallocate all additionally allocated
  40. space (if possible) but not the initial base.
  41. */
  42. mspace create_mspace_with_base(void* base, size_t capacity, int locked);
  43. /*
  44. mspace_track_large_chunks controls whether requests for large chunks
  45. are allocated in their own untracked mmapped regions, separate from
  46. others in this mspace. By default large chunks are not tracked,
  47. which reduces fragmentation. However, such chunks are not
  48. necessarily released to the system upon destroy_mspace. Enabling
  49. tracking by setting to true may increase fragmentation, but avoids
  50. leakage when relying on destroy_mspace to release all memory
  51. allocated using this space. The function returns the previous
  52. setting.
  53. */
  54. int mspace_track_large_chunks(mspace msp, int enable);
  55. /*
  56. mspace_malloc behaves as malloc, but operates within
  57. the given space.
  58. */
  59. void* mspace_malloc(mspace msp, size_t bytes);
  60. /*
  61. mspace_free behaves as free, but operates within
  62. the given space.
  63. If compiled with FOOTERS==1, mspace_free is not actually needed.
  64. free may be called instead of mspace_free because freed chunks from
  65. any space are handled by their originating spaces.
  66. */
  67. void mspace_free(mspace msp, void* mem);
  68. /*
  69. mspace_realloc behaves as realloc, but operates within
  70. the given space.
  71. If compiled with FOOTERS==1, mspace_realloc is not actually
  72. needed. realloc may be called instead of mspace_realloc because
  73. realloced chunks from any space are handled by their originating
  74. spaces.
  75. */
  76. void* mspace_realloc(mspace msp, void* mem, size_t newsize);
  77. /*
  78. mspace_calloc behaves as calloc, but operates within
  79. the given space.
  80. */
  81. void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  82. /*
  83. mspace_memalign behaves as memalign, but operates within
  84. the given space.
  85. */
  86. void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  87. /*
  88. mspace_independent_calloc behaves as independent_calloc, but
  89. operates within the given space.
  90. */
  91. void** mspace_independent_calloc(mspace msp, size_t n_elements,
  92. size_t elem_size, void* chunks[]);
  93. /*
  94. mspace_independent_comalloc behaves as independent_comalloc, but
  95. operates within the given space.
  96. */
  97. void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  98. size_t sizes[], void* chunks[]);
  99. /*
  100. mspace_malloc2 behaves as mspace_malloc, but provides additional
  101. functionality. Setting alignment to a non-zero value is
  102. identical to using mspace_memalign(). Flags may be set to:
  103. * M2_ZERO_MEMORY: Sets the contents of the allocated chunk to
  104. zero.
  105. * M2_ALWAYS_MMAP: Always allocate as though mmap_threshold
  106. were being exceeded. This is useful for large
  107. arrays which frequently extend.
  108. * M2_RESERVE_MULT(n): Reserve n times as much address space such
  109. that mmapped realloc() is much faster.
  110. * M2_RESERVE_SHIFT(n): Reserve (1<<n) bytes of address space such
  111. that mmapped realloc() is much faster.
  112. Note when setting RESERVE sizes that on some platforms (e.g. Windows)
  113. page tables are constructed for the reservation size. On x86/x64
  114. Windows this costs 2Kb of kernel memory per Mb reserved, and as on
  115. x86 kernel memory is not abundant you should not be excessive.
  116. */
  117. void* mspace_malloc2(mspace msp, size_t bytes, size_t alignment, unsigned flags);
  118. /*
  119. mspace_realloc2 behaves as mspace_realloc, but provides additional
  120. functionality. Setting alignment to a non-zero value is
  121. identical to using mspace_memalign(). Flags may be set to:
  122. * M2_ZERO_MEMORY: Sets any increase in the allocated chunk to
  123. zero. Note that this zeroes only the increase
  124. from what dlmalloc thinks the chunk's size is,
  125. so if you didn't use this flag when allocating
  126. with malloc2 (which zeroes up to chunk size)
  127. then you may have garbage just before the new
  128. space.
  129. * M2_PREVENT_MOVE: Prevent moves in realloc2() which is very
  130. useful for C++ container objects.
  131. * M2_ALWAYS_MMAP: Always allocate as though mmap_threshold
  132. were being exceeded. Note that setting this
  133. bit will not necessarily mmap a chunk which
  134. isn't already mmapped, but it will force a
  135. mmapped chunk if new memory needs allocating.
  136. * M2_RESERVE_MULT(n): Reserve n times as much address space such
  137. that mmapped realloc() is much faster.
  138. * M2_RESERVE_SHIFT(n): Reserve (1<<n) bytes of address space such
  139. that mmapped realloc() is much faster.
  140. Note when setting RESERVE sizes that on some platforms (e.g. Windows)
  141. page tables are constructed for the reservation size. On x86/x64
  142. Windows this costs 2Kb of kernel memory per Mb reserved, and as on
  143. x86 kernel memory is not abundant you should not be excessive.
  144. With regard to M2_RESERVE_*, these only take effect when the
  145. mmapped chunk has exceeded its reservation space and a new
  146. reservation space needs to be created.
  147. */
  148. void* mspace_realloc2(mspace msp, void* mem, size_t newsize, size_t alignment, unsigned flags);
  149. /*
  150. mspace_footprint() returns the number of bytes obtained from the
  151. system for this space.
  152. */
  153. size_t mspace_footprint(mspace msp);
  154. /*
  155. mspace_max_footprint() returns the peak number of bytes obtained from the
  156. system for this space.
  157. */
  158. size_t mspace_max_footprint(mspace msp);
  159. /*
  160. mspace_mallinfo behaves as mallinfo, but reports properties of
  161. the given space.
  162. */
  163. struct mallinfo mspace_mallinfo(mspace msp);
  164. /*
  165. malloc_usable_size(void* p) behaves the same as malloc_usable_size;
  166. */
  167. size_t mspace_usable_size(void* mem);
  168. /*
  169. mspace_malloc_stats behaves as malloc_stats, but reports
  170. properties of the given space.
  171. */
  172. void mspace_malloc_stats(mspace msp, mspaceinfo_t *pstmspacinfo);
  173. /*
  174. mspace_trim behaves as malloc_trim, but
  175. operates within the given space.
  176. */
  177. int mspace_trim(mspace msp, size_t pad);
  178. /*
  179. An alias for mallopt.
  180. */
  181. int mspace_mallopt(int, int);
  182. #endif /* __MSPACE__H__ */