/* mem_slabs.c */
  1. /*
  2. * Copyright (c) 2019 Actions Semi Co., Inc.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. /**
  7. * @file
  8. * @brief mem slab manager.
  9. */
  10. #include <os_common_api.h>
  11. #include <kernel.h>
  12. #include <kernel_structs.h>
  13. #include <init.h>
  14. #include <toolchain.h>
  15. #include <linker/sections.h>
  16. #include <string.h>
  17. #include <memory/mem_slabs.h>
  18. static int find_slab_by_addr(struct slabs_info * slabs, void * addr)
  19. {
  20. int i = 0;
  21. int target_slab_index = slabs->slab_num;
  22. for (i = 0 ; i < slabs->slab_num; i++) {
  23. if ((uint32_t)slabs->slabs[i].slab_base <= (uint32_t)addr
  24. && (uint32_t)((uint32_t)slabs->slabs[i].slab_base +
  25. slabs->slabs[i].block_size *
  26. slabs->slabs[i].block_num) > (uint32_t)addr) {
  27. target_slab_index = i;
  28. break;
  29. }
  30. }
  31. return target_slab_index;
  32. }
  33. static void dump_mem_hex(struct slabs_info *slabs, int slab_index);
  34. static void* malloc_from_stable_slab(struct slabs_info * slabs,int slab_index)
  35. {
  36. void * block_ptr = NULL;
  37. if (slab_index >= 0 && slab_index < slabs->slab_num) {
  38. if(!k_mem_slab_alloc(
  39. slabs->slabs[slab_index].slab,
  40. &block_ptr, K_NO_WAIT)) {
  41. if (slabs->max_used[slab_index] <
  42. k_mem_slab_num_used_get(slabs->slabs[slab_index].slab)) {
  43. slabs->max_used[slab_index] =
  44. k_mem_slab_num_used_get(slabs->slabs[slab_index].slab);
  45. }
  46. } else {
  47. SYS_LOG_ERR("Memory allocation time-out"
  48. "slab_index %d, free_list %p next freelist %p ",
  49. slab_index, slabs->slabs[slab_index].slab->free_list,
  50. *(char **)(slabs->slabs[slab_index].slab->free_list));
  51. #if DEBUG
  52. dump_stack();
  53. #endif
  54. mem_slabs_dump(slabs, -1);
  55. }
  56. } else {
  57. SYS_LOG_INF("slab_index %d overflow max is %d",
  58. slab_index, slabs->slab_num);
  59. }
  60. #ifdef DEBUG
  61. SYS_LOG_DBG("mem_malloc from stable_slab %d : ptr %p",
  62. slab_index, block_ptr);
  63. #endif
  64. return block_ptr;
  65. }
  66. static bool free_to_stable_slab(struct slabs_info *slabs, void * ptr)
  67. {
  68. int slab_index = find_slab_by_addr(slabs, ptr);
  69. if (slab_index >= 0 && slab_index < slabs->slab_num) {
  70. k_mem_slab_free(slabs->slabs[slab_index].slab,
  71. &ptr);
  72. #ifdef DEBUG
  73. SYS_LOG_DBG("mem_free to stable slab %d "
  74. ": ptr %p ",slab_index, ptr);
  75. #endif
  76. return true;
  77. }
  78. return false;
  79. }
  80. static int find_slab_index(struct slabs_info *slabs, unsigned int num_bytes)
  81. {
  82. uint8_t i = 0;
  83. uint8_t first_fit_slab = slabs->slab_num;
  84. uint8_t target_slab_index = slabs->slab_num;
  85. uint8_t flag=1;
  86. for(i = 0; i < slabs->slab_num; i++) {
  87. if (slabs->slabs[i].block_size >= num_bytes) {
  88. target_slab_index = i;
  89. if (first_fit_slab == slabs->slab_num) {
  90. first_fit_slab = i;
  91. }
  92. if(k_mem_slab_num_free_get(
  93. slabs->slabs[target_slab_index].slab) != 0) {
  94. break;
  95. } else {
  96. if (flag) {
  97. SYS_LOG_DBG("%d ne",slabs->slabs[i].block_size);
  98. flag = 0;
  99. }
  100. }
  101. }
  102. }
  103. return target_slab_index;
  104. }
  105. static void dump_mem_hex(struct slabs_info *slabs, int slab_index)
  106. {
  107. int length= slabs->slabs[slab_index].block_size *
  108. slabs->slabs[slab_index].block_num;
  109. unsigned char * addr = slabs->slabs[slab_index].slab_base;
  110. int num = 0;
  111. printk("slab id : %d base addr: %p , lenght %d \n",
  112. slab_index, addr, length);
  113. printk ("free_list %p next freelist %p \n",
  114. slabs->slabs[slab_index].slab->free_list,
  115. *(char **)(slabs->slabs[slab_index].slab->free_list));
  116. for (int i = 0 ; i < length; i++) {
  117. if ((i % 16) == 0) {
  118. printk("\n");
  119. }
  120. printk(" %2x ",addr[i]);
  121. }
  122. printk("\n");
  123. void * free_node = slabs->slabs[slab_index].slab->free_list;
  124. while (free_node != NULL) {
  125. printk("node[%d] %p \n",num++,free_node);
  126. free_node = *(char **)free_node;
  127. }
  128. if (k_mem_slab_num_free_get(slabs->slabs[slab_index].slab) != num) {
  129. printk("mem lost num %d , mem_free %d \n",
  130. num,
  131. k_mem_slab_num_free_get(slabs->slabs[slab_index].slab));
  132. }
  133. }
  134. void * mem_slabs_malloc(struct slabs_info * slabs, unsigned int num_bytes)
  135. {
  136. void * block_ptr = NULL;
  137. unsigned int key = irq_lock();
  138. int slab_index = find_slab_index(slabs, num_bytes);
  139. #ifdef DEBUG
  140. SYS_LOG_DBG("Memory mem_malloc num_bytes %d bytes begin",num_bytes);
  141. #endif
  142. if (slab_index >= slabs->slab_num) {
  143. SYS_LOG_ERR("Memory allocation failed ,block too big num_bytes %d ",
  144. num_bytes);
  145. #ifdef DEBUG
  146. dump_stack();
  147. #endif
  148. goto END;
  149. }
  150. block_ptr = malloc_from_stable_slab(slabs, slab_index);
  151. if (block_ptr != NULL) {
  152. if (slabs->max_size[slab_index] < num_bytes)
  153. slabs->max_size[slab_index] = num_bytes;
  154. goto END;
  155. }
  156. END:
  157. #ifdef DEBUG
  158. SYS_LOG_INF("Memory allocation num_bytes %d : block_ptr %p slab_index %d",
  159. num_bytes, block_ptr, slab_index);
  160. #endif
  161. if (block_ptr == NULL) {
  162. // dump_stack();
  163. SYS_LOG_ERR("Memory allocation failed , num_bytes %d ", num_bytes);
  164. }
  165. irq_unlock(key);
  166. return block_ptr;
  167. }
  168. void mem_slabs_free(struct slabs_info * slabs, void *ptr)
  169. {
  170. unsigned int key = irq_lock();
  171. #ifdef DEBUG
  172. SYS_LOG_DBG("Memory Free ptr %p begin",ptr);
  173. #endif
  174. if (ptr != NULL) {
  175. if (!free_to_stable_slab(slabs, ptr)) {
  176. #ifdef DEBUG
  177. dump_stack();
  178. #endif
  179. SYS_LOG_ERR("Memory Free ERR ptr %p ", ptr);
  180. goto exit;
  181. }
  182. #ifdef DEBUG
  183. SYS_LOG_INF("Memory Free ptr %p ",ptr);
  184. #endif
  185. }
  186. exit:
  187. irq_unlock(key);
  188. }
  189. void mem_slabs_init(struct slabs_info * slabs)
  190. {
  191. for(int i = 0 ; i < slabs->slab_num; i++) {
  192. k_mem_slab_init(slabs->slabs[i].slab,
  193. slabs->slabs[i].slab_base,
  194. slabs->slabs[i].block_size,
  195. slabs->slabs[i].block_num);
  196. }
  197. }
  198. void mem_slabs_dump(struct slabs_info * slabs, int index)
  199. {
  200. int total_used = 0;
  201. int total_size = 0;
  202. for (int i = 0 ; i < slabs->slab_num; i++) {
  203. total_used +=
  204. slabs->slabs[i].block_size *
  205. k_mem_slab_num_used_get(slabs->slabs[i].slab);
  206. total_size += slabs->slabs[i].block_size *
  207. slabs->slabs[i].block_num;
  208. }
  209. printk("slabs total mem : %d bytes ,used mem %d bytes\n",
  210. total_size, total_used);
  211. for (int i = 0 ; i < slabs->slab_num; i++) {
  212. printk(
  213. " mem slab %d :block size %4u : used %4u , mem_free %4u,"
  214. " max used %4u, max size %6u\n",
  215. i, slabs->slabs[i].block_size,
  216. k_mem_slab_num_used_get(slabs->slabs[i].slab),
  217. k_mem_slab_num_free_get(slabs->slabs[i].slab),
  218. slabs->max_used[i], slabs->max_size[i]);
  219. }
  220. if (index >= 0 && index < slabs->slab_num) {
  221. dump_mem_hex(slabs, index);
  222. }
  223. }