/*
 * Copyright (c) 2019 Actions Semi Co., Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief mem slab manager.
 */

#include <os_common_api.h>
#include <mem_manager.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <init.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <string.h>
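
/*
 * This file manages CONFIG_SLAB_TOTAL_NUM fixed-size memory slabs carved
 * out of a single statically allocated buffer. Requests are served from
 * the smallest slab whose block size fits; with CONFIG_USED_DYNAMIC_SLAB,
 * a block from a larger slab can additionally be split into two half-size
 * sub-blocks on demand (see the dynamic slab helpers below).
 */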
#define CONFIG_SLAB_TOTAL_NUM 9

struct k_mem_slab mem_slab[CONFIG_SLAB_TOTAL_NUM];
uint8_t system_max_used[CONFIG_SLAB_TOTAL_NUM];
uint16_t system_max_size[CONFIG_SLAB_TOTAL_NUM];

#ifdef CONFIG_USED_DYNAMIC_SLAB
sys_slist_t dynamic_slab_list[CONFIG_SLAB_TOTAL_NUM];
#endif

#define SLAB_TOTAL_SIZE (CONFIG_SLAB0_BLOCK_SIZE * CONFIG_SLAB0_NUM_BLOCKS \
    + CONFIG_SLAB1_BLOCK_SIZE * CONFIG_SLAB1_NUM_BLOCKS \
    + CONFIG_SLAB2_BLOCK_SIZE * CONFIG_SLAB2_NUM_BLOCKS \
    + CONFIG_SLAB3_BLOCK_SIZE * CONFIG_SLAB3_NUM_BLOCKS \
    + CONFIG_SLAB4_BLOCK_SIZE * CONFIG_SLAB4_NUM_BLOCKS \
    + CONFIG_SLAB5_BLOCK_SIZE * CONFIG_SLAB5_NUM_BLOCKS \
    + CONFIG_SLAB6_BLOCK_SIZE * CONFIG_SLAB6_NUM_BLOCKS \
    + CONFIG_SLAB7_BLOCK_SIZE * CONFIG_SLAB7_NUM_BLOCKS \
    + CONFIG_SLAB8_BLOCK_SIZE * CONFIG_SLAB8_NUM_BLOCKS)

char __aligned(4) mem_slab_buffer[SLAB_TOTAL_SIZE];

#define SLAB0_BLOCK_OFF 0
#define SLAB1_BLOCK_OFF \
    (SLAB0_BLOCK_OFF + \
     CONFIG_SLAB0_BLOCK_SIZE * CONFIG_SLAB0_NUM_BLOCKS)
#define SLAB2_BLOCK_OFF \
    (SLAB1_BLOCK_OFF + \
     CONFIG_SLAB1_BLOCK_SIZE * CONFIG_SLAB1_NUM_BLOCKS)
#define SLAB3_BLOCK_OFF \
    (SLAB2_BLOCK_OFF + \
     CONFIG_SLAB2_BLOCK_SIZE * CONFIG_SLAB2_NUM_BLOCKS)
#define SLAB4_BLOCK_OFF \
    (SLAB3_BLOCK_OFF + \
     CONFIG_SLAB3_BLOCK_SIZE * CONFIG_SLAB3_NUM_BLOCKS)
#define SLAB5_BLOCK_OFF \
    (SLAB4_BLOCK_OFF + \
     CONFIG_SLAB4_BLOCK_SIZE * CONFIG_SLAB4_NUM_BLOCKS)
#define SLAB6_BLOCK_OFF \
    (SLAB5_BLOCK_OFF + \
     CONFIG_SLAB5_BLOCK_SIZE * CONFIG_SLAB5_NUM_BLOCKS)
#define SLAB7_BLOCK_OFF \
    (SLAB6_BLOCK_OFF + \
     CONFIG_SLAB6_BLOCK_SIZE * CONFIG_SLAB6_NUM_BLOCKS)
#define SLAB8_BLOCK_OFF \
    (SLAB7_BLOCK_OFF + \
     CONFIG_SLAB7_BLOCK_SIZE * CONFIG_SLAB7_NUM_BLOCKS)

const struct slabs_info sys_slab = {
    .slab_num = CONFIG_SLAB_TOTAL_NUM,
    .max_used = system_max_used,
    .max_size = system_max_size,
    .slab_flag = SYSTEM_MEM_SLAB,
    .slabs = {
        {
            .slab = &mem_slab[0],
            .slab_base = &mem_slab_buffer[SLAB0_BLOCK_OFF],
            .block_num = CONFIG_SLAB0_NUM_BLOCKS,
            .block_size = CONFIG_SLAB0_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[0],
#endif
        },
        {
            .slab = &mem_slab[1],
            .slab_base = &mem_slab_buffer[SLAB1_BLOCK_OFF],
            .block_num = CONFIG_SLAB1_NUM_BLOCKS,
            .block_size = CONFIG_SLAB1_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[1],
#endif
        },
        {
            .slab = &mem_slab[2],
            .slab_base = &mem_slab_buffer[SLAB2_BLOCK_OFF],
            .block_num = CONFIG_SLAB2_NUM_BLOCKS,
            .block_size = CONFIG_SLAB2_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[2],
#endif
        },
        {
            .slab = &mem_slab[3],
            .slab_base = &mem_slab_buffer[SLAB3_BLOCK_OFF],
            .block_num = CONFIG_SLAB3_NUM_BLOCKS,
            .block_size = CONFIG_SLAB3_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[3],
#endif
        },
        {
            .slab = &mem_slab[4],
            .slab_base = &mem_slab_buffer[SLAB4_BLOCK_OFF],
            .block_num = CONFIG_SLAB4_NUM_BLOCKS,
            .block_size = CONFIG_SLAB4_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[4],
#endif
        },
        {
            .slab = &mem_slab[5],
            .slab_base = &mem_slab_buffer[SLAB5_BLOCK_OFF],
            .block_num = CONFIG_SLAB5_NUM_BLOCKS,
            .block_size = CONFIG_SLAB5_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[5],
#endif
        },
        {
            .slab = &mem_slab[6],
            .slab_base = &mem_slab_buffer[SLAB6_BLOCK_OFF],
            .block_num = CONFIG_SLAB6_NUM_BLOCKS,
            .block_size = CONFIG_SLAB6_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[6],
#endif
        },
        {
            .slab = &mem_slab[7],
            .slab_base = &mem_slab_buffer[SLAB7_BLOCK_OFF],
            .block_num = CONFIG_SLAB7_NUM_BLOCKS,
            .block_size = CONFIG_SLAB7_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[7],
#endif
        },
        {
            .slab = &mem_slab[8],
            .slab_base = &mem_slab_buffer[SLAB8_BLOCK_OFF],
            .block_num = CONFIG_SLAB8_NUM_BLOCKS,
            .block_size = CONFIG_SLAB8_BLOCK_SIZE,
#ifdef CONFIG_USED_DYNAMIC_SLAB
            .dynamic_slab_list = &dynamic_slab_list[8],
#endif
        }
    }
};
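
/*
 * Return the index of the slab whose block range contains @addr, or
 * slabs->slab_num when the address belongs to none of them.
 */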
static int find_slab_by_addr(struct slabs_info *slabs, void *addr)
{
    int i = 0;
    int target_slab_index = slabs->slab_num;

    for (i = 0; i < slabs->slab_num; i++) {
        if ((uint32_t)slabs->slabs[i].slab_base <= (uint32_t)addr
            && (uint32_t)((uint32_t)slabs->slabs[i].slab_base +
                slabs->slabs[i].block_size *
                slabs->slabs[i].block_num) > (uint32_t)addr) {
            target_slab_index = i;
            break;
        }
    }
    return target_slab_index;
}

static void dump_mem_hex(struct slabs_info *slabs, int slab_index);

static void *malloc_from_stable_slab(struct slabs_info *slabs, int slab_index)
{
    void *block_ptr = NULL;

    if (slab_index >= 0 && slab_index < slabs->slab_num) {
        if (!k_mem_slab_alloc(slabs->slabs[slab_index].slab,
                              &block_ptr, K_NO_WAIT)) {
            memset(block_ptr, 0, slabs->slabs[slab_index].block_size);
            if (slabs->max_used[slab_index] <
                k_mem_slab_num_used_get(slabs->slabs[slab_index].slab)) {
                slabs->max_used[slab_index] =
                    k_mem_slab_num_used_get(slabs->slabs[slab_index].slab);
            }
        } else {
            // SYS_LOG_ERR("Memory allocation time-out"
            //     " free_list %p next freelist %p ",
            //     slabs->slabs[slab_index].slab->free_list,
            //     *(char **)(slabs->slabs[slab_index].slab->free_list));
#ifdef DEBUG
            dump_stack();
            dump_mem_hex(slabs, slab_index);
#endif
        }
    } else {
        SYS_LOG_INF("slab_index %d overflow max is %d",
                    slab_index, slabs->slab_num);
    }
#ifdef DEBUG
    SYS_LOG_DBG("mem_malloc from stable_slab %d : ptr %p",
                slab_index, block_ptr);
#endif
    return block_ptr;
}

static bool free_to_stable_slab(struct slabs_info *slabs, void *ptr)
{
    int slab_index = find_slab_by_addr(slabs, ptr);

    if (slab_index >= 0 && slab_index < slabs->slab_num) {
        memset(ptr, 0, slabs->slabs[slab_index].block_size);
        k_mem_slab_free(slabs->slabs[slab_index].slab, &ptr);
#ifdef DEBUG
        SYS_LOG_DBG("mem_free to stable slab %d "
                    ": ptr %p ", slab_index, ptr);
#endif
        return true;
    }
    return false;
}
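
/*
 * Dynamic slabs: one block taken from a larger "parent" slab is split into
 * two half-size sub-blocks, tracked by a dynamic_slab_info node. The node's
 * base_addr doubles as a bitmap: because blocks are word aligned, its low
 * bits are free to carry the SLAB0_BUSY/SLAB1_BUSY flags (the flag values
 * themselves come from a header such as mem_manager.h, not from this file).
 */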
#ifdef CONFIG_USED_DYNAMIC_SLAB
static void *malloc_from_dynamic_slab(struct slabs_info *slabs, int slab_index)
{
    sys_snode_t *node, *tmp;
    void *alloc_block = NULL;
    sys_slist_t *dynamic_list = slabs->slabs[slab_index].dynamic_slab_list;

    SYS_SLIST_FOR_EACH_NODE_SAFE(dynamic_list, node, tmp) {
        struct dynamic_slab_info *slab = DYNAMIC_SLAB_INFO(node);

        if (((uint32_t)slab->base_addr & ALL_SLAB_BUSY) != ALL_SLAB_BUSY) {
            if (((uint32_t)slab->base_addr & SLAB0_BUSY) != SLAB0_BUSY) {
                alloc_block = slab->base_addr;
                slab->base_addr =
                    (void *)((uint32_t)slab->base_addr | SLAB0_BUSY);
#ifdef DEBUG
                SYS_LOG_DBG("mem_malloc from dynamic slab %d slab 0 : ptr %p ",
                            slab_index,
                            (void *)((uint32_t)alloc_block & (~ALL_SLAB_BUSY)));
#endif
                break;
            } else if (((uint32_t)slab->base_addr & SLAB1_BUSY) != SLAB1_BUSY) {
                alloc_block = slab->base_addr +
                    slabs->slabs[slab_index].block_size;
                slab->base_addr =
                    (void *)((uint32_t)slab->base_addr | SLAB1_BUSY);
#ifdef DEBUG
                SYS_LOG_DBG("mem_malloc from dynamic slab %d slab 1 : ptr %p ",
                            slab_index,
                            (void *)((uint32_t)alloc_block & (~ALL_SLAB_BUSY)));
#endif
                break;
            }
        }
    }
    return (void *)((uint32_t)alloc_block & (~ALL_SLAB_BUSY));
}

static bool free_to_dynamic_slab(struct slabs_info *slabs, void *ptr)
{
    bool found = false;
    bool free_to_parent = false;
    int slab_index = 0;
    struct dynamic_slab_info *slab = NULL;
    sys_slist_t *dynamic_list = NULL;
    void *base_addr = NULL;
    uint16_t block_size = 0;

found_begin:
    for (slab_index = 0; slab_index < slabs->slab_num; slab_index++) {
        sys_snode_t *node, *tmp;

        dynamic_list = slabs->slabs[slab_index].dynamic_slab_list;
        SYS_SLIST_FOR_EACH_NODE_SAFE(dynamic_list, node, tmp) {
            slab = DYNAMIC_SLAB_INFO(node);
            base_addr = (void *)((uint32_t)slab->base_addr
                & (~ALL_SLAB_BUSY));
            /* a dynamic node covers two blocks of this slab's size */
            block_size = slabs->slabs[slab_index].block_size * 2;
            if (base_addr <= ptr
                && (base_addr + block_size) > ptr) {
                found = true;
                goto found_end;
            } else {
                slab = NULL;
            }
        }
    }
found_end:
    if (slab == NULL) {
        if (!free_to_parent) {
            return false;
        } else {
            /* the pointer targets a stable slab: free it to the parent */
#ifdef DEBUG
            SYS_LOG_DBG("target to stable slab, mem_free to parent %d, ptr %p ",
                        slab_index, ptr);
#endif
            if (!free_to_stable_slab(slabs, ptr)) {
                SYS_LOG_ERR("mem_free to stable slab"
                            ": ptr %p failed ", ptr);
            }
            return true;
        }
    }
    /* freeing block 0: clear its busy flag */
    if (base_addr == ptr
        && ((uint32_t)slab->base_addr & SLAB0_BUSY) == SLAB0_BUSY) {
        slab->base_addr =
            (void *)((uint32_t)slab->base_addr & (~SLAB0_BUSY));
#ifdef DEBUG
        SYS_LOG_DBG("mem_free to dynamic slab %d slab 0: ptr %p ", slab_index, ptr);
#endif
    }
    /* freeing block 1 (at base_addr + half the node size): clear its busy flag */
    if ((base_addr + block_size / 2) == ptr
        && ((uint32_t)slab->base_addr & SLAB1_BUSY) == SLAB1_BUSY) {
        slab->base_addr =
            (void *)((uint32_t)slab->base_addr & (~SLAB1_BUSY));
#ifdef DEBUG
        SYS_LOG_DBG("mem_free to dynamic slab %d slab 1: ptr %p ", slab_index, ptr);
#endif
    }
    /*
     * If both blocks are free, remove this node and return
     * the memory to the parent slab.
     */
    if (((uint32_t)slab->base_addr & ALL_SLAB_BUSY) == 0) {
        free_to_parent = true;
        ptr = slab->base_addr;
        sys_slist_find_and_remove(dynamic_list, (sys_snode_t *)slab);
        if (!free_to_stable_slab(slabs, slab)) {
            SYS_LOG_ERR("mem_free to stable slab "
                        ": ptr %p failed ", ptr);
        }
#ifdef DEBUG
        SYS_LOG_DBG("all blocks freed, removing this node"
                    " %d slab all : ptr %p ", slab_index, ptr);
#endif
        goto found_begin;
    } else {
        return true;
    }
}
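
/*
 * Build a split chain: take one block from @parent_slab_index, then split
 * it level by level (each level's bookkeeping node is itself allocated
 * from slab 0) until a half-block of @son_slab_index's size is available.
 */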
static bool generate_dynamic_slab(struct slabs_info *slabs,
                                  uint16_t parent_slab_index, uint16_t son_slab_index)
{
    struct dynamic_slab_info *new_slab = NULL;
    void *base_addr = NULL;
    uint16_t iteration_index = parent_slab_index;

#ifdef DEBUG
    SYS_LOG_DBG("slab %d is small, get memory from slab %d ",
                son_slab_index, parent_slab_index);
#endif
    base_addr = malloc_from_stable_slab(slabs, parent_slab_index);
    if (base_addr == NULL) {
        SYS_LOG_ERR("generate dynamic slab failed because mem_malloc"
                    " from parent slab %d failed", parent_slab_index);
        return false;
    }
begin_new_dynamic_slab:
    iteration_index--;
    new_slab = (struct dynamic_slab_info *)malloc_from_stable_slab(slabs, 0);
    if (new_slab != NULL) {
        new_slab->base_addr = base_addr;
    } else {
        SYS_LOG_ERR("slab 0 is small, can't mem_malloc dynamic_slab_node");
        goto err_end;
    }
#ifdef DEBUG
    SYS_LOG_DBG("new dynamic slab add to slab %d base_addr %p",
                iteration_index, base_addr);
#endif
    sys_slist_append(slabs->slabs[iteration_index].dynamic_slab_list,
                     (sys_snode_t *)new_slab);
    if (iteration_index > son_slab_index) {
        base_addr = malloc_from_dynamic_slab(slabs, iteration_index);
        if (base_addr != NULL) {
            memset(base_addr, 0, slabs->slabs[iteration_index].block_size);
        }
        goto begin_new_dynamic_slab;
    }
    return true;
err_end:
    if ((iteration_index + 1) == parent_slab_index) {
        free_to_stable_slab(slabs, base_addr);
    } else {
        free_to_dynamic_slab(slabs, base_addr);
    }
    return false;
}

static int find_from_dynamic_slab(struct slabs_info *slabs, int slab_index)
{
    sys_snode_t *node, *tmp;
    struct dynamic_slab_info *slab = NULL;
    sys_slist_t *dynamic_list = NULL;

    if (slab_index >= 0 && slab_index < slabs->slab_num) {
        dynamic_list = slabs->slabs[slab_index].dynamic_slab_list;
        SYS_SLIST_FOR_EACH_NODE_SAFE(dynamic_list, node, tmp) {
            slab = DYNAMIC_SLAB_INFO(node);
            if (((uint32_t)slab->base_addr & ALL_SLAB_BUSY) != ALL_SLAB_BUSY) {
                return slab_index;
            }
        }
    }
    return slabs->slab_num;
}
#endif
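
/*
 * Pick a slab for a request of @num_bytes: the first slab whose block size
 * fits and which still has free blocks wins. With CONFIG_USED_DYNAMIC_SLAB,
 * when the first-fit slab is exhausted and the only slab with room is more
 * than twice as large, a dynamic slab is generated so the big block can be
 * split instead of handed out whole.
 */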
static int find_slab_index(struct slabs_info *slabs, unsigned int num_bytes)
{
    uint8_t i = 0;
    uint8_t first_fit_slab = slabs->slab_num;
    uint8_t target_slab_index = slabs->slab_num;
    uint8_t flag = 1;

    for (i = 0; i < slabs->slab_num; i++) {
        if (slabs->slabs[i].block_size >= num_bytes) {
            target_slab_index = i;
            if (first_fit_slab == slabs->slab_num) {
                first_fit_slab = i;
            }
            if (k_mem_slab_num_free_get(
                    slabs->slabs[target_slab_index].slab) != 0) {
                break;
            } else {
                if (flag) {
                    SYS_LOG_DBG("%d ne", slabs->slabs[i].block_size);
                    flag = 0;
                }
            }
        }
    }
#ifdef CONFIG_USED_DYNAMIC_SLAB
    if (first_fit_slab < slabs->slab_num
        && target_slab_index < slabs->slab_num
        && first_fit_slab != target_slab_index) {
#ifdef DEBUG
        SYS_LOG_DBG("first_fit_slab %d, target_slab_index %d ",
                    first_fit_slab, target_slab_index);
#endif
        if (slabs->slabs[target_slab_index].block_size
            <= slabs->slabs[first_fit_slab].block_size * 2) {
            return target_slab_index;
        }
        if (find_from_dynamic_slab(slabs, first_fit_slab) != first_fit_slab) {
            if (!generate_dynamic_slab(slabs,
                                       target_slab_index, first_fit_slab)) {
                target_slab_index = slabs->slab_num;
            }
        }
        if (target_slab_index != slabs->slab_num) {
            target_slab_index = first_fit_slab;
        }
    }
#endif
    return target_slab_index;
}

static void dump_mem_hex(struct slabs_info *slabs, int slab_index)
{
    int length = slabs->slabs[slab_index].block_size *
        slabs->slabs[slab_index].block_num;
    unsigned char *addr = slabs->slabs[slab_index].slab_base;
    int num = 0;

    os_printk("slab id : %d base addr: %p , length %d \n",
              slab_index, addr, length);
    os_printk("free_list %p next freelist %p \n",
              slabs->slabs[slab_index].slab->free_list,
              *(char **)(slabs->slabs[slab_index].slab->free_list));
    for (int i = 0; i < length; i++) {
        if ((i % 16) == 0) {
            os_printk("\n");
        }
        os_printk(" %2x ", addr[i]);
    }
    os_printk("\n");

    void *free_node = slabs->slabs[slab_index].slab->free_list;

    while (free_node != NULL) {
        os_printk("node[%d] %p \n", num++, free_node);
        free_node = *(char **)free_node;
    }
    if (k_mem_slab_num_free_get(slabs->slabs[slab_index].slab) != num) {
        os_printk("mem lost num %d , mem_free %d \n",
                  num,
                  k_mem_slab_num_free_get(slabs->slabs[slab_index].slab));
    }
}
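
/*
 * Allocate @num_bytes from @slabs with interrupts locked for the duration.
 * The returned block is zeroed; NULL is returned when no fitting block is
 * available.
 */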
void *mem_slabs_malloc(struct slabs_info *slabs, unsigned int num_bytes)
{
    void *block_ptr = NULL;
    unsigned int key = irq_lock();
    int slab_index = find_slab_index(slabs, num_bytes);

#ifdef DEBUG
    SYS_LOG_DBG("Memory mem_malloc num_bytes %d bytes begin", num_bytes);
#endif
    if (slab_index >= slabs->slab_num) {
        SYS_LOG_ERR("Memory allocation failed, block too big num_bytes %d ",
                    num_bytes);
#ifdef DEBUG
        dump_stack();
#endif
        goto END;
    }
#ifdef CONFIG_USED_DYNAMIC_SLAB
    block_ptr = malloc_from_dynamic_slab(slabs, slab_index);
    if (block_ptr != NULL) {
        memset(block_ptr, 0, num_bytes);
        goto END;
    }
#endif
    block_ptr = malloc_from_stable_slab(slabs, slab_index);
    if (block_ptr != NULL) {
        if (slabs->max_size[slab_index] < num_bytes) {
            slabs->max_size[slab_index] = num_bytes;
        }
        goto END;
    }
END:
#ifdef DEBUG
    SYS_LOG_INF("Memory allocation num_bytes %d : block_ptr %p slab_index %d",
                num_bytes, block_ptr, slab_index);
#endif
    if (block_ptr == NULL) {
        // dump_stack();
        SYS_LOG_ERR("Memory allocation failed, num_bytes %d ", num_bytes);
    }
    irq_unlock(key);
    return block_ptr;
}

void mem_slabs_free(struct slabs_info *slabs, void *ptr)
{
    unsigned int key = irq_lock();

#ifdef DEBUG
    SYS_LOG_DBG("Memory Free ptr %p begin", ptr);
#endif
    if (ptr != NULL) {
#ifdef CONFIG_USED_DYNAMIC_SLAB
        if (free_to_dynamic_slab(slabs, ptr)) {
#ifdef DEBUG
            SYS_LOG_INF("Memory Free to dynamic slab ptr %p ", ptr);
#endif
            goto exit;
        }
#endif
        if (!free_to_stable_slab(slabs, ptr)) {
#ifdef DEBUG
            dump_stack();
#endif
            SYS_LOG_ERR("Memory Free ERR ptr %p ", ptr);
            goto exit;
        }
#ifdef DEBUG
        SYS_LOG_INF("Memory Free ptr %p ", ptr);
#endif
    } else {
        SYS_LOG_ERR("Memory Free ERR NULL ");
    }
exit:
    irq_unlock(key);
}

void slabs_mem_init(struct slabs_info *slabs)
{
    for (int i = 0; i < slabs->slab_num; i++) {
        k_mem_slab_init(slabs->slabs[i].slab,
                        slabs->slabs[i].slab_base,
                        slabs->slabs[i].block_size,
                        slabs->slabs[i].block_num);
#ifdef CONFIG_USED_DYNAMIC_SLAB
        sys_slist_init(slabs->slabs[i].dynamic_slab_list);
#endif
    }
}

void mem_slabs_dump(struct slabs_info *slabs, int index)
{
    int total_used = 0;
    int total_size = 0;

    for (int i = 0; i < slabs->slab_num; i++) {
        total_used += slabs->slabs[i].block_size *
            k_mem_slab_num_used_get(slabs->slabs[i].slab);
        total_size += slabs->slabs[i].block_size *
            slabs->slabs[i].block_num;
    }
    if (slabs->slab_flag == SYSTEM_MEM_SLAB) {
        os_printk("system total mem : %d bytes , used mem %d bytes\n",
                  total_size, total_used);
    } else {
        os_printk("app total mem : %d bytes , used mem %d bytes \n",
                  total_size, total_used);
    }
    for (int i = 0; i < slabs->slab_num; i++) {
        os_printk(
            " mem slab %d : block size %4d : used %4d , mem_free %4d,"
            " max used %4d, max size %4d\n",
            i,
            slabs->slabs[i].block_size,
            k_mem_slab_num_used_get(slabs->slabs[i].slab),
            k_mem_slab_num_free_get(slabs->slabs[i].slab),
            slabs->max_used[i], slabs->max_size[i]);
    }
#ifdef CONFIG_USED_DYNAMIC_SLAB
    for (int i = 0; i < slabs->slab_num; i++) {
        sys_snode_t *node, *tmp;
        sys_slist_t *dynamic_list = slabs->slabs[i].dynamic_slab_list;

        os_printk("slab %d dynamic slab list: \n", i);
        SYS_SLIST_FOR_EACH_NODE_SAFE(dynamic_list, node, tmp) {
            struct dynamic_slab_info *slab = DYNAMIC_SLAB_INFO(node);

            os_printk("%8x : %8x %8x \n",
                      ((uint32_t)slab->base_addr & (~ALL_SLAB_BUSY)),
                      ((uint32_t)slab->base_addr & (SLAB0_BUSY)),
                      ((uint32_t)slab->base_addr & (SLAB1_BUSY)));
        }
    }
#endif
    if (index >= 0 && index < slabs->slab_num) {
        dump_mem_hex(slabs, index);
    }
}

int mem_slab_init(void)
{
    slabs_mem_init((struct slabs_info *)&sys_slab);
#ifndef APP_USED_SYSTEM_SLAB
    slabs_mem_init((struct slabs_info *)&app_slab);
#endif
    return 0;
}
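
/*
 * Usage sketch (illustrative only, not part of the original module):
 * allocating and releasing a buffer through the system slab pool. Which
 * slab actually serves the request depends on the CONFIG_SLAB*_BLOCK_SIZE
 * values from the build configuration; mem_slab_usage_example is a
 * hypothetical name.
 */
#if 0
static void mem_slab_usage_example(void)
{
    /* request 64 bytes; the first slab whose block size fits serves it */
    void *buf = mem_slabs_malloc((struct slabs_info *)&sys_slab, 64);

    if (buf != NULL) {
        /* the block is zeroed on allocation; use it, then return it */
        mem_slabs_free((struct slabs_info *)&sys_slab, buf);
    }

    /* print per-slab statistics; pass a valid index to hex-dump that slab */
    mem_slabs_dump((struct slabs_info *)&sys_slab, -1);
}
#endif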