show_thread.c

/*
 * Copyright (c) 2019 Actions Semiconductor Co., Ltd
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file thread stack show and backtrace interface
 */

#include <kernel.h>
#include <string.h>
#include <arch/arm/aarch32/cortex_m/cmsis.h>
#include <kernel_internal.h>
#include <drivers/rtc.h>
#include <soc_pmu.h>

#define th_name(th) (k_thread_name_get(th) != NULL ? k_thread_name_get(th) : "NA")

#ifdef CONFIG_ARM_UNWIND
void unwind_backtrace(struct k_thread *th);

static void thread_show_info(const struct k_thread *cthread, void *user_data)
{
	unwind_backtrace((struct k_thread *)cthread);
}
#else
#ifndef EXC_RETURN_FTYPE
/* bit [4] allocate stack for floating-point context: 0=done, 1=skipped */
#define EXC_RETURN_FTYPE (0x00000010UL)
#endif

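/*
 * If an exception pushed a floating-point context for this thread
 * (EXC_RETURN FType bit clear), the basic frame is followed by the
 * extended FP frame; return the extra bytes to skip over it.
 */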
static uint32_t sh_adjust_sp_by_fpu(const struct k_thread *th)
{
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
	if ((th->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0) {
		return 18 * 4; /* s0-s15, fpscr, reserved word */
	}
#endif
	return 0;
}

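/*
 * Return the thread's CPU load as a percentage of total execution cycles
 * (the raw cycle count is returned through exec_cycles). Returns 111, an
 * impossible percentage, when the runtime stats cannot be read.
 */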
static uint8_t bk_th_get_cpu_load(struct k_thread *ph, unsigned int *exec_cycles)
{
#ifdef CONFIG_THREAD_RUNTIME_STATS
	int ret = 0;
	k_thread_runtime_stats_t rt_stats_thread;
	k_thread_runtime_stats_t rt_stats_all;

	if (ph == NULL) {
		return 0;
	}
	if (k_thread_runtime_stats_get(ph, &rt_stats_thread) != 0) {
		ret++;
	}
	if (k_thread_runtime_stats_all_get(&rt_stats_all) != 0) {
		ret++;
	}
	if (ret == 0) {
		*exec_cycles = rt_stats_thread.execution_cycles;
		return (rt_stats_thread.execution_cycles * 100U) / rt_stats_all.execution_cycles;
	} else {
		return 111; /* error: impossible percentage */
	}
#else
	return 0;
#endif
}

extern K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

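/*
 * Report the stack size and used percentage for a thread, or for the
 * CPU 0 interrupt stack when ph is NULL.
 */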
static void bk_th_get_stack_info(struct k_thread *ph, unsigned int *stack_sz, unsigned int *used_per)
{
#if defined(CONFIG_THREAD_STACK_INFO)
	uint8_t *buf;
	size_t size, unused, i;
	int ret;

	if (ph == NULL) { /* IRQ stack */
		buf = Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]);
		size = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]);
		unused = 0;
		/* count the 0xAA fill pattern still untouched at the stack top */
		for (i = 0; i < size; i++) {
			if (buf[i] == 0xAAU) {
				unused++;
			} else {
				break;
			}
		}
	} else {
		size = ph->stack_info.size;
		ret = k_thread_stack_space_get(ph, &unused);
		if (ret) {
			return;
		}
	}
	*stack_sz = size;
	*used_per = ((size - unused) * 100U) / size;
#endif
}

#ifdef CONFIG_DEBUG_COREDUMP
#include <debug/coredump.h>
#include <stdio.h>

#define PC_MAX_NUM	16
#define THREAD_MAX_NUM	32
#define BK_TH_MAGIC	0x68

struct bk_th_info {
	uint8_t th_magic;
	uint8_t th_state;
	uint8_t th_run :1;
	uint8_t pc_num :7;
	uint8_t cpu_load_per;
	uint32_t execution_cycles;
	uint32_t stack_size:24;
	uint32_t stack_used_per:8;
	char th_name[CONFIG_THREAD_MAX_NAME_LEN];
	uint32_t th_pc[PC_MAX_NUM];
};

#define BK_TRACE_MAGIC 0xBA45
struct bk_trace_info {
	uint16_t bk_magic;
	uint8_t bk_magic1;
	uint8_t thread_num;
	uint32_t time;
	struct bk_th_info thread[THREAD_MAX_NUM];
};

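/*
 * Fault-time snapshot: an 8-byte header (magics, thread count, RTC
 * timestamp) followed by one bk_th_info entry per recorded thread.
 * On no-PSRAM parts it lives in a dedicated noinit section, presumably
 * so the record survives a warm reset.
 */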
#ifdef CONFIG_SOC_NO_PSRAM
__in_section_unique(trace.g_bk_trace.noinit)
#endif
static struct bk_trace_info g_bk_trace;

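/* Reset the trace record and stamp it with the current RTC time. */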
static void bk_trace_init(void)
{
	memset(&g_bk_trace, 0, sizeof(struct bk_trace_info));
	g_bk_trace.bk_magic = BK_TRACE_MAGIC;
	g_bk_trace.bk_magic1 = BK_TH_MAGIC;
	g_bk_trace.thread_num = 0;
	g_bk_trace.time = 0;
#ifdef CONFIG_RTC_ACTS
	const struct device *rtc = device_get_binding(CONFIG_RTC_0_NAME);
	struct rtc_time tm;

	if (rtc && !rtc_get_time(rtc, &tm)) {
		rtc_tm_to_time(&tm, &g_bk_trace.time);
	}
#endif
}

static struct bk_th_info *bk_get_cur_th_info(void)
{
	if (g_bk_trace.thread_num < THREAD_MAX_NUM) {
		return &g_bk_trace.thread[g_bk_trace.thread_num];
	}
	return NULL;
}

static void bk_th_info_init(struct k_thread *ph, const char *name, uint8_t stat, uint8_t b_run)
{
	struct bk_th_info *th;
	unsigned int stack_sz = 0, stack_used = 0;

	th = bk_get_cur_th_info();
	if (th == NULL) {
		return;
	}
	th->th_magic = BK_TH_MAGIC;
	th->th_run = b_run;
	th->th_state = stat;
	th->cpu_load_per = bk_th_get_cpu_load(ph, &th->execution_cycles);
	bk_th_get_stack_info(ph, &stack_sz, &stack_used);
	th->stack_size = stack_sz;
	th->stack_used_per = stack_used;
	strncpy(th->th_name, name, CONFIG_THREAD_MAX_NAME_LEN);
	th->th_name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0'; /* strncpy may not terminate */
	th->pc_num = 0;
}

static void bk_th_info_set_pc(uint32_t pc)
{
	struct bk_th_info *th;

	th = bk_get_cur_th_info();
	if (th == NULL) {
		return;
	}
	if (th->pc_num < PC_MAX_NUM) {
		th->th_pc[th->pc_num++] = pc;
	}
}

static void bk_th_info_end(void)
{
	if (g_bk_trace.thread_num < THREAD_MAX_NUM) {
		g_bk_trace.thread_num++;
	}
}

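/*
 * Dump the trace record into the coredump: the 8-byte header plus only
 * the thread entries that were actually recorded.
 */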
void bk_th_coredump(void)
{
	uintptr_t start = (uintptr_t)&g_bk_trace;

	coredump_memory_dump(start, start + 8 + g_bk_trace.thread_num * sizeof(struct bk_th_info));
}

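/*
 * Restore the trace record from coredump data: returns 0 when start does
 * not address g_bk_trace, otherwise consumes the chunk (clamped to the
 * size of the record) and returns 1.
 */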
int bk_th_coredump_set(uint32_t start, uint8_t *buf, uint32_t offset, int len)
{
	uint32_t addr = (uintptr_t)&g_bk_trace;
	uint32_t size = sizeof(g_bk_trace);
	uint8_t *pb = (uint8_t *)&g_bk_trace;

	if (start != addr) {
		return 0;
	}
	if (offset > size) {
		return 1; /* already past the end of the record */
	}
	if (offset + len <= size) {
		memcpy(pb + offset, buf, len);
	} else {
		memcpy(pb + offset, buf, size - offset); /* clamp the tail */
	}
	return 1;
}

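/*
 * Pretty-print the recorded trace through out_cb: the snapshot time, then
 * one line per thread followed by a ready-to-paste addr2line command for
 * the PCs captured from its stack.
 */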
int bk_th_coredump_out(int (*out_cb)(uint8_t *data, uint32_t len))
{
	struct rtc_time *tm, t;
	struct bk_th_info *th;
	char buf[164];
	int i, j, len, use_size, all = 0;

	if (g_bk_trace.bk_magic != BK_TRACE_MAGIC) {
		len = sprintf(buf, "th magic fail: 0x%x\r\n", g_bk_trace.bk_magic);
		out_cb((uint8_t *)buf, len);
		return len;
	}
	tm = &t;
	rtc_time_to_tm(g_bk_trace.time, tm);
	len = snprintf(buf, 127, "time: %02d-%02d-%02d %02d:%02d:%02d:%03d, num=%d\r\n",
		       1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min,
		       tm->tm_sec, tm->tm_ms, g_bk_trace.thread_num);
	out_cb((uint8_t *)buf, len);
	all += len;
	if (g_bk_trace.thread_num > THREAD_MAX_NUM) {
		g_bk_trace.thread_num = THREAD_MAX_NUM;
	}
	for (i = 0; i < g_bk_trace.thread_num; i++) {
		th = &g_bk_trace.thread[i];
		use_size = th->stack_size * th->stack_used_per / 100;
		len = snprintf(buf, 159, "%s %d thread=%s, state=%d, stack size %u usage %d (%u %%), cycle=%u cpuload=%u %%\r\n",
			       (th->th_run ? "*" : " "), i, th->th_name, th->th_state,
			       th->stack_size, use_size, th->stack_used_per,
			       th->execution_cycles, th->cpu_load_per);
		out_cb((uint8_t *)buf, len);
		all += len;
		if (th->th_magic != BK_TH_MAGIC) {
			len = snprintf(buf, 159, "--err magic fail---\r\n");
			out_cb((uint8_t *)buf, len);
			all += len;
		} else {
			len = snprintf(buf, 159, "%%ZEPHYR_TOOLS%%\\gcc-arm-none-eabi-9-2020-q2-update-win32\\bin\\arm-none-eabi-addr2line.exe -e zephyr.elf -a -f ");
			out_cb((uint8_t *)buf, len);
			all += len;
			if (th->pc_num > PC_MAX_NUM) {
				th->pc_num = PC_MAX_NUM;
			}
			len = 0;
			for (j = 0; j < th->pc_num; j++) {
				len += snprintf(buf + len, 159 - len, "%08x ", th->th_pc[j]);
				if (len >= 159) {
					break;
				}
			}
			buf[len++] = '\r';
			buf[len++] = '\n';
			buf[len] = 0;
			out_cb((uint8_t *)buf, len);
			all += len;
		}
	}
	return all;
}

#else
static void bk_trace_init(void) {}
static void bk_th_info_init(struct k_thread *th, const char *name, uint8_t stat, uint8_t b_run) {}
static void bk_th_info_set_pc(uint32_t pc) {}
static void bk_th_info_end(void) {}
#endif

static void sh_dump_mem(const char *msg, uint32_t mem_addr, int len)
{
	int i;
	uint32_t *ptr = (uint32_t *)mem_addr;

	printk("%s=0x%08x\n", msg, mem_addr);
	for (i = 0; i < len / 4; i++) {
		printk("%08x ", ptr[i]);
		if (i % 8 == 7) {
			printk("\n");
		}
	}
	printk("\n\n");
}

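/*
 * Heuristic backtrace: emit an addr2line command seeded with the
 * exception frame's pc/lr (when available), then scan the stack from
 * sp_start to stk_end and treat every word inside 0x10000000-0x10200000,
 * presumably this SoC's executable region, as a candidate return address.
 */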
static void sh_backtrace_print_elx(uint32_t sp, uint32_t sp_start, uint32_t stk_end, const z_arch_esf_t *esf)
{
	uint32_t where = sp_start;
	uint32_t pc = 0;

	if (sp > stk_end) {
		printk("stack overflow SP<%08x> at [START <%08x>] from [END <%08x>]\n", sp, sp_start, stk_end);
		return;
	}
	printk("\n%%ZEPHYR_TOOLS%%\\gcc-arm-none-eabi-9-2020-q2-update-win32\\bin\\arm-none-eabi-addr2line.exe -e zephyr.elf -a -f ");
	if (esf) {
		printk("%08x %08x ", esf->basic.pc, esf->basic.lr);
		bk_th_info_set_pc(esf->basic.pc);
		bk_th_info_set_pc(esf->basic.lr);
	}
	while (where < stk_end) {
		pc = *(uint32_t *)where;
		if ((pc > 0x10000000) && (pc < 0x10200000)) {
			printk("%08x ", pc);
			bk_th_info_set_pc(pc);
		}
		where += 4;
	}
	printk("\n\n");
	sh_dump_mem("sp", sp, 512);
}

static void sh_print_thread_info(struct k_thread *th)
{
	const char *tname;
	unsigned int stack_sz = 0, stack_used_per = 0, stack_used;
	unsigned int cpu_load_per, cpu_cycle = 0;

	bk_th_get_stack_info(th, &stack_sz, &stack_used_per);
	cpu_load_per = bk_th_get_cpu_load(th, &cpu_cycle);
	stack_used = stack_sz * stack_used_per / 100;
	tname = k_thread_name_get(th);
	printk("\n %s%p %s state: %s, stack size %u usage %u (%u %%), cycle=%u cpuload=%u %%, backtrace\n",
	       ((th == k_current_get()) ? "*" : " "), th, (tname ? tname : "NA"),
	       k_thread_state_str(th), stack_sz, stack_used, stack_used_per, cpu_cycle, cpu_load_per);
	bk_th_info_init(th, (tname ? tname : "NA"), th->base.thread_state, (th == k_current_get()));
}

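/*
 * Backtrace one thread. The running thread is scanned from the live PSP;
 * a switched-out thread is scanned from just above the exception frame
 * saved at callee_saved.psp (r0-r3, r12, lr, pc, xpsr, plus the FP
 * context when one was stacked).
 */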
static void sh_backtrace(const struct k_thread *th)
{
	const struct _callee_saved *callee;
	const z_arch_esf_t *esf;
	uint32_t sp, sp_end;

	if (th == NULL) {
		th = k_current_get();
	}
	sh_print_thread_info((struct k_thread *)th);
	sp_end = th->stack_info.start + th->stack_info.size;
	if (th == k_current_get()) {
		sp = __get_PSP();
		if (k_is_in_isr()) {
			/* an exception frame was pushed onto the thread stack */
			esf = (z_arch_esf_t *)sp;
			sh_backtrace_print_elx(sp, sp + 32 + sh_adjust_sp_by_fpu(th), sp_end, esf);
		} else {
			sh_backtrace_print_elx(sp, sp, sp_end, NULL);
		}
	} else {
		callee = &th->callee_saved;
		esf = (z_arch_esf_t *)callee->psp;
		sp = callee->psp + 32 + sh_adjust_sp_by_fpu(th); /* skip r0-r3, r12, lr, pc, xpsr */
		sh_backtrace_print_elx(callee->psp, sp, sp_end, esf);
	}
	bk_th_info_end();
}

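/*
 * Backtrace the interrupt stack after a fault inside an ISR: find the
 * exception frame by scanning up from the current MSP for a 32-byte
 * match against esf, then backtrace from just above it.
 */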
static void sh_backtrace_irq(const z_arch_esf_t *esf)
{
	int i;
	uint32_t sp;
	unsigned int stack_sz = 0, stack_used_per = 0, stack_used;

	sp = __get_MSP();
	/* search up to 128 words for the saved irq sp context */
	for (i = 0; i < 128; i++) {
		if (memcmp((void *)sp, esf, 32) == 0) {
			break;
		}
		sp += 4;
	}
	if (i == 128) {
		printk("irq sp not found\n");
		return;
	}
	bk_th_get_stack_info(NULL, &stack_sz, &stack_used_per);
	stack_used = stack_sz * stack_used_per / 100;
	printk("show irq stack, stack size %u usage %u (%u %%):\n", stack_sz, stack_used, stack_used_per);
	bk_th_info_init(NULL, "irq:", 0, 0);
	sh_backtrace_print_elx(sp, sp + 32, sp + 1024, esf);
	bk_th_info_end();
}

static void stack_dump(const struct k_thread *th)
{
#if 0 /* full register dump, disabled by default */
	const z_arch_esf_t *esf;
	const struct _callee_saved *callee = &th->callee_saved;

	esf = (z_arch_esf_t *)callee->psp;
	printk("############ thread: %p info############\n", th);
	printk("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x\n",
	       esf->basic.a1, esf->basic.a2, esf->basic.a3);
	printk("r3/a4: 0x%08x r12/ip: 0x%08x r14/lr: 0x%08x\n",
	       esf->basic.a4, esf->basic.ip, esf->basic.lr);
	printk("r4/v1: 0x%08x r5/v2: 0x%08x r6/v3: 0x%08x\n",
	       callee->v1, callee->v2, callee->v3);
	printk("r7/v4: 0x%08x r8/v5: 0x%08x r9/v6: 0x%08x\n",
	       callee->v4, callee->v5, callee->v6);
	printk("r10/v7: 0x%08x r11/v8: 0x%08x psp: 0x%08x\n",
	       callee->v7, callee->v8, callee->psp);
	printk("xpsr: 0x%08x\n", esf->basic.xpsr);
	printk("(r15/pc): 0x%08x\n", esf->basic.pc);
#else
	ARG_UNUSED(th);
#endif
}

static void thread_show_info(const struct k_thread *cthread, void *user_data)
{
	ARG_UNUSED(user_data);

	stack_dump(cthread);
	sh_backtrace(cthread);
}

/* fatal error handler: dump the irq backtrace (if nested) and every thread */
void k_sys_fatal_error_handler(unsigned int reason,
			       const z_arch_esf_t *esf)
{
	ARG_UNUSED(reason);

	soc_pmu_check_onoff_reset_func();
	bk_trace_init();
	if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
		sh_backtrace_irq(esf);
	}
	printk("stop system\n");
	k_thread_foreach(thread_show_info, NULL);
}
#endif

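/*
 * Debug entry point: print the stack backtrace of every thread. DMA
 * printk is switched off for the duration, presumably so the dump goes
 * out synchronously and completely.
 */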
void show_stack(void)
{
	struct k_thread *cur = (struct k_thread *)k_current_get();

#ifdef CONFIG_ACTIONS_PRINTK_DMA
	printk_dma_switch(0);
#endif
	printk("****show thread stack cur=%s ****\n", th_name(cur));
	k_thread_foreach(thread_show_info, NULL);
#ifdef CONFIG_ACTIONS_PRINTK_DMA
	printk_dma_switch(1);
#endif
}