kernel_service.c

/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <sys/printk.h>
#include <shell/shell.h>
#include <init.h>
#include <sys/reboot.h>
#include <debug/stack.h>
#include <string.h>
#include <device.h>
#include <drivers/timer/system_timer.h>
#include <kernel.h>
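
/*
 * Shell module implementing the "kernel" command group: version, uptime,
 * hardware cycle counter, thread and stack usage reporting, and reboot.
 */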

static int cmd_kernel_version(const struct shell *shell,
                              size_t argc, char **argv)
{
        uint32_t version = sys_kernel_version_get();

        ARG_UNUSED(argc);
        ARG_UNUSED(argv);

        shell_print(shell, "Zephyr version %d.%d.%d",
                    SYS_KERNEL_VER_MAJOR(version),
                    SYS_KERNEL_VER_MINOR(version),
                    SYS_KERNEL_VER_PATCHLEVEL(version));
        return 0;
}

static int cmd_kernel_uptime(const struct shell *shell,
                             size_t argc, char **argv)
{
        ARG_UNUSED(argc);
        ARG_UNUSED(argv);

        shell_print(shell, "Uptime: %u ms", k_uptime_get_32());
        return 0;
}

static int cmd_kernel_cycles(const struct shell *shell,
                             size_t argc, char **argv)
{
        ARG_UNUSED(argc);
        ARG_UNUSED(argv);

        shell_print(shell, "cycles: %u hw cycles", k_cycle_get_32());
        return 0;
}

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) && \
        defined(CONFIG_THREAD_MONITOR)
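/*
 * k_thread_foreach() callback used by "kernel threads": prints one thread's
 * name, options, priority, state, runtime statistics (when enabled) and
 * stack usage.
 */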
static void shell_tdata_dump(const struct k_thread *cthread, void *user_data)
{
        struct k_thread *thread = (struct k_thread *)cthread;
        const struct shell *shell = (const struct shell *)user_data;
        unsigned int pcnt;
        size_t unused;
        size_t size = thread->stack_info.size;
        const char *tname;
        int ret;

#ifdef CONFIG_THREAD_RUNTIME_STATS
        k_thread_runtime_stats_t rt_stats_thread;
        k_thread_runtime_stats_t rt_stats_all;
#endif

        tname = k_thread_name_get(thread);

        shell_print(shell, "%s%p %-10s",
                    (thread == k_current_get()) ? "*" : " ",
                    thread,
                    tname ? tname : "NA");
        shell_print(shell, "\toptions: 0x%x, priority: %d timeout: %d",
                    thread->base.user_options,
                    thread->base.prio,
                    thread->base.timeout.dticks);
        shell_print(shell, "\tstate: %s, entry: %p", k_thread_state_str(thread),
                    thread->entry);

#ifdef CONFIG_THREAD_RUNTIME_STATS
        ret = 0;

        if (k_thread_runtime_stats_get(thread, &rt_stats_thread) != 0) {
                ret++;
        }

        if (k_thread_runtime_stats_all_get(&rt_stats_all) != 0) {
                ret++;
        }

        if (ret == 0) {
                pcnt = (rt_stats_thread.execution_cycles * 100U) /
                       rt_stats_all.execution_cycles;

                /*
                 * z_prf() does not support %llu by default unless
                 * CONFIG_MINIMAL_LIBC_LL_PRINTF=y. So do conditional
                 * compilation to avoid blindly enabling this kconfig
                 * so it won't increase RAM/ROM usage too much on 32-bit
                 * targets.
                 */
#ifdef CONFIG_64BIT
                shell_print(shell, "\tTotal execution cycles: %llu (%u %%)",
                            rt_stats_thread.execution_cycles,
                            pcnt);
#else
                shell_print(shell, "\tTotal execution cycles: %lu (%u %%)",
                            (uint32_t)rt_stats_thread.execution_cycles,
                            pcnt);
#endif
        } else {
                shell_print(shell, "\tTotal execution cycles: ? (? %%)");
        }
#endif

        ret = k_thread_stack_space_get(thread, &unused);
        if (ret) {
                shell_print(shell,
                            "Unable to determine unused stack size (%d)\n",
                            ret);
        } else {
                /* Calculate the real size reserved for the stack */
                pcnt = ((size - unused) * 100U) / size;

                shell_print(shell,
                            "\tstack size %zu, unused %zu, usage %zu / %zu (%u %%)\n",
                            size, unused, size - unused, size, pcnt);
        }
}

static int cmd_kernel_threads(const struct shell *shell,
                              size_t argc, char **argv)
{
        ARG_UNUSED(argc);
        ARG_UNUSED(argv);

        shell_print(shell, "Scheduler: %u since last call", sys_clock_elapsed());
        shell_print(shell, "Threads:");
        k_thread_foreach(shell_tdata_dump, (void *)shell);
        return 0;
}
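
/*
 * k_thread_foreach() callback used by "kernel stacks": prints a one-line
 * stack usage summary for a single thread.
 */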
static void shell_stack_dump(const struct k_thread *thread, void *user_data)
{
        const struct shell *shell = (const struct shell *)user_data;
        unsigned int pcnt;
        size_t unused;
        size_t size = thread->stack_info.size;
        const char *tname;
        int ret;

        ret = k_thread_stack_space_get(thread, &unused);
        if (ret) {
                shell_print(shell,
                            "Unable to determine unused stack size (%d)\n",
                            ret);
                return;
        }

        tname = k_thread_name_get((struct k_thread *)thread);

        /* Calculate the real size reserved for the stack */
        pcnt = ((size - unused) * 100U) / size;

        shell_print(shell,
                    "%p %-10s (real size %zu):\tunused %zu\tusage %zu / %zu (%u %%)",
                    thread,
                    tname ? tname : "NA",
                    size, unused, size - unused, size, pcnt);
}
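
/*
 * Interrupt stacks are defined by the kernel, one per CPU; they are declared
 * here so their usage can be reported alongside thread stacks.
 */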
extern K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
                                   CONFIG_ISR_STACK_SIZE);

static int cmd_kernel_stacks(const struct shell *shell,
                             size_t argc, char **argv)
{
        uint8_t *buf;
        size_t size, unused;

        ARG_UNUSED(argc);
        ARG_UNUSED(argv);

        k_thread_foreach(shell_stack_dump, (void *)shell);

        /* Placeholder logic for interrupt stack until we have better
         * kernel support, including dumping arch-specific exception-related
         * stack buffers.
         */
        for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
                buf = Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
                size = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]);
                unused = 0;

                /* With CONFIG_INIT_STACKS the stack buffer is pre-filled with
                 * 0xAA, so bytes at the low end that still hold the pattern
                 * have never been touched.
                 */
                for (size_t j = 0; j < size; j++) {
                        if (buf[j] == 0xAAU) {
                                unused++;
                        } else {
                                break;
                        }
                }

                shell_print(shell,
                            "%p IRQ %02d (real size %zu):\tunused %zu\tusage %zu / %zu (%zu %%)",
                            &z_interrupt_stacks[i], i, size, unused,
                            size - unused, size,
                            ((size - unused) * 100U) / size);
        }

        return 0;
}
#endif
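
/*
 * Reboot commands. An optional delay (CONFIG_KERNEL_SHELL_REBOOT_DELAY)
 * can give the shell transport time to flush pending output before the
 * system resets.
 */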
#if defined(CONFIG_REBOOT)
static int cmd_kernel_reboot_warm(const struct shell *shell,
                                  size_t argc, char **argv)
{
        ARG_UNUSED(argc);
        ARG_UNUSED(argv);

#if (CONFIG_KERNEL_SHELL_REBOOT_DELAY > 0)
        k_sleep(K_MSEC(CONFIG_KERNEL_SHELL_REBOOT_DELAY));
#endif
        sys_reboot(SYS_REBOOT_WARM);
        return 0;
}

static int cmd_kernel_reboot_cold(const struct shell *shell,
                                  size_t argc, char **argv)
{
        ARG_UNUSED(argc);
        ARG_UNUSED(argv);

#if (CONFIG_KERNEL_SHELL_REBOOT_DELAY > 0)
        k_sleep(K_MSEC(CONFIG_KERNEL_SHELL_REBOOT_DELAY));
#endif
        sys_reboot(SYS_REBOOT_COLD);
        return 0;
}

SHELL_STATIC_SUBCMD_SET_CREATE(sub_kernel_reboot,
        SHELL_CMD(cold, NULL, "Cold reboot.", cmd_kernel_reboot_cold),
        SHELL_CMD(warm, NULL, "Warm reboot.", cmd_kernel_reboot_warm),
        SHELL_SUBCMD_SET_END /* Array terminated. */
);
#endif
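
/*
 * Top-level "kernel" command table; optional entries appear only when the
 * corresponding Kconfig options are enabled.
 */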
SHELL_STATIC_SUBCMD_SET_CREATE(sub_kernel,
        SHELL_CMD(cycles, NULL, "Kernel cycles.", cmd_kernel_cycles),
#if defined(CONFIG_REBOOT)
        SHELL_CMD(reboot, &sub_kernel_reboot, "Reboot.", NULL),
#endif
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) && \
        defined(CONFIG_THREAD_MONITOR)
        SHELL_CMD(stacks, NULL, "List threads stack usage.", cmd_kernel_stacks),
        SHELL_CMD(threads, NULL, "List kernel threads.", cmd_kernel_threads),
#endif
        SHELL_CMD(uptime, NULL, "Kernel uptime.", cmd_kernel_uptime),
        SHELL_CMD(version, NULL, "Kernel version.", cmd_kernel_version),
        SHELL_SUBCMD_SET_END /* Array terminated. */
);

SHELL_CMD_REGISTER(kernel, &sub_kernel, "Kernel commands", NULL);