tracing_core.c

/*
 * Copyright (c) 2019 Intel corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
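
/*
 * Core of the tracing subsystem: selects the compile-time tracing backend,
 * owns the tracing enable/disable state, and hands captured tracing data
 * from the tracing buffer to that backend via tracing_buffer_handle(),
 * from a dedicated output thread when CONFIG_TRACING_ASYNC is enabled.
 */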
#include <init.h>
#include <string.h>
#include <kernel.h>
#include <sys/util.h>
#include <sys/atomic.h>
#include <tracing_core.h>
#include <tracing_buffer.h>
#include <tracing_backend.h>

#define TRACING_CMD_ENABLE "enable"
#define TRACING_CMD_DISABLE "disable"

#ifdef CONFIG_TRACING_BACKEND_UART
#define TRACING_BACKEND_NAME "tracing_backend_uart"
#elif defined CONFIG_TRACING_BACKEND_USB
#define TRACING_BACKEND_NAME "tracing_backend_usb"
#elif defined CONFIG_TRACING_BACKEND_POSIX
#define TRACING_BACKEND_NAME "tracing_backend_posix"
#elif defined CONFIG_TRACING_BACKEND_RAM
#define TRACING_BACKEND_NAME "tracing_backend_ram"
#else
#define TRACING_BACKEND_NAME ""
#endif

enum tracing_state {
	TRACING_DISABLE = 0,
	TRACING_ENABLE
};

static atomic_t tracing_state;
static atomic_t tracing_packet_drop_num;
static struct tracing_backend *working_backend;

#ifdef CONFIG_TRACING_ASYNC
#define TRACING_THREAD_NAME "tracing_thread"

static k_tid_t tracing_thread_tid;
static struct k_thread tracing_thread;
static struct k_timer tracing_thread_timer;

static K_SEM_DEFINE(tracing_thread_sem, 0, 1);
static K_THREAD_STACK_DEFINE(tracing_thread_stack,
			     CONFIG_TRACING_THREAD_STACK_SIZE);
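
/*
 * Output thread used when CONFIG_TRACING_ASYNC is enabled: it blocks on
 * tracing_thread_sem while the tracing buffer is empty, otherwise it claims
 * as much buffered data as possible and pushes it to the backend.
 */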
static void tracing_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	uint8_t *transferring_buf;
	uint32_t transferring_length, tracing_buffer_max_length;

	tracing_thread_tid = k_current_get();

	tracing_buffer_max_length = tracing_buffer_capacity_get();

	while (true) {
		if (tracing_buffer_is_empty()) {
			k_sem_take(&tracing_thread_sem, K_FOREVER);
		} else {
			transferring_length =
				tracing_buffer_get_claim(
						&transferring_buf,
						tracing_buffer_max_length);
			tracing_buffer_handle(transferring_buf,
					      transferring_length);
			tracing_buffer_get_finish(transferring_length);
		}
	}
}
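
/*
 * Expiry handler for tracing_thread_timer: wakes the output thread once the
 * wait threshold has elapsed after data was put into a previously empty
 * buffer (see tracing_trigger_output() below).
 */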
static void tracing_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&tracing_thread_sem);
}
#endif

static void tracing_set_state(enum tracing_state state)
{
	atomic_set(&tracing_state, state);
}
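
/*
 * One-shot initialization run by SYS_INIT at APPLICATION level: sets up the
 * tracing buffer, looks up and initializes the configured backend, resets
 * the dropped-packet counter, picks the initial tracing state (disabled when
 * a host command is expected to enable it), and in async mode starts the
 * output thread and its wake-up timer.
 */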
static int tracing_init(const struct device *arg)
{
	ARG_UNUSED(arg);

	tracing_buffer_init();

	working_backend = tracing_backend_get(TRACING_BACKEND_NAME);
	tracing_backend_init(working_backend);

	atomic_set(&tracing_packet_drop_num, 0);

	if (IS_ENABLED(CONFIG_TRACING_HANDLE_HOST_CMD)) {
		tracing_set_state(TRACING_DISABLE);
	} else {
		tracing_set_state(TRACING_ENABLE);
	}

#ifdef CONFIG_TRACING_ASYNC
	k_timer_init(&tracing_thread_timer,
		     tracing_thread_timer_expiry_fn, NULL);

	k_thread_create(&tracing_thread, tracing_thread_stack,
			K_THREAD_STACK_SIZEOF(tracing_thread_stack),
			tracing_thread_func, NULL, NULL, NULL,
			K_LOWEST_APPLICATION_THREAD_PRIO, 0, K_NO_WAIT);
	k_thread_name_set(&tracing_thread, TRACING_THREAD_NAME);
#endif

	return 0;
}

SYS_INIT(tracing_init, APPLICATION, 0);

#ifdef CONFIG_TRACING_ASYNC
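/*
 * Called after data is put into the tracing buffer: when the buffer was
 * empty before the put, start the wait-threshold timer so the output thread
 * is woken shortly afterwards rather than on every single put.
 */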
void tracing_trigger_output(bool before_put_is_empty)
{
	if (before_put_is_empty) {
		k_timer_start(&tracing_thread_timer,
			      K_MSEC(CONFIG_TRACING_THREAD_WAIT_THRESHOLD),
			      K_NO_WAIT);
	}
}
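
/*
 * True only when called from the tracing output thread itself (and never
 * from ISR context).
 */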
bool is_tracing_thread(void)
{
	return (!k_is_in_isr() && (k_current_get() == tracing_thread_tid));
}
#endif
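
/* Whether tracing output is currently enabled. */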
bool is_tracing_enabled(void)
{
	return atomic_get(&tracing_state) == TRACING_ENABLE;
}
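
/*
 * Handle a command string received from the host: the literal strings
 * "enable" and "disable" switch tracing on and off. For example, a host
 * tool writing "enable" over the UART backend channel would be expected to
 * end up here; how the command bytes are collected is up to the backend and
 * the CONFIG_TRACING_HANDLE_HOST_CMD plumbing, not this file.
 */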
void tracing_cmd_handle(uint8_t *buf, uint32_t length)
{
	if (strncmp(buf, TRACING_CMD_ENABLE, length) == 0) {
		tracing_set_state(TRACING_ENABLE);
	} else if (strncmp(buf, TRACING_CMD_DISABLE, length) == 0) {
		tracing_set_state(TRACING_DISABLE);
	}
}
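
/*
 * Forward a chunk of tracing data to the active backend; in async mode this
 * is called from the output thread above with data claimed from the buffer.
 */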
void tracing_buffer_handle(uint8_t *data, uint32_t length)
{
	tracing_backend_output(working_backend, data, length);
}
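
/* Count a tracing packet that could not be buffered and had to be dropped. */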
void tracing_packet_drop_handle(void)
{
	atomic_inc(&tracing_packet_drop_num);
}
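
/*
 * Rough sketch of how a producer side is expected to drive this file when
 * CONFIG_TRACING_ASYNC is enabled. The put-side helper used below
 * (put_packet_into_tracing_buffer) is named purely for illustration; the
 * real put path lives in the tracing buffer/format layer, not here:
 *
 *	if (is_tracing_enabled() && !is_tracing_thread()) {
 *		bool was_empty = tracing_buffer_is_empty();
 *
 *		if (put_packet_into_tracing_buffer(pkt) < 0) {
 *			tracing_packet_drop_handle();
 *		} else {
 *			tracing_trigger_output(was_empty);
 *		}
 *	}
 */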