log_core.c

/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <logging/log_msg.h>
#include "log_list.h"
#include <logging/log.h>
#include <logging/log_backend.h>
#include <logging/log_ctrl.h>
#include <logging/log_output.h>
#include <logging/log_msg2.h>
#include <logging/log_core2.h>
#include <sys/mpsc_pbuf.h>
#include <sys/printk.h>
#include <sys_clock.h>
#include <init.h>
#include <sys/__assert.h>
#include <sys/atomic.h>
#include <ctype.h>
#include <logging/log_frontend.h>
#include <syscall_handler.h>

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PRINTK_MAX_STRING_LENGTH
#define CONFIG_LOG_PRINTK_MAX_STRING_LENGTH 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_STRDUP_MAX_STRING
/* Required to suppress compiler warnings related to array subscript above array bounds.
 * log_strdup explicitly accesses element with index of (sizeof(log_strdup_buf.buf) - 2).
 * Set to 2 because some compilers generate warning on strncpy(dst, src, 0).
 */
#define CONFIG_LOG_STRDUP_MAX_STRING 2
#endif

#ifndef CONFIG_LOG_STRDUP_BUF_COUNT
#define CONFIG_LOG_STRDUP_BUF_COUNT 0
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

struct log_strdup_buf {
	atomic_t refcount;
	char buf[CONFIG_LOG_STRDUP_MAX_STRING + 1]; /* for termination */
} __aligned(sizeof(uintptr_t));

union log_msgs {
	struct log_msg *msg;
	union log_msg2_generic *msg2;
};

#define LOG_STRDUP_POOL_BUFFER_SIZE \
	(sizeof(struct log_strdup_buf) * CONFIG_LOG_STRDUP_BUF_COUNT)

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static const char *log_strdup_fail_msg = "<log_strdup alloc failed>";
struct k_mem_slab log_strdup_pool;
static uint8_t __noinit __aligned(sizeof(void *))
		log_strdup_pool_buf[LOG_STRDUP_POOL_BUFFER_SIZE];

static struct log_list_t list;
static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static uint32_t log_strdup_in_use;
static uint32_t log_strdup_max;
static uint32_t log_strdup_longest;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;

struct mpsc_pbuf_buffer log_buffer;
static uint32_t __aligned(Z_LOG_MSG2_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void notify_drop(const struct mpsc_pbuf_buffer *buffer,
			const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = notify_drop,
	.get_wlen = log_msg2_generic_get_wlen,
	.flags = IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		MPSC_PBUF_MODE_OVERWRITE : 0
};

bool log_is_strdup(const void *buf);
static void msg_process(union log_msgs msg, bool bypass);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

uint32_t z_log_get_s_mask(const char *str, uint32_t nargs)
{
	char curr;
	bool arm = false;
	uint32_t arg = 0U;
	uint32_t mask = 0U;

	__ASSERT_NO_MSG(nargs <= 8*sizeof(mask));

	while ((curr = *str++) && arg < nargs) {
		if (curr == '%') {
			arm = !arm;
		} else if (arm && isalpha((int)curr)) {
			if (curr == 's') {
				mask |= BIT(arg);
			}
			arm = false;
			arg++;
		} else {
			; /* standard character, continue walk */
		}
	}

	return mask;
}

/**
 * @brief Check if address is in read only section.
 *
 * @param addr Address.
 *
 * @return True if address identified within read only section.
 */
static bool is_rodata(const void *addr)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARC) || defined(CONFIG_X86) || \
	defined(CONFIG_ARM64) || defined(CONFIG_NIOS2) || \
	defined(CONFIG_RISCV) || defined(CONFIG_SPARC)
	extern const char *__rodata_region_start[];
	extern const char *__rodata_region_end[];
#define RO_START __rodata_region_start
#define RO_END __rodata_region_end
#elif defined(CONFIG_XTENSA)
	extern const char *_rodata_start[];
	extern const char *_rodata_end[];
#define RO_START _rodata_start
#define RO_END _rodata_end
#else
#define RO_START 0
#define RO_END 0
#endif

	return (((const char *)addr >= (const char *)RO_START) &&
		((const char *)addr < (const char *)RO_END));
}

/**
 * @brief Scan string arguments and report every address which is not in read
 *	  only memory and not yet duplicated.
 *
 * @param msg Log message.
 */
static void detect_missed_strdup(struct log_msg *msg)
{
#define ERR_MSG "argument %d in source %s log message \"%s\" missing " \
		"log_strdup()."
	uint32_t idx;
	const char *str;
	const char *msg_str;
	uint32_t mask;

	if (!log_msg_is_std(msg)) {
		return;
	}

	msg_str = log_msg_str_get(msg);
	mask = z_log_get_s_mask(msg_str, log_msg_nargs_get(msg));

	while (mask) {
		idx = 31 - __builtin_clz(mask);
		str = (const char *)log_msg_arg_get(msg, idx);
		if (!is_rodata(str) && !log_is_strdup(str) &&
		    (str != log_strdup_fail_msg)) {
			const char *src_name =
				log_source_name_get(CONFIG_LOG_DOMAIN_ID,
						    log_msg_source_id_get(msg));

			if (IS_ENABLED(CONFIG_ASSERT)) {
				__ASSERT(0, ERR_MSG, idx, src_name, msg_str);
			} else {
				LOG_ERR(ERR_MSG, idx, src_name, msg_str);
			}
		}

		mask &= ~BIT(idx);
	}
#undef ERR_MSG
}
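
/* Bookkeeping performed after a message has been queued: bump the buffered
 * message counter and, depending on the mode, process immediately (panic),
 * start the sleep timer on the first buffered message, or wake the
 * processing thread once the configured threshold is reached.
 */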
static void z_log_msg_post_finalize(void)
{
	atomic_inc(&buffered_cnt);
	if (panic_mode) {
		unsigned int key = irq_lock();

		(void)log_process(false);
		irq_unlock(key);
	} else if (proc_tid != NULL && buffered_cnt == 1) {
		k_timer_start(&log_process_thread_timer,
			      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS), K_NO_WAIT);
	} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		if ((buffered_cnt == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) &&
		    (proc_tid != NULL)) {
			k_timer_stop(&log_process_thread_timer);
			k_sem_give(&log_process_thread_sem);
		}
	} else {
		/* No action needed. Message processing will be triggered by the
		 * timeout or when number of upcoming messages exceeds the
		 * threshold.
		 */
		;
	}
}
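
/* Fill in the message ids and timestamp, append the message to the pending
 * list under an IRQ lock and run the common post-finalize handling.
 */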
static inline void msg_finalize(struct log_msg *msg,
				struct log_msg_ids src_level)
{
	unsigned int key;

	msg->hdr.ids = src_level;
	msg->hdr.timestamp = timestamp_func();

	key = irq_lock();
	log_list_add_tail(&list, msg);
	irq_unlock(key);

	z_log_msg_post_finalize();
}

void log_0(const char *str, struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_0(str, src_level);
	} else {
		struct log_msg *msg = log_msg_create_0(str);

		if (msg == NULL) {
			return;
		}
		msg_finalize(msg, src_level);
	}
}

void log_1(const char *str,
	   log_arg_t arg0,
	   struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_1(str, arg0, src_level);
	} else {
		struct log_msg *msg = log_msg_create_1(str, arg0);

		if (msg == NULL) {
			return;
		}
		msg_finalize(msg, src_level);
	}
}

void log_2(const char *str,
	   log_arg_t arg0,
	   log_arg_t arg1,
	   struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_2(str, arg0, arg1, src_level);
	} else {
		struct log_msg *msg = log_msg_create_2(str, arg0, arg1);

		if (msg == NULL) {
			return;
		}
		msg_finalize(msg, src_level);
	}
}

void log_3(const char *str,
	   log_arg_t arg0,
	   log_arg_t arg1,
	   log_arg_t arg2,
	   struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_3(str, arg0, arg1, arg2, src_level);
	} else {
		struct log_msg *msg = log_msg_create_3(str, arg0, arg1, arg2);

		if (msg == NULL) {
			return;
		}
		msg_finalize(msg, src_level);
	}
}

void log_n(const char *str,
	   log_arg_t *args,
	   uint32_t narg,
	   struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_n(str, args, narg, src_level);
	} else {
		struct log_msg *msg = log_msg_create_n(str, args, narg);

		if (msg == NULL) {
			return;
		}
		msg_finalize(msg, src_level);
	}
}

void log_hexdump(const char *str, const void *data, uint32_t length,
		 struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_hexdump(str, (const uint8_t *)data, length,
				     src_level);
	} else {
		struct log_msg *msg =
			log_msg_hexdump_create(str, (const uint8_t *)data, length);

		if (msg == NULL) {
			return;
		}
		msg_finalize(msg, src_level);
	}
}

void z_log_printk(const char *fmt, va_list ap)
{
	if (IS_ENABLED(CONFIG_LOG_PRINTK)) {
		union {
			struct log_msg_ids structure;
			uint32_t value;
		} src_level_union = {
			{
				.level = LOG_LEVEL_INTERNAL_RAW_STRING
			}
		};

		if (k_is_user_context()) {
			uint8_t str[CONFIG_LOG_PRINTK_MAX_STRING_LENGTH + 1];

			vsnprintk(str, sizeof(str), fmt, ap);

			z_log_string_from_user(src_level_union.value, str);
		} else if (IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
			log_generic(src_level_union.structure, fmt, ap,
				    LOG_STRDUP_SKIP);
		} else {
			uint8_t str[CONFIG_LOG_PRINTK_MAX_STRING_LENGTH + 1];
			struct log_msg *msg;
			int length;

			length = vsnprintk(str, sizeof(str), fmt, ap);
			length = MIN(length, sizeof(str));

			msg = log_msg_hexdump_create(NULL, str, length);
			if (msg == NULL) {
				return;
			}
			msg_finalize(msg, src_level_union.structure);
		}
	}
}

/** @brief Count number of arguments in formatted string.
 *
 * Function counts number of '%' not followed by '%'.
 */
uint32_t log_count_args(const char *fmt)
{
	uint32_t args = 0U;
	bool prev = false; /* if previous char was a modifier. */

	while (*fmt != '\0') {
		if (*fmt == '%') {
			prev = !prev;
		} else if (prev) {
			args++;
			prev = false;
		} else {
			; /* standard character, continue walk */
		}
		fmt++;
	}

	return args;
}

void log_generic(struct log_msg_ids src_level, const char *fmt, va_list ap,
		 enum log_strdup_action strdup_action)
{
	if (k_is_user_context()) {
		log_generic_from_user(src_level, fmt, ap);
	} else if (IS_ENABLED(CONFIG_LOG_IMMEDIATE) &&
		   (!IS_ENABLED(CONFIG_LOG_FRONTEND))) {
		struct log_backend const *backend;
		uint32_t timestamp = timestamp_func();

		for (int i = 0; i < log_backend_count_get(); i++) {
			backend = log_backend_get(i);
			bool runtime_ok =
				IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ?
				(src_level.level <= log_filter_get(backend,
						src_level.domain_id,
						src_level.source_id,
						true)) : true;

			if (log_backend_is_active(backend) && runtime_ok) {
				va_list ap_tmp;

				va_copy(ap_tmp, ap);
				log_backend_put_sync_string(backend, src_level,
						timestamp, fmt, ap_tmp);
				va_end(ap_tmp);
			}
		}
	} else {
		log_arg_t args[LOG_MAX_NARGS];
		uint32_t nargs = log_count_args(fmt);

		__ASSERT_NO_MSG(nargs < LOG_MAX_NARGS);
		for (int i = 0; i < nargs; i++) {
			args[i] = va_arg(ap, log_arg_t);
		}

		if (strdup_action != LOG_STRDUP_SKIP) {
			uint32_t mask = z_log_get_s_mask(fmt, nargs);

			while (mask) {
				uint32_t idx = 31 - __builtin_clz(mask);
				const char *str = (const char *)args[idx];

				/* is_rodata(str) is not checked,
				 * because log_strdup does it.
				 * Hence, we will do only optional check
				 * if already not duplicated.
				 */
				if (strdup_action == LOG_STRDUP_EXEC
				    || !log_is_strdup(str)) {
					args[idx] = (log_arg_t)log_strdup(str);
				}
				mask &= ~BIT(idx);
			}
		}

		log_n(fmt, args, nargs, src_level);
	}
}

void log_string_sync(struct log_msg_ids src_level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	log_generic(src_level, fmt, ap, LOG_STRDUP_SKIP);

	va_end(ap);
}

void log_hexdump_sync(struct log_msg_ids src_level, const char *metadata,
		      const void *data, uint32_t len)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_hexdump(metadata, (const uint8_t *)data, len,
				     src_level);
	} else {
		struct log_backend const *backend;
		log_timestamp_t timestamp = timestamp_func();

		for (int i = 0; i < log_backend_count_get(); i++) {
			backend = log_backend_get(i);
			bool runtime_ok =
				IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ?
				(src_level.level <= log_filter_get(backend,
						src_level.domain_id,
						src_level.source_id,
						true)) : true;

			if (log_backend_is_active(backend) && runtime_ok) {
				log_backend_put_sync_hexdump(
					backend, src_level, timestamp, metadata,
					(const uint8_t *)data, len);
			}
		}
	}
}
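
/* Default timestamp sources. default_get_timestamp() returns kernel ticks
 * (64-bit) or a 32-bit cycle count; default_lf_get_timestamp() returns
 * uptime in milliseconds and is selected by log_core_init() when the cycle
 * counter runs faster than 1 MHz.
 */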
static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}

void log_core_init(void)
{
	uint32_t freq;

	panic_mode = false;
	dropped_cnt = 0;

	/* Set default timestamp. */
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		timestamp_func = default_lf_get_timestamp;
		freq = 1000U;
	} else {
		timestamp_func = default_get_timestamp;
		freq = sys_clock_hw_cycles_per_sec();
	}

	log_output_timestamp_freq_set(freq);

	if (IS_ENABLED(CONFIG_LOG2)) {
		log_set_timestamp_func(default_get_timestamp,
			IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
				CONFIG_SYS_CLOCK_TICKS_PER_SEC :
				sys_clock_hw_cycles_per_sec());
		if (IS_ENABLED(CONFIG_LOG2_MODE_DEFERRED)) {
			z_log_msg2_init();
		}
	} else if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		log_msg_pool_init();
		log_list_init(&list);

		k_mem_slab_init(&log_strdup_pool, log_strdup_pool_buf,
				sizeof(struct log_strdup_buf),
				CONFIG_LOG_STRDUP_BUF_COUNT);
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}
}

void log_init(void)
{
	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_NUM_OF_SLOTS);
	int i;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();
	}

	if (atomic_inc(&initialized) != 0) {
		return;
	}

	/* Assign ids to backends. */
	for (i = 0; i < log_backend_count_get(); i++) {
		const struct log_backend *backend = log_backend_get(i);

		if (backend->autostart) {
			if (backend->api->init != NULL) {
				backend->api->init(backend);
			}

			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}
}

static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}

int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	log_output_timestamp_freq_set(freq);

	return 0;
}

void z_impl_log_panic(void)
{
	struct log_backend const *backend;

	if (panic_mode) {
		return;
	}

	/* If panic happened early logger might not be initialized.
	 * Forcing initialization of the logger and auto-starting backends.
	 */
	log_init();

	for (int i = 0; i < log_backend_count_get(); i++) {
		backend = log_backend_get(i);

		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
		/* Flush */
		while (log_process(false) == true) {
		}
	}

	panic_mode = true;
}

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <syscalls/log_panic_mrsh.c>
#endif
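
/* Return true if the message passes the backend's runtime filter. Always
 * true when runtime filtering is disabled or for non-message v2 items.
 */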
static bool msg_filter_check(struct log_backend const *backend,
			     union log_msgs msg)
{
	if (IS_ENABLED(CONFIG_LOG2) && !z_log_item_is_msg(msg.msg2)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;

	if (IS_ENABLED(CONFIG_LOG2)) {
		struct log_msg2 *msg2 = &msg.msg2->log;
		struct log_source_dynamic_data *source =
			(struct log_source_dynamic_data *)
			log_msg2_get_source(msg2);

		level = log_msg2_get_level(msg2);
		domain_id = log_msg2_get_domain(msg2);
		source_id = source ? log_dynamic_source_id(source) : -1;
	} else {
		level = log_msg_level_get(msg.msg);
		domain_id = log_msg_domain_id_get(msg.msg);
		source_id = log_msg_source_id_get(msg.msg);
	}

	backend_level = log_filter_get(backend, domain_id,
				       source_id, true);

	return (level <= backend_level);
}
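
/* Deliver a message to every active backend that accepts it (skipped when
 * bypass is set) and then release it, unless running in v2 immediate mode.
 */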
static void msg_process(union log_msgs msg, bool bypass)
{
	struct log_backend const *backend;

	if (!bypass) {
		if (!IS_ENABLED(CONFIG_LOG2) &&
		    IS_ENABLED(CONFIG_LOG_DETECT_MISSED_STRDUP) &&
		    !panic_mode) {
			detect_missed_strdup(msg.msg);
		}

		for (int i = 0; i < log_backend_count_get(); i++) {
			backend = log_backend_get(i);
			if (log_backend_is_active(backend) &&
			    msg_filter_check(backend, msg)) {
				if (IS_ENABLED(CONFIG_LOG2)) {
					log_backend_msg2_process(backend,
								 msg.msg2);
				} else {
					log_backend_put(backend, msg.msg);
				}
			}
		}
	}

	if (!IS_ENABLED(CONFIG_LOG2_MODE_IMMEDIATE)) {
		if (IS_ENABLED(CONFIG_LOG2)) {
			z_log_msg2_free(msg.msg2);
		} else {
			log_msg_put(msg.msg);
		}
	}
}

void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	for (int i = 0; i < log_backend_count_get(); i++) {
		struct log_backend const *backend = log_backend_get(i);

		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}
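
/* Claim the next pending message: from the v2 MPSC buffer, or the head of
 * the v1 message list taken under an IRQ lock.
 */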
union log_msgs get_msg(void)
{
	union log_msgs msg;

	if (IS_ENABLED(CONFIG_LOG2)) {
		msg.msg2 = z_log_msg2_claim();
		return msg;
	}

	int key = irq_lock();

	msg.msg = log_list_head_get(&list);
	irq_unlock(key);

	return msg;
}

static bool next_pending(void)
{
	if (IS_ENABLED(CONFIG_LOG2)) {
		return z_log_msg2_pending();
	}

	return (log_list_head_peek(&list) != NULL);
}

void z_log_notify_backend_enabled(void)
{
	/* Wakeup logger thread after attaching first backend. It might be
	 * blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

bool z_impl_log_process(bool bypass)
{
	union log_msgs msg;

	if (!backend_attached && !bypass) {
		return false;
	}

	msg = get_msg();
	if (msg.msg) {
		atomic_dec(&buffered_cnt);
		msg_process(msg, bypass);
	}

	if (!bypass && z_log_dropped_pending()) {
		dropped_notify();
	}

	return next_pending();
}

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(bool bypass)
{
	return z_impl_log_process(bypass);
}
#include <syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <syscalls/log_buffered_cnt_mrsh.c>
#endif

void z_log_dropped(void)
{
	atomic_inc(&dropped_cnt);
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}
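
/* mpsc_pbuf drop callback: account for a dropped message. */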
static void notify_drop(const struct mpsc_pbuf_buffer *buffer,
			const union mpsc_pbuf_generic *item)
{
	ARG_UNUSED(buffer);
	ARG_UNUSED(item);

	z_log_dropped();
}
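
/* Duplicate a transient string into the strdup pool. Strings in rodata,
 * calls from user mode and immediate mode return the original pointer.
 * Truncated copies are marked with a trailing '~'.
 */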
char *z_log_strdup(const char *str)
{
	struct log_strdup_buf *dup;
	int err;

	if (IS_ENABLED(CONFIG_LOG_IMMEDIATE) ||
	    is_rodata(str) || k_is_user_context()) {
		return (char *)str;
	}

	err = k_mem_slab_alloc(&log_strdup_pool, (void **)&dup, K_NO_WAIT);
	if (err != 0) {
		/* failed to allocate */
		return (char *)log_strdup_fail_msg;
	}

	if (IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING)) {
		size_t slen = strlen(str);
		static struct k_spinlock lock;
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		log_strdup_in_use++;
		log_strdup_max = MAX(log_strdup_in_use, log_strdup_max);
		log_strdup_longest = MAX(slen, log_strdup_longest);
		k_spin_unlock(&lock, key);
	}

	/* Set 'allocated' flag. */
	(void)atomic_set(&dup->refcount, 1);

	strncpy(dup->buf, str, sizeof(dup->buf) - 2);
	dup->buf[sizeof(dup->buf) - 2] = '~';
	dup->buf[sizeof(dup->buf) - 1] = '\0';

	return dup->buf;
}

uint32_t log_get_strdup_pool_current_utilization(void)
{
	return IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING) ?
			log_strdup_in_use : 0;
}

uint32_t log_get_strdup_pool_utilization(void)
{
	return IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING) ?
			log_strdup_max : 0;
}

uint32_t log_get_strdup_longest_string(void)
{
	return IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING) ?
			log_strdup_longest : 0;
}

bool log_is_strdup(const void *buf)
{
	return PART_OF_ARRAY(log_strdup_pool_buf, (uint8_t *)buf);
}

void log_free(void *str)
{
	struct log_strdup_buf *dup = CONTAINER_OF(str, struct log_strdup_buf,
						  buf);

	if (atomic_dec(&dup->refcount) == 1) {
		k_mem_slab_free(&log_strdup_pool, (void **)&dup);
		if (IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING)) {
			atomic_dec((atomic_t *)&log_strdup_in_use);
		}
	}
}

#if defined(CONFIG_USERSPACE)
/* LCOV_EXCL_START */
void z_impl_z_log_string_from_user(uint32_t src_level_val, const char *str)
{
	ARG_UNUSED(src_level_val);
	ARG_UNUSED(str);

	__ASSERT(false, "This function can be called from user mode only.");
}
/* LCOV_EXCL_STOP */

void z_vrfy_z_log_string_from_user(uint32_t src_level_val, const char *str)
{
	uint8_t level, domain_id, source_id;
	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union;
	size_t len;
	int err;

	src_level_union.value = src_level_val;
	level = src_level_union.structure.level;
	domain_id = src_level_union.structure.domain_id;
	source_id = src_level_union.structure.source_id;

	Z_OOPS(Z_SYSCALL_VERIFY_MSG(
		(IS_ENABLED(CONFIG_LOG_PRINTK) || (level >= LOG_LEVEL_ERR)) &&
		(level <= LOG_LEVEL_DBG),
		"Invalid log level"));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(domain_id == CONFIG_LOG_DOMAIN_ID,
		"Invalid log domain_id"));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(source_id < log_sources_count(),
		"Invalid log source id"));

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) &&
	    (level != LOG_LEVEL_INTERNAL_RAW_STRING) &&
	    (level > LOG_FILTER_SLOT_GET(log_dynamic_filters_get(source_id),
					 LOG_FILTER_AGGR_SLOT_IDX))) {
		/* Skip filtered out messages. */
		return;
	}

	/*
	 * Validate and make a copy of the source string. Because we need
	 * the log subsystem to eventually free it, we're going to use
	 * log_strdup().
	 */
	len = z_user_string_nlen(str, (level == LOG_LEVEL_INTERNAL_RAW_STRING) ?
				 CONFIG_LOG_PRINTK_MAX_STRING_LENGTH :
				 CONFIG_LOG_STRDUP_MAX_STRING, &err);
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(err == 0, "invalid string passed in"));
	Z_OOPS(Z_SYSCALL_MEMORY_READ(str, len));

	if (IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
		log_string_sync(src_level_union.structure, "%s", str);
	} else if (IS_ENABLED(CONFIG_LOG_PRINTK) &&
		   (level == LOG_LEVEL_INTERNAL_RAW_STRING)) {
		struct log_msg *msg;

		msg = log_msg_hexdump_create(NULL, str, len);
		if (msg != NULL) {
			msg_finalize(msg, src_level_union.structure);
		}
	} else {
		str = log_strdup(str);
		log_1("%s", (log_arg_t)str, src_level_union.structure);
	}
}
#include <syscalls/z_log_string_from_user_mrsh.c>

void log_generic_from_user(struct log_msg_ids src_level,
			   const char *fmt, va_list ap)
{
	char buffer[CONFIG_LOG_STRDUP_MAX_STRING + 1];
	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union;

	vsnprintk(buffer, sizeof(buffer), fmt, ap);

	__ASSERT_NO_MSG(sizeof(src_level) <= sizeof(uint32_t));
	src_level_union.structure = src_level;
	z_log_string_from_user(src_level_union.value, buffer);
}

void log_from_user(struct log_msg_ids src_level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	log_generic_from_user(src_level, fmt, ap);
	va_end(ap);
}

/* LCOV_EXCL_START */
void z_impl_z_log_hexdump_from_user(uint32_t src_level_val, const char *metadata,
				    const uint8_t *data, uint32_t len)
{
	ARG_UNUSED(src_level_val);
	ARG_UNUSED(metadata);
	ARG_UNUSED(data);
	ARG_UNUSED(len);

	__ASSERT(false, "This function can be called from user mode only.");
}
/* LCOV_EXCL_STOP */

void z_vrfy_z_log_hexdump_from_user(uint32_t src_level_val, const char *metadata,
				    const uint8_t *data, uint32_t len)
{
	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union;
	int err;
	char kmeta[CONFIG_LOG_STRDUP_MAX_STRING];

	src_level_union.value = src_level_val;

	Z_OOPS(Z_SYSCALL_VERIFY_MSG(
		(src_level_union.structure.level <= LOG_LEVEL_DBG) &&
		(src_level_union.structure.level >= LOG_LEVEL_ERR),
		"Invalid log level"));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(
		src_level_union.structure.domain_id == CONFIG_LOG_DOMAIN_ID,
		"Invalid log domain_id"));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(
		src_level_union.structure.source_id < log_sources_count(),
		"Invalid log source id"));

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) &&
	    (src_level_union.structure.level > LOG_FILTER_SLOT_GET(
	     log_dynamic_filters_get(src_level_union.structure.source_id),
	     LOG_FILTER_AGGR_SLOT_IDX))) {
		/* Skip filtered out messages. */
		return;
	}

	/*
	 * Validate and make a copy of the metadata string. Because we
	 * need the log subsystem to eventually free it, we're going
	 * to use log_strdup().
	 */
	err = z_user_string_copy(kmeta, metadata, sizeof(kmeta));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(err == 0, "invalid meta passed in"));
	Z_OOPS(Z_SYSCALL_MEMORY_READ(data, len));

	if (IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
		log_hexdump_sync(src_level_union.structure,
				 kmeta, data, len);
	} else {
		metadata = log_strdup(kmeta);
		log_hexdump(metadata, data, len, src_level_union.structure);
	}
}
#include <syscalls/z_log_hexdump_from_user_mrsh.c>

void log_hexdump_from_user(struct log_msg_ids src_level, const char *metadata,
			   const void *data, uint32_t len)
{
	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union;

	__ASSERT_NO_MSG(sizeof(src_level) <= sizeof(uint32_t));
	src_level_union.structure = src_level;
	z_log_hexdump_from_user(src_level_union.value, metadata,
				(const uint8_t *)data, len);
}
#else
/* LCOV_EXCL_START */
void z_impl_z_log_string_from_user(uint32_t src_level_val, const char *str)
{
	ARG_UNUSED(src_level_val);
	ARG_UNUSED(str);

	__ASSERT_NO_MSG(false);
}

void z_vrfy_z_log_hexdump_from_user(uint32_t src_level_val, const char *metadata,
				    const uint8_t *data, uint32_t len)
{
	ARG_UNUSED(src_level_val);
	ARG_UNUSED(metadata);
	ARG_UNUSED(data);
	ARG_UNUSED(len);

	__ASSERT_NO_MSG(false);
}

void log_from_user(struct log_msg_ids src_level, const char *fmt, ...)
{
	ARG_UNUSED(src_level);
	ARG_UNUSED(fmt);

	__ASSERT_NO_MSG(false);
}

void log_generic_from_user(struct log_msg_ids src_level,
			   const char *fmt, va_list ap)
{
	ARG_UNUSED(src_level);
	ARG_UNUSED(fmt);
	ARG_UNUSED(ap);

	__ASSERT_NO_MSG(false);
}

void log_hexdump_from_user(struct log_msg_ids src_level, const char *metadata,
			   const void *data, uint32_t len)
{
	ARG_UNUSED(src_level);
	ARG_UNUSED(metadata);
	ARG_UNUSED(data);
	ARG_UNUSED(len);

	__ASSERT_NO_MSG(false);
}
/* LCOV_EXCL_STOP */
#endif /* !defined(CONFIG_USERSPACE) */
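
/* v2 (LOG2) deferred-mode storage: messages live in an MPSC packet buffer
 * (log_buffer) and are claimed and freed by the processing code above.
 */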
void z_log_msg2_init(void)
{
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
}

static uint32_t log_diff_timestamp(void)
{
	extern log_timestamp_get_t timestamp_func;

	return timestamp_func();
}

void z_log_msg2_put_trace(struct log_msg2_trace trace)
{
	union log_msg2_generic generic = {
		.trace = trace
	};

	trace.hdr.timestamp = IS_ENABLED(CONFIG_LOG_TRACE_SHORT_TIMESTAMP) ?
			      log_diff_timestamp() : timestamp_func();
	mpsc_pbuf_put_word(&log_buffer, generic.buf);
}

void z_log_msg2_put_trace_ptr(struct log_msg2_trace trace, void *data)
{
	union log_msg2_generic generic = {
		.trace = trace
	};

	trace.hdr.timestamp = IS_ENABLED(CONFIG_LOG_TRACE_SHORT_TIMESTAMP) ?
			      log_diff_timestamp() : timestamp_func();
	mpsc_pbuf_put_word_ext(&log_buffer, generic.buf, data);
}

struct log_msg2 *z_log_msg2_alloc(uint32_t wlen)
{
	return (struct log_msg2 *)mpsc_pbuf_alloc(&log_buffer, wlen,
				K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

void z_log_msg2_commit(struct log_msg2 *msg)
{
	msg->hdr.timestamp = timestamp_func();

	if (IS_ENABLED(CONFIG_LOG2_MODE_IMMEDIATE)) {
		union log_msgs msgs = {
			.msg2 = (union log_msg2_generic *)msg
		};

		msg_process(msgs, false);

		return;
	}

	mpsc_pbuf_commit(&log_buffer, (union mpsc_pbuf_generic *)msg);

	if (IS_ENABLED(CONFIG_LOG2_MODE_DEFERRED)) {
		z_log_msg_post_finalize();
	}
}

union log_msg2_generic *z_log_msg2_claim(void)
{
	return (union log_msg2_generic *)mpsc_pbuf_claim(&log_buffer);
}

void z_log_msg2_free(union log_msg2_generic *msg)
{
	mpsc_pbuf_free(&log_buffer, (union mpsc_pbuf_generic *)msg);
}

bool z_log_msg2_pending(void)
{
	return mpsc_pbuf_is_pending(&log_buffer);
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}
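
/* Log processing thread: drain pending messages with log_process() and
 * block on the semaphore when nothing is left to process.
 */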
static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);

	log_init();
	thread_set(k_current_get());

	while (true) {
		if (log_process(false) == false) {
			k_sem_take(&log_process_thread_sem, K_FOREVER);
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;
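
/* SYS_INIT hook (POST_KERNEL): start the log processing thread (optionally
 * delayed) or, when the thread is disabled, initialize the logger in place.
 */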
static int enable_logger(const struct device *arg)
{
	ARG_UNUSED(arg);

	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
			     log_process_thread_timer_expiry_fn, NULL);
		/* start logging thread */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				K_LOWEST_APPLICATION_THREAD_PRIO, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS),
					K_NO_WAIT));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		log_init();
	}

	return 0;
}

SYS_INIT(enable_logger, POST_KERNEL, 0);