thread.c

/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <kernel.h>
#include <spinlock.h>
#include <sys/math_extras.h>
#include <sys_clock.h>
#include <ksched.h>
#include <wait_q.h>
#include <syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <init.h>
#include <tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <irq_offload.h>
#include <sys/check.h>
#include <random/rand32.h>
#include <sys/atomic.h>
#include <logging/log.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_THREAD_RUNTIME_STATS
k_thread_runtime_stats_t threads_runtime_stats;
#endif

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
static struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

#define _FOREACH_STATIC_THREAD(thread_data) \
	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	/*
	 * Lock is needed to make sure that the _kernel.threads is not being
	 * modified by the user_cb either directly or indirectly.
	 * The indirect ways are through calling k_thread_create and
	 * k_thread_abort from user_cb.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
}

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
}
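
/*
 * Illustrative usage (names below are example-only, not part of this file):
 * counting the active threads from a callback.
 *
 *	static void count_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		(*(int *)user_data)++;
 *	}
 *
 *	int count = 0;
 *	k_thread_foreach(count_cb, &count);
 *
 * k_thread_foreach() holds z_thread_monitor_lock across the whole walk, so
 * the callback must not create or abort threads; k_thread_foreach_unlocked()
 * instead releases the lock around each callback invocation.
 */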
bool k_is_in_isr(void)
{
	return arch_is_in_isr();
}

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void z_thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void z_thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if current thread is essential, false if it is not.
 */
bool z_is_thread_essential(void)
{
	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
	z_impl_k_thread_custom_data_set(data);
}
#include <syscalls/k_thread_custom_data_set_mrsh.c>
#endif

void *z_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
	return z_impl_k_thread_custom_data_get();
}
#include <syscalls/k_thread_custom_data_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
		       (thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}
	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif

int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
#ifdef CONFIG_THREAD_NAME
	if (thread == NULL) {
		thread = _current;
	}

	strncpy(thread->name, value, CONFIG_THREAD_MAX_NAME_LEN);
	thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);

	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(value);

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);

	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(struct k_thread *thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
	char name[CONFIG_THREAD_MAX_NAME_LEN];

	if (thread != NULL) {
		if (Z_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
			return -EINVAL;
		}
	}

	/* In theory we could copy directly into thread->name, but
	 * the current z_vrfy / z_impl split does not provide a
	 * means of doing so.
	 */
	if (z_user_string_copy(name, (char *)str, sizeof(name)) != 0) {
		return -EFAULT;
	}

	return z_impl_k_thread_name_set(thread, name);
#else
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

const char *k_thread_name_get(struct k_thread *thread)
{
#ifdef CONFIG_THREAD_NAME
	return (const char *)thread->name;
#else
	ARG_UNUSED(thread);
	return NULL;
#endif /* CONFIG_THREAD_NAME */
}

int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	strncpy(buf, thread->name, size);
	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
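
/* thread_state is a bitmask; the switch below matches threads that are in
 * exactly one of the listed states. A thread with a combination of state
 * bits set falls through to "unknown".
 */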
const char *k_thread_state_str(k_tid_t thread_id)
{
	switch (thread_id->base.thread_state) {
	case 0:
		return "";
	case _THREAD_DUMMY:
		return "dummy";
	case _THREAD_PENDING:
		return "pending";
	case _THREAD_PRESTART:
		return "prestart";
	case _THREAD_DEAD:
		return "dead";
	case _THREAD_SUSPENDED:
		return "suspended";
	case _THREAD_ABORTING:
		return "aborting";
	case _THREAD_QUEUED:
		return "queued";
	default:
		/* Add a break; if another case is ever added at the end,
		 * this bit of defensive programming will be useful.
		 */
		break;
	}
	return "unknown";
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
					    char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	size_t len;
	struct z_object *ko = z_object_find(thread);

	/* Special case: we allow reading the names of initialized threads
	 * even if we don't have permission on them
	 */
	if (thread == NULL || ko->type != K_OBJ_THREAD ||
	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
		return -EINVAL;
	}
	if (Z_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
		return -EFAULT;
	}
	len = strlen(thread->name);
	if (len + 1 > size) {
		return -ENOSPC;
	}

	return z_user_to_copy((void *)buf, thread->name, len + 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_STACK_SENTINEL
#ifdef CONFIG_STACK_MONITOR
#include <arch/arm/aarch32/cortex_m/cmsis.h>

#define CUR_TH_NAME(th) (k_thread_name_get(th) ? k_thread_name_get(th) : "NA")
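
/* Estimate how much free space remains between the current stack pointer and
 * the bottom of the running thread's stack. A coarse check compares the PSP
 * with stack_info.start; with CONFIG_STACK_MONITOR_PARANOID the 0xAA fill
 * pattern is also scanned upward from the bottom for a tighter bound.
 * Overflow is fatal; dropping below CONFIG_STACK_MONITOR_WARN_THRESHOLD only
 * logs a warning.
 */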
void check_thread_stack_overflow(void)
{
	unsigned long stack_bottom, sp;
	int free_size;
	struct k_thread *thread = _current;

	/* The PSP may not belong to the current thread if an interrupt
	 * occurred in the middle of a thread switch.
	 */
	sp = __get_PSP();
	if (k_is_in_isr()) {
#ifdef CONFIG_THREAD_RUNTIME_STATS
		if (thread->rt_stats.last_switched_in == 0) {
			/* Current thread already switched out; the PSP is
			 * not this thread's stack pointer.
			 */
			return;
		}
#else
		return;
#endif
	}

	stack_bottom = (unsigned long)thread->stack_info.start;

	/* check coarsely */
	free_size = (long)(sp - stack_bottom);
	if (free_size >= 128) {
		return;
	} else {
#ifdef CONFIG_STACK_MONITOR_PARANOID
		unsigned int *stack_ptr;
		int real_free_size;

		/* check more precisely */
		stack_ptr = (unsigned int *)(stack_bottom + 4);
		real_free_size = 0;
		while ((unsigned long)stack_ptr < sp) {
			if (*(stack_ptr++) != 0xaaaaaaaa) {
				break;
			}
			real_free_size += 4;
		}
		if (free_size > real_free_size) {
			free_size = real_free_size;
		}
#endif
	}

	if (free_size <= 0) {
		printk("*** Stack overflow detected ***\n");
		printk("Thread: %p %-10s, SP: 0x%lx, sp_b = 0x%lx, overran by %d bytes!\n",
		       thread, CUR_TH_NAME(thread), sp, stack_bottom, -free_size);
		z_except_reason(K_ERR_STACK_CHK_FAIL);
	} else if (free_size <= CONFIG_STACK_MONITOR_WARN_THRESHOLD) {
		printk("*** Stack usage warning ***\n");
		printk("Thread: %p %-10s, SP: 0x%lx, sp_b = 0x%lx, stack has only %d bytes of free space!\n",
		       thread, CUR_TH_NAME(thread), sp, stack_bottom, free_size);
	}
}
#endif
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
	uint32_t *stack;

	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}

#ifdef CONFIG_STACK_MONITOR
	check_thread_stack_overflow();
#endif

	stack = (uint32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		printk("Stack sentinel overwritten\n");
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		z_except_reason(K_ERR_STACK_CHK_FAIL);
	}
}
#endif /* CONFIG_STACK_SENTINEL */
void z_impl_k_thread_start(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);

	z_sched_start(thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_start(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_start(thread);
}
#include <syscalls/k_thread_start_mrsh.c>
#endif
#endif

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif
}
#endif

#if CONFIG_STACK_POINTER_RANDOM
int z_stack_adjust_initialized;

static size_t random_offset(size_t stack_size)
{
	size_t random_val;

	if (!z_stack_adjust_initialized) {
		z_early_boot_rand_get((uint8_t *)&random_val, sizeof(random_val));
	} else {
		sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
	}

	/* Don't need to worry about alignment of the size here,
	 * arch_new_thread() is required to do it.
	 *
	 * FIXME: Not the best way to get a random number in a range.
	 * See #6493
	 */
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return 0;
	}

	return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
/* This case is rare enough that it is not supported for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */
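
/* Carve up the stack object and compute the initial stack pointer for a new
 * thread. The pointer starts at the high end of the stack buffer and is then
 * pulled down by, in order: the architecture TLS area, the userspace local
 * data block, and an optional random offset; the result is aligned to
 * ARCH_STACK_PTR_ALIGN. With CONFIG_STACK_SENTINEL the lowest word of the
 * buffer is reserved for the sentinel value.
 */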
static char *setup_thread_stack(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size)
{
	size_t stack_obj_size, stack_buf_size;
	char *stack_ptr, *stack_buf_start;
	size_t delta = 0;

#ifdef CONFIG_USERSPACE
	if (z_stack_is_user_capable(stack)) {
		stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
	} else
#endif
	{
		/* Object cannot host a user mode thread */
		stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_KERNEL_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
	}

	/* Initial stack pointer at the high end of the stack object, may
	 * be reduced later in this function by TLS or random offset
	 */
	stack_ptr = (char *)stack + stack_obj_size;

	LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
		"buf_size %zu stack_ptr=%p",
		stack, new_thread, stack_obj_size, stack_buf_start,
		stack_buf_size, stack_ptr);

#ifdef CONFIG_INIT_STACKS
	memset(stack_buf_start, 0xaa, stack_buf_size);
#endif

#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */

#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* TLS is always last within the stack buffer */
	delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	size_t tls_size = sizeof(struct _thread_userspace_local_data);

	/* reserve space on highest memory of stack buffer for local data */
	delta += tls_size;
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif

#if CONFIG_STACK_POINTER_RANDOM
	delta += random_offset(stack_buf_size);
#endif

	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);

#ifdef CONFIG_THREAD_STACK_INFO
	/* Initial values. Arches which implement MPU guards that "borrow"
	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
	 * will need to appropriately update this.
	 *
	 * The bounds tracked here correspond to the area of the stack object
	 * that the thread can access, which includes TLS.
	 */
	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
	new_thread->stack_info.size = stack_buf_size;
	new_thread->stack_info.delta = delta;
#endif

	stack_ptr -= delta;

	return stack_ptr;
}
#define THREAD_COOKIE 0x1337C0D3

/*
 * The provided stack_size value is presumed to be either the result of
 * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
 * of K_THREAD_STACK_DEFINE() which defined 'stack'.
 */
char *z_setup_new_thread(struct k_thread *new_thread,
			 k_thread_stack_t *stack, size_t stack_size,
			 k_thread_entry_t entry,
			 void *p1, void *p2, void *p3,
			 int prio, uint32_t options, const char *name)
{
	char *stack_ptr;

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_USERSPACE
	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
		 "user thread %p with kernel-only stack %p",
		 new_thread, stack);
	z_object_init(new_thread);
	z_object_init(stack);
	new_thread->stack_obj = stack;
	new_thread->syscall_frame = NULL;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif
	z_waitq_init(&new_thread->join_queue);

	/* Initialize various struct k_thread members */
	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);

#ifdef CONFIG_KERNEL_COHERENCE
	/* Check that the thread object is safe, but that the stack is
	 * still cached!
	 */
	__ASSERT_NO_MSG(arch_mem_coherent(new_thread));
	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif

	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);

	/* static threads overwrite it afterwards with real value */
	new_thread->init_data = NULL;

#ifdef CONFIG_USE_SWITCH
	/* switch_handle must be non-null except when inside z_swap()
	 * for synchronization reasons. Historically some notional
	 * USE_SWITCH architectures have actually ignored the field
	 */
	__ASSERT(new_thread->switch_handle != NULL,
		 "arch layer failed to initialize switch_handle");
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_thread->custom_data = NULL;
#endif
#ifdef CONFIG_THREAD_MONITOR
	new_thread->entry.pEntry = entry;
	new_thread->entry.parameter1 = p1;
	new_thread->entry.parameter2 = p2;
	new_thread->entry.parameter3 = p3;

	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	new_thread->next_thread = _kernel.threads;
	_kernel.threads = new_thread;
	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
#ifdef CONFIG_THREAD_NAME
	if (name != NULL) {
		strncpy(new_thread->name, name,
			CONFIG_THREAD_MAX_NAME_LEN - 1);
		/* Ensure NULL termination, truncate if longer */
		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
	} else {
		new_thread->name[0] = '\0';
	}
#endif
#ifdef CONFIG_SCHED_CPU_MASK
	new_thread->base.cpu_mask = -1;
#endif
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		new_thread->resource_pool = NULL;
		return stack_ptr;
	}
#endif
#ifdef CONFIG_THREAD_TIMER
	sys_dlist_init(&new_thread->thread_timer_q);
#endif
#ifdef CONFIG_USERSPACE
	z_mem_domain_init_thread(new_thread);

	if ((options & K_INHERIT_PERMS) != 0U) {
		z_thread_perms_inherit(_current, new_thread);
	}
#endif
#ifdef CONFIG_SCHED_DEADLINE
	new_thread->base.prio_deadline = 0;
#endif
	new_thread->resource_pool = _current->resource_pool;

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);

#ifdef CONFIG_THREAD_RUNTIME_STATS
	memset(&new_thread->rt_stats, 0, sizeof(new_thread->rt_stats));
#endif

	return stack_ptr;
}
#ifdef CONFIG_MULTITHREADING
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");

	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			   prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
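
/*
 * Illustrative caller-side usage (names, stack size and priority below are
 * example-only, not part of this file):
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_thread my_thread;
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      7, 0, K_NO_WAIT);
 *
 * Passing K_FOREVER as the delay sets the thread up but leaves it unstarted
 * until k_thread_start() is called on it.
 */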
#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
	return z_object_find(stack) != NULL;
}

k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	size_t total_size, stack_obj_size;
	struct z_object *stack_object;

	/* The thread and stack objects *must* be in an uninitialized state */
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));

	/* No need to check z_stack_is_user_capable(), it won't be in the
	 * object table if it isn't
	 */
	stack_object = z_object_find(stack);
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
						K_OBJ_THREAD_STACK_ELEMENT,
						_OBJ_INIT_FALSE) == 0,
				    "bad stack object"));

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
						       stack_size, &total_size),
				    "stack size overflow (%zu+%zu)",
				    stack_size,
				    K_THREAD_STACK_RESERVED));

	/* Testing less-than-or-equal since additional room may have been
	 * allocated for alignment constraints
	 */
#ifdef CONFIG_GEN_PRIV_STACKS
	stack_obj_size = stack_object->data.stack_data->size;
#else
	stack_obj_size = stack_object->data.stack_size;
#endif
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
				    "stack size %zu is too big, max is %zu",
				    total_size, stack_obj_size));

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(options & K_USER));
	Z_OOPS(Z_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
	Z_OOPS(Z_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
							 _current->base.prio)));

	z_setup_new_thread(new_thread, stack, stack_size,
			   entry, p1, p2, p3, prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
#include <syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE

static void grant_static_access(void)
{
	STRUCT_SECTION_FOREACH(z_object_assignment, pos) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void z_init_static_threads(void)
{
	_FOREACH_STATIC_THREAD(thread_data) {
		z_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options,
			thread_data->init_name);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif

	/*
	 * Non-legacy static threads may be started immediately or
	 * after a previously specified delay. Even though the
	 * scheduler is locked, ticks can still be delivered and
	 * processed. Take a sched lock to prevent them from running
	 * until they are all started.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	k_sched_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_TICKS_FOREVER) {
			schedule_new_thread(thread_data->init_thread,
					    K_MSEC(thread_data->init_delay));
		}
	}
	k_sched_unlock();
}
#endif
void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */
	thread_base->pended_on = NULL;
	thread_base->user_options = (uint8_t)options;
	thread_base->thread_state = (uint8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0U;

#ifdef CONFIG_SMP
	thread_base->is_idle = 0;
#endif

	/* swap_data does not need to be initialized */

	z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);

	_current->base.user_options |= K_USER;
	z_thread_essential_clear();
#ifdef CONFIG_THREAD_MONITOR
	_current->entry.pEntry = entry;
	_current->entry.parameter1 = p1;
	_current->entry.parameter2 = p2;
	_current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
		 "dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	memset(_current->userspace_local_data, 0,
	       sizeof(struct _thread_userspace_local_data));
#endif
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	arch_tls_stack_setup(_current,
			     (char *)(_current->stack_info.start +
				      _current->stack_info.size));
#endif
	arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	z_thread_entry(entry, p1, p2, p3);
#endif
}

/* These spinlock assertion predicates are defined here because having
 * them in spinlock.h is a giant header ordering headache.
 */
#ifdef CONFIG_SPIN_VALIDATE
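/* l->thread_cpu packs the owning thread pointer together with the CPU id in
 * its two least-significant bits, so (thread_cpu & 3) recovers the CPU and
 * the remaining bits identify the owner. A value of 0 means the lock is not
 * currently held.
 */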
bool z_spin_lock_valid(struct k_spinlock *l)
{
	uintptr_t thread_cpu = l->thread_cpu;

	if (thread_cpu != 0U) {
		if ((thread_cpu & 3U) == _current_cpu->id) {
			return false;
		}
	}
	return true;
}

bool z_spin_unlock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != (_current_cpu->id | (uintptr_t)_current)) {
		return false;
	}
	l->thread_cpu = 0;
	return true;
}

void z_spin_lock_set_owner(struct k_spinlock *l)
{
	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}

#ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l)
{
	return arch_mem_coherent((void *)l);
}
#endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */
int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_disable(thread);
#else
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

int z_impl_k_float_enable(struct k_thread *thread, unsigned int options)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_enable(thread, options);
#else
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_float_disable(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_float_disable(thread);
}
#include <syscalls/k_float_disable_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_IRQ_OFFLOAD
/* Make offload_sem visible outside this file under testing, so that it can
 * be released externally if an error occurs.
 */
K_SEM_DEFINE(offload_sem, 1, 1);

void irq_offload(irq_offload_routine_t routine, const void *parameter)
{
	k_sem_take(&offload_sem, K_FOREVER);
	arch_irq_offload(routine, parameter);
	k_sem_give(&offload_sem);
}
#endif
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif

int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	const uint8_t *start = (uint8_t *)thread->stack_info.start;
	size_t size = thread->stack_info.size;
	size_t unused = 0;
	const uint8_t *checked_stack = start;
	/* Take the address of any local variable as a shallow bound for the
	 * stack pointer. Addresses above it are guaranteed to be
	 * accessible.
	 */
	const uint8_t *stack_pointer = (const uint8_t *)&start;

	/* If we are currently running on the stack being analyzed, some
	 * memory management hardware will generate an exception if we
	 * read unused stack memory.
	 *
	 * This never happens when invoked from user mode, as user mode
	 * will always run this function on the privilege elevation stack.
	 */
	if ((stack_pointer > start) && (stack_pointer <= (start + size)) &&
	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		/* TODO: We could add an arch_ API call to temporarily
		 * disable the stack checking in the CPU, but this would
		 * need to be properly managed wrt context switches/interrupts
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* First 4 bytes of the stack buffer reserved for the
		 * sentinel value, it won't be 0xAAAAAAAA for thread
		 * stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
		checked_stack += 4;
		size -= 4;
	}

	for (size_t i = 0; i < size; i++) {
		if ((checked_stack[i]) == 0xaaU) {
			unused++;
		} else {
			break;
		}
	}

	*unused_ptr = unused;

	return 0;
}
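
/*
 * Illustrative usage (the variable names are example-only, not part of this
 * file): report how much of a thread's stack has never been touched.
 *
 *	size_t unused;
 *
 *	if (k_thread_stack_space_get(tid, &unused) == 0) {
 *		printk("thread %p: %zu bytes of stack never used\n",
 *		       tid, unused);
 *	}
 *
 * The result is based on counting intact 0xAA fill bytes from the bottom of
 * the stack, so it reflects the high-water mark of usage, not the current
 * stack depth.
 */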
#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	size_t unused;
	int ret;

	ret = Z_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_impl_k_thread_stack_space_get(thread, &unused);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_user_to_copy(unused_ptr, &unused, sizeof(size_t));
	CHECKIF(ret != 0) {
		return ret;
	}

	return 0;
}
#include <syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
						const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_remaining_ticks(t);
}
#include <syscalls/k_thread_timeout_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
						const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_expires_ticks(t);
}
#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif

#if defined(CONFIG_ACTIONS_ARM_MPU)
/* #define CONFIG_MONITOR_THREAD_STACK_WR */
#endif

#ifdef CONFIG_MONITOR_THREAD_STACK_WR
extern void act_mpu_set(uint32_t chan, uint32_t mem_base, uint32_t size, uint32_t attr);
extern void act_mpu_unset(uint32_t chan);

#define THREAD_STACK_BASE 0
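
/* Watch write accesses to one particular thread's stack by pointing an MPU
 * channel at it around context switches. THREAD_STACK_BASE selects the stack
 * of interest; with the placeholder value of 0 the early-return checks below
 * never match a real stack, so the monitor is effectively disabled until a
 * real address is filled in.
 */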
static void check_monitor_thread_stack(struct k_thread *thread, int iset)
{
	if (THREAD_STACK_BASE < thread->stack_info.start) {
		return;
	}

	if (THREAD_STACK_BASE >= thread->stack_info.start + thread->stack_info.size) {
		return;
	}

	printk("mpu: %p %-10s\n", thread, k_thread_name_get(thread));

	if (iset) {
		act_mpu_set(1, thread->stack_info.start, thread->stack_info.size, 2);
	} else {
		act_mpu_unset(1);
	}
}

static struct k_thread *g_thread;

void check_switch_out(void)
{
	if (g_thread) {
		check_monitor_thread_stack(g_thread, 1);
	}
}
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
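/* Runtime statistics are collected by timestamping each switch-in
 * (last_switched_in) and, on the matching switch-out, accumulating the
 * elapsed cycles into the thread's rt_stats.stats.execution_cycles as well
 * as the global threads_runtime_stats.
 */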
void z_thread_mark_switched_in(void)
{
#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif

#ifdef CONFIG_THREAD_RUNTIME_STATS
	struct k_thread *thread;

	thread = k_current_get();

#ifdef CONFIG_MONITOR_THREAD_STACK_WR
	check_switch_out();
	check_monitor_thread_stack(thread, 0);
#endif

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	thread->rt_stats.last_switched_in = timing_counter_get();
#else
	thread->rt_stats.last_switched_in = k_cycle_get_32();
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */
#endif /* CONFIG_THREAD_RUNTIME_STATS */
}
void z_thread_mark_switched_out(void)
{
#ifdef CONFIG_THREAD_RUNTIME_STATS
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	timing_t now;
#else
	uint32_t now;
	uint32_t diff_us;
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */
	uint64_t diff;
	struct k_thread *thread;

	thread = k_current_get();
	/* check_monitor_thread_stack(thread, 1); */

	if (unlikely(thread->rt_stats.last_switched_in == 0)) {
		/* Has not run before */
		return;
	}

	if (unlikely(thread->base.thread_state == _THREAD_DUMMY)) {
		/* dummy thread has no stat struct */
		return;
	}

#ifdef CONFIG_MONITOR_THREAD_STACK_WR
	g_thread = thread;
#endif

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	now = timing_counter_get();
	diff = timing_cycles_get(&thread->rt_stats.last_switched_in, &now);
#else
	now = k_cycle_get_32();
	diff = (uint64_t)(now - thread->rt_stats.last_switched_in);
	thread->rt_stats.last_switched_in = 0;

	diff_us = SYS_CLOCK_HW_CYCLES_TO_NS_AVG(diff, 1000);
	if ((diff_us > (1000 * 10)) && (thread->base.prio < K_LOWEST_THREAD_PRIO)) {
		/* The thread ran for more than 10 ms without being switched out */
		printk("\nThread %p %-10s run time %d ms\n", thread,
		       k_thread_name_get(thread), (diff_us / 1000));
	}
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */
	thread->rt_stats.stats.execution_cycles += diff;

	threads_runtime_stats.execution_cycles += diff;
#endif /* CONFIG_THREAD_RUNTIME_STATS */

#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif
}
#ifdef CONFIG_THREAD_RUNTIME_STATS
int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

	(void)memcpy(stats, &thread->rt_stats.stats,
		     sizeof(thread->rt_stats.stats));

	return 0;
}
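
/*
 * Illustrative usage (the tid variable is example-only, not part of this
 * file): sampling a thread's accumulated execution cycles.
 *
 *	k_thread_runtime_stats_t stats;
 *
 *	if (k_thread_runtime_stats_get(tid, &stats) == 0) {
 *		printk("thread %p: %llu cycles\n", tid,
 *		       (unsigned long long)stats.execution_cycles);
 *	}
 *
 * k_thread_runtime_stats_all_get() reports the total over all threads.
 */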
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
	if (stats == NULL) {
		return -EINVAL;
	}

	(void)memcpy(stats, &threads_runtime_stats,
		     sizeof(threads_runtime_stats));

	return 0;
}

void k_thread_runtime_clear(void)
{
	struct k_thread *thread_list = NULL;
	unsigned int key;

	key = irq_lock();

	thread_list = (struct k_thread *)(_kernel.threads);
	while (thread_list != NULL) {
		if (thread_list == k_current_get()) {
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
			thread_list->rt_stats.last_switched_in = timing_counter_get();
#else
			thread_list->rt_stats.last_switched_in = k_cycle_get_32();
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */
		}
		/* clear old cycles counter */
		thread_list->rt_stats.stats.execution_cycles = 0;
		thread_list = (struct k_thread *)thread_list->next_thread;
	}

	threads_runtime_stats.execution_cycles = 0;

	irq_unlock(key);
}
#endif /* CONFIG_THREAD_RUNTIME_STATS */
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */