/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel initialization module
 *
 * This module contains routines that are used to initialize the kernel.
 */

#include <zephyr.h>
#include <offsets_short.h>
#include <kernel.h>
#include <sys/printk.h>
#include <debug/stack.h>
#include <random/rand32.h>
#include <linker/sections.h>
#include <toolchain.h>
#include <kernel_structs.h>
#include <device.h>
#include <init.h>
#include <linker/linker-defs.h>
#include <ksched.h>
#include <string.h>
#include <sys/dlist.h>
#include <kernel_internal.h>
#include <drivers/entropy.h>
#include <logging/log_ctrl.h>
#include <tracing/tracing.h>
#include <stdbool.h>
#include <debug/gcov.h>
#include <kswap.h>
#include <timing/timing.h>
#include <logging/log.h>

LOG_MODULE_REGISTER(os, CONFIG_KERNEL_LOG_LEVEL);

/* the only struct z_kernel instance */
struct z_kernel _kernel;

/* init/main and idle threads */
#if 0
K_THREAD_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
#else
__in_section_unique(main.noinit.stack)
struct z_thread_stack_element __aligned(Z_KERNEL_STACK_OBJ_ALIGN)
        z_main_stack[CONFIG_MAIN_STACK_SIZE];
#endif
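
/*
 * Note: the #if 0 path above preserves the stock Zephyr
 * K_THREAD_STACK_DEFINE() declaration for reference; the active path
 * appears to be a platform customization that places the stack in a
 * uniquely named noinit section instead. The same pattern is used for
 * z_interrupt_stacks below.
 */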

struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
__pinned_bss
struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];

static K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_idle_stacks,
                                          CONFIG_MP_NUM_CPUS,
                                          CONFIG_IDLE_STACK_SIZE);
#endif /* CONFIG_MULTITHREADING */

/*
 * storage space for the interrupt stack
 *
 * Note: This area is used as the system stack during kernel initialization,
 * since the kernel hasn't yet set up its own stack areas. The dual purposing
 * of this area is safe since interrupts are disabled until the kernel context
 * switches to the init thread.
 */
#if 0
K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
                            CONFIG_ISR_STACK_SIZE);
#else
__in_section_unique(interrupt.noinit.stack)
struct z_thread_stack_element __aligned(Z_KERNEL_STACK_OBJ_ALIGN)
        z_interrupt_stacks[CONFIG_MP_NUM_CPUS][CONFIG_ISR_STACK_SIZE];
#endif

#ifdef CONFIG_SYS_CLOCK_EXISTS
#define initialize_timeouts() do { \
                sys_dlist_init(&_timeout_q); \
        } while (false)
#else
#define initialize_timeouts() do { } while (false)
#endif
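
/*
 * The do { } while (false) wrapper used above is the standard idiom for
 * making a multi-statement macro behave as a single statement, so that
 * initialize_timeouts() expands safely inside an unbraced if/else body.
 */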

extern void idle(void *unused1, void *unused2, void *unused3);

/* LCOV_EXCL_START
 *
 * This code is called so early in the boot process that code coverage
 * doesn't work properly. In addition, not all arches call this code;
 * some, like x86, do this with optimized assembly.
 */

/**
 *
 * @brief Clear BSS
 *
 * This routine clears the BSS region, so all bytes are 0.
 *
 * @return N/A
 */
__boot_func
void z_bss_zero(void)
{
#ifndef CONFIG_SOC_NO_PSRAM
        if ((uint32_t)&__psram_bss_end - (uint32_t)&__psram_bss_start > 0) {
                (void)memset(&__psram_bss_start, 0,
                             (uint32_t)&__psram_bss_end -
                             (uint32_t)&__psram_bss_start);
        }
        if ((uint32_t)&__sram_bss_end - (uint32_t)&__sram_bss_start > 0) {
                (void)memset(&__sram_bss_start, 0,
                             (uint32_t)&__sram_bss_end -
                             (uint32_t)&__sram_bss_start);
        }
#else
        if ((uint32_t)&__bss_end - (uint32_t)&__bss_start > 0) {
                (void)memset(&__bss_start, 0,
                             (uint32_t)&__bss_end - (uint32_t)&__bss_start);
        }
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay)
        (void)memset(&__ccm_bss_start, 0,
                     (uint32_t)&__ccm_bss_end - (uint32_t)&__ccm_bss_start);
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
        (void)memset(&__dtcm_bss_start, 0,
                     (uint32_t)&__dtcm_bss_end - (uint32_t)&__dtcm_bss_start);
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
        extern void bss_zeroing_relocation(void);

        bss_zeroing_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_COVERAGE_GCOV
        (void)memset(&__gcov_bss_start, 0,
                     (uintptr_t)&__gcov_bss_end -
                     (uintptr_t)&__gcov_bss_start);
#endif
}
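
/*
 * Note: __bss_start/__bss_end and the other region markers used above are
 * linker-provided symbols, so only their addresses are meaningful; each
 * region's size is therefore computed as the difference between the two
 * symbol addresses.
 */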

#ifdef CONFIG_LINKER_USE_BOOT_SECTION
/**
 * @brief Clear BSS within the boot region
 *
 * This routine clears the BSS within the boot region.
 * This is separate from z_bss_zero() as the boot region may
 * contain symbols required for the boot process before
 * paging is initialized.
 */
__boot_func
void z_bss_zero_boot(void)
{
        (void)memset(&lnkr_boot_bss_start, 0,
                     (uintptr_t)&lnkr_boot_bss_end
                     - (uintptr_t)&lnkr_boot_bss_start);
}
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
/**
 * @brief Clear BSS within the pinned region
 *
 * This routine clears the BSS within the pinned region.
 * This is separate from z_bss_zero() as the pinned region may
 * contain symbols required for the boot process before
 * paging is initialized.
 */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
__boot_func
#else
__pinned_func
#endif
void z_bss_zero_pinned(void)
{
        (void)memset(&lnkr_pinned_bss_start, 0,
                     (uintptr_t)&lnkr_pinned_bss_end
                     - (uintptr_t)&lnkr_pinned_bss_start);
}
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

#ifdef CONFIG_STACK_CANARIES
extern volatile uintptr_t __stack_chk_guard;
#endif /* CONFIG_STACK_CANARIES */

/* LCOV_EXCL_STOP */

__pinned_bss
bool z_sys_post_kernel;

extern void boot_banner(void);

/**
 *
 * @brief Mainline for kernel's background thread
 *
 * This routine completes kernel initialization by invoking the remaining
 * init functions, then invokes the application's main() routine.
 *
 * @return N/A
 */
__boot_func
static void bg_thread_main(void *unused1, void *unused2, void *unused3)
{
        ARG_UNUSED(unused1);
        ARG_UNUSED(unused2);
        ARG_UNUSED(unused3);

#ifdef CONFIG_MMU
        /* Invoked here such that backing store or eviction algorithms may
         * initialize kernel objects, and that all POST_KERNEL and later tasks
         * may perform memory management tasks (except for z_phys_map() which
         * is allowed at any time)
         */
        z_mem_manage_init();
#endif /* CONFIG_MMU */

        z_sys_post_kernel = true;

        z_sys_init_run_level(_SYS_INIT_LEVEL_POST_KERNEL);
#if CONFIG_STACK_POINTER_RANDOM
        z_stack_adjust_initialized = 1;
#endif
        boot_banner();

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_ARCH_POSIX)
        void z_cpp_init_static(void);
        z_cpp_init_static();
#endif

        /* Final init level before app starts */
        z_sys_init_run_level(_SYS_INIT_LEVEL_APPLICATION);

        z_init_static_threads();

#ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(arch_mem_coherent(&_kernel));
#endif

#ifdef CONFIG_SMP
        z_smp_init();
        z_sys_init_run_level(_SYS_INIT_LEVEL_SMP);
#endif

#ifdef CONFIG_MMU
        z_mem_manage_boot_finish();
#endif /* CONFIG_MMU */

        extern int main(void);

        main();

        /* Mark nonessential since main() has no more work to do */
        z_main_thread.base.user_options &= ~K_ESSENTIAL;

#ifdef CONFIG_COVERAGE_DUMP
        /* Dump coverage data once main() has exited. */
        gcov_coverage_dump();
#endif
} /* LCOV_EXCL_LINE ... because we just dumped final coverage data */
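
/*
 * When bg_thread_main() returns after main() exits, the main thread
 * terminates normally. Clearing K_ESSENTIAL above is what makes that safe:
 * the kernel treats termination of an essential thread as a fatal error.
 */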

#if defined(CONFIG_MULTITHREADING)
__boot_func
static void init_idle_thread(int i)
{
        struct k_thread *thread = &z_idle_threads[i];
        k_thread_stack_t *stack = z_idle_stacks[i];

#ifdef CONFIG_THREAD_NAME
        char tname[8];

        snprintk(tname, 8, "idle %02d", i);
#else
        char *tname = NULL;
#endif /* CONFIG_THREAD_NAME */

        z_setup_new_thread(thread, stack,
                           CONFIG_IDLE_STACK_SIZE, idle, &_kernel.cpus[i],
                           NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
                           tname);
        z_mark_thread_as_started(thread);

#ifdef CONFIG_SMP
        thread->base.is_idle = 1U;
#endif
}

void z_reinit_idle_thread(int i)
{
        init_idle_thread(i);
}
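
/*
 * z_reinit_idle_thread() re-runs idle thread setup for a single CPU;
 * presumably this is used when a CPU is powered off and later brought
 * back online, so its idle thread restarts from a clean state.
 */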

/**
 *
 * @brief Initializes kernel data structures
 *
 * This routine initializes various kernel data structures, including
 * the init and idle threads and any architecture-specific initialization.
 *
 * Note that all fields of "_kernel" are set to zero on entry, which may
 * be all the initialization many of them require.
 *
 * @return initial stack pointer for the main thread
 */
__boot_func
static char *prepare_multithreading(void)
{
        char *stack_ptr;

        /* _kernel.ready_q is all zeroes */
        z_sched_init();

#ifndef CONFIG_SMP
        /*
         * prime the cache with the main thread since:
         *
         * - the cache can never be NULL
         * - the main thread will be the one to run first
         * - no other thread is initialized yet and thus their priority fields
         *   contain garbage, which would prevent the cache loading algorithm
         *   from working as intended
         */
        _kernel.ready_q.cache = &z_main_thread;
#endif
        stack_ptr = z_setup_new_thread(&z_main_thread, z_main_stack,
                                       CONFIG_MAIN_STACK_SIZE, bg_thread_main,
                                       NULL, NULL, NULL,
                                       CONFIG_MAIN_THREAD_PRIORITY,
                                       K_ESSENTIAL, "main");
        z_mark_thread_as_started(&z_main_thread);
        z_ready_thread(&z_main_thread);

        for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
                init_idle_thread(i);
                _kernel.cpus[i].idle_thread = &z_idle_threads[i];
                _kernel.cpus[i].id = i;
                _kernel.cpus[i].irq_stack =
                        (Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]) +
                         K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]));
        }

        return stack_ptr;
}

__boot_func
static FUNC_NORETURN void switch_to_main_thread(char *stack_ptr)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
        arch_switch_to_main_thread(&z_main_thread, stack_ptr, bg_thread_main);
#else
        ARG_UNUSED(stack_ptr);
        /*
         * Context switch to the main thread (entry function is
         * bg_thread_main()): the current fake thread is not on a wait queue
         * or ready queue, so it will never be rescheduled in.
         */
        z_swap_unlocked();
#endif
        CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* CONFIG_MULTITHREADING */

#if defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR)
__boot_func
void z_early_boot_rand_get(uint8_t *buf, size_t length)
{
        int n = sizeof(uint32_t);
#ifdef CONFIG_ENTROPY_HAS_DRIVER
        const struct device *entropy =
                device_get_binding(DT_CHOSEN_ZEPHYR_ENTROPY_LABEL);
        int rc;

        if (entropy == NULL) {
                goto sys_rand_fallback;
        }

        /* Try to see if driver provides an ISR-specific API */
        rc = entropy_get_entropy_isr(entropy, buf, length, ENTROPY_BUSYWAIT);
        if (rc == -ENOTSUP) {
                /* Driver does not provide an ISR-specific API, assume it can
                 * be called from ISR context
                 */
                rc = entropy_get_entropy(entropy, buf, length);
        }

        if (rc >= 0) {
                return;
        }

        /* Fall through to fallback */

sys_rand_fallback:
#endif

        /* FIXME: this assumes sys_rand32_get() won't use any synchronization
         * primitive, like semaphores or mutexes. It's too early in the boot
         * process to use any of them. Ideally, only the path where entropy
         * devices are available should be built; this is only a fallback for
         * those devices without a HWRNG entropy driver.
         */
        while (length > 0U) {
                uint32_t rndbits;
                uint8_t *p_rndbits = (uint8_t *)&rndbits;

                rndbits = sys_rand32_get();

                if (length < sizeof(uint32_t)) {
                        n = length;
                }

                /* Copy one word of randomness into the output buffer,
                 * byte by byte
                 */
                for (int i = 0; i < n; i++) {
                        *buf = *p_rndbits;
                        buf++;
                        p_rndbits++;
                }

                length -= n;
        }
}
#endif /* defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR) */
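
/*
 * Summary of the fallback chain above: the ISR-safe entropy API is tried
 * first, then the regular (possibly blocking) entropy API, and finally
 * sys_rand32_get(), which may be only pseudo-random on targets without a
 * hardware RNG.
 */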

/**
 *
 * @brief Initialize kernel
 *
 * This routine is invoked when the system is ready to run C code. The
 * processor must be running in 32-bit mode, and the BSS must have been
 * cleared/zeroed.
 *
 * @return Does not return
 */
__boot_func
FUNC_NORETURN void z_cstart(void)
{
        /* gcov hook needed to get the coverage report. */
        gcov_static_init();

        LOG_CORE_INIT();

        /* perform any architecture-specific initialization */
        arch_kernel_init();

#if defined(CONFIG_MULTITHREADING)
        /* Note: The z_ready_thread() call in prepare_multithreading() requires
         * a dummy thread even if CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y
         */
        struct k_thread dummy_thread;

        z_dummy_thread_init(&dummy_thread);
#endif
        /* do any necessary initialization of static devices */
        z_device_state_init();

        /* perform basic hardware initialization */
        z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
        z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);

#ifdef CONFIG_STACK_CANARIES
        uintptr_t stack_guard;

        z_early_boot_rand_get((uint8_t *)&stack_guard, sizeof(stack_guard));
        __stack_chk_guard = stack_guard;
        /* Shift in a zero low byte, presumably as a terminator canary so
         * string-based overflows stop before silently overwriting the guard.
         */
        __stack_chk_guard <<= 8;
#endif /* CONFIG_STACK_CANARIES */

#ifdef CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT
        timing_init();
        timing_start();
#endif

#ifdef CONFIG_MULTITHREADING
        switch_to_main_thread(prepare_multithreading());
#else
#ifdef ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING
        /* Custom ARCH-specific routine to switch to main()
         * in the case of no multi-threading.
         */
        ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING(bg_thread_main,
                                              NULL, NULL, NULL);
#else
        bg_thread_main(NULL, NULL, NULL);

        /* LCOV_EXCL_START
         * We've already dumped coverage data at this point.
         */
        irq_lock();
        while (true) {
        }
        /* LCOV_EXCL_STOP */
#endif
#endif /* CONFIG_MULTITHREADING */

        /*
         * The compiler can't tell that the above routines won't return and
         * issues a warning unless we explicitly tell it that control never
         * gets this far.
         */
        CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}