/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Internal kernel APIs implemented at the architecture layer.
 *
 * Not all architecture-specific defines are here; APIs that are used
 * by public functions and macros are defined in include/sys/arch_interface.h.
 *
 * For all inline functions prototyped here, the implementation is expected
 * to be provided by arch/ARCH/include/kernel_arch_func.h
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_

#include <kernel.h>
#include <sys/arch_interface.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup arch-timing Architecture timing APIs
 * @{
 */

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
/**
 * Architecture-specific implementation of busy-waiting
 *
 * @param usec_to_wait Wait period, in microseconds
 */
void arch_busy_wait(uint32_t usec_to_wait);
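
/*
 * For illustration only: a minimal sketch of a custom busy-wait built on
 * the system cycle counter. Using k_cycle_get_32() and k_us_to_cyc_ceil32()
 * here is an assumption; ports normally select
 * CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT because they have a cheaper or more
 * precise mechanism than the default loop.
 *
 *	void arch_busy_wait(uint32_t usec_to_wait)
 *	{
 *		uint32_t start = k_cycle_get_32();
 *		uint32_t cycles = (uint32_t)k_us_to_cyc_ceil32(usec_to_wait);
 *
 *		while ((k_cycle_get_32() - start) < cycles) {
 *			// spin until enough cycles have elapsed
 *		}
 *	}
 */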
#endif

/** @} */

/**
 * @defgroup arch-threads Architecture thread APIs
 * @ingroup arch-interface
 * @{
 */

/** Handle arch-specific logic for setting up new threads
 *
 * The stack and arch-specific thread state variables must be set up
 * such that a later attempt to switch to this thread will succeed
 * and we will enter z_thread_entry with the requested thread and
 * arguments as its parameters.
 *
 * At some point in this function's implementation, z_setup_new_thread() must
 * be called with the true bounds of the available stack buffer within the
 * thread's stack object.
 *
 * The provided stack pointer is guaranteed to be properly aligned with respect
 * to the CPU and ABI requirements. There may be space reserved between the
 * stack pointer and the bounds of the stack buffer for initial stack pointer
 * randomization and thread-local storage.
 *
 * Fields in thread->base will be initialized when this is called.
 *
 * @param thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack object
 * @param stack_ptr Aligned initial stack pointer
 * @param entry Thread entry function
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3);
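
/*
 * For illustration only: a hypothetical port might populate an initial
 * stack frame so that the first switch into this thread lands in
 * z_thread_entry(). The "struct init_frame" layout and its field names
 * are invented for this sketch and are not part of the API.
 *
 *	struct init_frame {
 *		uintptr_t pc, a0, a1, a2, a3;
 *	};
 *
 *	void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 *			     char *stack_ptr, k_thread_entry_t entry,
 *			     void *p1, void *p2, void *p3)
 *	{
 *		// carve the initial frame out of the top of the stack buffer
 *		struct init_frame *frame = (struct init_frame *)stack_ptr - 1;
 *
 *		frame->pc = (uintptr_t)z_thread_entry;
 *		frame->a0 = (uintptr_t)entry;
 *		frame->a1 = (uintptr_t)p1;
 *		frame->a2 = (uintptr_t)p2;
 *		frame->a3 = (uintptr_t)p3;
 *		thread->switch_handle = frame;
 *	}
 */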
#ifdef CONFIG_USE_SWITCH
/** Cooperative context switch primitive
 *
 * The action of arch_switch() should be to switch to a new context
 * passed in the first argument, and save a pointer to the current
 * context into the address passed in the second argument.
 *
 * The actual type and interpretation of the switch handle is specified
 * by the architecture. It is the same data structure stored in the
 * "switch_handle" field of a newly-created thread in arch_new_thread(),
 * and passed to the kernel as the "interrupted" argument to
 * z_get_next_switch_handle().
 *
 * Note that on SMP systems, the kernel uses the store through the
 * second pointer as a synchronization point to detect when a thread
 * context is completely saved (so another CPU can know when it is
 * safe to switch). This store must be done AFTER all relevant state
 * is saved, and must include whatever memory barriers or cache
 * management code is required to be sure another CPU will see the
 * result correctly.
 *
 * The simplest implementation of arch_switch() is generally to push
 * state onto the thread stack and use the resulting stack pointer as the
 * switch handle. Some architectures may instead decide to use a pointer
 * into the thread struct as the "switch handle" type. These can legally
 * assume that the second argument to arch_switch() is the address of the
 * switch_handle field of struct k_thread and can use an offset on
 * this value to find other parts of the thread struct. For example a (C
 * pseudocode) implementation of arch_switch() might look like:
 *
 *	void arch_switch(void *switch_to, void **switched_from)
 *	{
 *		struct k_thread *new = switch_to;
 *		struct k_thread *old = CONTAINER_OF(switched_from,
 *						    struct k_thread,
 *						    switch_handle);
 *
 *		// save old context...
 *		*switched_from = old;
 *		// restore new context...
 *	}
 *
 * Note that the kernel manages the switch_handle field for
 * synchronization as described above. So it is not legal for
 * architecture code to assume that it has any particular value at any
 * other time. In particular it is not legal to read the field from the
 * address passed in the second argument.
 *
 * @param switch_to Incoming thread's switch handle
 * @param switched_from Pointer to outgoing thread's switch handle storage
 *                      location, which must be updated.
 */
static inline void arch_switch(void *switch_to, void **switched_from);
#else
/**
 * Cooperatively context switch
 *
 * Must be called with interrupts locked with the provided key.
 * This is the older-style context switching method, which is incompatible
 * with SMP. New arch ports, either SMP or UP, are encouraged to implement
 * arch_switch() instead.
 *
 * @param key Interrupt locking key
 * @return If woken from blocking on some kernel object, the result of that
 *         blocking operation.
 */
int arch_swap(unsigned int key);

/**
 * Set the return value for the specified thread.
 *
 * It is assumed that the specified @a thread is pending.
 *
 * @param thread Pointer to thread object
 * @param value value to set as return value
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
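
/*
 * For illustration only: ports commonly stash the value where arch_swap()
 * will pick it up as its own return value when the thread resumes. The
 * "swap_return_value" field name is an assumption borrowed from several
 * in-tree ports; the arch-specific thread struct defines the real storage.
 *
 *	static ALWAYS_INLINE void
 *	arch_thread_return_value_set(struct k_thread *thread,
 *				     unsigned int value)
 *	{
 *		thread->arch.swap_return_value = value;
 *	}
 */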
#endif /* CONFIG_USE_SWITCH */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
 * Custom logic for entering main thread context at early boot
 *
 * Used by architectures where the typical trick of setting up a dummy thread
 * in early boot context to "switch out" of isn't workable.
 *
 * @param main_thread main thread object
 * @param stack_ptr Initial stack pointer
 * @param _main Entry point for application main function.
 */
void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main);
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
 * @brief Disable floating point context preservation
 *
 * The function is used to disable the preservation of floating
 * point context information for a particular thread.
 *
 * @note For ARM architecture, disabling floating point preservation may only
 * be requested for the current thread and cannot be requested in ISRs.
 *
 * @retval 0 On success.
 * @retval -EINVAL If the floating point disabling could not be performed.
 * @retval -ENOTSUP If the operation is not supported
 */
int arch_float_disable(struct k_thread *thread);

/**
 * @brief Enable floating point context preservation
 *
 * The function is used to enable the preservation of floating
 * point context information for a particular thread.
 * This API depends on each architecture's implementation. If the architecture
 * does not support enabling, this API will always fail.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread. Currently it is used by x86 only.
 *
 * @param thread ID of thread.
 * @param options architecture dependent options
 *
 * @retval 0 On success.
 * @retval -EINVAL If the floating point enabling could not be performed.
 * @retval -ENOTSUP If the operation is not supported
 */
int arch_float_enable(struct k_thread *thread, unsigned int options);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
/** @} */

/**
 * @defgroup arch-pm Architecture-specific power management APIs
 * @ingroup arch-interface
 * @{
 */

/** Halt the system, optionally propagating a reason code */
FUNC_NORETURN void arch_system_halt(unsigned int reason);

/** @} */

/**
 * @defgroup arch-irq Architecture-specific IRQ APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Test if the current context is in interrupt context
 *
 * XXX: This is inconsistently handled among arches wrt exception context
 * See: #17656
 *
 * @return true if we are in interrupt context
 */
static inline bool arch_is_in_isr(void);
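
/*
 * For illustration only: many ports answer this from a per-CPU count of
 * nested interrupts. The "nested" field here is an assumption about the
 * per-CPU struct; see issue #17656 noted above for the inconsistencies
 * across arches regarding exception context.
 *
 *	static inline bool arch_is_in_isr(void)
 *	{
 *		return arch_curr_cpu()->nested != 0U;
 *	}
 */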
/** @} */

/**
 * @defgroup arch-mmu Architecture-specific memory-mapping APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_MMU
/**
 * Map physical memory into the virtual address space
 *
 * This is a low-level interface to mapping pages into the address space.
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * The core kernel handles all management of the virtual address space;
 * by the time we invoke this function, we know exactly where this mapping
 * will be established. If the page tables already had mappings installed
 * for the virtual memory region, these will be overwritten.
 *
 * If the target architecture supports multiple page sizes, currently
 * only the smallest page size will be used.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized, and indeed all usage will
 * originate from kernel/mm.c which handles virtual memory management.
 *
 * Architectures are expected to pre-allocate page tables for the entire
 * address space, as defined by CONFIG_KERNEL_VM_BASE and
 * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
 * allocation for paging structures.
 *
 * Validation of arguments should be done via assertions.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see K_MAP_* macros
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);
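
/*
 * For illustration only: a caller in kernel/mm.c might install a two-page
 * uncached device mapping like the following. The virtual address would
 * come from the kernel's virtual address space management, and the flag
 * names are placeholders for whatever the real K_MAP_* macros define.
 *
 *	arch_mem_map(virt_addr, 0x40000000UL, 2 * CONFIG_MMU_PAGE_SIZE,
 *		     K_MAP_RW | K_MAP_UNCACHED);
 */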
/**
 * Remove mappings for a provided virtual address range
 *
 * This is a low-level interface for un-mapping pages from the address space.
 * When this completes, the relevant page table entries will be updated as
 * if no mapping was ever made for that memory range. No previous context
 * needs to be preserved. This function must update mappings in all active
 * page tables.
 *
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * Behavior when providing an address range that is not already mapped is
 * undefined.
 *
 * This function should never require memory allocations for paging structures,
 * and it is not necessary to free any paging structures. Empty page tables
 * due to all contained entries being un-mapped may remain in place.
 *
 * Implementations must invalidate TLBs as necessary.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Page-aligned base virtual address to un-map
 * @param size Page-aligned region size
 */
void arch_mem_unmap(void *addr, size_t size);
/**
 * Get the mapped physical memory address from a virtual address.
 *
 * The function only needs to query the current set of page tables as
 * the information it reports must be common to all of them if multiple
 * page tables are in use. If multiple page tables are active it is unnecessary
 * to iterate over all of them.
 *
 * Unless otherwise specified, virtual pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * found there.
 *
 * @param virt Page-aligned virtual address
 * @param[out] phys Mapped physical address (can be NULL if only checking
 *                  if the virtual address is mapped)
 *
 * @retval 0 if mapping is found and valid
 * @retval -EFAULT if virtual address is not mapped
 */
int arch_page_phys_get(void *virt, uintptr_t *phys);
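
/*
 * For illustration only: typical usage, checking whether a pointer is
 * backed by a physical page before operating on it. "virt_addr" is a
 * hypothetical page-aligned pointer.
 *
 *	uintptr_t phys;
 *
 *	if (arch_page_phys_get(virt_addr, &phys) == 0) {
 *		// virt_addr is mapped at physical address phys
 *	} else {
 *		// -EFAULT: nothing is mapped at virt_addr
 *	}
 */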
#ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES
/**
 * Update page frame database with reserved pages
 *
 * Some page frames within system RAM may not be available for use. A good
 * example of this is reserved regions in the first megabyte on PC-like systems.
 *
 * Implementations of this function should mark all relevant entries in
 * z_page_frames with K_PAGE_FRAME_RESERVED. This function is called at
 * early system initialization with mm_lock held.
 */
void arch_reserved_pages_update(void);
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */
#ifdef CONFIG_DEMAND_PAGING
/**
 * Update all page tables for a paged-out data page
 *
 * This function:
 * - Sets the data page virtual address to trigger a fault if accessed that
 *   can be distinguished from access violations or un-mapped pages.
 * - Saves the provided location value so that it can be retrieved for that
 *   data page in the page fault handler.
 * - The location value semantics are undefined here but the value will
 *   always be page-aligned. It could be 0.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged out is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_out(void *addr, uintptr_t location);
/**
 * Update all page tables for a paged-in data page
 *
 * This function:
 * - Maps the specified virtual data page address to the provided physical
 *   page frame address, such that future memory accesses will function as
 *   expected. Access and caching attributes are undisturbed.
 * - Clears any accounting for "accessed" and "dirty" states.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged in is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_in(void *addr, uintptr_t phys);

/**
 * Update current page tables for a temporary mapping
 *
 * Map a physical page frame address to a special virtual address
 * Z_SCRATCH_PAGE, with read/write access to supervisor mode, such that
 * when this function returns, the calling context can read/write the page
 * frame's contents from the Z_SCRATCH_PAGE address.
 *
 * This mapping only needs to be done on the current set of page tables,
 * as it is only used for a short period of time exclusively by the caller.
 * This function is called with interrupts locked.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_scratch(uintptr_t phys);
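
/*
 * For illustration only: the demand paging core might use the scratch
 * mapping to fill a free page frame from the backing store.
 * "frame_phys_addr" and "backing_store_buf" are hypothetical names for
 * this sketch.
 *
 *	arch_mem_scratch(frame_phys_addr);
 *	memcpy(Z_SCRATCH_PAGE, backing_store_buf, CONFIG_MMU_PAGE_SIZE);
 */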
enum arch_page_location {
	ARCH_PAGE_LOCATION_PAGED_OUT,
	ARCH_PAGE_LOCATION_PAGED_IN,
	ARCH_PAGE_LOCATION_BAD
};
/**
 * Fetch location information about a page at a particular address
 *
 * The function only needs to query the current set of page tables as
 * the information it reports must be common to all of them if multiple
 * page tables are in use. If multiple page tables are active it is unnecessary
 * to iterate over all of them. This may allow certain types of optimizations
 * (such as reverse page table mapping on x86).
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * found there.
 *
 * @param addr Virtual data page address that took the page fault
 * @param [out] location In the case of ARCH_PAGE_LOCATION_PAGED_OUT, the
 *              backing store location value used to retrieve the data page.
 *              In the case of ARCH_PAGE_LOCATION_PAGED_IN, the physical
 *              address the page is mapped to.
 * @retval ARCH_PAGE_LOCATION_PAGED_OUT The page was evicted to the backing
 *         store.
 * @retval ARCH_PAGE_LOCATION_PAGED_IN The data page is resident in memory.
 * @retval ARCH_PAGE_LOCATION_BAD The page is un-mapped or otherwise has had
 *         invalid access
 */
enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location);
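
/*
 * For illustration only: a page fault handler sketch dispatching on the
 * reported location. "fault_addr" is a hypothetical faulting address.
 *
 *	uintptr_t location;
 *
 *	switch (arch_page_location_get(fault_addr, &location)) {
 *	case ARCH_PAGE_LOCATION_PAGED_OUT:
 *		// retrieve the data page from backing store "location"
 *		break;
 *	case ARCH_PAGE_LOCATION_PAGED_IN:
 *		// spurious fault; page already resident at physical "location"
 *		break;
 *	case ARCH_PAGE_LOCATION_BAD:
 *		// genuine access violation, report it
 *		break;
 *	}
 */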
/**
 * @def ARCH_DATA_PAGE_ACCESSED
 *
 * Bit indicating the data page was accessed since the value was last cleared.
 *
 * Used by marking eviction algorithms. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_DIRTY
 *
 * Bit indicating the data page, if evicted, will need to be paged out.
 *
 * Set if the data page was modified since it was last paged out, or if
 * it has never been paged out before. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_LOADED
 *
 * Bit indicating that the data page is loaded into a physical page frame.
 *
 * If un-set, the data page is paged out or not mapped.
 */

/**
 * @def ARCH_DATA_PAGE_NOT_MAPPED
 *
 * If ARCH_DATA_PAGE_LOADED is un-set, this will indicate that the page
 * is not mapped at all. This bit is undefined if ARCH_DATA_PAGE_LOADED is set.
 */
/**
 * Retrieve page characteristics from the page table(s)
 *
 * The architecture is responsible for maintaining "accessed" and "dirty"
 * states of data pages to support marking eviction algorithms. This can
 * either be directly supported by hardware or emulated by modifying
 * protection policy to generate faults on reads or writes. In all cases
 * the architecture must maintain this information in some way.
 *
 * For the provided virtual address, report the logical OR of the accessed
 * and dirty states for the relevant entries in all active page tables in
 * the system if the page is mapped and not paged out.
 *
 * If clear_accessed is true, the ARCH_DATA_PAGE_ACCESSED flag will be reset.
 * This function will report its prior state. If multiple page tables are in
 * use, this function clears accessed state in all of them.
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * The return value may have other bits set which the caller must ignore.
 *
 * Clearing accessed state for data pages that are not ARCH_DATA_PAGE_LOADED
 * is undefined behavior.
 *
 * ARCH_DATA_PAGE_DIRTY and ARCH_DATA_PAGE_ACCESSED bits in the return value
 * are only significant if ARCH_DATA_PAGE_LOADED is set, otherwise ignore
 * them.
 *
 * The ARCH_DATA_PAGE_NOT_MAPPED bit in the return value is only significant
 * if ARCH_DATA_PAGE_LOADED is un-set, otherwise ignore it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Virtual address to look up in page tables
 * @param [out] location If non-NULL, updated with either the physical page
 *              frame address or the backing store location depending on
 *              ARCH_DATA_PAGE_LOADED state. This is not touched if
 *              ARCH_DATA_PAGE_NOT_MAPPED.
 * @param clear_accessed Whether to clear ARCH_DATA_PAGE_ACCESSED state
 * @return Value with ARCH_DATA_PAGE_* bits set reflecting the data page
 *         configuration
 */
uintptr_t arch_page_info_get(void *addr, uintptr_t *location,
			     bool clear_accessed);
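
/*
 * For illustration only: a clock/second-chance eviction pass might test
 * and clear the accessed bit like this, skipping recently-used pages.
 *
 *	uintptr_t location;
 *	uintptr_t flags = arch_page_info_get(addr, &location, true);
 *
 *	if ((flags & ARCH_DATA_PAGE_LOADED) != 0U &&
 *	    (flags & ARCH_DATA_PAGE_ACCESSED) != 0U) {
 *		// recently used: give the page a second chance on this pass
 *	}
 */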
#endif /* CONFIG_DEMAND_PAGING */
#endif /* CONFIG_MMU */

/** @} */
/**
 * @defgroup arch-misc Miscellaneous architecture APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Early boot console output hook
 *
 * Definition of this function is optional. If implemented, any invocation
 * of printk() (or logging calls with CONFIG_LOG_MINIMAL which are backed by
 * printk) will default to sending characters to this function. It is
 * useful for early boot debugging before main serial or console drivers
 * come up.
 *
 * This can be overridden at runtime with __printk_hook_install().
 *
 * The default __weak implementation of this does nothing.
 *
 * @param c Character to print
 * @return The character printed
 */
int arch_printk_char_out(int c);
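
/*
 * For illustration only: a typical implementation polls an early UART.
 * The my_uart_* helpers are hypothetical stand-ins for whatever minimal
 * register access the platform provides at this stage of boot.
 *
 *	int arch_printk_char_out(int c)
 *	{
 *		while (!my_uart_tx_ready()) {
 *			// wait for the transmitter to drain
 *		}
 *		my_uart_tx_write((unsigned char)c);
 *		return c;
 *	}
 */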
/**
 * Architecture-specific kernel initialization hook
 *
 * This function is invoked near the top of _Cstart, for additional
 * architecture-specific setup before the rest of the kernel is brought up.
 *
 * TODO: Deprecate; most arches are using a prep_c() function to do the same
 * thing in a simpler way
 */
static inline void arch_kernel_init(void);

/** Do nothing and return. Yawn. */
static inline void arch_nop(void);
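
/*
 * For illustration only: on ISAs with a literal "nop" instruction this is
 * usually a single piece of inline assembly, which also prevents the
 * compiler from optimizing the call away entirely.
 *
 *	static inline void arch_nop(void)
 *	{
 *		__asm__ volatile("nop");
 *	}
 */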
/** @} */

/**
 * @defgroup arch-coredump Architecture-specific core dump APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Architecture-specific handling during coredump
 *
 * This dumps architecture-specific information during coredump.
 *
 * @param esf Exception Stack Frame (arch-specific)
 */
void arch_coredump_info_dump(const z_arch_esf_t *esf);

/**
 * @brief Get the target code specified by the architecture.
 */
uint16_t arch_coredump_tgt_code_get(void);
/** @} */

/**
 * @defgroup arch-tls Architecture-specific Thread Local Storage APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Set up the architecture-specific TLS area in the stack
 *
 * This sets up the stack area for thread local storage.
 * The structure inside that area is architecture-specific.
 *
 * @param new_thread New thread object
 * @param stack_ptr Stack pointer
 * @return Number of bytes taken by the TLS area
 */
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr);
/** @} */

/* Include arch-specific inline function implementation */
#include <kernel_arch_func.h>

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_ */