  1. /*
  2. * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. /**
  7. * @file
  8. * @brief Architecture-independent private kernel APIs
  9. *
  10. * This file contains private kernel APIs that are not architecture-specific.
  11. */
  12. #ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
  13. #define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
  14. #include <kernel.h>
  15. #include <kernel_arch_interface.h>
  16. #include <string.h>
  17. #ifndef _ASMLANGUAGE
  18. #ifdef __cplusplus
  19. extern "C" {
  20. #endif
/* Early boot functions */

/* Zero the kernel's BSS section during early boot. */
void z_bss_zero(void);

#ifdef CONFIG_XIP
/* Copy initialized data from ROM into RAM (required when executing in
 * place, where writable data must be relocated before C code runs).
 */
void z_data_copy(void);
#else
/* Non-XIP image: data is already loaded into RAM, nothing to copy. */
static inline void z_data_copy(void)
{
	/* Do nothing */
}
#endif
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
/* Zero the BSS portion placed in the dedicated boot linker section. */
void z_bss_zero_boot(void);
#else
/* No separate boot section configured: z_bss_zero() covers everything. */
static inline void z_bss_zero_boot(void)
{
	/* Do nothing */
}
#endif
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
/* Zero the BSS portion placed in the pinned (never paged out) section. */
void z_bss_zero_pinned(void);
#else
/* No pinned section configured: z_bss_zero() covers everything. */
static inline void z_bss_zero_pinned(void)
{
	/* Do nothing */
}
#endif
/* Architecture-independent kernel C entry point; never returns. */
FUNC_NORETURN void z_cstart(void);

/* Initialize device state bookkeeping during boot. */
void z_device_state_init(void);

/* Common thread entry trampoline: invokes entry(p1, p2, p3) and never
 * returns to the caller.
 */
extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
					 void *p1, void *p2, void *p3);

/* Initialize @p new_thread to run @p entry with the three arguments at
 * priority @p prio on the given stack.  NOTE(review): the returned char *
 * appears to be the thread's initial stack pointer -- confirm against the
 * definition in the thread code.
 */
extern char *z_setup_new_thread(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size,
				k_thread_entry_t entry,
				void *p1, void *p2, void *p3,
				int prio, uint32_t options, const char *name);
/**
 * @brief Allocate aligned memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param align Required memory alignment
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_aligned_alloc(size_t align, size_t size);
  71. /**
  72. * @brief Allocate some memory from the current thread's resource pool
  73. *
  74. * Threads may be assigned a resource pool, which will be used to allocate
  75. * memory on behalf of certain kernel and driver APIs. Memory reserved
  76. * in this way should be freed with k_free().
  77. *
  78. * If called from an ISR, the k_malloc() system heap will be used if it exists.
  79. *
  80. * @param size Memory allocation size
  81. * @return A pointer to the allocated memory, or NULL if there is insufficient
  82. * RAM in the pool or there is no pool to draw memory from
  83. */
  84. static inline void *z_thread_malloc(size_t size)
  85. {
  86. return z_thread_aligned_alloc(0, size);
  87. }
/* Set and clear the essential flag on the current thread */
extern void z_thread_essential_set(void);
extern void z_thread_essential_clear(void);

/* Clean up thread-monitor bookkeeping when a thread is aborted */
#if defined(CONFIG_THREAD_MONITOR)
extern void z_thread_monitor_exit(struct k_thread *thread);
#else
/* Monitor disabled: expand to a harmless empty statement so call sites
 * compile unchanged.
 */
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_USE_SWITCH
/* This is an arch function traditionally, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	/* Stash the value the thread will observe when it resumes */
	thread->swap_retval = value;
}
#endif
/* Set @p thread's swap return value to @p value and attach @p data as its
 * swap data in one step.  Ownership/lifetime of @p data is the caller's
 * concern -- only the pointer is stored.
 */
static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				    unsigned int value,
				    void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}
#ifdef CONFIG_SMP
/* SMP bring-up hooks.  NOTE(review): presumably z_smp_init() starts the
 * secondary CPUs and smp_timer_init() sets up their timers -- confirm
 * against the SMP implementation.
 */
extern void z_smp_init(void);
extern void smp_timer_init(void);
#endif

/* Fill @p buf with @p length bytes of entropy usable during early boot,
 * before the full entropy subsystem is necessarily available.
 */
extern void z_early_boot_rand_get(uint8_t *buf, size_t length);

#if CONFIG_STACK_POINTER_RANDOM
/* NOTE(review): appears to flag whether stack-pointer randomization has
 * been initialized -- verify against its definition.
 */
extern int z_stack_adjust_initialized;
#endif

/* Main thread, per-CPU idle threads, and per-CPU interrupt stacks */
extern struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
#endif

K_KERNEL_PINNED_STACK_ARRAY_EXTERN(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

#ifdef CONFIG_GEN_PRIV_STACKS
/* Look up the privileged-mode stack associated with @p stack. */
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif
#ifdef CONFIG_USERSPACE
/* Return true if @p stack may host a user-mode thread.  NOTE(review):
 * criteria inferred from the name -- confirm against the definition.
 */
bool z_stack_is_user_capable(k_thread_stack_t *stack);

/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
 *
 * - Protects the full set of active k_mem_domain objects and their contents
 * - Serializes calls to arch_mem_domain_* APIs
 *
 * If architecture code needs to access k_mem_domain structures or the
 * partitions they contain at any other point, this spinlock should be held.
 * Uniprocessor systems can get away with just locking interrupts but this is
 * not recommended.
 */
extern struct k_spinlock z_mem_domain_lock;
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/* Should be called by the arch layer. This is the gdbstub main loop;
 * it synchronously communicates with gdb on the host.
 */
extern int z_gdb_main_loop(struct gdb_ctx *ctx, bool start);
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else
/**
 * @brief Called after a thread has been selected to run
 */
#define z_thread_mark_switched_in()
/**
 * @brief Called before a thread has been selected to run
 */
#define z_thread_mark_switched_out()
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
/* Init hook for page frame management, invoked immediately upon entry of
 * main thread, before POST_KERNEL tasks
 */
void z_mem_manage_init(void);

/**
 * @brief Finalize page frame management at the end of boot process.
 */
void z_mem_manage_boot_finish(void);

/* Execute the following statement/block exactly once with @p lck held:
 * the lock is taken before the single iteration and released in the
 * "increment" expression afterwards; __i.key doubles as the done flag.
 * NOTE(review): leaving the body via break/return/goto bypasses the
 * increment expression and therefore leaks the lock -- avoid early exits.
 */
#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
			 __key = k_spin_lock(lck); \
			 !__i.key; \
			 k_spin_unlock(lck, __key), __i.key = 1)
#ifdef CONFIG_PM
/* When the kernel is about to go idle, it calls this function to notify the
 * power management subsystem, that the kernel is ready to enter the idle state.
 *
 * At this point, the kernel has disabled interrupts and computed the maximum
 * time the system can remain idle. The function passes the time that the system
 * can remain idle. The SOC interface performs power operations that can be done
 * in the available time. The power management operations must halt execution of
 * the CPU.
 *
 * This function assumes that a wake up event has already been set up by the
 * application.
 *
 * This function is entered with interrupts disabled. It should re-enable
 * interrupts if it had entered a power state.
 */
enum pm_state pm_system_suspend(int32_t ticks);

/**
 * Notify exit from kernel idling after PM operations
 *
 * This function would notify exit from kernel idling if a corresponding
 * pm_system_suspend() notification was handled and did not return
 * PM_STATE_ACTIVE.
 *
 * This function would be called from the ISR context of the event
 * that caused the exit from kernel idling. This will be called immediately
 * after interrupts are enabled. This is called to give a chance to do
 * any operations before the kernel would switch tasks or processes nested
 * interrupts. This is required for cpu low power states that would require
 * interrupts to be enabled while entering low power states. e.g. C1 in x86. In
 * those cases, the ISR would be invoked immediately after the event wakes up
 * the CPU, before code following the CPU wait, gets a chance to execute. This
 * can be ignored if no operation needs to be done at the wake event
 * notification. Alternatively pm_idle_exit_notification_disable() can
 * be called in pm_system_suspend to disable this notification.
 */
void pm_system_resume(void);
#endif
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
 * Initialize the timing histograms for demand paging.
 */
void z_paging_histogram_init(void);

/**
 * Increment the counter in the timing histogram.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in measured operation.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
  236. #ifdef __cplusplus
  237. }
  238. #endif
  239. #endif /* _ASMLANGUAGE */
  240. #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */