/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @defgroup arch-interface Architecture Interface
 * @brief Internal kernel APIs with public scope
 *
 * Any public kernel API that is implemented as an inline function and needs
 * to call architecture-specific APIs will have the prototypes for those
 * architecture-specific APIs here. Architecture APIs that aren't used in
 * this way go in kernel/include/kernel_arch_interface.h.
 *
 * The set of architecture-specific APIs used internally by public macros and
 * inline functions in public headers is also specified and documented here.
 *
 * For all macros and inline function prototypes described herein, <arch/cpu.h>
 * must eventually pull in full definitions for all of them (the actual macro
 * defines and inline function bodies).
 *
 * include/kernel.h and other public headers depend on definitions in this
 * header.
 */
#ifndef ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_
#define ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_

#ifndef _ASMLANGUAGE
#include <toolchain.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <arch/cpu.h>
#include <irq_offload.h>

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct k_thread;
struct k_mem_domain;

typedef struct z_thread_stack_element k_thread_stack_t;

typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Obtain the current cycle count, in units that are hardware-specific
 *
 * @see k_cycle_get_32()
 */
static inline uint32_t arch_k_cycle_get_32(void);

/** @} */

/**
 * @addtogroup arch-threads
 * @{
 */

/**
 * @def ARCH_THREAD_STACK_RESERVED
 *
 * @see K_THREAD_STACK_RESERVED
 */

/**
 * @def ARCH_STACK_PTR_ALIGN
 *
 * Required alignment of the CPU's stack pointer register value, dictated by
 * hardware constraints and the ABI calling convention.
 *
 * @see Z_STACK_PTR_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_OBJ_ALIGN(size)
 *
 * Required alignment of the lowest address of a stack object.
 *
 * Optional definition.
 *
 * @see Z_THREAD_STACK_OBJ_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_SIZE_ADJUST(size)
 * @brief Round up a stack buffer size to alignment constraints
 *
 * Adjust a requested stack buffer size to the true size of its underlying
 * buffer, defined as the area usable for thread stack context and
 * thread-local storage.
 *
 * The size value passed here does not include storage reserved for platform
 * data.
 *
 * The returned value is either the same size provided (if already properly
 * aligned), or rounded up to satisfy alignment constraints. Calculations
 * performed here *must* be idempotent.
 *
 * Optional definition. If undefined, stack buffer sizes are either:
 * - Rounded up to the next power of two if user mode is enabled on an arch
 *   with an MPU that requires such alignment
 * - Rounded up to ARCH_STACK_PTR_ALIGN
 *
 * @see Z_THREAD_STACK_SIZE_ADJUST
 */
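
/*
 * Illustrative sketch (not part of this interface): on an MPU that requires
 * power-of-two-sized stack regions, a definition might round the size up as
 * below. The use of __builtin_clz is an assumption for the example; note
 * that the calculation is idempotent, as required above (applying it to an
 * already rounded size returns the same value).
 *
 *	#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
 *		((size) <= 1U ? 1U : 1U << (32 - __builtin_clz((size) - 1U)))
 */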

/**
 * @def ARCH_KERNEL_STACK_RESERVED
 * @brief MPU guard size for kernel-only stacks
 *
 * If MPU stack guards are used to catch stack overflows, specify the
 * amount of space reserved in kernel stack objects. If guard sizes are
 * context dependent, this should be the minimum guard size, with the
 * remaining space carved out if needed.
 *
 * Optional definition, defaults to 0.
 *
 * @see K_KERNEL_STACK_RESERVED
 */

/**
 * @def ARCH_KERNEL_STACK_OBJ_ALIGN
 * @brief Required alignment of the lowest address of a kernel-only stack.
 */

/** @} */

/**
 * @addtogroup arch-pm
 * @{
 */

/**
 * @brief Power save idle routine
 *
 * This function will be called by the kernel idle loop or possibly within
 * an implementation of z_pm_save_idle in the kernel when the
 * '_pm_save_flag' variable is non-zero.
 *
 * Architectures that do not implement power management instructions may
 * immediately return, otherwise a power-saving instruction should be
 * issued to wait for an interrupt.
 *
 * @note The function is expected to return after the interrupt that has
 * caused the CPU to exit power-saving mode has been serviced, although
 * this is not a firm requirement.
 *
 * @see k_cpu_idle()
 */
void arch_cpu_idle(void);

/**
 * @brief Atomically re-enable interrupts and enter low power mode
 *
 * The requirements for arch_cpu_atomic_idle() are as follows:
 *
 * -# Enabling interrupts and entering a low-power mode needs to be
 *    atomic, i.e. there should be no period of time where interrupts are
 *    enabled before the processor enters a low-power mode. See the comments
 *    in k_lifo_get() for an example of the race condition that occurs
 *    if this requirement is not met.
 *
 * -# After waking up from the low-power mode, the interrupt lockout state
 *    must be restored as indicated in the 'key' input parameter.
 *
 * @see k_cpu_atomic_idle()
 *
 * @param key Lockout key returned by previous invocation of arch_irq_lock()
 */
void arch_cpu_atomic_idle(unsigned int key);
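
/*
 * Usage sketch (illustrative only): the pattern an idle loop relies on to
 * close the race between checking for work and sleeping. The condition
 * function is a hypothetical placeholder for the example:
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	if (nothing_to_do()) {
 *		// Re-enabling interrupts and sleeping happen as one atomic
 *		// operation; the lockout state in 'key' is restored on wakeup.
 *		arch_cpu_atomic_idle(key);
 *	} else {
 *		arch_irq_unlock(key);
 *	}
 */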

/** @} */

/**
 * @addtogroup arch-smp
 * @{
 */

/**
 * Per-cpu entry function
 *
 * @param data context parameter, implementation specific
 */
typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);

/**
 * @brief Start a numbered CPU on a MP-capable system
 *
 * This starts and initializes a specific CPU. The main thread on startup is
 * running on CPU zero, other processors are numbered sequentially. On return
 * from this function, the CPU is known to have begun operating and will enter
 * the provided function. Its interrupts will be initialized but disabled such
 * that irq_unlock() with the provided key will work to enable them.
 *
 * Normally, in SMP mode this function will be called by the kernel
 * initialization and should not be used as a user API. But it is defined here
 * for special-purpose apps which want Zephyr running on one core and to use
 * others for design-specific processing.
 *
 * @param cpu_num Integer number of the CPU
 * @param stack Stack memory for the CPU
 * @param sz Stack buffer size, in bytes
 * @param fn Function to begin running on the CPU
 * @param arg Untyped argument to be passed to "fn"
 */
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg);
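
/*
 * Usage sketch (illustrative only; the stack object, its size, and the entry
 * function are assumptions for the example):
 *
 *	K_KERNEL_STACK_DEFINE(cpu1_stack, 1024);
 *
 *	static FUNC_NORETURN void cpu1_entry(void *arg)
 *	{
 *		ARG_UNUSED(arg);
 *
 *		while (true) {
 *			// design-specific processing on the second core
 *		}
 *	}
 *
 *	arch_start_cpu(1, cpu1_stack, K_KERNEL_STACK_SIZEOF(cpu1_stack),
 *		       cpu1_entry, NULL);
 */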

/**
 * @brief Return CPU power status
 *
 * @param cpu_num Integer number of the CPU
 */
bool arch_cpu_active(int cpu_num);

/** @} */

/**
 * @addtogroup arch-irq
 * @{
 */

#ifndef CONFIG_DISABLE_IRQ_STAT
/**
 * Lock interrupts on the current CPU
 *
 * @see irq_lock()
 */
static inline unsigned int arch_irq_lock(void);

/**
 * Unlock interrupts on the current CPU
 *
 * @see irq_unlock()
 */
static inline void arch_irq_unlock(unsigned int key);
#else
/**
 * Lock interrupts on the current CPU
 *
 * @see irq_lock()
 */
extern unsigned int arch_irq_lock(void);

/**
 * Unlock interrupts on the current CPU
 *
 * @see irq_unlock()
 */
extern void arch_irq_unlock(unsigned int key);
#endif
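
/*
 * Usage sketch (illustrative only): a nesting-safe critical section. The
 * key returned by arch_irq_lock() records the previous lockout state, so
 * nested sections restore correctly on unlock:
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	// ... access data shared with ISRs ...
 *
 *	arch_irq_unlock(key);
 */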

/**
 * Test if calling arch_irq_unlock() with this key would unlock irqs
 *
 * @param key value returned by arch_irq_lock()
 * @return true if interrupts were unlocked prior to the arch_irq_lock()
 * call that produced the key argument.
 */
static inline bool arch_irq_unlocked(unsigned int key);

/**
 * Disable the specified interrupt line
 *
 * @note The behavior of interrupts that arrive after this call
 * returns and before the corresponding call to arch_irq_enable() is
 * undefined. The hardware is not required to latch and deliver such
 * an interrupt, though on some architectures that may work. Other
 * architectures will simply lose such an interrupt and never deliver
 * it. Many drivers and subsystems are not tolerant of such dropped
 * interrupts and it is the job of the application layer to ensure
 * that behavior remains correct.
 *
 * @see irq_disable()
 */
void arch_irq_disable(unsigned int irq);

/**
 * Enable the specified interrupt line
 *
 * @see irq_enable()
 */
void arch_irq_enable(unsigned int irq);

/**
 * Test if an interrupt line is enabled
 *
 * @see irq_is_enabled()
 */
int arch_irq_is_enabled(unsigned int irq);

/**
 * Arch-specific hook to install a dynamic interrupt.
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return The vector assigned to this interrupt
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags);
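
/*
 * Usage sketch (illustrative only; the IRQ number, priority, and handler
 * are assumptions for the example):
 *
 *	static void my_isr(const void *param)
 *	{
 *		// service the device
 *	}
 *
 *	(void)arch_irq_connect_dynamic(25, 2, my_isr, NULL, 0);
 *	arch_irq_enable(25);
 */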

/**
 * @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
 *
 * @see IRQ_CONNECT()
 */

/**
 * @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
 *
 * @see IRQ_DIRECT_CONNECT()
 */

/**
 * @def ARCH_ISR_DIRECT_PM()
 *
 * @see ISR_DIRECT_PM()
 */

/**
 * @def ARCH_ISR_DIRECT_HEADER()
 *
 * @see ISR_DIRECT_HEADER()
 */

/**
 * @def ARCH_ISR_DIRECT_FOOTER(swap)
 *
 * @see ISR_DIRECT_FOOTER()
 */

/**
 * @def ARCH_ISR_DIRECT_DECLARE(name)
 *
 * @see ISR_DIRECT_DECLARE()
 */

/**
 * @def ARCH_EXCEPT(reason_p)
 *
 * Generate a software induced fatal error.
 *
 * If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
 * K_ERR_STACK_CHK_FAIL may be induced.
 *
 * This should ideally generate a software trap, with exception context
 * indicating state when this was invoked. General purpose register state at
 * the time of trap should not be disturbed from the calling context.
 *
 * @param reason_p K_ERR_ scoped reason code for the fatal error.
 */

#ifdef CONFIG_IRQ_OFFLOAD
/**
 * Run a function in interrupt context.
 *
 * Implementations should invoke an exception such that the kernel goes through
 * its interrupt handling dispatch path, to include switching to the interrupt
 * stack, and runs the provided routine and parameter.
 *
 * The only intended use-case for this function is for test code to simulate
 * the correctness of kernel APIs in interrupt handling context. This API
 * is not intended for real applications.
 *
 * @see irq_offload()
 *
 * @param routine Function to run in interrupt context
 * @param parameter Value to pass to the function when invoked
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
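
/*
 * Usage sketch (illustrative only): how a test might check that an API
 * behaves correctly when called from an ISR:
 *
 *	static void offload_fn(const void *param)
 *	{
 *		zassert_true(k_is_in_isr(), "should run in ISR context");
 *	}
 *
 *	arch_irq_offload(offload_fn, NULL);
 *	// offload_fn has run to completion in interrupt context here
 */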
#endif /* CONFIG_IRQ_OFFLOAD */

/** @} */

/**
 * @defgroup arch-smp Architecture-specific SMP APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_SMP
/** Return the CPU struct for the currently executing CPU */
static inline struct _cpu *arch_curr_cpu(void);

/**
 * Broadcast an interrupt to all CPUs
 *
 * This will invoke z_sched_ipi() on other CPUs in the system.
 */
void arch_sched_ipi(void);
#endif /* CONFIG_SMP */

/** @} */

/**
 * @defgroup arch-userspace Architecture-specific userspace APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_USERSPACE
/**
 * Invoke a system call with 0 arguments.
 *
 * No general-purpose register state other than the return value may be
 * preserved when transitioning from supervisor mode back down to user mode
 * for security reasons.
 *
 * It is required that all arguments be stored in registers when elevating
 * privileges from user to supervisor mode.
 *
 * Processing of the syscall takes place on a separate kernel stack. Interrupts
 * should be enabled when invoking the system call marshallers from the
 * dispatch table. Thread preemption may occur when handling system calls.
 *
 * Call IDs are untrusted and must be bounds-checked, as the value is used to
 * index the system call dispatch table, containing function pointers to the
 * specific system call code.
 *
 * @param call_id System call ID
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);

/**
 * Invoke a system call with 1 argument.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id);

/**
 * Invoke a system call with 2 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id);

/**
 * Invoke a system call with 3 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id);

/**
 * Invoke a system call with 4 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id);

/**
 * Invoke a system call with 5 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id);

/**
 * Invoke a system call with 6 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param arg6 Sixth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id);
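
/*
 * Illustrative sketch (not part of this interface): how a user-mode wrapper
 * might marshal a two-argument call through this API. The wrapper name and
 * the K_SYSCALL_EXAMPLE_SEM_TAKE ID are hypothetical, made up for the
 * example:
 *
 *	static inline int z_example_sem_take(struct k_sem *sem, int32_t ticks)
 *	{
 *		return (int)arch_syscall_invoke2((uintptr_t)sem,
 *						 (uintptr_t)ticks,
 *						 K_SYSCALL_EXAMPLE_SEM_TAKE);
 *	}
 */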

/**
 * Indicate whether we are currently running in user mode
 *
 * @return true if the CPU is currently running with user permissions
 */
static inline bool arch_is_user_context(void);

/**
 * @brief Get the maximum number of partitions for a memory domain
 *
 * @return Max number of partitions, or -1 if there is no limit
 */
int arch_mem_domain_max_partitions_get(void);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
/**
 * @brief Architecture-specific hook for memory domain initialization
 *
 * Perform any tasks needed to initialize architecture-specific data within
 * the memory domain, such as reserving memory for page tables. All members
 * of the provided memory domain aside from `arch` will be initialized when
 * this is called, but no threads will be assigned yet.
 *
 * This function may fail if initializing the memory domain requires allocation,
 * such as for page tables.
 *
 * The associated function k_mem_domain_init() documents that making
 * multiple init calls to the same memory domain is undefined behavior,
 * but has no assertions in place to check this. If this matters, it may be
 * desirable to add checks for this in the implementation of this function.
 *
 * @param domain The memory domain to initialize
 * @retval 0 Success
 * @retval -ENOMEM Insufficient memory
 */
int arch_mem_domain_init(struct k_mem_domain *domain);
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
/**
 * @brief Add a thread to a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been added to a memory domain.
 *
 * The thread->mem_domain_info.mem_domain pointer will be set to the domain to
 * be added to before this is called. Implementations may assume that the
 * thread is not already a member of this domain.
 *
 * @param thread Thread which needs to be configured.
 */
void arch_mem_domain_thread_add(struct k_thread *thread);

/**
 * @brief Remove a thread from a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been removed from a memory domain.
 *
 * The thread's memory domain pointer will be the domain that the thread
 * is being removed from.
 *
 * @param thread Thread being removed from its memory domain
 */
void arch_mem_domain_thread_remove(struct k_thread *thread);

/**
 * @brief Remove a partition from the memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has had a partition removed.
 *
 * The partition index data is not cleared, and the configured partition
 * count is not decremented, in the domain until after this function runs.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition index that needs to be deleted
 */
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				      uint32_t partition_id);

/**
 * @brief Add a partition to the memory domain
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has a partition added.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition that needs to be added
 */
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
				   uint32_t partition_id);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

/**
 * @brief Check memory region permissions
 *
 * Given a memory region, return whether the current memory management hardware
 * configuration would allow a user thread to read/write that region. Used by
 * system calls to validate buffers coming in from userspace.
 *
 * Notes:
 * The function is guaranteed to never return validation success if the entire
 * buffer area is not user-accessible.
 *
 * The function is guaranteed to correctly validate the permissions of the
 * supplied buffer if the user access permissions of the entire buffer are
 * enforced by a single, enabled memory management region.
 *
 * In some architectures the validation will always return failure
 * if the supplied memory buffer spans multiple enabled memory management
 * regions (even if all such regions permit user access).
 *
 * @warning Validating a zero-size buffer has undefined behavior.
 *
 * @param addr start address of the buffer
 * @param size the size of the buffer
 * @param write If nonzero, additionally check if the area is writable.
 *	Otherwise, just check if the memory can be read.
 *
 * @return nonzero if the permissions don't match.
 */
int arch_buffer_validate(void *addr, size_t size, int write);
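
/*
 * Usage sketch (illustrative only): how a system call handler might reject
 * an unwritable buffer supplied from user mode before touching it (ssf is
 * the handler's system call stack frame argument):
 *
 *	if (arch_buffer_validate(user_buf, len, 1) != 0) {
 *		// user_buf is not fully writable by the calling thread;
 *		// oops at the syscall invocation site rather than here
 *		arch_syscall_oops(ssf);
 *	}
 */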

/**
 * Perform a one-way transition from supervisor to user mode.
 *
 * Implementations of this function must do the following:
 *
 * - Reset the thread's stack pointer to a suitable initial value. We do not
 *   need any prior context since this is a one-way operation.
 * - Set up any kernel stack region for the CPU to use during privilege
 *   elevation
 * - Put the CPU in whatever its equivalent of user mode is
 * - Transfer execution to arch_new_thread() passing along all the supplied
 *   arguments, in user mode.
 *
 * @param user_entry Entry point to start executing as a user thread
 * @param p1 1st parameter to user thread
 * @param p2 2nd parameter to user thread
 * @param p3 3rd parameter to user thread
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3);

/**
 * @brief Induce a kernel oops that appears to come from a specific location
 *
 * Normally, k_oops() generates an exception that appears to come from the
 * call site of the k_oops() itself.
 *
 * However, when validating arguments to a system call, if there are problems
 * we want the oops to appear to come from where the system call was invoked
 * and not inside the validation function.
 *
 * @param ssf System call stack frame pointer. This gets passed as an argument
 *	to _k_syscall_handler_t functions and its contents are completely
 *	architecture specific.
 */
FUNC_NORETURN void arch_syscall_oops(void *ssf);

/**
 * @brief Safely take the length of a potentially bad string
 *
 * This must not fault; instead, the err parameter must have -1 written to it.
 * This function otherwise should work exactly like libc strnlen(). On success
 * *err should be set to 0.
 *
 * @param s String to measure
 * @param maxsize Max length of the string
 * @param err Error value to write
 * @return Length of the string, not counting the NUL terminator, up to maxsize
 */
size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
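
/*
 * Usage sketch (illustrative only): measuring a user-supplied string from a
 * system call handler without risking a fault (ssf is the handler's system
 * call stack frame argument):
 *
 *	int err;
 *	size_t len = arch_user_string_nlen(user_str, 256, &err);
 *
 *	if (err != 0) {
 *		// the string memory was unmapped or otherwise inaccessible
 *		arch_syscall_oops(ssf);
 *	}
 */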

#endif /* CONFIG_USERSPACE */

/**
 * @brief Detect memory coherence type
 *
 * Required when ARCH_HAS_COHERENCE is true. This function returns
 * true if the byte pointed to lies within an architecture-defined
 * "coherence region" (typically implemented with uncached memory) and
 * can safely be used in multiprocessor code without explicit flush or
 * invalidate operations.
 *
 * @note The result is for only the single byte at the specified
 * address, this API is not required to check region boundaries or to
 * expect aligned pointers. The expectation is that the code above
 * will have queried the appropriate address(es).
 */
#ifndef CONFIG_ARCH_HAS_COHERENCE
static inline bool arch_mem_coherent(void *ptr)
{
	ARG_UNUSED(ptr);
	return true;
}
#endif

/**
 * @brief Ensure cache coherence prior to context switch
 *
 * Required when ARCH_HAS_COHERENCE is true. On cache-incoherent
 * multiprocessor architectures, thread stacks are cached by default
 * for performance reasons. They must therefore be flushed
 * appropriately on context switch. The rules are:
 *
 * 1. The region containing live data in the old stack (generally the
 *    bytes between the current stack pointer and the top of the stack
 *    memory) must be flushed to underlying storage so a new CPU that
 *    runs the same thread sees the correct data. This must happen
 *    before the assignment of the switch_handle field in the thread
 *    struct which signals the completion of context switch.
 *
 * 2. Any data areas to be read from the new stack (generally the same
 *    as the live region when it was saved) should be invalidated (and
 *    NOT flushed!) in the data cache. This is because another CPU
 *    may have run or re-initialized the thread since this CPU
 *    suspended it, and any data present in cache will be stale.
 *
 * @note The kernel will call this function during interrupt exit when
 * a new thread has been chosen to run, and also immediately before
 * entering arch_switch() to effect a code-driven context switch. In
 * the latter case, it is very likely that more data will be written
 * to the old_thread stack region after this function returns but
 * before the completion of the switch. Simply flushing naively here
 * is not sufficient on many architectures and coordination with the
 * arch_switch() implementation is likely required.
 *
 * @param old_thread The old thread to be flushed before being allowed
 *	to run on other CPUs.
 * @param old_switch_handle The switch handle to be stored into
 *	old_thread (it will not be valid until the cache is flushed so
 *	is not present yet). This will be NULL if inside z_swap()
 *	(because the arch_switch() has not saved it yet).
 * @param new_thread The new thread to be invalidated before it runs locally.
 */
#ifndef CONFIG_KERNEL_COHERENCE
static inline void arch_cohere_stacks(struct k_thread *old_thread,
				      void *old_switch_handle,
				      struct k_thread *new_thread)
{
	ARG_UNUSED(old_thread);
	ARG_UNUSED(old_switch_handle);
	ARG_UNUSED(new_thread);
}
#endif

/** @} */

/**
 * @defgroup arch-gdbstub Architecture-specific gdbstub APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @def ARCH_GDB_NUM_REGISTERS
 *
 * ARCH_GDB_NUM_REGISTERS is architecture specific and
 * this symbol must be defined in an architecture-specific header
 */
#ifdef CONFIG_GDBSTUB
/**
 * @brief Architecture layer debug start
 *
 * This function is called by @c gdb_init()
 */
void arch_gdb_init(void);

/**
 * @brief Continue running program
 *
 * Continue software execution.
 */
void arch_gdb_continue(void);

/**
 * @brief Continue with one step
 *
 * Continue software execution until it reaches the next statement.
 */
void arch_gdb_step(void);
#endif

/** @} */

/**
 * @defgroup arch_cache Architecture-specific cache functions
 * @ingroup arch-interface
 * @{
 */

#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_HAS_ARCH_CACHE)
/**
 * @brief Enable d-cache
 *
 * @see arch_dcache_enable
 */
void arch_dcache_enable(void);

/**
 * @brief Disable d-cache
 *
 * @see arch_dcache_disable
 */
void arch_dcache_disable(void);

/**
 * @brief Enable i-cache
 *
 * @see arch_icache_enable
 */
void arch_icache_enable(void);

/**
 * @brief Disable i-cache
 *
 * @see arch_icache_disable
 */
void arch_icache_disable(void);

/**
 * @brief Write-back / Invalidate / Write-back + Invalidate all d-cache
 *
 * @see arch_dcache_all
 */
int arch_dcache_all(int op);

/**
 * @brief Write-back / Invalidate / Write-back + Invalidate d-cache lines
 *
 * @see arch_dcache_range
 */
int arch_dcache_range(void *addr, size_t size, int op);

/**
 * @brief Write-back / Invalidate / Write-back + Invalidate all i-cache
 *
 * @see arch_icache_all
 */
int arch_icache_all(int op);

/**
 * @brief Write-back / Invalidate / Write-back + Invalidate i-cache lines
 *
 * @see arch_icache_range
 */
int arch_icache_range(void *addr, size_t size, int op);

#ifdef CONFIG_DCACHE_LINE_SIZE_DETECT
/**
 * @brief Get d-cache line size
 *
 * @see sys_cache_data_line_size_get
 */
size_t arch_dcache_line_size_get(void);
#endif /* CONFIG_DCACHE_LINE_SIZE_DETECT */

#ifdef CONFIG_ICACHE_LINE_SIZE_DETECT
/**
 * @brief Get i-cache line size
 *
 * @see sys_cache_instr_line_size_get
 */
size_t arch_icache_line_size_get(void);
#endif /* CONFIG_ICACHE_LINE_SIZE_DETECT */
#endif /* CONFIG_CACHE_MANAGEMENT && CONFIG_HAS_ARCH_CACHE */

/** @} */

#ifdef CONFIG_TIMING_FUNCTIONS
#include <timing/types.h>

/**
 * @ingroup arch-timing
 * @{
 */

/**
 * @brief Initialize the timing subsystem.
 *
 * Perform the necessary steps to initialize the timing subsystem.
 *
 * @see timing_init()
 */
void arch_timing_init(void);

/**
 * @brief Signal the start of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * will be gathered from this point forward.
 *
 * @see timing_start()
 */
void arch_timing_start(void);

/**
 * @brief Signal the end of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * is no longer being gathered from this point forward.
 *
 * @see timing_stop()
 */
void arch_timing_stop(void);

/**
 * @brief Return timing counter.
 *
 * @return Timing counter.
 *
 * @see timing_counter_get()
 */
timing_t arch_timing_counter_get(void);

/**
 * @brief Get number of cycles between @p start and @p end.
 *
 * For some architectures or SoCs, the raw numbers from the counter
 * need to be scaled to obtain the actual number of cycles.
 *
 * @param start Pointer to counter at start of a measured execution.
 * @param end Pointer to counter at end of a measured execution.
 * @return Number of cycles between start and end.
 *
 * @see timing_cycles_get()
 */
uint64_t arch_timing_cycles_get(volatile timing_t *const start,
				volatile timing_t *const end);
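
/*
 * Usage sketch (illustrative only): timing a code region with this API:
 *
 *	timing_t begin, finish;
 *
 *	arch_timing_init();
 *	arch_timing_start();
 *
 *	begin = arch_timing_counter_get();
 *	// ... code under measurement ...
 *	finish = arch_timing_counter_get();
 *
 *	uint64_t cycles = arch_timing_cycles_get(&begin, &finish);
 *	uint64_t ns = arch_timing_cycles_to_ns(cycles);
 *
 *	arch_timing_stop();
 */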

/**
 * @brief Get frequency of counter used (in Hz).
 *
 * @return Frequency of counter used for timing in Hz.
 *
 * @see timing_freq_get()
 */
uint64_t arch_timing_freq_get(void);

/**
 * @brief Convert number of @p cycles into nanoseconds.
 *
 * @param cycles Number of cycles
 * @return Converted time value
 *
 * @see timing_cycles_to_ns()
 */
uint64_t arch_timing_cycles_to_ns(uint64_t cycles);

/**
 * @brief Convert number of @p cycles into nanoseconds with averaging.
 *
 * @param cycles Number of cycles
 * @param count Number of accumulated cycle counts to average over
 * @return Converted time value
 *
 * @see timing_cycles_to_ns_avg()
 */
uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);

/**
 * @brief Get frequency of counter used (in MHz).
 *
 * @return Frequency of counter used for timing in MHz.
 *
 * @see timing_freq_get_mhz()
 */
uint32_t arch_timing_freq_get_mhz(void);

/** @} */
#endif /* CONFIG_TIMING_FUNCTIONS */

#ifdef CONFIG_PCIE_MSI_MULTI_VECTOR
struct msi_vector;
typedef struct msi_vector msi_vector_t;

/**
 * @brief Allocate vector(s) for the endpoint MSI message(s).
 *
 * @param priority the MSI vectors base interrupt priority
 * @param vectors an array to fill with allocated MSI vectors
 * @param n_vector the size of the MSI vectors array
 *
 * @return The number of allocated MSI vectors
 */
uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
				       msi_vector_t *vectors,
				       uint8_t n_vector);

/**
 * @brief Connect an MSI vector to the given routine
 *
 * @param vector The MSI vector to connect to
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return True on success, false otherwise
 */
bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
				  void (*routine)(const void *parameter),
				  const void *parameter,
				  uint32_t flags);
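
/*
 * Usage sketch (illustrative only; the vector count, priority, and handler
 * are assumptions for the example):
 *
 *	msi_vector_t vectors[2];
 *	uint8_t n = arch_pcie_msi_vectors_allocate(2, vectors, 2);
 *
 *	for (uint8_t i = 0; i < n; i++) {
 *		if (!arch_pcie_msi_vector_connect(&vectors[i], my_msi_isr,
 *						  NULL, 0)) {
 *			// handle connection failure
 *		}
 *	}
 */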
#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#include <arch/arch_inlines.h>

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_ */