/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef KERNEL_INCLUDE_MMU_H
#define KERNEL_INCLUDE_MMU_H

#ifdef CONFIG_MMU

#include <stdint.h>
#include <sys/slist.h>
#include <sys/__assert.h>
#include <sys/util.h>
#include <sys/mem_manage.h>
#include <linker/linker-defs.h>

/*
 * At present, page frame management is only done for main system RAM,
 * and we generate paging structures based on CONFIG_SRAM_BASE_ADDRESS
 * and CONFIG_SRAM_SIZE.
 *
 * If we have other RAM regions (DCCM, etc) these typically have special
 * properties and shouldn't be used generically for demand paging or
 * anonymous mappings. We don't currently maintain an ontology of these in the
 * core kernel.
 */
#define Z_PHYS_RAM_START ((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)
#define Z_PHYS_RAM_SIZE ((size_t)KB(CONFIG_SRAM_SIZE))
#define Z_PHYS_RAM_END (Z_PHYS_RAM_START + Z_PHYS_RAM_SIZE)
#define Z_NUM_PAGE_FRAMES (Z_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
/* Base address, size, and end address of the kernel's virtual address space */
#define Z_VIRT_RAM_START ((uint8_t *)CONFIG_KERNEL_VM_BASE)
#define Z_VIRT_RAM_SIZE ((size_t)CONFIG_KERNEL_VM_SIZE)
#define Z_VIRT_RAM_END (Z_VIRT_RAM_START + Z_VIRT_RAM_SIZE)
/* Boot-time virtual location of the kernel image. */
#define Z_KERNEL_VIRT_START ((uint8_t *)(&z_mapped_start))
#define Z_KERNEL_VIRT_END ((uint8_t *)(&z_mapped_end))
#define Z_KERNEL_VIRT_SIZE (Z_KERNEL_VIRT_END - Z_KERNEL_VIRT_START)

/* Offset between a physical address in system RAM and its boot-time virtual
 * mapping within the kernel image.
 */
#define Z_VM_OFFSET ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
                     (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))

/* Only applies to boot RAM mappings within the Zephyr image that have never
 * been remapped or paged out. Never use this unless you know exactly what you
 * are doing.
 */
#define Z_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) - Z_VM_OFFSET))
#define Z_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)phys) + Z_VM_OFFSET))
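
/* Illustrative sketch (editor's example, not part of the original header):
 * one way the boot mapping conversions above could be used to find the
 * physical location of a symbol inside the kernel image. Only valid for
 * addresses in the boot mapping that were never remapped or paged out;
 * the helper name is hypothetical.
 */
static inline uintptr_t z_example_boot_phys_of(void *kernel_virt_addr)
{
        /* Sanity-check that the address lies within the mapped kernel image */
        __ASSERT((uint8_t *)kernel_virt_addr >= Z_KERNEL_VIRT_START &&
                 (uint8_t *)kernel_virt_addr < Z_KERNEL_VIRT_END,
                 "%p is not within the boot kernel mapping", kernel_virt_addr);

        return Z_BOOT_VIRT_TO_PHYS(kernel_virt_addr);
}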
#ifdef CONFIG_ARCH_MAPS_ALL_RAM
#define Z_FREE_VM_START Z_BOOT_PHYS_TO_VIRT(Z_PHYS_RAM_END)
#else
#define Z_FREE_VM_START Z_KERNEL_VIRT_END
#endif

/*
 * Macros and data structures for physical page frame accounting,
 * APIs for use by eviction and backing store algorithms. This code
 * is otherwise not application-facing.
 */

/*
 * z_page_frame flags bits
 */

/** This page contains critical kernel data and will never be swapped */
#define Z_PAGE_FRAME_PINNED BIT(0)

/** This physical page is reserved by hardware; we will never use it */
#define Z_PAGE_FRAME_RESERVED BIT(1)

/**
 * This physical page is mapped to some virtual memory address
 *
 * Currently, we just support one mapping per page frame. If a page frame
 * is mapped to multiple virtual pages then it must be pinned.
 */
#define Z_PAGE_FRAME_MAPPED BIT(2)

/**
 * This page frame is currently involved in a page-in/out operation
 */
#define Z_PAGE_FRAME_BUSY BIT(3)

/**
 * This page frame has a clean copy in the backing store
 */
#define Z_PAGE_FRAME_BACKED BIT(4)

/**
 * Data structure for physical page frames
 *
 * An array of these is instantiated, one element per physical RAM page.
 * Hence it's necessary to constrain its size as much as possible.
 */
struct z_page_frame {
        union {
                /* If mapped, virtual address this page is mapped to */
                void *addr;

                /* If unmapped and available, free pages list membership. */
                sys_snode_t node;
        };

        /* Z_PAGE_FRAME_* flags */
        uint8_t flags;

        /* TODO: Backing store and eviction algorithms may both need to
         * introduce custom members for accounting purposes. Come up with
         * a layer of abstraction for this. They may also want additional
         * flags bits which shouldn't clobber each other. At all costs
         * the total size of struct z_page_frame must be minimized.
         */
} __packed;
static inline bool z_page_frame_is_pinned(struct z_page_frame *pf)
{
        return (pf->flags & Z_PAGE_FRAME_PINNED) != 0U;
}

static inline bool z_page_frame_is_reserved(struct z_page_frame *pf)
{
        return (pf->flags & Z_PAGE_FRAME_RESERVED) != 0U;
}

static inline bool z_page_frame_is_mapped(struct z_page_frame *pf)
{
        return (pf->flags & Z_PAGE_FRAME_MAPPED) != 0U;
}

static inline bool z_page_frame_is_busy(struct z_page_frame *pf)
{
        return (pf->flags & Z_PAGE_FRAME_BUSY) != 0U;
}

static inline bool z_page_frame_is_backed(struct z_page_frame *pf)
{
        return (pf->flags & Z_PAGE_FRAME_BACKED) != 0U;
}

static inline bool z_page_frame_is_evictable(struct z_page_frame *pf)
{
        return (!z_page_frame_is_reserved(pf) && z_page_frame_is_mapped(pf) &&
                !z_page_frame_is_pinned(pf) && !z_page_frame_is_busy(pf));
}
/* If true, the page frame is not being used for anything: no flags are set
 * (it is not reserved, mapped, pinned, or busy), it is a member of some free
 * pages list, and it is available to be mapped.
 */
static inline bool z_page_frame_is_available(struct z_page_frame *page)
{
        return page->flags == 0U;
}
static inline void z_assert_phys_aligned(uintptr_t phys)
{
        __ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,
                 "physical address 0x%lx is not page-aligned", phys);
        (void)phys;
}

extern struct z_page_frame z_page_frames[Z_NUM_PAGE_FRAMES];

static inline uintptr_t z_page_frame_to_phys(struct z_page_frame *pf)
{
        return (uintptr_t)((pf - z_page_frames) * CONFIG_MMU_PAGE_SIZE) +
               Z_PHYS_RAM_START;
}

/* Presumes there is but one mapping in the virtual address space */
static inline void *z_page_frame_to_virt(struct z_page_frame *pf)
{
        return pf->addr;
}

static inline bool z_is_page_frame(uintptr_t phys)
{
        z_assert_phys_aligned(phys);
        return (phys >= Z_PHYS_RAM_START) && (phys < Z_PHYS_RAM_END);
}

static inline struct z_page_frame *z_phys_to_page_frame(uintptr_t phys)
{
        __ASSERT(z_is_page_frame(phys),
                 "0x%lx not an SRAM physical address", phys);

        return &z_page_frames[(phys - Z_PHYS_RAM_START) /
                              CONFIG_MMU_PAGE_SIZE];
}
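
/* Illustrative sketch (editor's example, not part of the original header):
 * z_phys_to_page_frame() and z_page_frame_to_phys() are inverses for any
 * page-aligned physical address inside system RAM, which can be checked
 * like this. The helper name is hypothetical.
 */
static inline void z_example_check_frame_round_trip(uintptr_t phys)
{
        struct z_page_frame *pf = z_phys_to_page_frame(phys);

        __ASSERT(z_page_frame_to_phys(pf) == phys,
                 "round trip mismatch for physical address 0x%lx", phys);
}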
static inline void z_mem_assert_virtual_region(uint8_t *addr, size_t size)
{
        __ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0U,
                 "unaligned addr %p", addr);
        __ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
                 "unaligned size %zu", size);
        __ASSERT(addr + size > addr,
                 "region %p size %zu zero or wraps around", addr, size);
        __ASSERT(addr >= Z_VIRT_RAM_START && addr + size < Z_VIRT_RAM_END,
                 "invalid virtual address region %p (%zu)", addr, size);
}

/* Debug function, pretty-print page frame information for all frames
 * concisely to printk.
 */
void z_page_frames_dump(void);

/* Number of free page frames. This information may go stale immediately */
extern size_t z_free_page_count;

/* Convenience macro for iterating over all page frames */
#define Z_PAGE_FRAME_FOREACH(_phys, _pageframe) \
        for (_phys = Z_PHYS_RAM_START, _pageframe = z_page_frames; \
             _phys < Z_PHYS_RAM_END; \
             _phys += CONFIG_MMU_PAGE_SIZE, _pageframe++)
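
/* Illustrative sketch (editor's example, not part of the original header):
 * using Z_PAGE_FRAME_FOREACH() to count the page frames that are currently
 * available, similar to the accounting behind z_free_page_count. The helper
 * name is hypothetical and the result may go stale immediately.
 */
static inline size_t z_example_count_available_frames(void)
{
        uintptr_t phys;
        struct z_page_frame *pf;
        size_t count = 0;

        Z_PAGE_FRAME_FOREACH(phys, pf) {
                if (z_page_frame_is_available(pf)) {
                        count++;
                }
        }

        return count;
}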
#ifdef CONFIG_DEMAND_PAGING
/* We reserve a virtual page as a scratch area for page-ins/outs at the end
 * of the address space
 */
#define Z_VM_RESERVED CONFIG_MMU_PAGE_SIZE
#define Z_SCRATCH_PAGE ((void *)((uintptr_t)CONFIG_KERNEL_VM_BASE + \
                                 (uintptr_t)CONFIG_KERNEL_VM_SIZE - \
                                 CONFIG_MMU_PAGE_SIZE))
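
/* Illustrative check (editor's example, not part of the original header):
 * given the reservation above, the scratch page is simply the last page of
 * the virtual address space. The helper name is hypothetical.
 */
static inline void z_example_check_scratch_page(void)
{
        __ASSERT((uint8_t *)Z_SCRATCH_PAGE ==
                 (Z_VIRT_RAM_END - CONFIG_MMU_PAGE_SIZE),
                 "scratch page is not the last virtual page");
}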
#else
#define Z_VM_RESERVED 0
#endif
#ifdef CONFIG_DEMAND_PAGING
/*
 * Core kernel demand paging APIs
 */

/**
 * Number of page faults since system startup
 *
 * Counts only those page faults that were handled successfully by the demand
 * paging mechanism and were not errors.
 *
 * @return Number of successful page faults
 */
unsigned long z_num_pagefaults_get(void);
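
/* Illustrative sketch (editor's example, not a Zephyr API): measuring how
 * many successful page faults a particular operation triggers by sampling
 * the counter before and after. The helper name is hypothetical.
 */
static inline unsigned long z_example_pagefaults_during(void (*operation)(void))
{
        unsigned long before = z_num_pagefaults_get();

        operation();

        return z_num_pagefaults_get() - before;
}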
/**
 * Free a page frame physical address by evicting its contents
 *
 * The indicated page frame, if it contains a data page, will have that
 * data page evicted to the backing store. The page frame will then be
 * marked as available for mappings or page-ins.
 *
 * This is useful for freeing up entire memory banks so that they may be
 * deactivated to save power.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in use.
 *
 * @param phys Page frame physical address
 * @retval 0 Success
 * @retval -ENOMEM Insufficient backing store space
 */
int z_page_frame_evict(uintptr_t phys);
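
/* Illustrative sketch (editor's example, not a Zephyr API): evicting every
 * page frame of a memory bank before powering it down, as suggested above.
 * The helper name and the bank_base/bank_size parameters are hypothetical;
 * this must not be called from an ISR if CONFIG_DEMAND_PAGING_ALLOW_IRQ is
 * enabled.
 */
static inline int z_example_evict_bank(uintptr_t bank_base, size_t bank_size)
{
        uintptr_t phys;

        for (phys = bank_base; phys < bank_base + bank_size;
             phys += CONFIG_MMU_PAGE_SIZE) {
                int ret = z_page_frame_evict(phys);

                if (ret != 0) {
                        /* Typically -ENOMEM: no backing store space left */
                        return ret;
                }
        }

        return 0;
}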
/**
 * Handle a page fault for a virtual data page
 *
 * This is invoked from the architecture page fault handler.
 *
 * If the page fault is valid, the core kernel will obtain a page frame,
 * populate it with the data page that was evicted to the backing store,
 * update page tables, and return so that the faulting instruction may be
 * re-tried.
 *
 * The architecture must not call this function if the page was mapped and
 * not paged out at the time the exception was triggered (i.e. a protection
 * violation for a mapped page).
 *
 * If the faulting context had interrupts disabled when the page fault was
 * triggered, the entire page fault handling path must have interrupts
 * disabled, including the invocation of this function.
 *
 * Otherwise, interrupts may be enabled and the page fault handler may be
 * preemptible. Races to page-in will be appropriately handled by the kernel.
 *
 * @param addr Faulting virtual address
 * @retval true Page fault successfully handled, or nothing needed to be done.
 *              The arch layer should retry the faulting instruction.
 * @retval false This page fault was from an un-mapped page and should be
 *               treated as an error; the faulting instruction must not be
 *               re-tried.
 */
bool z_page_fault(void *addr);
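
/* Illustrative sketch (editor's example; the real entry points are
 * architecture-specific): how an arch data-abort/page-fault handler might
 * dispatch to z_page_fault(). The helper name and the was_present flag are
 * hypothetical.
 */
static inline bool z_example_arch_handle_data_fault(void *fault_addr,
                                                    bool was_present)
{
        if (was_present) {
                /* The page was mapped and present, so this is a protection
                 * violation and must not be forwarded to z_page_fault().
                 */
                return false;
        }

        /* true: the data page was paged in (or nothing needed to be done);
         * the arch layer should retry the faulting instruction.
         * false: genuinely unmapped address; treat as a fatal error.
         */
        return z_page_fault(fault_addr);
}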
#endif /* CONFIG_DEMAND_PAGING */

#endif /* CONFIG_MMU */
#endif /* KERNEL_INCLUDE_MMU_H */