/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
#define ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H

#include <sys/util.h>
#include <toolchain.h>

/*
 * Caching mode definitions. These are mutually exclusive.
 */

/** No caching. Most drivers want this. */
#define K_MEM_CACHE_NONE	2

/** Write-through caching. Used by certain drivers. */
#define K_MEM_CACHE_WT		1

/** Full write-back caching. Any RAM mapped wants this. */
#define K_MEM_CACHE_WB		0

/** Reserved bits for cache modes in k_map() flags argument */
#define K_MEM_CACHE_MASK	(BIT(3) - 1)

/*
 * Region permission attributes. Default is read-only, no user, no exec
 */

/** Region will have read/write access (and not read-only) */
#define K_MEM_PERM_RW		BIT(3)

/** Region will be executable (normally forbidden) */
#define K_MEM_PERM_EXEC		BIT(4)

/** Region will be accessible to user mode (normally supervisor-only) */
#define K_MEM_PERM_USER		BIT(5)

/*
 * This is the offset to subtract from a virtual address mapped in the
 * kernel's permanent mapping of RAM, to obtain its physical address.
 *
 *     virt_addr = phys_addr + Z_MEM_VM_OFFSET
 *
 * This only works for virtual addresses within the interval
 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
 *
 * These macros are intended for assembly, linker code, and static initializers.
 * Use with care.
 *
 * Note that when demand paging is active, these will only work with page
 * frames that are pinned to their virtual mapping at boot.
 *
 * TODO: This will likely need to move to an arch API or need additional
 * constraints defined.
 */
#ifdef CONFIG_MMU
#define Z_MEM_VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
#define Z_MEM_VM_OFFSET	0
#endif

#define Z_MEM_PHYS_ADDR(virt)	((virt) - Z_MEM_VM_OFFSET)
#define Z_MEM_VIRT_ADDR(phys)	((phys) + Z_MEM_VM_OFFSET)

#if Z_MEM_VM_OFFSET != 0
#define Z_VM_KERNEL 1
#ifdef CONFIG_XIP
#error "XIP and a virtual memory kernel are not allowed"
#endif
#endif

#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>

#include <sys/__assert.h>
struct k_mem_paging_stats_t {
#ifdef CONFIG_DEMAND_PAGING_STATS
	struct {
		/** Number of page faults */
		unsigned long cnt;

		/** Number of page faults with IRQ locked */
		unsigned long irq_locked;

		/** Number of page faults with IRQ unlocked */
		unsigned long irq_unlocked;

#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
		/** Number of page faults while in ISR */
		unsigned long in_isr;
#endif
	} pagefaults;

	struct {
		/** Number of clean pages selected for eviction */
		unsigned long clean;

		/** Number of dirty pages selected for eviction */
		unsigned long dirty;
	} eviction;
#endif /* CONFIG_DEMAND_PAGING_STATS */
};

struct k_mem_paging_histogram_t {
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	/* Counts for each bin in timing histogram */
	unsigned long counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

	/* Bounds for the bins in timing histogram,
	 * excluding the first and last (hence, NUM_SLOTS - 1).
	 */
	unsigned long bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};
/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
static inline uintptr_t z_mem_phys_addr(void *virt)
{
	uintptr_t addr = (uintptr_t)virt;

#ifdef CONFIG_MMU
	__ASSERT((addr >= CONFIG_KERNEL_VM_BASE) &&
		 (addr < (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE)),
		 "address %p not in permanent mappings", virt);
#else
	/* Should be identity-mapped */
	__ASSERT((addr >= CONFIG_SRAM_BASE_ADDRESS) &&
		 (addr < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM",
		 (unsigned long)addr);
#endif /* CONFIG_MMU */

	/* TODO add assertion that this page is pinned to boot mapping,
	 * the above checks won't be sufficient with demand paging
	 */

	return Z_MEM_PHYS_ADDR(addr);
}

/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
static inline void *z_mem_virt_addr(uintptr_t phys)
{
	__ASSERT((phys >= CONFIG_SRAM_BASE_ADDRESS) &&
		 (phys < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM", (unsigned long)phys);

	/* TODO add assertion that this page frame is pinned to boot mapping,
	 * the above check won't be sufficient with demand paging
	 */

	return (void *)Z_MEM_VIRT_ADDR(phys);
}
#ifdef __cplusplus
extern "C" {
#endif

/**
 * Map a physical memory region into the kernel's virtual address space
 *
 * This function is intended for mapping memory-mapped I/O regions into
 * the virtual address space. Given a physical address and a size, return a
 * linear address representing the base of where the physical region is mapped
 * in the virtual address space for the Zephyr kernel.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel. This function will choose the virtual address
 * and return it to the caller.
 *
 * Portable code should never assume that phys_addr and linear_addr will
 * be equal.
 *
 * Caching and access properties are controlled by the 'flags' parameter.
 * Unused bits in 'flags' are reserved for future expansion.
 * A caching mode must be selected. By default, the region is read-only,
 * with user-mode access and code execution forbidden. This policy is changed
 * by passing K_MEM_CACHE_* and K_MEM_PERM_* macros in the 'flags' parameter.
 *
 * If there is insufficient virtual address space for the mapping this will
 * generate a kernel panic.
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to map system RAM page
 * frames. It may conflict with anonymous memory mappings and demand paging
 * and produce undefined behavior. Do not use this for RAM unless you know
 * exactly what you are doing. If you need a chunk of memory, use k_mem_map().
 * If you need a contiguous buffer of physical memory, statically declare it
 * and pin it at build time; it will be mapped when the system boots.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt_ptr [out] Output virtual address storage location
 * @param phys Physical address base of the memory region
 * @param size Size of the memory region
 * @param flags Caching mode and access flags, see K_MAP_* macros
 */
void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
		uint32_t flags);
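
/*
 * Illustrative usage sketch (not part of this API): a driver mapping a
 * hypothetical MMIO register block. DEVICE_MMIO_PHYS, DEVICE_MMIO_SIZE and
 * CTRL_REG_OFFSET are placeholder names for this example only; sys_write32()
 * comes from <sys/sys_io.h>.
 *
 *	uint8_t *regs;
 *
 *	z_phys_map(&regs, DEVICE_MMIO_PHYS, DEVICE_MMIO_SIZE,
 *		   K_MEM_CACHE_NONE | K_MEM_PERM_RW);
 *	sys_write32(0x1, (mm_reg_t)regs + CTRL_REG_OFFSET);
 */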
/**
 * Unmap a virtual memory region from the kernel's virtual address space.
 *
 * This function is intended to be used by drivers and early boot routines
 * where temporary memory mappings need to be made. This allows these
 * memory mappings to be discarded once they are no longer needed.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel.
 *
 * This will align the input parameters to page boundaries so that
 * this can be used with the virtual address as returned by
 * z_phys_map().
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to unmap memory mappings.
 * It may conflict with anonymous memory mappings and demand paging and
 * produce undefined behavior. Do not use this unless you know exactly
 * what you are doing.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Starting address of the virtual address region to be unmapped.
 * @param size Size of the virtual address region
 */
void z_phys_unmap(uint8_t *virt, size_t size);
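
/*
 * Illustrative usage sketch continuing the z_phys_map() example above:
 * discarding the temporary mapping once the driver no longer needs it.
 * 'regs' and DEVICE_MMIO_SIZE are the placeholder names from that sketch.
 *
 *	z_phys_unmap(regs, DEVICE_MMIO_SIZE);
 */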
/*
 * k_mem_map() control flags
 */

/**
 * @def K_MEM_MAP_UNINIT
 *
 * @brief The mapped region is not guaranteed to be zeroed.
 *
 * This may improve performance. The associated page frames may contain
 * indeterminate data, zeroes, or even sensitive information.
 *
 * This may not be used with K_MEM_PERM_USER as there are no circumstances
 * where this is safe.
 */
#define K_MEM_MAP_UNINIT	BIT(16)

/**
 * @def K_MEM_MAP_LOCK
 *
 * Region will be pinned in memory and never paged
 *
 * Such memory is guaranteed to never produce a page fault due to page-outs
 * or copy-on-write once the mapping call has returned. Physical page frames
 * will be pre-fetched as necessary and pinned.
 */
#define K_MEM_MAP_LOCK		BIT(17)
/**
 * @def K_MEM_MAP_GUARD
 *
 * An un-mapped virtual guard page will be placed in memory immediately
 * preceding the mapped region. This page will still be noted as being used by
 * the virtual memory manager. The total size of the allocation will be the
 * requested size plus the size of this guard page. The returned address
 * pointer will not include the guard page immediately below it. The typical
 * use-case is downward-growing thread stacks.
 *
 * Zephyr treats page faults on this guard page as a fatal K_ERR_STACK_CHK_FAIL
 * if it determines it immediately precedes a stack buffer; this is
 * implemented in the architecture layer.
 *
 * DEPRECATED: k_mem_map() will always allocate guard pages, so this bit
 * no longer has any effect.
 */
#define K_MEM_MAP_GUARD		__DEPRECATED_MACRO BIT(18)
/**
 * Return the amount of free memory available
 *
 * The returned value will reflect how many free RAM page frames are available.
 * If demand paging is enabled, it may still be possible to allocate more.
 *
 * The information reported by this function may go stale immediately if
 * concurrent memory mappings or page-ins take place.
 *
 * @return Free physical RAM, in bytes
 */
size_t k_mem_free_get(void);
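
/*
 * Illustrative sketch: logging the amount of free physical RAM before
 * attempting a large mapping. printk() is assumed to be available via
 * <sys/printk.h>; the check is advisory only, since the value may go stale
 * immediately.
 *
 *	size_t free_bytes = k_mem_free_get();
 *
 *	printk("free RAM: %zu bytes\n", free_bytes);
 */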
/**
 * Map anonymous memory into Zephyr's address space
 *
 * This function effectively increases the data space available to Zephyr.
 * The kernel will choose a base virtual address and return it to the caller.
 * The memory will have access permissions for all contexts set per the
 * provided flags argument.
 *
 * If user thread access control needs to be managed in any way, do not enable
 * K_MEM_PERM_USER here; instead manage the region's permissions
 * with memory domain APIs after the mapping has been established. Setting
 * K_MEM_PERM_USER here will allow all user threads to access this memory,
 * which is usually undesirable.
 *
 * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
 *
 * The mapped region is not guaranteed to be physically contiguous in memory.
 * Physically contiguous buffers should be allocated statically and pinned
 * at build time.
 *
 * Pages mapped in this way have write-back cache settings.
 *
 * The returned virtual memory pointer will be page-aligned. The size
 * parameter, and any base address for re-mapping purposes, must be
 * page-aligned.
 *
 * Note that the allocation includes two guard pages immediately before
 * and after the requested region. The total size of the allocation will be
 * the requested size plus the size of these two guard pages.
 *
 * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
 * function, with details in the documentation for these flags.
 *
 * @param size Size of the memory mapping. This must be page-aligned.
 * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
 * @return The mapped memory location, or NULL if insufficient virtual address
 *         space, insufficient physical memory to establish the mapping,
 *         or insufficient memory for paging structures.
 */
void *k_mem_map(size_t size, uint32_t flags);
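
/*
 * Illustrative sketch: mapping a page-aligned anonymous buffer that must
 * never be paged out, then releasing it later with k_mem_unmap() (see the
 * sketch after that declaration). CONFIG_MMU_PAGE_SIZE is assumed to be the
 * page size symbol on MMU targets; error handling is minimal on purpose.
 *
 *	void *buf = k_mem_map(16 * CONFIG_MMU_PAGE_SIZE,
 *			      K_MEM_PERM_RW | K_MEM_MAP_LOCK);
 *
 *	if (buf == NULL) {
 *		return -ENOMEM;
 *	}
 *	... memory is zeroed and pinned; use it as ordinary kernel data ...
 */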
/**
 * Un-map mapped memory
 *
 * This removes a memory mapping for the provided page-aligned region.
 * Associated page frames will be freed and the kernel may re-use the
 * associated virtual address region. Any paged-out data pages may be
 * discarded.
 *
 * Calling this function on a region which was not mapped to begin with is
 * undefined behavior.
 *
 * @param addr Page-aligned memory region base virtual address
 * @param size Page-aligned memory region size
 */
void k_mem_unmap(void *addr, size_t size);
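
/*
 * Illustrative sketch continuing the k_mem_map() example above: releasing
 * the mapping once the buffer is no longer needed. The size must match the
 * page-aligned size that was originally requested.
 *
 *	k_mem_unmap(buf, 16 * CONFIG_MMU_PAGE_SIZE);
 */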
/**
 * Given an arbitrary region, provide an aligned region that covers it
 *
 * The returned region will have both its base address and size aligned
 * to the provided alignment value.
 *
 * @param aligned_addr [out] Aligned address
 * @param aligned_size [out] Aligned region size
 * @param addr Region base address
 * @param size Region size
 * @param align What to align the address and size to
 * @return Offset between aligned_addr and addr
 */
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
			  uintptr_t addr, size_t size, size_t align);
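
/*
 * Illustrative sketch: expanding an arbitrary buffer to the page-aligned
 * region that covers it, e.g. before handing it to a page-granular API.
 * 'data' and 'data_len' are placeholder names; CONFIG_MMU_PAGE_SIZE is
 * assumed to be the page size symbol on MMU targets.
 *
 *	uintptr_t aligned_addr;
 *	size_t aligned_size, offset;
 *
 *	offset = k_mem_region_align(&aligned_addr, &aligned_size,
 *				    (uintptr_t)data, data_len,
 *				    CONFIG_MMU_PAGE_SIZE);
 */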
/**
 * @defgroup mem-demand-paging Demand Paging APIs
 * @{
 */

/**
 * Evict a page-aligned virtual memory region to the backing store
 *
 * Useful if it is known that a memory region will not be used for some time.
 * All the data pages within the specified region will be evicted to the
 * backing store if they weren't already, with their associated page frames
 * marked as available for mappings or page-ins.
 *
 * None of the associated page frames mapped to the provided region should
 * be pinned.
 *
 * Note that there are no guarantees on how long these pages will stay
 * evicted; they could take page faults again immediately.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 * @retval 0 Success
 * @retval -ENOMEM Insufficient space in backing store to satisfy request.
 *         The region may be partially paged out.
 */
int k_mem_page_out(void *addr, size_t size);
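
/*
 * Illustrative sketch: proactively evicting a large, rarely used table so
 * its page frames become available to other mappings. 'lookup_table' is a
 * placeholder name for this example only.
 *
 *	int ret = k_mem_page_out(lookup_table, sizeof(lookup_table));
 *
 *	if (ret == -ENOMEM) {
 *		... backing store full, region may be partially paged out ...
 *	}
 */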
/**
 * Load a virtual data region into memory
 *
 * After the function completes, all the page frames associated with this
 * region will be paged in. However, they are not guaranteed to stay there.
 * This is useful if the region is known to be used soon.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_page_in(void *addr, size_t size);
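
/*
 * Illustrative sketch continuing the k_mem_page_out() example above:
 * bringing the table back into RAM ahead of a burst of lookups so the first
 * access does not take a page fault.
 *
 *	k_mem_page_in(lookup_table, sizeof(lookup_table));
 */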
/**
 * Pin an aligned virtual data region, paging in as necessary
 *
 * After the function completes, all the page frames associated with this
 * region will be resident in memory and pinned such that they stay that way.
 * This is a stronger version of k_mem_page_in().
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_pin(void *addr, size_t size);

/**
 * Un-pin an aligned virtual data region
 *
 * After the function completes, all the page frames associated with this
 * region will no longer be marked as pinned. This does not evict the region;
 * follow this with k_mem_page_out() if you need that.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_unpin(void *addr, size_t size);
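
/*
 * Illustrative sketch: pinning a latency-critical buffer for the duration of
 * an I/O transfer so it can never fault, then releasing the pin. 'dma_buf'
 * and DMA_BUF_SIZE are placeholder names for this example only.
 *
 *	k_mem_pin(dma_buf, DMA_BUF_SIZE);
 *	... perform the transfer; no page faults can occur on dma_buf ...
 *	k_mem_unpin(dma_buf, DMA_BUF_SIZE);
 */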
/**
 * Get the paging statistics since system startup
 *
 * This populates the paging statistics struct being passed in
 * as argument.
 *
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
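
/*
 * Illustrative sketch (assumes CONFIG_DEMAND_PAGING_STATS is enabled so the
 * struct fields below exist): reading the system-wide paging counters.
 * printk() is assumed to be available via <sys/printk.h>.
 *
 *	struct k_mem_paging_stats_t stats;
 *
 *	k_mem_paging_stats_get(&stats);
 *	printk("page faults: %lu (clean evictions: %lu, dirty: %lu)\n",
 *	       stats.pagefaults.cnt, stats.eviction.clean,
 *	       stats.eviction.dirty);
 */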
struct k_thread;

/**
 * Get the paging statistics since system startup for a thread
 *
 * This populates the paging statistics struct being passed in
 * as argument for a particular thread.
 *
 * @param[in] thread Thread
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall
void k_mem_paging_thread_stats_get(struct k_thread *thread,
				   struct k_mem_paging_stats_t *stats);
/**
 * Get the eviction timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-in timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-out timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist);
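
/*
 * Illustrative sketch (assumes CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM is
 * enabled so the struct fields below exist): dumping the eviction timing
 * histogram bins. printk() is assumed to be available via <sys/printk.h>.
 *
 *	struct k_mem_paging_histogram_t hist;
 *
 *	k_mem_paging_histogram_eviction_get(&hist);
 *	for (int i = 0;
 *	     i < CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS; i++) {
 *		printk("bin %d bound %lu: %lu\n", i,
 *		       hist.bounds[i], hist.counts[i]);
 *	}
 */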
#include <syscalls/mem_manage.h>

/** @} */

/**
 * Eviction algorithm APIs
 *
 * @defgroup mem-demand-paging-eviction Eviction Algorithm APIs
 * @{
 */
/**
 * Select a page frame for eviction
 *
 * The kernel will invoke this to choose a page frame to evict if there
 * are no free page frames.
 *
 * This function will never be called before the initial
 * k_mem_paging_eviction_init().
 *
 * This function is invoked with interrupts locked.
 *
 * @param [out] dirty Whether the page to evict is dirty
 * @return The page frame to evict
 */
struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);

/**
 * Initialization function
 *
 * Called at POST_KERNEL to perform any necessary initialization tasks for the
 * eviction algorithm. k_mem_paging_eviction_select() is guaranteed to never be
 * called until this has returned, and this will only be called once.
 */
void k_mem_paging_eviction_init(void);
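
/*
 * Illustrative skeleton of a custom eviction algorithm (a sketch only; the
 * victim-selection policy and the helper choose_victim_frame() are
 * hypothetical and not part of this API):
 *
 *	static struct z_page_frame *choose_victim_frame(void)
 *	{
 *		... policy-specific scan over evictable page frames ...
 *	}
 *
 *	struct z_page_frame *k_mem_paging_eviction_select(bool *dirty)
 *	{
 *		struct z_page_frame *pf = choose_victim_frame();
 *
 *		*dirty = ...;  set from the page frame / page table state
 *		return pf;
 *	}
 *
 *	void k_mem_paging_eviction_init(void)
 *	{
 *		... set up any tracking data structures ...
 *	}
 */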
/** @} */

/**
 * Backing store APIs
 *
 * @defgroup mem-demand-paging-backing-store Backing Store APIs
 * @{
 */

/**
 * Reserve or fetch a storage location for a data page loaded into a page frame
 *
 * The returned location token must be unique to the mapped virtual address.
 * This location will be used in the backing store to page out data page
 * contents for later retrieval. The location value must be page-aligned.
 *
 * This function may be called multiple times on the same data page. If its
 * page frame has its Z_PAGE_FRAME_BACKED bit set, it is expected to return
 * the previous backing store location for the data page containing a cached
 * clean copy. This clean copy may be updated on page-out, or used to
 * discard clean pages without needing to write out their contents.
 *
 * If the backing store is full, some other backing store location which caches
 * a loaded data page may be selected, in which case its associated page frame
 * will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
 *
 * pf->addr will indicate the virtual address the page is currently mapped to.
 * Large, sparse backing stores which can contain the entire address space
 * may simply generate location tokens purely as a function of pf->addr with no
 * other management necessary.
 *
 * This function distinguishes whether it was called on behalf of a page
 * fault. A free backing store location must always be reserved in order for
 * page faults to succeed. If the page_fault parameter is not set, this
 * function should return -ENOMEM even if only one location is available.
 *
 * This function is invoked with interrupts locked.
 *
 * @param pf Page frame containing the data page that needs a storage location
 * @param [out] location Storage location token
 * @param page_fault Whether this request was for a page fault
 * @retval 0 Success
 * @retval -ENOMEM Backing store is full
 */
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault);
/**
 * Free a backing store location
 *
 * Any stored data may be discarded, and the location token associated with
 * this address may be re-used for some other data page.
 *
 * This function is invoked with interrupts locked.
 *
 * @param location Location token to free
 */
void k_mem_paging_backing_store_location_free(uintptr_t location);

/**
 * Copy a data page from Z_SCRATCH_PAGE to the specified location
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended source page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_in() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page, for later retrieval
 */
void k_mem_paging_backing_store_page_out(uintptr_t location);

/**
 * Copy a data page from the provided location to Z_SCRATCH_PAGE.
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended destination page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_out() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page
 */
void k_mem_paging_backing_store_page_in(uintptr_t location);
/**
 * Update internal accounting after a page-in
 *
 * This is invoked after k_mem_paging_backing_store_page_in() and interrupts
 * have been re-locked, making it safe to access the z_page_frame data.
 * The location value will be the same passed to
 * k_mem_paging_backing_store_page_in().
 *
 * The primary use-case for this is to update custom fields for the backing
 * store in the page frame, to reflect where the data should be evicted to
 * if it is paged out again. This may be a no-op in some implementations.
 *
 * If the backing store caches paged-in data pages, this is the appropriate
 * time to set the Z_PAGE_FRAME_BACKED bit. The kernel only skips paging
 * out clean data pages if they are noted as clean in the page tables and the
 * Z_PAGE_FRAME_BACKED bit is set in their associated page frame.
 *
 * @param pf Page frame that was loaded in
 * @param location Location of where the loaded data page was retrieved
 */
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
					      uintptr_t location);

/**
 * Backing store initialization function.
 *
 * The implementation may expect to receive page in/out calls as soon as this
 * returns, but not before that. Called at POST_KERNEL.
 *
 * This function is expected to do two things:
 * - Initialize any internal data structures and accounting for the backing
 *   store.
 * - If the backing store already contains all or some loaded kernel data pages
 *   at boot time, Z_PAGE_FRAME_BACKED should be appropriately set for their
 *   associated page frames, and any internal accounting set up appropriately.
 */
void k_mem_paging_backing_store_init(void);
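
/*
 * Illustrative skeleton of a minimal backing store (a sketch only, assuming
 * a large, sparse store where the location token can be derived directly
 * from pf->addr as described above; read_page() and write_page() are
 * hypothetical driver helpers, not part of this API):
 *
 *	int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
 *						    uintptr_t *location,
 *						    bool page_fault)
 *	{
 *		*location = (uintptr_t)pf->addr;
 *		return 0;
 *	}
 *
 *	void k_mem_paging_backing_store_location_free(uintptr_t location)
 *	{
 *		... nothing to reclaim in a sparse store ...
 *	}
 *
 *	void k_mem_paging_backing_store_page_out(uintptr_t location)
 *	{
 *		write_page(location, Z_SCRATCH_PAGE);
 *	}
 *
 *	void k_mem_paging_backing_store_page_in(uintptr_t location)
 *	{
 *		read_page(location, Z_SCRATCH_PAGE);
 *	}
 */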
/** @} */

#ifdef __cplusplus
}
#endif

#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H */