- #include <stdint.h>
- #include <kernel_arch_interface.h>
- #include <spinlock.h>
- #include <mmu.h>
- #include <init.h>
- #include <kernel_internal.h>
- #include <syscall_handler.h>
- #include <toolchain.h>
- #include <linker/linker-defs.h>
- #include <sys/bitarray.h>
- #include <timing/timing.h>
- #include <logging/log.h>
- LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
- struct k_spinlock z_mm_lock;
- struct z_page_frame z_page_frames[Z_NUM_PAGE_FRAMES];
- #if __ASSERT_ON
- static bool page_frames_initialized;
- #endif
- #define COLOR_PAGE_FRAMES 1
- #if COLOR_PAGE_FRAMES
- #define ANSI_DEFAULT "\x1B[0m"
- #define ANSI_RED "\x1B[1;31m"
- #define ANSI_GREEN "\x1B[1;32m"
- #define ANSI_YELLOW "\x1B[1;33m"
- #define ANSI_BLUE "\x1B[1;34m"
- #define ANSI_MAGENTA "\x1B[1;35m"
- #define ANSI_CYAN "\x1B[1;36m"
- #define ANSI_GREY "\x1B[1;90m"
- #define COLOR(x) printk(_CONCAT(ANSI_, x))
- #else
- #define COLOR(x) do { } while (0)
- #endif
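- /* Legend for the page frame dump produced below; each frame is
-  * printed as a single character:
-  *   R  reserved       B  busy (paging I/O in progress)
-  *   P  pinned         .  available (free)
-  *   M  mapped         ?  unknown/inconsistent flags
-  */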
- static void page_frame_dump(struct z_page_frame *pf)
- {
- if (z_page_frame_is_reserved(pf)) {
- COLOR(CYAN);
- printk("R");
- } else if (z_page_frame_is_busy(pf)) {
- COLOR(MAGENTA);
- printk("B");
- } else if (z_page_frame_is_pinned(pf)) {
- COLOR(YELLOW);
- printk("P");
- } else if (z_page_frame_is_available(pf)) {
- COLOR(GREY);
- printk(".");
- } else if (z_page_frame_is_mapped(pf)) {
- COLOR(DEFAULT);
- printk("M");
- } else {
- COLOR(RED);
- printk("?");
- }
- }
- void z_page_frames_dump(void)
- {
- int column = 0;
- __ASSERT(page_frames_initialized, "%s called too early", __func__);
- printk("Physical memory from 0x%lx to 0x%lx\n",
- Z_PHYS_RAM_START, Z_PHYS_RAM_END);
- for (int i = 0; i < Z_NUM_PAGE_FRAMES; i++) {
- struct z_page_frame *pf = &z_page_frames[i];
- page_frame_dump(pf);
- column++;
- if (column == 64) {
- column = 0;
- printk("\n");
- }
- }
- COLOR(DEFAULT);
- if (column != 0) {
- printk("\n");
- }
- }
- #define VIRT_FOREACH(_base, _size, _pos) \
- for (_pos = _base; \
- _pos < ((uint8_t *)_base + _size); _pos += CONFIG_MMU_PAGE_SIZE)
- #define PHYS_FOREACH(_base, _size, _pos) \
- for (_pos = _base; \
- _pos < ((uintptr_t)_base + _size); _pos += CONFIG_MMU_PAGE_SIZE)
- SYS_BITARRAY_DEFINE(virt_region_bitmap,
- CONFIG_KERNEL_VM_SIZE / CONFIG_MMU_PAGE_SIZE);
- static bool virt_region_inited;
- #define Z_VIRT_REGION_START_ADDR Z_FREE_VM_START
- #define Z_VIRT_REGION_END_ADDR (Z_VIRT_RAM_END - Z_VM_RESERVED)
- static inline uintptr_t virt_from_bitmap_offset(size_t offset, size_t size)
- {
- return POINTER_TO_UINT(Z_VIRT_RAM_END)
- - (offset * CONFIG_MMU_PAGE_SIZE) - size;
- }
- static inline size_t virt_to_bitmap_offset(void *vaddr, size_t size)
- {
- return (POINTER_TO_UINT(Z_VIRT_RAM_END)
- - POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE;
- }
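- /* The virtual region allocator hands out addresses from the top of
-  * the address space downward: bit #0 of virt_region_bitmap maps to
-  * the page just below Z_VIRT_RAM_END, and higher bit offsets map to
-  * progressively lower addresses. The two helpers above convert
-  * between bitmap offsets and the virtual address of a region of
-  * `size` bytes.
-  */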
- static void virt_region_init(void)
- {
- size_t offset, num_bits;
- 
- /* There are regions where we should never map via k_mem_map() and
-  * z_phys_map(). Mark them as already allocated so they will never
-  * be handed out.
-  */
- if (Z_VM_RESERVED > 0) {
- /* Mark the reserved region at the end of the virtual address space */
- num_bits = Z_VM_RESERVED / CONFIG_MMU_PAGE_SIZE;
- (void)sys_bitarray_set_region(&virt_region_bitmap,
- num_bits, 0);
- }
- 
- /* Mark all bits up to Z_FREE_VM_START as allocated */
- num_bits = POINTER_TO_UINT(Z_FREE_VM_START)
- - POINTER_TO_UINT(Z_VIRT_RAM_START);
- offset = virt_to_bitmap_offset(Z_VIRT_RAM_START, num_bits);
- num_bits /= CONFIG_MMU_PAGE_SIZE;
- (void)sys_bitarray_set_region(&virt_region_bitmap,
- num_bits, offset);
- virt_region_inited = true;
- }
- static void *virt_region_alloc(size_t size)
- {
- uintptr_t dest_addr;
- size_t offset;
- size_t num_bits;
- int ret;
- if (unlikely(!virt_region_inited)) {
- virt_region_init();
- }
- num_bits = size / CONFIG_MMU_PAGE_SIZE;
- ret = sys_bitarray_alloc(&virt_region_bitmap, num_bits, &offset);
- if (ret != 0) {
- LOG_ERR("insufficient virtual address space (requested %zu)",
- size);
- return NULL;
- }
- 
- /* Bit #0 of the bitmap corresponds to the highest virtual address,
-  * so the start of the allocated region is measured downward from
-  * Z_VIRT_RAM_END.
-  */
- dest_addr = virt_from_bitmap_offset(offset, size);
- 
- /* Make sure this does not step into kernel memory; if it does,
-  * undo the allocation. The bitarray free must use the same bit
-  * count that was allocated above (num_bits, not bytes).
-  */
- if (dest_addr < POINTER_TO_UINT(Z_VIRT_REGION_START_ADDR)) {
- (void)sys_bitarray_free(&virt_region_bitmap, num_bits, offset);
- return NULL;
- }
- return UINT_TO_POINTER(dest_addr);
- }
- static void virt_region_free(void *vaddr, size_t size)
- {
- size_t offset, num_bits;
- uint8_t *vaddr_u8 = (uint8_t *)vaddr;
- if (unlikely(!virt_region_inited)) {
- virt_region_init();
- }
- __ASSERT((vaddr_u8 >= Z_VIRT_REGION_START_ADDR)
- && ((vaddr_u8 + size) < Z_VIRT_REGION_END_ADDR),
- "invalid virtual address region %p (%zu)", vaddr_u8, size);
- /* Repeat the check without __ASSERT so builds with assertions
-  * disabled return instead of freeing bits for an invalid region.
-  */
- if (!((vaddr_u8 >= Z_VIRT_REGION_START_ADDR)
- && ((vaddr_u8 + size) < Z_VIRT_REGION_END_ADDR))) {
- return;
- }
- offset = virt_to_bitmap_offset(vaddr, size);
- num_bits = size / CONFIG_MMU_PAGE_SIZE;
- (void)sys_bitarray_free(&virt_region_bitmap, num_bits, offset);
- }
- static sys_slist_t free_page_frame_list;
- size_t z_free_page_count;
- #define PF_ASSERT(pf, expr, fmt, ...) \
- __ASSERT(expr, "page frame 0x%lx: " fmt, z_page_frame_to_phys(pf), \
- ##__VA_ARGS__)
- static struct z_page_frame *free_page_frame_list_get(void)
- {
- sys_snode_t *node;
- struct z_page_frame *pf = NULL;
- node = sys_slist_get(&free_page_frame_list);
- if (node != NULL) {
- z_free_page_count--;
- pf = CONTAINER_OF(node, struct z_page_frame, node);
- PF_ASSERT(pf, z_page_frame_is_available(pf),
- "unavailable but somehow on free list");
- }
- return pf;
- }
- static void free_page_frame_list_put(struct z_page_frame *pf)
- {
- PF_ASSERT(pf, z_page_frame_is_available(pf),
- "unavailable page put on free list");
- sys_slist_append(&free_page_frame_list, &pf->node);
- z_free_page_count++;
- }
- static void free_page_frame_list_init(void)
- {
- sys_slist_init(&free_page_frame_list);
- }
- static void page_frame_free_locked(struct z_page_frame *pf)
- {
- pf->flags = 0;
- free_page_frame_list_put(pf);
- }
- static void frame_mapped_set(struct z_page_frame *pf, void *addr)
- {
- PF_ASSERT(pf, !z_page_frame_is_reserved(pf),
- "attempted to map a reserved page frame");
- 
- /* We do allow multiple mappings for pinned page frames */
- PF_ASSERT(pf, !z_page_frame_is_mapped(pf) || z_page_frame_is_pinned(pf),
- "non-pinned and already mapped to %p", pf->addr);
- pf->flags |= Z_PAGE_FRAME_MAPPED;
- pf->addr = addr;
- }
- static int virt_to_page_frame(void *virt, uintptr_t *phys)
- {
- uintptr_t paddr;
- struct z_page_frame *pf;
- int ret = -EFAULT;
- Z_PAGE_FRAME_FOREACH(paddr, pf) {
- if (z_page_frame_is_mapped(pf)) {
- if (virt == pf->addr) {
- ret = 0;
- *phys = z_page_frame_to_phys(pf);
- break;
- }
- }
- }
- return ret;
- }
- __weak FUNC_ALIAS(virt_to_page_frame, arch_page_phys_get, int);
- #ifdef CONFIG_DEMAND_PAGING
- static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
- bool page_in, uintptr_t *location_ptr);
- static inline void do_backing_store_page_in(uintptr_t location);
- static inline void do_backing_store_page_out(uintptr_t location);
- #endif
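- /* Allocate a free page frame and map it at the given virtual address.
-  * If no frame is free and demand paging is enabled, a victim frame is
-  * evicted (paged out first if dirty) and reused; otherwise -ENOMEM is
-  * returned.
-  */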
- static int map_anon_page(void *addr, uint32_t flags)
- {
- struct z_page_frame *pf;
- uintptr_t phys;
- bool lock = (flags & K_MEM_MAP_LOCK) != 0U;
- bool uninit = (flags & K_MEM_MAP_UNINIT) != 0U;
- pf = free_page_frame_list_get();
- if (pf == NULL) {
- #ifdef CONFIG_DEMAND_PAGING
- uintptr_t location;
- bool dirty;
- int ret;
- pf = k_mem_paging_eviction_select(&dirty);
- __ASSERT(pf != NULL, "failed to get a page frame");
- LOG_DBG("evicting %p at 0x%lx", pf->addr,
- z_page_frame_to_phys(pf));
- ret = page_frame_prepare_locked(pf, &dirty, false, &location);
- if (ret != 0) {
- return -ENOMEM;
- }
- if (dirty) {
- do_backing_store_page_out(location);
- }
- pf->flags = 0;
- #else
- return -ENOMEM;
- #endif
- }
- phys = z_page_frame_to_phys(pf);
- arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags | K_MEM_CACHE_WB);
- if (lock) {
- pf->flags |= Z_PAGE_FRAME_PINNED;
- }
- frame_mapped_set(pf, addr);
- LOG_DBG("memory mapping anon page %p -> 0x%lx", addr, phys);
- if (!uninit) {
- /* If we later implement mappings to a copy-on-write zero page,
-  * this explicit clear step can go away.
-  */
- memset(addr, 0, CONFIG_MMU_PAGE_SIZE);
- }
- return 0;
- }
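- /* k_mem_map() reserves `size` plus two extra pages of virtual address
-  * space: one unmapped guard page before and one after the accessible
-  * region, so that buffer overruns fault instead of silently touching
-  * adjacent mappings.
-  *
-  * Minimal usage sketch (hypothetical caller, error handling elided):
-  *
-  *   void *buf = k_mem_map(4 * CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
-  *   if (buf != NULL) {
-  *           ...use four pages of zeroed memory at buf...
-  *           k_mem_unmap(buf, 4 * CONFIG_MMU_PAGE_SIZE);
-  *   }
-  */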
- void *k_mem_map(size_t size, uint32_t flags)
- {
- uint8_t *dst;
- size_t total_size;
- int ret;
- k_spinlock_key_t key;
- uint8_t *pos;
- __ASSERT(!(((flags & K_MEM_PERM_USER) != 0U) &&
- ((flags & K_MEM_MAP_UNINIT) != 0U)),
- "user access to anonymous uninitialized pages is forbidden");
- __ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
- "unaligned size %zu passed to %s", size, __func__);
- __ASSERT(size != 0, "zero sized memory mapping");
- __ASSERT(page_frames_initialized, "%s called too early", __func__);
- __ASSERT((flags & K_MEM_CACHE_MASK) == 0U,
- "%s does not support explicit cache settings", __func__);
- key = k_spin_lock(&z_mm_lock);
- 
- /* Need extra pages for the guard pages (before and after), which
-  * are never mapped.
-  */
- total_size = size + CONFIG_MMU_PAGE_SIZE * 2;
- dst = virt_region_alloc(total_size);
- if (dst == NULL) {
- /* Address space has no free region */
- goto out;
- }
- 
- /* Unmap both guard pages so that any access to them faults */
- arch_mem_unmap(dst, CONFIG_MMU_PAGE_SIZE);
- arch_mem_unmap(dst + CONFIG_MMU_PAGE_SIZE + size,
- CONFIG_MMU_PAGE_SIZE);
- 
- /* Skip over the "before" guard page in the returned address */
- dst += CONFIG_MMU_PAGE_SIZE;
- VIRT_FOREACH(dst, size, pos) {
- ret = map_anon_page(pos, flags);
- if (ret != 0) {
- /* TODO: unmap the pages mapped so far and release
-  * the virtual region before failing.
-  */
- dst = NULL;
- goto out;
- }
- }
- out:
- k_spin_unlock(&z_mm_lock, key);
- return dst;
- }
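- /* k_mem_unmap() expects the exact (addr, size) pair that a prior
-  * k_mem_map() call returned: it verifies the surrounding guard pages
-  * are still unmapped, returns each page frame to the free list, and
-  * releases the whole size-plus-guard-pages virtual region.
-  */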
- void k_mem_unmap(void *addr, size_t size)
- {
- uintptr_t phys;
- uint8_t *pos;
- struct z_page_frame *pf;
- k_spinlock_key_t key;
- size_t total_size;
- int ret;
- 
- /* Need space for the preceding guard page */
- __ASSERT_NO_MSG(POINTER_TO_UINT(addr) >= CONFIG_MMU_PAGE_SIZE);
- 
- /* Make sure the address range is still valid after accounting
-  * for the two guard pages.
-  */
- pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE;
- z_mem_assert_virtual_region(pos, size + (CONFIG_MMU_PAGE_SIZE * 2));
- key = k_spin_lock(&z_mm_lock);
- 
- /* Check that both guard pages are unmapped. Bail if not, as this
-  * smells like an attempt to unmap memory that was not obtained
-  * from k_mem_map().
-  */
- pos = addr;
- ret = arch_page_phys_get(pos - CONFIG_MMU_PAGE_SIZE, NULL);
- if (ret == 0) {
- __ASSERT(ret != 0,
- "%s: cannot find preceding guard page for (%p, %zu)",
- __func__, addr, size);
- goto out;
- }
- ret = arch_page_phys_get(pos + size, NULL);
- if (ret == 0) {
- __ASSERT(ret != 0,
- "%s: cannot find succeeding guard page for (%p, %zu)",
- __func__, addr, size);
- goto out;
- }
- VIRT_FOREACH(addr, size, pos) {
- ret = arch_page_phys_get(pos, &phys);
- __ASSERT(ret == 0,
- "%s: cannot unmap an unmapped address %p",
- __func__, pos);
- if (ret != 0) {
- /* Found an address not mapped; do not continue */
- goto out;
- }
- __ASSERT(z_is_page_frame(phys),
- "%s: 0x%lx is not a page frame", __func__, phys);
- if (!z_is_page_frame(phys)) {
- /* Physical address has no corresponding page frame;
-  * do not continue.
-  */
- goto out;
- }
- 
- /* Grab the corresponding page frame for this mapping */
- pf = z_phys_to_page_frame(phys);
- __ASSERT(z_page_frame_is_mapped(pf),
- "%s: 0x%lx is not a mapped page frame", __func__, phys);
- if (!z_page_frame_is_mapped(pf)) {
- /* Page frame is not marked mapped; do not continue */
- goto out;
- }
- arch_mem_unmap(pos, CONFIG_MMU_PAGE_SIZE);
- 
- /* Put the page frame back on the free list */
- page_frame_free_locked(pf);
- }
- 
- /* Guard pages surround the mapped region, so release them from
-  * the virtual region bitmap as well.
-  */
- pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE;
- total_size = size + CONFIG_MMU_PAGE_SIZE * 2;
- virt_region_free(pos, total_size);
- out:
- k_spin_unlock(&z_mm_lock, key);
- }
- size_t k_mem_free_get(void)
- {
- size_t ret;
- k_spinlock_key_t key;
- __ASSERT(page_frames_initialized, "%s called too early", __func__);
- key = k_spin_lock(&z_mm_lock);
- #ifdef CONFIG_DEMAND_PAGING
- if (z_free_page_count > CONFIG_DEMAND_PAGING_PAGE_FRAMES_RESERVE) {
- ret = z_free_page_count - CONFIG_DEMAND_PAGING_PAGE_FRAMES_RESERVE;
- } else {
- ret = 0;
- }
- #else
- ret = z_free_page_count;
- #endif
- k_spin_unlock(&z_mm_lock, key);
- return ret * (size_t)CONFIG_MMU_PAGE_SIZE;
- }
- void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
- {
- uintptr_t aligned_phys, addr_offset;
- size_t aligned_size;
- k_spinlock_key_t key;
- uint8_t *dest_addr;
- addr_offset = k_mem_region_align(&aligned_phys, &aligned_size,
- phys, size,
- CONFIG_MMU_PAGE_SIZE);
- __ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_phys);
- __ASSERT(aligned_phys < (aligned_phys + (aligned_size - 1)),
- "wraparound for physical address 0x%lx (size %zu)",
- aligned_phys, aligned_size);
- key = k_spin_lock(&z_mm_lock);
- 
- /* Obtain an appropriately sized chunk of virtual memory */
- dest_addr = virt_region_alloc(aligned_size);
- if (!dest_addr) {
- goto fail;
- }
- 
- /* If this fails there's something amiss with virt_region_alloc() */
- __ASSERT((uintptr_t)dest_addr <
- ((uintptr_t)dest_addr + (size - 1)),
- "wraparound for virtual address %p (size %zu)",
- dest_addr, size);
- LOG_DBG("arch_mem_map(%p, 0x%lx, %zu, %x) offset %lu", dest_addr,
- aligned_phys, aligned_size, flags, addr_offset);
- arch_mem_map(dest_addr, aligned_phys, aligned_size, flags);
- k_spin_unlock(&z_mm_lock, key);
- *virt_ptr = dest_addr + addr_offset;
- return;
- fail:
- 
- /* May revisit this in the future, but for now running out of
-  * virtual address space or failing the mapping is a fatal error.
-  */
- LOG_ERR("memory mapping 0x%lx (size %zu, flags 0x%x) failed",
- phys, size, flags);
- k_panic();
- }
- void z_phys_unmap(uint8_t *virt, size_t size)
- {
- uintptr_t aligned_virt, addr_offset;
- size_t aligned_size;
- k_spinlock_key_t key;
- addr_offset = k_mem_region_align(&aligned_virt, &aligned_size,
- POINTER_TO_UINT(virt), size,
- CONFIG_MMU_PAGE_SIZE);
- __ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_virt);
- __ASSERT(aligned_virt < (aligned_virt + (aligned_size - 1)),
- "wraparound for virtual address 0x%lx (size %zu)",
- aligned_virt, aligned_size);
- key = k_spin_lock(&z_mm_lock);
- arch_mem_unmap(UINT_TO_POINTER(aligned_virt), aligned_size);
- /* Free the same aligned region that was just unmapped */
- virt_region_free(UINT_TO_POINTER(aligned_virt), aligned_size);
- k_spin_unlock(&z_mm_lock, key);
- }
- size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
- uintptr_t addr, size_t size, size_t align)
- {
- size_t addr_offset;
- 
- /* The mapped region must be page-aligned: round the address down
-  * and pad the size so the region fits exactly within page
-  * boundaries.
-  */
- *aligned_addr = ROUND_DOWN(addr, align);
- addr_offset = addr - *aligned_addr;
- *aligned_size = ROUND_UP(size + addr_offset, align);
- return addr_offset;
- }
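- /* Worked example for the helper above, assuming a 4K page size:
-  *
-  *   k_mem_region_align(&pa, &sz, 0x1234, 0x100, 0x1000)
-  *
-  * rounds the address down to pa = 0x1000, pads the size up to
-  * sz = 0x1000 (one full page containing 0x1234..0x1333), and
-  * returns 0x234, the offset of the original address within the
-  * aligned region.
-  */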
- #if defined(CONFIG_LINKER_USE_BOOT_SECTION) || defined(CONFIG_LINKER_USE_PINNED_SECTION)
- static void mark_linker_section_pinned(void *start_addr, void *end_addr,
- bool pin)
- {
- struct z_page_frame *pf;
- uint8_t *addr;
- uintptr_t pinned_start = ROUND_DOWN(POINTER_TO_UINT(start_addr),
- CONFIG_MMU_PAGE_SIZE);
- uintptr_t pinned_end = ROUND_UP(POINTER_TO_UINT(end_addr),
- CONFIG_MMU_PAGE_SIZE);
- size_t pinned_size = pinned_end - pinned_start;
- VIRT_FOREACH(UINT_TO_POINTER(pinned_start), pinned_size, addr)
- {
- pf = z_phys_to_page_frame(Z_BOOT_VIRT_TO_PHYS(addr));
- frame_mapped_set(pf, addr);
- if (pin) {
- pf->flags |= Z_PAGE_FRAME_PINNED;
- } else {
- pf->flags &= ~Z_PAGE_FRAME_PINNED;
- }
- }
- }
- #endif
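- /* Boot-time initialization of the memory management subsystem:
-  * builds the free page frame list and marks/pins every page frame
-  * that is reserved, part of the boot image, or otherwise must never
-  * be paged out.
-  */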
- void z_mem_manage_init(void)
- {
- uintptr_t phys;
- uint8_t *addr;
- struct z_page_frame *pf;
- k_spinlock_key_t key = k_spin_lock(&z_mm_lock);
- free_page_frame_list_init();
- ARG_UNUSED(addr);
- #ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES
- /* If some page frames are unavailable for use as memory, arch
-  * code will mark those at boot.
-  */
- arch_reserved_pages_update();
- #endif
- #ifdef CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
- /* All pages composing the Zephyr image are mapped at boot in a
-  * predictable way; mark their page frames accordingly.
-  */
- VIRT_FOREACH(Z_KERNEL_VIRT_START, Z_KERNEL_VIRT_SIZE, addr)
- {
- pf = z_phys_to_page_frame(Z_BOOT_VIRT_TO_PHYS(addr));
- frame_mapped_set(pf, addr);
- 
- /* For now, pin the whole Zephyr image so it is never evicted */
- pf->flags |= Z_PAGE_FRAME_PINNED;
- }
- #endif
- #ifdef CONFIG_LINKER_USE_BOOT_SECTION
- /* Pin the boot section to prevent it from being swapped out during
-  * the boot process; it is un-pinned once boot completes (see
-  * z_mem_manage_boot_finish() below).
-  */
- mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, true);
- #endif
- #ifdef CONFIG_LINKER_USE_PINNED_SECTION
- /* Pin the page frames backing the linker's pinned section */
- mark_linker_section_pinned(lnkr_pinned_start, lnkr_pinned_end, true);
- #endif
- 
- /* Any remaining page frames that are still available go onto the
-  * free list.
-  */
- Z_PAGE_FRAME_FOREACH(phys, pf) {
- if (z_page_frame_is_available(pf)) {
- free_page_frame_list_put(pf);
- }
- }
- LOG_DBG("free page frames: %zu", z_free_page_count);
- #ifdef CONFIG_DEMAND_PAGING
- #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
- z_paging_histogram_init();
- #endif
- k_mem_paging_backing_store_init();
- k_mem_paging_eviction_init();
- #endif
- #if __ASSERT_ON
- page_frames_initialized = true;
- #endif
- k_spin_unlock(&z_mm_lock, key);
- #ifndef CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
- 
- /* If the BSS section was not present in memory at boot it has not
-  * been cleared yet; do it now that paging is initialized.
-  */
- z_bss_zero();
- #endif
- }
- void z_mem_manage_boot_finish(void)
- {
- #ifdef CONFIG_LINKER_USE_BOOT_SECTION
- 
- /* Un-pin the boot section now that the boot process is complete */
- mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, false);
- #endif
- }
- #ifdef CONFIG_DEMAND_PAGING
- #ifdef CONFIG_DEMAND_PAGING_STATS
- struct k_mem_paging_stats_t paging_stats;
- extern struct k_mem_paging_histogram_t z_paging_histogram_eviction;
- extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in;
- extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out;
- #endif
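- /* Wrappers around the backing store page-in/page-out hooks which,
-  * when CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM is enabled, measure how
-  * long each operation took and record it in the corresponding paging
-  * histogram.
-  */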
- static inline void do_backing_store_page_in(uintptr_t location)
- {
- #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
- uint32_t time_diff;
- #ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
- timing_t time_start, time_end;
- time_start = timing_counter_get();
- #else
- uint32_t time_start;
- time_start = k_cycle_get_32();
- #endif
- #endif
- k_mem_paging_backing_store_page_in(location);
- #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
- #ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
- time_end = timing_counter_get();
- time_diff = (uint32_t)timing_cycles_get(&time_start, &time_end);
- #else
- time_diff = k_cycle_get_32() - time_start;
- #endif
- z_paging_histogram_inc(&z_paging_histogram_backing_store_page_in,
- time_diff);
- #endif
- }
- static inline void do_backing_store_page_out(uintptr_t location)
- {
- #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
- uint32_t time_diff;
- #ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
- timing_t time_start, time_end;
- time_start = timing_counter_get();
- #else
- uint32_t time_start;
- time_start = k_cycle_get_32();
- #endif
- #endif
- k_mem_paging_backing_store_page_out(location);
- #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
- #ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
- time_end = timing_counter_get();
- time_diff = (uint32_t)timing_cycles_get(&time_start, &time_end);
- #else
- time_diff = k_cycle_get_32() - time_start;
- #endif
- z_paging_histogram_inc(&z_paging_histogram_backing_store_page_out,
- time_diff);
- #endif
- }
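- /* The demand paging code below serializes page table access with
-  * irq_lock(), which is not sufficient when other CPUs are running,
-  * hence the build-time restriction to non-SMP configurations.
-  */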
- BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP));
- static void virt_region_foreach(void *addr, size_t size,
- void (*func)(void *))
- {
- z_mem_assert_virtual_region(addr, size);
- for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
- func((uint8_t *)addr + offset);
- }
- }
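- /* Prepare a page frame to be evicted or overwritten: decide whether
-  * its contents must be written out (dirty), obtain a backing store
-  * location for mapped frames, remove the old mapping from the page
-  * tables via arch_mem_page_out(), and, if interrupts may be
-  * re-enabled during paging I/O, mark the frame busy. Called with
-  * interrupts locked.
-  */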
- static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
- bool page_fault, uintptr_t *location_ptr)
- {
- uintptr_t phys;
- int ret;
- bool dirty = *dirty_ptr;
- phys = z_page_frame_to_phys(pf);
- __ASSERT(!z_page_frame_is_pinned(pf), "page frame 0x%lx is pinned",
- phys);
- 
- /* If the backing store holds no copy of this page, treat the page
-  * as dirty even if unmodified so its contents get written out at
-  * least once.
-  */
- if (z_page_frame_is_mapped(pf)) {
- dirty = dirty || !z_page_frame_is_backed(pf);
- }
- if (dirty || page_fault) {
- arch_mem_scratch(phys);
- }
- if (z_page_frame_is_mapped(pf)) {
- ret = k_mem_paging_backing_store_location_get(pf, location_ptr,
- page_fault);
- if (ret != 0) {
- LOG_ERR("out of backing store memory");
- return -ENOMEM;
- }
- arch_mem_page_out(pf->addr, *location_ptr);
- } else {
- /* Shouldn't happen unless this function is mis-used */
- __ASSERT(!dirty, "un-mapped page determined to be dirty");
- }
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- /* Mark the page frame as busy so it is not considered for
-  * eviction while interrupts are re-enabled.
-  */
- __ASSERT(!z_page_frame_is_busy(pf), "page frame 0x%lx is already busy",
- phys);
- pf->flags |= Z_PAGE_FRAME_BUSY;
- #endif
- 
- /* Update the dirty flag for the caller; it may have been forced
-  * true above if the page was not backed.
-  */
- *dirty_ptr = dirty;
- return 0;
- }
- static int do_mem_evict(void *addr)
- {
- bool dirty;
- struct z_page_frame *pf;
- uintptr_t location;
- int key, ret;
- uintptr_t flags, phys;
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- __ASSERT(!k_is_in_isr(),
- "%s is unavailable in ISRs with CONFIG_DEMAND_PAGING_ALLOW_IRQ",
- __func__);
- k_sched_lock();
- #endif
- key = irq_lock();
- flags = arch_page_info_get(addr, &phys, false);
- __ASSERT((flags & ARCH_DATA_PAGE_NOT_MAPPED) == 0,
- "address %p isn't mapped", addr);
- if ((flags & ARCH_DATA_PAGE_LOADED) == 0) {
- /* Un-mapped or already evicted; nothing to do */
- ret = 0;
- goto out;
- }
- dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0;
- pf = z_phys_to_page_frame(phys);
- __ASSERT(pf->addr == addr, "page frame address mismatch");
- ret = page_frame_prepare_locked(pf, &dirty, false, &location);
- __ASSERT(ret == 0, "failed to prepare page frame");
- if (ret != 0) {
- goto out;
- }
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- irq_unlock(key);
- #endif
- if (dirty) {
- do_backing_store_page_out(location);
- }
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- key = irq_lock();
- #endif
- page_frame_free_locked(pf);
- out:
- irq_unlock(key);
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- k_sched_unlock();
- #endif
- return ret;
- }
- int k_mem_page_out(void *addr, size_t size)
- {
- __ASSERT(page_frames_initialized, "%s called on %p too early", __func__,
- addr);
- z_mem_assert_virtual_region(addr, size);
- for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
- void *pos = (uint8_t *)addr + offset;
- int ret;
- ret = do_mem_evict(pos);
- if (ret != 0) {
- return ret;
- }
- }
- return 0;
- }
- int z_page_frame_evict(uintptr_t phys)
- {
- int key, ret;
- struct z_page_frame *pf;
- bool dirty;
- uintptr_t flags;
- uintptr_t location;
- __ASSERT(page_frames_initialized, "%s called on 0x%lx too early",
- __func__, phys);
- /* Very similar to do_mem_evict(), except the page frame is
-  * identified by physical address rather than by a virtual mapping.
-  */
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- __ASSERT(!k_is_in_isr(),
- "%s is unavailable in ISRs with CONFIG_DEMAND_PAGING_ALLOW_IRQ",
- __func__);
- k_sched_lock();
- #endif
- key = irq_lock();
- pf = z_phys_to_page_frame(phys);
- if (!z_page_frame_is_mapped(pf)) {
- /* Nothing to do; the frame holds no mapped data */
- ret = 0;
- goto out;
- }
- flags = arch_page_info_get(pf->addr, NULL, false);
- 
- /* Shouldn't ever happen */
- __ASSERT((flags & ARCH_DATA_PAGE_LOADED) != 0, "data page not loaded");
- dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0;
- ret = page_frame_prepare_locked(pf, &dirty, false, &location);
- if (ret != 0) {
- goto out;
- }
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- irq_unlock(key);
- #endif
- if (dirty) {
- do_backing_store_page_out(location);
- }
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- key = irq_lock();
- #endif
- page_frame_free_locked(pf);
- out:
- irq_unlock(key);
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- k_sched_unlock();
- #endif
- return ret;
- }
- static inline void paging_stats_faults_inc(struct k_thread *faulting_thread,
- int key)
- {
- #ifdef CONFIG_DEMAND_PAGING_STATS
- bool is_irq_unlocked = arch_irq_unlocked(key);
- paging_stats.pagefaults.cnt++;
- if (is_irq_unlocked) {
- paging_stats.pagefaults.irq_unlocked++;
- } else {
- paging_stats.pagefaults.irq_locked++;
- }
- #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
- faulting_thread->paging_stats.pagefaults.cnt++;
- if (is_irq_unlocked) {
- faulting_thread->paging_stats.pagefaults.irq_unlocked++;
- } else {
- faulting_thread->paging_stats.pagefaults.irq_locked++;
- }
- #else
- ARG_UNUSED(faulting_thread);
- #endif
- #ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- if (k_is_in_isr()) {
- paging_stats.pagefaults.in_isr++;
- #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
- faulting_thread->paging_stats.pagefaults.in_isr++;
- #endif
- }
- #endif
- #endif
- }
- static inline void paging_stats_eviction_inc(struct k_thread *faulting_thread,
- bool dirty)
- {
- #ifdef CONFIG_DEMAND_PAGING_STATS
- if (dirty) {
- paging_stats.eviction.dirty++;
- } else {
- paging_stats.eviction.clean++;
- }
- #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
- if (dirty) {
- faulting_thread->paging_stats.eviction.dirty++;
- } else {
- faulting_thread->paging_stats.eviction.clean++;
- }
- #else
- ARG_UNUSED(faulting_thread);
- #endif
- #endif
- }
- static inline struct z_page_frame *do_eviction_select(bool *dirty)
- {
- struct z_page_frame *pf;
- #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
- uint32_t time_diff;
- #ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
- timing_t time_start, time_end;
- time_start = timing_counter_get();
- #else
- uint32_t time_start;
- time_start = k_cycle_get_32();
- #endif
- #endif
- pf = k_mem_paging_eviction_select(dirty);
- #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
- #ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
- time_end = timing_counter_get();
- time_diff = (uint32_t)timing_cycles_get(&time_start, &time_end);
- #else
- time_diff = k_cycle_get_32() - time_start;
- #endif
- z_paging_histogram_inc(&z_paging_histogram_eviction, time_diff);
- #endif
- return pf;
- }
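- /* Core demand paging logic, shared by the page fault handler
-  * (z_page_fault) and by k_mem_page_in()/k_mem_pin(): locate the data
-  * page, obtain a free (or evicted) page frame, page the data in from
-  * the backing store, and install the new mapping.
-  */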
- static bool do_page_fault(void *addr, bool pin)
- {
- struct z_page_frame *pf;
- int key, ret;
- uintptr_t page_in_location, page_out_location;
- enum arch_page_location status;
- bool result;
- bool dirty = false;
- struct k_thread *faulting_thread = _current_cpu->current;
- __ASSERT(page_frames_initialized, "page fault at %p happened too early",
- addr);
- LOG_DBG("page fault at %p", addr);
- 
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- /* Lock the scheduler so no other thread runs during the page-in/
-  * page-out operation; interrupts are re-enabled around the backing
-  * store calls below. In this configuration page faults in an ISR
-  * are a bug: all ISR code and data must be pinned.
-  */
- k_sched_lock();
- __ASSERT(!k_is_in_isr(), "ISR page faults are forbidden");
- #endif
- key = irq_lock();
- status = arch_page_location_get(addr, &page_in_location);
- if (status == ARCH_PAGE_LOCATION_BAD) {
- /* Return false to treat this as a fatal error */
- result = false;
- goto out;
- }
- result = true;
- if (status == ARCH_PAGE_LOCATION_PAGED_IN) {
- if (pin) {
- /* The page is resident; page_in_location is its physical address */
- uintptr_t phys = page_in_location;
- pf = z_phys_to_page_frame(phys);
- pf->flags |= Z_PAGE_FRAME_PINNED;
- }
- /* Already paged in; nothing more to do */
- goto out;
- }
- __ASSERT(status == ARCH_PAGE_LOCATION_PAGED_OUT,
- "unexpected status value %d", status);
- paging_stats_faults_inc(faulting_thread, key);
- pf = free_page_frame_list_get();
- if (pf == NULL) {
- /* Need to evict a page frame */
- pf = do_eviction_select(&dirty);
- __ASSERT(pf != NULL, "failed to get a page frame");
- LOG_DBG("evicting %p at 0x%lx", pf->addr,
- z_page_frame_to_phys(pf));
- paging_stats_eviction_inc(faulting_thread, dirty);
- }
- ret = page_frame_prepare_locked(pf, &dirty, true, &page_out_location);
- __ASSERT(ret == 0, "failed to prepare page frame");
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- irq_unlock(key);
- /* Interrupts are now unlocked if they were unlocked when the fault
-  * occurred; other page faults may be serviced while the backing
-  * store I/O below is in progress.
-  */
- #endif
- if (dirty) {
- do_backing_store_page_out(page_out_location);
- }
- do_backing_store_page_in(page_in_location);
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- key = irq_lock();
- pf->flags &= ~Z_PAGE_FRAME_BUSY;
- #endif
- if (pin) {
- pf->flags |= Z_PAGE_FRAME_PINNED;
- }
- pf->flags |= Z_PAGE_FRAME_MAPPED;
- pf->addr = UINT_TO_POINTER(POINTER_TO_UINT(addr)
- & ~(CONFIG_MMU_PAGE_SIZE - 1));
- arch_mem_page_in(addr, z_page_frame_to_phys(pf));
- k_mem_paging_backing_store_page_finalize(pf, page_in_location);
- out:
- irq_unlock(key);
- #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
- k_sched_unlock();
- #endif
- return result;
- }
- static void do_page_in(void *addr)
- {
- bool ret;
- ret = do_page_fault(addr, false);
- __ASSERT(ret, "unmapped memory address %p", addr);
- (void)ret;
- }
- void k_mem_page_in(void *addr, size_t size)
- {
- __ASSERT(!IS_ENABLED(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || !k_is_in_isr(),
- "%s may not be called in ISRs if CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled",
- __func__);
- virt_region_foreach(addr, size, do_page_in);
- }
- static void do_mem_pin(void *addr)
- {
- bool ret;
- ret = do_page_fault(addr, true);
- __ASSERT(ret, "unmapped memory address %p", addr);
- (void)ret;
- }
- void k_mem_pin(void *addr, size_t size)
- {
- __ASSERT(!IS_ENABLED(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || !k_is_in_isr(),
- "%s may not be called in ISRs if CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled",
- __func__);
- virt_region_foreach(addr, size, do_mem_pin);
- }
- bool z_page_fault(void *addr)
- {
- return do_page_fault(addr, false);
- }
- static void do_mem_unpin(void *addr)
- {
- struct z_page_frame *pf;
- int key;
- uintptr_t flags, phys;
- key = irq_lock();
- flags = arch_page_info_get(addr, &phys, false);
- __ASSERT((flags & ARCH_DATA_PAGE_NOT_MAPPED) == 0,
- "invalid data page at %p", addr);
- if ((flags & ARCH_DATA_PAGE_LOADED) != 0) {
- pf = z_phys_to_page_frame(phys);
- pf->flags &= ~Z_PAGE_FRAME_PINNED;
- }
- irq_unlock(key);
- }
- void k_mem_unpin(void *addr, size_t size)
- {
- __ASSERT(page_frames_initialized, "%s called on %p too early", __func__,
- addr);
- virt_region_foreach(addr, size, do_mem_unpin);
- }
- #endif