/arch/or1k/mmu.c:
     28: void or1k_invalidate_tlb(vaddr_t vaddr, uint count) {  [or1k_invalidate_tlb(), argument]
     67: vaddr += PAGE_SIZE;  [or1k_invalidate_tlb()]
     72: uint index = vaddr / SECTION_SIZE;  [arch_mmu_query()]
     82: index = (vaddr % SECTION_SIZE) / PAGE_SIZE;  [arch_mmu_query()]
    108: if (!IS_PAGE_ALIGNED(vaddr))  [arch_mmu_unmap()]
    113: uint index = vaddr / SECTION_SIZE;  [arch_mmu_unmap()]
    116: vaddr += PAGE_SIZE;  [arch_mmu_unmap()]
    126: vaddr += SECTION_SIZE;  [arch_mmu_unmap()]
    155: l1_index = vaddr / SECTION_SIZE;  [arch_mmu_map()]
    160: vaddr += SECTION_SIZE;  [arch_mmu_map()]
    [all …]
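The query path splits a virtual address twice: vaddr / SECTION_SIZE picks the level-1 entry, and (vaddr % SECTION_SIZE) / PAGE_SIZE picks the page within that section. A minimal sketch of the same arithmetic, with illustrative sizes (the real or1k PAGE_SIZE and SECTION_SIZE come from the arch headers and may differ):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sizes only; not the actual or1k constants. */
    #define PAGE_SIZE    (8u * 1024u)
    #define SECTION_SIZE (16u * 1024u * 1024u)

    int main(void) {
        uint32_t vaddr = 0x12345678u;
        uint32_t l1 = vaddr / SECTION_SIZE;               /* section index (L1 entry) */
        uint32_t l2 = (vaddr % SECTION_SIZE) / PAGE_SIZE; /* page within that section (L2 entry) */
        printf("vaddr %#x -> l1 index %u, l2 index %u\n", vaddr, l1, l2);
        return 0;
    }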
|
/arch/arm64/mmu.c:
     38: return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);  [is_valid_vaddr()]
    127: vaddr_rem = vaddr;  [arch_mmu_query()]
    229: if (!vaddr)  [alloc_page_table()]
    233: free(vaddr);  [alloc_page_table()]
    254: free(vaddr);  [free_page_table()]
    261: void *vaddr;  [arm64_mmu_get_page_table(), local]
    282: return vaddr;  [arm64_mmu_get_page_table()]
    366: vaddr += chunk_size;  [arm64_mmu_unmap_pt()]
    380: vaddr_t vaddr = vaddr_in;  [arm64_mmu_map_pt(), local]
    436: vaddr += chunk_size;  [arm64_mmu_map_pt()]
    [all …]
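Both arm64_mmu_map_pt() and arm64_mmu_unmap_pt() advance vaddr by a per-iteration chunk_size as they walk the tables. A sketch of that walking pattern, under the assumption that a chunk never crosses the block boundary of the current level (the 2 MiB block size and the walk() name are illustrative, not the file's):

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t vaddr_t;

    #define BLOCK_SIZE (2u * 1024u * 1024u)   /* assumed block size for one level: 2 MiB */

    /* Hypothetical walker: cover [vaddr, vaddr + size) one chunk at a time,
     * where a chunk never crosses a block boundary, so each iteration deals
     * with exactly one table entry. */
    static void walk(vaddr_t vaddr, size_t size) {
        while (size > 0) {
            size_t to_boundary = BLOCK_SIZE - (vaddr & (BLOCK_SIZE - 1u));
            size_t chunk_size = size < to_boundary ? size : to_boundary;
            /* ... install or clear the entry covering this chunk ... */
            vaddr += chunk_size;
            size -= chunk_size;
        }
    }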
|
/arch/arm64/start.S:
     24: vaddr .req x23  [label]
    148: subs size, tmp, vaddr
    154: subs size, tmp, vaddr
    158: subs size, tmp, vaddr
    162: subs size, tmp, vaddr
    173: orr tmp, vaddr, paddr
    179: eor vaddr, vaddr, #(~0 << MMU_KERNEL_SIZE_SHIFT)
    202: orr tmp2, vaddr, paddr
    226: add vaddr, vaddr, tmp
    233: eor vaddr, vaddr, #(~0 << MMU_KERNEL_SIZE_SHIFT)
    [all …]
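The eor with #(~0 << MMU_KERNEL_SIZE_SHIFT) flips every address bit above the kernel-window shift, which appears to toggle an address between its low form and the all-ones-prefixed kernel virtual form; applying it twice restores the original. A C model of that transform, with an assumed shift value (the real MMU_KERNEL_SIZE_SHIFT is defined by the arm64 MMU headers):

    #include <assert.h>
    #include <stdint.h>

    /* Assumed 30-bit kernel window for illustration only. */
    #define MMU_KERNEL_SIZE_SHIFT 30

    /* XOR with ~0 << shift flips all bits above the window. */
    static uint64_t toggle_alias(uint64_t vaddr) {
        return vaddr ^ (~0ull << MMU_KERNEL_SIZE_SHIFT);
    }

    int main(void) {
        uint64_t kernel_va = 0xffffffffc0001000ull;   /* example high alias */
        uint64_t offset    = toggle_alias(kernel_va); /* -> 0x1000 */
        assert(offset == 0x1000);
        assert(toggle_alias(offset) == kernel_va);    /* involution */
        return 0;
    }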
|
/arch/x86/64/mmu.c:
     58: static bool x86_mmu_check_vaddr(vaddr_t vaddr) {  [x86_mmu_check_vaddr(), argument]
     59: uint64_t addr = (uint64_t)vaddr;  [x86_mmu_check_vaddr()]
     98: return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);  [is_valid_vaddr()]
    481: tlbsync_local(vaddr);  [x86_mmu_unmap_entry()]
    505: tlbsync_local(vaddr);  [x86_mmu_unmap_entry()]
    513: if (!(x86_mmu_check_vaddr(vaddr)))  [x86_mmu_unmap()]
    519: vaddr_t next_aligned_v_addr = vaddr;  [x86_mmu_unmap()]
    533: if (!is_valid_vaddr(aspace, vaddr))  [arch_mmu_unmap()]
    586: if (!is_valid_vaddr(aspace, vaddr))  [arch_mmu_query()]
    614: if (!is_valid_vaddr(aspace, vaddr))  [arch_mmu_map()]
    [all …]
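Given the cast at line 59, x86_mmu_check_vaddr() is almost certainly enforcing x86-64's canonical-address rule: with 48-bit virtual addressing, bits 63..47 must all equal bit 47. A standalone sketch of that rule (the body here is an assumption, not copied from the file):

    #include <stdbool.h>
    #include <stdint.h>

    /* Canonical check for 48-bit virtual addressing: the top 17 bits
     * (63..47) must be a sign extension of bit 47, i.e. all zeros or all ones. */
    static bool is_canonical_48(uint64_t addr) {
        uint64_t top = addr >> 47;
        return top == 0 || top == 0x1ffff;
    }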
|
/arch/x86/32/mmu.c:
    114: const map_addr_t pde = get_pd_entry_from_pd_table(vaddr, pdt);  [x86_mmu_get_mapping()]
    131: const map_addr_t pte = get_pt_entry_from_page_table(vaddr, pt);  [x86_mmu_get_mapping()]
    140: *paddr = get_pfn_from_pte(pte) + (vaddr & PAGE_OFFSET_MASK_4KB);  [x86_mmu_get_mapping()]
    269: tlbsync_local(vaddr);  [x86_mmu_unmap_entry()]
    278: x86_mmu_unmap_entry(vaddr, level - 1, next_table_addr);  [x86_mmu_unmap_entry()]
    293: tlbsync_local(vaddr);  [x86_mmu_unmap_entry()]
    303: if (!IS_ALIGNED(vaddr, PAGE_SIZE))  [x86_mmu_unmap()]
    309: vaddr_t next_aligned_v_addr = vaddr;  [x86_mmu_unmap()]
    323: if (!IS_ALIGNED(vaddr, PAGE_SIZE))  [arch_mmu_unmap()]
    329: return (x86_mmu_unmap(aspace->cr3, vaddr, count));  [arch_mmu_unmap()]
    [all …]
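x86_mmu_get_mapping() is the classic two-level 32-bit non-PAE walk: the page-directory index, page-table index, and byte offset are fixed bit fields of the vaddr, and line 140 composes the result from the PTE's frame base plus the 4 KiB page offset. A sketch of the field extraction (helper names here are hypothetical, not the file's):

    #include <stdint.h>

    /* Classic 32-bit non-PAE split: 10-bit PD index, 10-bit PT index,
     * 12-bit page offset. */
    static uint32_t pd_index(uint32_t vaddr) { return vaddr >> 22; }
    static uint32_t pt_index(uint32_t vaddr) { return (vaddr >> 12) & 0x3ffu; }
    static uint32_t page_off(uint32_t vaddr) { return vaddr & 0xfffu; }

    /* Final translation mirrors line 140 above: frame base from the PTE
     * plus the offset within the 4 KiB page. */
    static uint32_t translate(uint32_t pte, uint32_t vaddr) {
        return (pte & ~0xfffu) + page_off(vaddr);
    }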
|
/arch/arm/arm/mmu.c:
    143: DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));  [arm_mmu_map_section()]
    147: index = vaddr / SECTION_SIZE;  [arm_mmu_map_section()]
    168: DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));  [arm_mmu_unmap_section()]
    242: uint index = vaddr / MB;  [arch_mmu_query()]
    494: DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));  [arch_mmu_map()]
    516: vaddr += SECTION_SIZE;  [arch_mmu_map()]
    558: vaddr += PAGE_SIZE;  [arch_mmu_map()]
    583: DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));  [arch_mmu_unmap()]
    584: if (!IS_PAGE_ALIGNED(vaddr))  [arch_mmu_unmap()]
    608: vaddr += SECTION_SIZE;  [arch_mmu_unmap()]
    [all …]
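arch_mmu_map() steps by SECTION_SIZE where it can install a 1 MiB short-descriptor section and falls back to PAGE_SIZE steps otherwise. A sketch of the eligibility test that choice implies, assuming the standard ARM sizes (4 KiB pages, 1 MiB sections); the actual predicate in the file may differ:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uintptr_t vaddr_t;
    typedef uintptr_t paddr_t;

    #define PAGE_SIZE    0x1000u     /* 4 KiB page */
    #define SECTION_SIZE 0x100000u   /* 1 MiB short-descriptor section */

    /* Use a section mapping only when both addresses are section aligned
     * and at least a section's worth of pages remains to map. */
    static bool can_use_section(vaddr_t v, paddr_t p, uint32_t count) {
        return (v % SECTION_SIZE) == 0 && (p % SECTION_SIZE) == 0 &&
               count >= SECTION_SIZE / PAGE_SIZE;
    }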
|
/arch/arm/arm/include/arch/aspace.h:
     28: static inline bool arch_mmu_is_valid_vaddr(struct arch_aspace *aspace, vaddr_t vaddr) {  [arch_mmu_is_valid_vaddr(), argument]
     29: return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);  [arch_mmu_is_valid_vaddr()]
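The range check is written as vaddr <= base + size - 1 rather than vaddr < base + size so that an address space ending at the very top of the address range does not wrap base + size around to zero. A small demonstration of the difference, using 32-bit addresses for compactness:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t vaddr_t;   /* 32-bit for a compact example */

    static bool valid(vaddr_t base, vaddr_t size, vaddr_t v) {
        return v >= base && v <= base + size - 1;   /* last byte, no wraparound */
    }

    int main(void) {
        /* Aspace covering the top half of a 32-bit space: base + size wraps
         * to 0, so `v < base + size` would reject everything, while
         * `<= base + size - 1` still accepts the whole range. */
        assert(valid(0x80000000u, 0x80000000u, 0xffffffffu));
        assert(!valid(0x80000000u, 0x80000000u, 0x7fffffffu));
        return 0;
    }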
|
/arch/include/arch/mmu.h:
     49: int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags) __NON…
     50: int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) __NONNULL((1));
     51: status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) __NONNUL…
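These three prototypes are the arch-neutral contract that each per-arch mmu.c above implements: map and unmap operate on a count of pages, while query returns the translation and flags for a single vaddr. A hedged usage sketch (ARCH_MMU_FLAG_CACHED is assumed from LK's naming conventions; check the header for the real flag names):

    #include <arch/mmu.h>

    /* Map `pages` pages of `paddr` at `vaddr`, read the translation back,
     * then tear the mapping down again. */
    static status_t map_query_unmap(arch_aspace_t *aspace, vaddr_t vaddr,
                                    paddr_t paddr, uint pages) {
        int err = arch_mmu_map(aspace, vaddr, paddr, pages, ARCH_MMU_FLAG_CACHED);
        if (err < 0)
            return err;

        paddr_t out_paddr;
        uint out_flags;
        status_t st = arch_mmu_query(aspace, vaddr, &out_paddr, &out_flags);
        if (st < 0)
            return st;

        return arch_mmu_unmap(aspace, vaddr, pages);
    }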
|
/arch/riscv/mmu.cpp:
    313: LTRACEF("vaddr %#lx\n", vaddr);  [riscv_pt_walk()]
    320: uint index = vaddr_to_index(vaddr, level);  [riscv_pt_walk()]
    325: level, index, ptep, *ptep, vaddr);  [riscv_pt_walk()]
    338: index = vaddr_to_index(vaddr, level);  [riscv_pt_walk()]
    343: auto ret = callback(level, index, pte, &vaddr);  [riscv_pt_walk()]
    379: index = vaddr_to_index(vaddr, level);  [riscv_pt_walk()]
    419: level, index, pte, *vaddr, paddr, count, flags);  [arch_mmu_map()]
    452: *vaddr += PAGE_SIZE;  [arch_mmu_map()]
    497: *paddr = pa | (*vaddr & page_mask);  [arch_mmu_query()]
    552: *vaddr += PAGE_SIZE;  [arch_mmu_unmap()]
    [all …]
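riscv_pt_walk() repeatedly asks vaddr_to_index() which slot a vaddr selects at a given level. Under RISC-V's Sv39/Sv48 schemes each level consumes 9 index bits above the 12-bit page offset, so a plausible model of that helper, plus the leaf composition from line 497, is the following (this is the spec's arithmetic, not necessarily the file's exact code):

    #include <stdint.h>

    /* Sv39/Sv48 style: 12-bit page offset, then 9 index bits per level.
     * Level 0 is taken as the leaf here; the file's numbering may differ. */
    static uint32_t vaddr_to_index(uintptr_t vaddr, uint32_t level) {
        return (vaddr >> (12 + 9 * level)) & 0x1ffu;
    }

    /* Leaf translation mirrors line 497 above: physical page base OR'ed
     * with the offset bits the leaf level does not translate. */
    static uintptr_t leaf_paddr(uintptr_t pa, uintptr_t vaddr, uintptr_t page_mask) {
        return pa | (vaddr & page_mask);
    }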
|
/arch/arm64/include/arch/arm64/mmu.h:
    271: int arm64_mmu_map(vaddr_t vaddr, paddr_t paddr, size_t size, pte_t attrs,
    275: int arm64_mmu_unmap(vaddr_t vaddr, size_t size,
|