Lines Matching refs:vaddr

Cross-reference search result: each hit below is prefixed with its line number in the source file and suffixed with the enclosing function, plus how vaddr is used there (in this file, always as an argument).

40 static inline map_addr_t get_pdp_entry_from_pdp_table(vaddr_t vaddr, map_addr_t pdpt) {  in get_pdp_entry_from_pdp_table()  argument
44 pdp_index = ((vaddr >> PDP_SHIFT) & ((1ul << PDPT_ADDR_OFFSET) - 1)); in get_pdp_entry_from_pdp_table()
65 static inline map_addr_t get_pd_entry_from_pd_table(vaddr_t vaddr, map_addr_t pdt) { in get_pd_entry_from_pd_table() argument
69 pd_index = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1)); in get_pd_entry_from_pd_table()
74 static inline map_addr_t get_pt_entry_from_page_table(vaddr_t vaddr, map_addr_t pt) { in get_pt_entry_from_page_table() argument
78 pt_index = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1)); in get_pt_entry_from_page_table()
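
All three get_*_entry helpers above share one pattern: shift vaddr down to the level's index field, then mask to the table width. A minimal standalone sketch of that index math, assuming the usual x86-64 long-mode values (the listing's real PT_SHIFT/PD_SHIFT/PDP_SHIFT and ADDR_OFFSET constants live in arch headers not shown in this search result):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed x86-64 long-mode values, not the file's actual headers. */
    #define PT_SHIFT    12  /* bits 12..20 select the page-table entry */
    #define PD_SHIFT    21  /* bits 21..29 select the page-directory entry */
    #define PDP_SHIFT   30  /* bits 30..38 select the PDP entry */
    #define ADDR_OFFSET 9   /* 2^9 = 512 entries per table */

    int main(void) {
        uint64_t vaddr = 0x00007f12345ff000ULL;  /* arbitrary sample VA */
        printf("pdp %lu pd %lu pt %lu\n",
               (unsigned long)((vaddr >> PDP_SHIFT) & ((1u << ADDR_OFFSET) - 1)),
               (unsigned long)((vaddr >> PD_SHIFT)  & ((1u << ADDR_OFFSET) - 1)),
               (unsigned long)((vaddr >> PT_SHIFT)  & ((1u << ADDR_OFFSET) - 1)));
        return 0;
    }
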
138 status_t x86_mmu_get_mapping(map_addr_t init_table, vaddr_t vaddr, uint32_t *ret_level, in x86_mmu_get_mapping() argument
157 pdt = get_pdp_entry_from_pdp_table(vaddr, pdpt); in x86_mmu_get_mapping()
164 pt = get_pd_entry_from_pd_table(vaddr, pdt); in x86_mmu_get_mapping()
175 pt = get_pd_entry_from_pd_table(vaddr, pdt); in x86_mmu_get_mapping()
185 *last_valid_entry = get_pfn_from_pt(pt) + (vaddr & PAGE_OFFSET_MASK_2MB); in x86_mmu_get_mapping()
188 *last_valid_entry = get_pfn_from_pde(pt) + (vaddr & PAGE_OFFSET_MASK_4MB); in x86_mmu_get_mapping()
195 pte = get_pt_entry_from_page_table(vaddr, pt); in x86_mmu_get_mapping()
203 *last_valid_entry = get_pfn_from_pte(pte) + (vaddr & PAGE_OFFSET_MASK_4KB); in x86_mmu_get_mapping()
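
Once the walk bottoms out, lines 185/188/203 rebuild a physical address by adding the page-offset bits of vaddr back onto the frame number pulled from the entry. A hedged sketch of the 4 KB case at line 203, with assumed mask values:

    #include <stdint.h>

    #define PAGE_OFFSET_MASK_4KB ((1ULL << 12) - 1)  /* assumed: low 12 bits */
    #define X86_PG_FRAME 0x000ffffffffff000ULL       /* assumed PTE frame mask */

    /* Sketch of the get_pfn_from_pte() + offset merge seen at line 203. */
    uint64_t pte_to_paddr(uint64_t pte, uint64_t vaddr) {
        uint64_t pfn = pte & X86_PG_FRAME;            /* strip flag bits */
        return pfn + (vaddr & PAGE_OFFSET_MASK_4KB);  /* add page offset */
    }
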
216 vaddr_t vaddr, arch_flags_t in_flags, in x86_mmu_check_mapping() argument
224 (!IS_ALIGNED(vaddr, PAGE_SIZE)) || in x86_mmu_check_mapping()
229 status = x86_mmu_get_mapping(init_table, vaddr, ret_level, &existing_flags, last_valid_entry); in x86_mmu_check_mapping()
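
x86_mmu_check_mapping (line 216) rejects misaligned inputs, then reuses x86_mmu_get_mapping (line 229) and compares the flags it got back against the ones requested. A plausible sketch of that comparison, hypothetical rather than the file's exact test:

    #include <stdint.h>

    /* Hypothetical check: the existing mapping must carry every
     * requested flag bit. */
    int flags_cover(uint64_t existing_flags, uint64_t in_flags) {
        return (existing_flags & in_flags) == in_flags;
    }
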
249 static void update_pdp_entry(vaddr_t vaddr, map_addr_t pdpt, map_addr_t *m, arch_flags_t flags) { in update_pdp_entry() argument
253 pdp_index = ((vaddr >> PDP_SHIFT) & ((1ul << PDPT_ADDR_OFFSET) - 1)); in update_pdp_entry()
259 static void update_pt_entry(vaddr_t vaddr, map_addr_t paddr, map_addr_t pt, arch_flags_t flags) { in update_pt_entry() argument
263 pt_index = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1)); in update_pt_entry()
270 static void update_pd_entry(vaddr_t vaddr, map_addr_t pdt, paddr_t m, arch_flags_t flags) { in update_pd_entry() argument
274 pd_index = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1)); in update_pd_entry()
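
The update_*_entry helpers (lines 249-274) are the write-side mirror of the lookup helpers: same shift-and-mask to pick the slot, then a single store of frame address plus flags. A sketch of update_pt_entry (line 259), with assumed constants and an assumed "present" bit name:

    #include <stdint.h>

    #define PT_SHIFT     12   /* assumed, as above */
    #define ADDR_OFFSET  9    /* assumed 512-entry tables */
    #define X86_MMU_PG_P 0x1u /* assumed "present" flag bit */

    /* Sketch of update_pt_entry() (line 259): pick the slot from vaddr,
     * then store frame | flags | present in one write. */
    void pt_entry_write(uint64_t *pt, uint64_t vaddr,
                        uint64_t paddr, uint64_t flags) {
        uint32_t pt_index = (vaddr >> PT_SHIFT) & ((1u << ADDR_OFFSET) - 1);
        pt[pt_index] = paddr | flags | X86_MMU_PG_P;
    }
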
305 vaddr_t vaddr, arch_flags_t mmu_flags) { in x86_mmu_add_mapping() argument
314 if ((!IS_ALIGNED(vaddr, PAGE_SIZE)) || (!IS_ALIGNED(paddr, PAGE_SIZE)) ) in x86_mmu_add_mapping()
319 pdt = get_pdp_entry_from_pdp_table(vaddr, init_table); in x86_mmu_add_mapping()
327 update_pdp_entry(vaddr, init_table, m, get_x86_arch_flags(mmu_flags)); in x86_mmu_add_mapping()
333 pt = get_pd_entry_from_pd_table(vaddr, pdt); in x86_mmu_add_mapping()
345 update_pd_entry(vaddr, pdt, m, get_x86_arch_flags(mmu_flags)); in x86_mmu_add_mapping()
349 pt = get_pd_entry_from_pd_table(vaddr, init_table); in x86_mmu_add_mapping()
361 update_pd_entry(vaddr, init_table, pd_paddr, get_x86_arch_flags(mmu_flags)); in x86_mmu_add_mapping()
367 update_pt_entry(vaddr, paddr, pt, get_x86_arch_flags(mmu_flags)); in x86_mmu_add_mapping()
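
x86_mmu_add_mapping (lines 305-367) walks top-down, allocating any intermediate table that is missing (lines 327, 345, 361) before writing the final PT entry (line 367). A rough shape of that flow; the helpers here are hypothetical stand-ins, and the real code also handles large pages and error unwinding:

    #include <stdint.h>

    typedef uint64_t vaddr_t, paddr_t, map_addr_t, arch_flags_t;
    typedef int status_t;
    #define NO_ERROR 0

    /* Hypothetical stand-ins for the file's real helpers. */
    extern map_addr_t  walk_one_level(vaddr_t vaddr, map_addr_t table);
    extern int         entry_present(map_addr_t entry);
    extern map_addr_t *alloc_table(void);
    extern void        link_entry(vaddr_t vaddr, map_addr_t table,
                                  map_addr_t *child, arch_flags_t flags);
    extern void        write_leaf(vaddr_t vaddr, paddr_t paddr,
                                  map_addr_t pt, arch_flags_t flags);

    status_t add_mapping_sketch(map_addr_t top, vaddr_t vaddr,
                                paddr_t paddr, arch_flags_t flags) {
        map_addr_t pdt = walk_one_level(vaddr, top);   /* PDP level */
        if (!entry_present(pdt)) {                     /* no PD yet */
            map_addr_t *m = alloc_table();
            link_entry(vaddr, top, m, flags);
            pdt = (map_addr_t)(uintptr_t)m;
        }
        map_addr_t pt = walk_one_level(vaddr, pdt);    /* PD level */
        if (!entry_present(pt)) {                      /* no PT yet */
            map_addr_t *m = alloc_table();
            link_entry(vaddr, pdt, m, flags);
            pt = (map_addr_t)(uintptr_t)m;
        }
        write_leaf(vaddr, paddr, pt, flags);           /* PT level */
        return NO_ERROR;
    }
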
384 static void x86_mmu_unmap_entry(vaddr_t vaddr, int level, map_addr_t table_entry) { in x86_mmu_unmap_entry() argument
394 offset = ((vaddr >> PDP_SHIFT) & ((1 << PDPT_ADDR_OFFSET) - 1)); in x86_mmu_unmap_entry()
401 offset = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1)); in x86_mmu_unmap_entry()
407 offset = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1)); in x86_mmu_unmap_entry()
419 x86_mmu_unmap_entry(vaddr, level, (map_addr_t)next_table_addr); in x86_mmu_unmap_entry()
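
x86_mmu_unmap_entry (line 384) switches on level to compute the right index (lines 394/401/407), then recurses into the next table (line 419) before invalidating the entry. The dispatch, sketched with assumed shift/width values and hypothetical level tags:

    #include <stdint.h>

    enum { PDP_L = 3, PD_L = 2, PT_L = 1 };  /* hypothetical level tags */

    uint32_t index_for_level(uint64_t vaddr, int level) {
        switch (level) {
        case PDP_L: return (vaddr >> 30) & 0x1ffu;  /* assumed 9-bit index */
        case PD_L:  return (vaddr >> 21) & 0x1ffu;
        default:    return (vaddr >> 12) & 0x1ffu;
        }
    }
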
441 status_t x86_mmu_unmap(map_addr_t init_table, vaddr_t vaddr, uint count) { in x86_mmu_unmap() argument
445 if (!IS_ALIGNED(vaddr, PAGE_SIZE)) in x86_mmu_unmap()
451 next_aligned_v_addr = vaddr; in x86_mmu_unmap()
464 int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) { in arch_mmu_unmap() argument
469 if (!IS_ALIGNED(vaddr, PAGE_SIZE)) in arch_mmu_unmap()
478 return (x86_mmu_unmap(X86_PHYS_TO_VIRT(init_table_from_cr3), vaddr, count)); in arch_mmu_unmap()
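
x86_mmu_unmap (line 441) walks the range one page at a time starting from next_aligned_v_addr (line 451); arch_mmu_unmap (line 464) is the thin wrapper that validates alignment, reads CR3, and converts it with X86_PHYS_TO_VIRT before delegating (line 478). The per-page loop, sketched with an assumed PAGE_SIZE and a hypothetical unmap_one() standing in for the top-level x86_mmu_unmap_entry call:

    #include <stdint.h>

    #define PAGE_SIZE 4096u                    /* assumed 4 KB pages */
    extern void unmap_one(uint64_t vaddr, uint64_t table);  /* hypothetical */

    void unmap_range(uint64_t table, uint64_t vaddr, unsigned count) {
        uint64_t next_aligned_v_addr = vaddr;  /* mirrors line 451 */
        while (count-- > 0) {
            unmap_one(next_aligned_v_addr, table);
            next_aligned_v_addr += PAGE_SIZE;  /* advance one page */
        }
    }
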
521 status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) { in arch_mmu_query() argument
527 LTRACEF("aspace %p, vaddr 0x%lx, paddr %p, flags %p\n", aspace, vaddr, paddr, flags); in arch_mmu_query()
537 stat = x86_mmu_get_mapping(X86_PHYS_TO_VIRT(current_cr3_val), vaddr, &ret_level, &ret_flags, &last_valid_entry); in arch_mmu_query()
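
arch_mmu_query (line 521) resolves vaddr against the live CR3 and returns the physical address and flags through out-parameters. A hypothetical caller, with the status_t/NO_ERROR convention assumed from the rest of the listing:

    #include <stdio.h>

    typedef unsigned long vaddr_t, paddr_t;
    typedef unsigned int uint;
    typedef int status_t;
    typedef struct arch_aspace arch_aspace_t;
    #define NO_ERROR 0

    extern status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr,
                                   paddr_t *paddr, uint *flags);

    /* Hypothetical caller: translate one VA and print the result. */
    void show_mapping(arch_aspace_t *as, vaddr_t va) {
        paddr_t pa;
        uint flags;
        if (arch_mmu_query(as, va, &pa, &flags) == NO_ERROR)
            printf("va 0x%lx -> pa 0x%lx flags 0x%x\n", va, pa, flags);
    }
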
550 int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags) { in arch_mmu_map() argument
556 if ((!IS_ALIGNED(paddr, PAGE_SIZE)) || (!IS_ALIGNED(vaddr, PAGE_SIZE))) in arch_mmu_map()
565 range.start_vaddr = vaddr; in arch_mmu_map()
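
arch_mmu_map (line 550) demands page alignment for both addresses (line 556) and then records the request in a range struct (line 565) before the actual mapping work. A hypothetical caller; the types here are assumed stand-ins for the file's own:

    typedef unsigned long vaddr_t, paddr_t;
    typedef struct arch_aspace arch_aspace_t;

    extern int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr,
                            paddr_t paddr, unsigned count, unsigned flags);

    /* Hypothetical identity map of 16 pages; both addresses are 4 KB
     * aligned, as the check at line 556 requires. */
    int map_sixteen(arch_aspace_t *as, unsigned flags) {
        return arch_mmu_map(as, 0x100000ul, 0x100000ul, 16, flags);
    }
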