Lines matching refs:vaddr

33 static inline bool is_valid_vaddr(arch_aspace_t *aspace, vaddr_t vaddr) {  in is_valid_vaddr()  argument
34 return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1); in is_valid_vaddr()
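The two lines above are the whole bounds check. A minimal standalone sketch, assuming arch_aspace_t reduced to just the two fields the comparison reads (the real struct carries more state):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t vaddr_t;

    /* sketch: only the fields the check touches; the real arch_aspace_t
     * in this file has more members */
    typedef struct {
        vaddr_t base;
        size_t size;
    } arch_aspace_t;

    /* Written as base + size - 1 rather than base + size so an aspace
     * that ends at the very top of the address space does not wrap to 0. */
    static inline bool is_valid_vaddr(arch_aspace_t *aspace, vaddr_t vaddr) {
        return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);
    }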
83 status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) { in arch_mmu_query() argument
93 LTRACEF("aspace %p, vaddr 0x%lx\n", aspace, vaddr); in arch_mmu_query()
98 DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr)); in arch_mmu_query()
99 if (!is_valid_vaddr(aspace, vaddr)) in arch_mmu_query()
108 vaddr_rem = vaddr - kernel_base; in arch_mmu_query()
116 vaddr_rem = vaddr; in arch_mmu_query()
131 vaddr, index, index_shift, vaddr_rem, pte); in arch_mmu_query()
187 vaddr, paddr ? *paddr : ~0UL, flags ? *flags : ~0U); in arch_mmu_query()
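Line 131 traces index, index_shift, and vaddr_rem, which pins down the walk: each level extracts its table index from the top of vaddr_rem and keeps the remainder for the level below, after vaddr_rem is seeded relative to the aspace base (lines 108/116). A standalone sketch of that arithmetic, assuming the common 4 KB-granule level shifts of 39/30/21/12 (the granule is not visible in this excerpt):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t vaddr_t;

    int main(void) {
        /* vaddr_rem starts as vaddr minus the aspace base (lines 108/116) */
        vaddr_t vaddr_rem = 0x0000004512345678UL;
        for (unsigned index_shift = 39; index_shift >= 12; index_shift -= 9) {
            vaddr_t index = vaddr_rem >> index_shift;  /* slot in this table */
            vaddr_rem -= index << index_shift;         /* remainder for next level */
            printf("index %lu index_shift %u vaddr_rem 0x%lx\n",
                   (unsigned long)index, index_shift, (unsigned long)vaddr_rem);
        }
        return 0;
    }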
208 void *vaddr = memalign(size, size); in alloc_page_table() local
209 if (!vaddr) in alloc_page_table()
211 *paddrp = vaddr_to_paddr(vaddr); in alloc_page_table()
213 free(vaddr); in alloc_page_table()
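Lines 208-213 are the allocation idiom: memalign(size, size) yields a naturally aligned table (alignment equal to its size, which the hardware walker requires), and the new virtual address is translated back to the physical address the parent entry needs, with the allocation freed if translation fails (line 213). A sketch with the failure paths filled in; the error convention and the identity-map stub for vaddr_to_paddr are assumptions:

    #include <malloc.h>   /* memalign is nonstandard; glibc declares it here */
    #include <stdint.h>
    #include <stdlib.h>

    typedef uintptr_t paddr_t;

    /* identity-map stub standing in for the kernel's vaddr_to_paddr();
     * taken here to return 0 when there is no physical backing */
    static paddr_t vaddr_to_paddr(void *va) {
        return (paddr_t)(uintptr_t)va;
    }

    static int alloc_page_table(paddr_t *paddrp, unsigned page_size_shift) {
        size_t size = (size_t)1 << page_size_shift;
        void *vaddr = memalign(size, size);  /* alignment == size: natural alignment */
        if (!vaddr)
            return -1;                       /* ERR_NO_MEMORY in the original */
        *paddrp = vaddr_to_paddr(vaddr);
        if (*paddrp == 0) {
            free(vaddr);                     /* line 213: back out on failure */
            return -1;
        }
        return 0;
    }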
222 static void free_page_table(void *vaddr, paddr_t paddr, uint page_size_shift) { in free_page_table() argument
223 LTRACEF("vaddr %p paddr 0x%lx page_size_shift %u\n", vaddr, paddr, page_size_shift); in free_page_table()
234 free(vaddr); in free_page_table()
241 void *vaddr; in arm64_mmu_get_page_table() local
252 vaddr = paddr_to_kvaddr(paddr); in arm64_mmu_get_page_table()
254 LTRACEF("allocated page table, vaddr %p, paddr 0x%lx\n", vaddr, paddr); in arm64_mmu_get_page_table()
255 memset(vaddr, MMU_PTE_DESCRIPTOR_INVALID, 1U << page_size_shift); in arm64_mmu_get_page_table()
262 return vaddr; in arm64_mmu_get_page_table()
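Lines 252-255 show what happens after a fresh allocation: the physical address is converted back to a kernel virtual address so the CPU can write the table, and every entry is filled with the invalid descriptor before the table can ever be linked into a live tree. The byte-wise memset is valid because the invalid descriptor is all zero bits. A sketch, reusing the alloc_page_table sketch above and omitting the real function's existing-entry fast path; the identity-map stub for paddr_to_kvaddr is an assumption:

    #include <stdint.h>
    #include <string.h>

    typedef uintptr_t paddr_t;

    #define MMU_PTE_DESCRIPTOR_INVALID 0  /* all-zero descriptor faults on access */

    extern int alloc_page_table(paddr_t *paddrp, unsigned page_size_shift);

    /* identity-map stub for the kernel's paddr_to_kvaddr() */
    static void *paddr_to_kvaddr(paddr_t paddr) {
        return (void *)(uintptr_t)paddr;
    }

    static void *arm64_mmu_get_page_table(paddr_t *paddrp, unsigned page_size_shift) {
        void *vaddr;
        if (alloc_page_table(paddrp, page_size_shift) < 0)
            return NULL;
        vaddr = paddr_to_kvaddr(*paddrp);
        /* every entry invalid, so a partially built table never translates */
        memset(vaddr, MMU_PTE_DESCRIPTOR_INVALID, 1U << page_size_shift);
        return vaddr;
    }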
295 static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel, in arm64_mmu_unmap_pt() argument
309 vaddr, vaddr_rel, size, index_shift, page_size_shift, page_table); in arm64_mmu_unmap_pt()
324 arm64_mmu_unmap_pt(vaddr, vaddr_rem, chunk_size, in arm64_mmu_unmap_pt()
340 ARM64_TLBI(vaae1is, vaddr >> 12); in arm64_mmu_unmap_pt()
342 ARM64_TLBI(vae1is, vaddr >> 12 | (vaddr_t)asid << 48); in arm64_mmu_unmap_pt()
346 vaddr += chunk_size; in arm64_mmu_unmap_pt()
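Lines 340-342 are the TLB maintenance choice after an entry is cleared: vaae1is invalidates the VA across all ASIDs (global kernel mappings), while vae1is targets one ASID, packed into bits 63:48 of the operand with the virtual page number (vaddr >> 12) in the low bits, exactly as the shift-and-or on line 342 builds it. A sketch of the packing behind the ARM64_TLBI macro and the instruction itself, compiled only on AArch64:

    #include <stdint.h>

    typedef uintptr_t vaddr_t;

    /* operand layout per the ARMv8 TLBI encoding used at line 342:
     * bits [43:0] = VA >> 12 (page number), bits [63:48] = ASID */
    static inline uint64_t tlbi_vae1is_operand(vaddr_t vaddr, uint16_t asid) {
        return (vaddr >> 12) | ((uint64_t)asid << 48);
    }

    #if defined(__aarch64__)
    static inline void tlbi_vae1is(vaddr_t vaddr, uint16_t asid) {
        __asm__ volatile("tlbi vae1is, %0"
                         :: "r"(tlbi_vae1is_operand(vaddr, asid)));
    }
    #endif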
360 vaddr_t vaddr = vaddr_in; in arm64_mmu_map_pt() local
371 vaddr, vaddr_rel, paddr, size, attrs, in arm64_mmu_map_pt()
394 ret = arm64_mmu_map_pt(vaddr, vaddr_rem, paddr, chunk_size, attrs, in arm64_mmu_map_pt()
416 vaddr += chunk_size; in arm64_mmu_map_pt()
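Lines 394 and 416 frame the mapping loop: each iteration picks a chunk_size, either installs a block at this level or recurses into a subtable (line 394), then advances vaddr and paddr by chunk_size (line 416). A sketch of the chunk computation, assuming the usual map-loop shape of taking everything up to the next block boundary at this level, capped by what remains:

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t vaddr_t;

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static size_t chunk_at_level(vaddr_t vaddr_rel, size_t size,
                                 unsigned index_shift) {
        size_t block_size = (size_t)1 << index_shift;  /* bytes per entry */
        size_t offset = vaddr_rel & (block_size - 1);  /* offset into block */
        /* a full, aligned chunk can become a block descriptor; anything
         * shorter or misaligned recurses into the next level instead */
        return MIN(size, block_size - offset);
    }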
431 int arm64_mmu_map(vaddr_t vaddr, paddr_t paddr, size_t size, pte_t attrs, in arm64_mmu_map() argument
436 vaddr_t vaddr_rel = vaddr - vaddr_base; in arm64_mmu_map()
440 vaddr, paddr, size, attrs, asid); in arm64_mmu_map()
444 vaddr, size, vaddr_base, vaddr_rel_max); in arm64_mmu_map()
453 ret = arm64_mmu_map_pt(vaddr, vaddr_rel, paddr, size, attrs, in arm64_mmu_map()
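Lines 436-444 rebase the address (vaddr_rel = vaddr - vaddr_base) and validate it against the aspace limits before walking anything. The check has to reject not just an out-of-range start but also a size that runs past the end, without letting vaddr_rel + size overflow. A sketch of such a check, taking vaddr_rel_max, as an assumption, to be the size of the mappable region:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t vaddr_t;

    static bool range_ok(vaddr_t vaddr, size_t size,
                         vaddr_t vaddr_base, vaddr_t vaddr_rel_max) {
        vaddr_t vaddr_rel = vaddr - vaddr_base;
        return vaddr >= vaddr_base &&             /* start not below the region */
               vaddr_rel <= vaddr_rel_max &&      /* start inside the region */
               size <= vaddr_rel_max - vaddr_rel; /* end fits, no overflow */
    }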
459 int arm64_mmu_unmap(vaddr_t vaddr, size_t size, in arm64_mmu_unmap() argument
463 vaddr_t vaddr_rel = vaddr - vaddr_base; in arm64_mmu_unmap()
466 LTRACEF("vaddr 0x%lx, size 0x%lx, asid 0x%x\n", vaddr, size, asid); in arm64_mmu_unmap()
470 vaddr, size, vaddr_base, vaddr_rel_max); in arm64_mmu_unmap()
479 arm64_mmu_unmap_pt(vaddr, vaddr_rel, size, in arm64_mmu_unmap()
485 int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags) { in arch_mmu_map() argument
486 LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags); in arch_mmu_map()
491 DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr)); in arch_mmu_map()
492 if (!is_valid_vaddr(aspace, vaddr)) in arch_mmu_map()
496 DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr)); in arch_mmu_map()
498 if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr)) in arch_mmu_map()
506 ret = arm64_mmu_map(vaddr, paddr, count * PAGE_SIZE, in arch_mmu_map()
512 ret = arm64_mmu_map(vaddr, paddr, count * PAGE_SIZE, in arch_mmu_map()
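arch_mmu_map (lines 485-512) is the validation shell around arm64_mmu_map: assert-then-check for the vaddr (lines 491-492), page-alignment checks for both addresses (lines 496-498), and a byte size of count * PAGE_SIZE at the two call sites (506/512). A sketch of that argument screen; the overflow guard on the multiplication is an addition here, not visible in the excerpt:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t vaddr_t;
    typedef uintptr_t paddr_t;

    #define PAGE_SIZE 4096UL
    #define IS_PAGE_ALIGNED(x) ((((uintptr_t)(x)) & (PAGE_SIZE - 1)) == 0)

    static bool arch_mmu_map_args_ok(vaddr_t vaddr, paddr_t paddr, unsigned count) {
        /* lines 496-498: both addresses must sit on page boundaries */
        if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
            return false;                /* ERR_INVALID_ARGS in the original */
        /* count pages become count * PAGE_SIZE bytes (lines 506/512);
         * reject a product that would wrap before arm64_mmu_map sees it */
        if ((size_t)count > SIZE_MAX / PAGE_SIZE)
            return false;
        return true;
    }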
522 int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) { in arch_mmu_unmap() argument
523 LTRACEF("vaddr 0x%lx count %u\n", vaddr, count); in arch_mmu_unmap()
528 DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr)); in arch_mmu_unmap()
530 if (!is_valid_vaddr(aspace, vaddr)) in arch_mmu_unmap()
533 DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr)); in arch_mmu_unmap()
534 if (!IS_PAGE_ALIGNED(vaddr)) in arch_mmu_unmap()
539 ret = arm64_mmu_unmap(vaddr, count * PAGE_SIZE, in arch_mmu_unmap()
545 ret = arm64_mmu_unmap(vaddr, count * PAGE_SIZE, in arch_mmu_unmap()