Lines Matching refs:aspace

33 static inline bool is_valid_vaddr(arch_aspace_t *aspace, vaddr_t vaddr) {  in is_valid_vaddr()  argument
34 return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1); in is_valid_vaddr()
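
The two matches above are the whole of is_valid_vaddr(): an address is valid if it falls in the closed interval [base, base + size - 1]. A minimal, self-contained sketch of the same check (the typedefs and the example region are stand-ins, not the kernel's definitions) shows why the "- 1" form matters: for a region that ends at the top of the address space, base + size wraps to zero, while base + size - 1 stays correct.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t vaddr_t;                      /* stand-in for the kernel's vaddr_t */

    typedef struct {
        vaddr_t base;                               /* first valid virtual address */
        size_t  size;                               /* length of the region in bytes */
    } arch_aspace_t;                                /* only the fields the check touches */

    static inline bool is_valid_vaddr(const arch_aspace_t *aspace, vaddr_t vaddr) {
        /* closed interval [base, base + size - 1]; base + size itself can wrap to 0
           when the region ends at the top of the 64-bit address space */
        return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);
    }

    int main(void) {
        /* kernel half of a 48-bit ARM64 address space: ends at 0xffffffffffffffff */
        arch_aspace_t kernel = { .base = 0xffff000000000000ULL,
                                 .size = 0x0001000000000000ULL };

        printf("%d\n", is_valid_vaddr(&kernel, 0xffff000000001000ULL)); /* 1: inside */
        printf("%d\n", is_valid_vaddr(&kernel, 0x0000000000001000ULL)); /* 0: user range */
        return 0;
    }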
83 status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) { in arch_mmu_query() argument
93 LTRACEF("aspace %p, vaddr 0x%lx\n", aspace, vaddr); in arch_mmu_query()
95 DEBUG_ASSERT(aspace); in arch_mmu_query()
96 DEBUG_ASSERT(aspace->tt_virt); in arch_mmu_query()
98 DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr)); in arch_mmu_query()
99 if (!is_valid_vaddr(aspace, vaddr)) in arch_mmu_query()
103 if (aspace->flags & ARCH_ASPACE_FLAG_KERNEL) { in arch_mmu_query()
121 page_table = aspace->tt_virt; in arch_mmu_query()
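
The matches at the top of arch_mmu_query() show a guard pattern that map and unmap below repeat: DEBUG_ASSERT catches a bad vaddr in debug builds, and an ordinary runtime check still rejects it in release builds instead of walking page tables with it. A small stand-alone model of that guard (DEBUG_ASSERT, status_t, and ERR_OUT_OF_RANGE here are stand-ins for the kernel's own macros and error codes):

    #include <stdbool.h>
    #include <stdio.h>

    typedef int status_t;

    #define NO_ERROR          0
    #define ERR_OUT_OF_RANGE (-2)                   /* stand-in error code */

    /* modeling a release build; a debug kernel would expand this to a real assert */
    #define DEBUG_ASSERT(x) ((void)(x))

    static bool is_valid_vaddr_model(unsigned long vaddr) {
        return vaddr >= 0x1000;                     /* placeholder validity rule */
    }

    static status_t query_model(unsigned long vaddr) {
        /* debug builds trap immediately on a bad address ... */
        DEBUG_ASSERT(is_valid_vaddr_model(vaddr));
        /* ... and release builds still refuse to walk tables with it */
        if (!is_valid_vaddr_model(vaddr))
            return ERR_OUT_OF_RANGE;

        /* the real function now selects the kernel or user table and walks it */
        return NO_ERROR;
    }

    int main(void) {
        printf("%d\n", query_model(0x2000));        /* 0: accepted */
        printf("%d\n", query_model(0x0));           /* -2: rejected without crashing */
        return 0;
    }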
485 int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags) { in arch_mmu_map() argument
488 DEBUG_ASSERT(aspace); in arch_mmu_map()
489 DEBUG_ASSERT(aspace->tt_virt); in arch_mmu_map()
491 DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr)); in arch_mmu_map()
492 if (!is_valid_vaddr(aspace, vaddr)) in arch_mmu_map()
505 if (aspace->flags & ARCH_ASPACE_FLAG_KERNEL) { in arch_mmu_map()
510 aspace->tt_virt, MMU_ARM64_GLOBAL_ASID); in arch_mmu_map()
516 aspace->tt_virt, MMU_ARM64_USER_ASID); in arch_mmu_map()
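
The arch_mmu_map() matches show the dispatch that query, map, and unmap all share: addresses in a kernel aspace (ARCH_ASPACE_FLAG_KERNEL) go to the shared translation table tagged with the global ASID, while everything else goes to the per-process table tagged with the user ASID; arch_mmu_unmap() below follows the same split. A compilable model of that dispatch (the low-level helper, the ASID values, and the flag bit are illustrative stand-ins, not the file's real definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define ARCH_ASPACE_FLAG_KERNEL (1u << 0)       /* stand-in flag bit */
    #define MMU_ARM64_GLOBAL_ASID   0u              /* stand-in ASID values */
    #define MMU_ARM64_USER_ASID     1u

    typedef struct {
        uint32_t  flags;
        uint64_t *tt_virt;                          /* top-level translation table */
    } aspace_model_t;

    /* stand-in for the low-level mapper that edits the table and invalidates by ASID */
    static int arm64_mmu_map_model(uint64_t vaddr, uint64_t paddr, unsigned count,
                                   uint64_t *tt, unsigned asid) {
        printf("map 0x%llx -> 0x%llx (%u pages) in table %p, asid %u\n",
               (unsigned long long)vaddr, (unsigned long long)paddr, count,
               (void *)tt, asid);
        return 0;
    }

    static int map_dispatch(aspace_model_t *aspace, uint64_t vaddr, uint64_t paddr,
                            unsigned count) {
        if (aspace->flags & ARCH_ASPACE_FLAG_KERNEL) {
            /* kernel mappings are global: shared table, global ASID */
            return arm64_mmu_map_model(vaddr, paddr, count,
                                       aspace->tt_virt, MMU_ARM64_GLOBAL_ASID);
        }
        /* user mappings live in the per-aspace table, tagged with its ASID */
        return arm64_mmu_map_model(vaddr, paddr, count,
                                   aspace->tt_virt, MMU_ARM64_USER_ASID);
    }

    int main(void) {
        uint64_t table[1];
        aspace_model_t user = { .flags = 0, .tt_virt = table };
        return map_dispatch(&user, 0x400000, 0x80000000, 4);
    }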
522 int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) { in arch_mmu_unmap() argument
525 DEBUG_ASSERT(aspace); in arch_mmu_unmap()
526 DEBUG_ASSERT(aspace->tt_virt); in arch_mmu_unmap()
528 DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr)); in arch_mmu_unmap()
530 if (!is_valid_vaddr(aspace, vaddr)) in arch_mmu_unmap()
538 if (aspace->flags & ARCH_ASPACE_FLAG_KERNEL) { in arch_mmu_unmap()
542 aspace->tt_virt, in arch_mmu_unmap()
548 aspace->tt_virt, in arch_mmu_unmap()
555 status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) { in arch_mmu_init_aspace() argument
556 LTRACEF("aspace %p, base 0x%lx, size 0x%zx, flags 0x%x\n", aspace, base, size, flags); in arch_mmu_init_aspace()
558 DEBUG_ASSERT(aspace); in arch_mmu_init_aspace()
564 aspace->flags = flags; in arch_mmu_init_aspace()
570 aspace->base = base; in arch_mmu_init_aspace()
571 aspace->size = size; in arch_mmu_init_aspace()
572 aspace->tt_virt = arm64_kernel_translation_table; in arch_mmu_init_aspace()
573 aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt); in arch_mmu_init_aspace()
578 aspace->base = base; in arch_mmu_init_aspace()
579 aspace->size = size; in arch_mmu_init_aspace()
585 aspace->tt_virt = va; in arch_mmu_init_aspace()
586 aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt); in arch_mmu_init_aspace()
590 memset(aspace->tt_virt, 0, PAGE_SIZE); in arch_mmu_init_aspace()
593 LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt); in arch_mmu_init_aspace()
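
The arch_mmu_init_aspace() matches outline two setup paths: a kernel aspace records base and size and simply aliases the already-built arm64_kernel_translation_table, while a user aspace gets a freshly allocated top-level table, derives its physical address, and zeroes the page before use. A host-side model of that branch (the allocation, vaddr_to_paddr, and PAGE_SIZE below are stand-ins for the kernel's page allocator and MMU helpers):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096u                         /* stand-in */
    #define ARCH_ASPACE_FLAG_KERNEL (1u << 0)       /* stand-in flag bit */

    typedef struct {
        uint32_t  flags;
        uint64_t  base, size;
        uint64_t *tt_virt;
        uint64_t  tt_phys;
    } aspace_model_t;

    static uint64_t kernel_translation_table[512]; /* stands in for arm64_kernel_translation_table */

    static uint64_t vaddr_to_paddr_model(void *va) {
        return (uint64_t)(uintptr_t)va;             /* identity; real code asks the VM layer */
    }

    static int init_aspace_model(aspace_model_t *aspace, uint64_t base, uint64_t size,
                                 uint32_t flags) {
        aspace->flags = flags;
        aspace->base = base;
        aspace->size = size;

        if (flags & ARCH_ASPACE_FLAG_KERNEL) {
            /* kernel aspace shares the global table built at early boot */
            aspace->tt_virt = kernel_translation_table;
            aspace->tt_phys = vaddr_to_paddr_model(aspace->tt_virt);
            return 0;
        }

        /* user aspace: allocate and zero a private top-level table */
        void *va = aligned_alloc(PAGE_SIZE, PAGE_SIZE);   /* kernel uses its page allocator here */
        if (!va)
            return -1;
        aspace->tt_virt = va;
        aspace->tt_phys = vaddr_to_paddr_model(aspace->tt_virt);
        memset(aspace->tt_virt, 0, PAGE_SIZE);
        return 0;
    }

    int main(void) {
        aspace_model_t user;
        if (init_aspace_model(&user, 0x1000, 0x10000000, 0) != 0)
            return 1;
        free(user.tt_virt);
        return 0;
    }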
598 status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) { in arch_mmu_destroy_aspace() argument
599 LTRACEF("aspace %p\n", aspace); in arch_mmu_destroy_aspace()
601 DEBUG_ASSERT(aspace); in arch_mmu_destroy_aspace()
602 DEBUG_ASSERT((aspace->flags & ARCH_ASPACE_FLAG_KERNEL) == 0); in arch_mmu_destroy_aspace()
606 vm_page_t *page = paddr_to_vm_page(aspace->tt_phys); in arch_mmu_destroy_aspace()
613 void arch_mmu_context_switch(arch_aspace_t *aspace) { in arch_mmu_context_switch() argument
615 TRACEF("aspace %p\n", aspace); in arch_mmu_context_switch()
619 if (aspace) { in arch_mmu_context_switch()
620 DEBUG_ASSERT((aspace->flags & ARCH_ASPACE_FLAG_KERNEL) == 0); in arch_mmu_context_switch()
623 ttbr = ((uint64_t)MMU_ARM64_USER_ASID << 48) | aspace->tt_phys; in arch_mmu_context_switch()
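
The final match shows how the context switch builds the new TTBR0_EL1 value: on ARM64 the ASID occupies bits 63:48 of the TTBR (when TCR_EL1.A1 selects TTBR0's ASID) and the physical address of the top-level table sits below it, so switching address spaces is essentially one register write. A small stand-alone model of just that composition (the ASID value is an assumption; the kernel defines its own):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MMU_ARM64_USER_ASID 1u                  /* stand-in value */

    /* Compose a TTBR0_EL1 value: ASID in bits [63:48], table base address below. */
    static uint64_t make_ttbr(uint16_t asid, uint64_t tt_phys) {
        return ((uint64_t)asid << 48) | tt_phys;
    }

    int main(void) {
        uint64_t tt_phys = 0x80246000;              /* example top-level table address */
        uint64_t ttbr = make_ttbr(MMU_ARM64_USER_ASID, tt_phys);

        printf("ttbr0_el1 = 0x%016" PRIx64 "\n", ttbr);   /* 0x0001000080246000 */
        /* in the kernel this value is written to TTBR0_EL1 (msr), followed by the
           required synchronization (isb) before user mappings are used */
        return 0;
    }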