/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ARM_MMU_MM_H__
#define __ARM_MMU_MM_H__

#include <xen/bug.h>
#include <xen/pdx.h>
#include <xen/types.h>
#include <asm/mm.h>
#include <asm/mmu/layout.h>
#include <asm/page.h>

/* Non-boot CPUs use this to find the correct pagetables. */
extern uint64_t init_ttbr;

extern mfn_t directmap_mfn_start, directmap_mfn_end;
extern vaddr_t directmap_virt_end;
#ifdef CONFIG_ARM_64
extern vaddr_t directmap_virt_start;
extern unsigned long directmap_base_pdx;
#endif

#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)

#define virt_to_maddr(va) ({                                                   \
    vaddr_t va_ = (vaddr_t)(va);                                               \
    (paddr_t)((va_to_par(va_) & PADDR_MASK & PAGE_MASK) | (va_ & ~PAGE_MASK)); \
})

#ifdef CONFIG_ARM_32
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({                         \
    unsigned long mfn_ = mfn_x(mfn);                    \
    (mfn_ >= mfn_x(directmap_mfn_start) &&              \
     mfn_ < mfn_x(directmap_mfn_end));                  \
})

/**
 * Find the virtual address corresponding to a machine address
 *
 * Only memory backing the XENHEAP has a corresponding virtual address to
 * be found. This is so we can save precious virtual space, as it's in
 * short supply on arm32. This mapping is not subject to PDX compression
 * because XENHEAP is known to be physically contiguous and hence can't
 * jump over the PDX hole. This means we can avoid the roundtrips
 * converting to/from pdx.
 *
 * @param ma Machine address
 * @return Virtual address mapped to `ma`
 */
static inline void *maddr_to_virt(paddr_t ma)
{
    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
    ma -= mfn_to_maddr(directmap_mfn_start);
    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
}
#else
/**
 * Find the virtual address corresponding to a machine address
 *
 * The directmap covers all conventional memory accessible by the
 * hypervisor. This means it's subject to PDX compression.
 *
 * Note there's an extra offset applied (directmap_base_pdx) on top of the
 * regular PDX compression logic. Its purpose is to skip over the initial
 * range of non-existing memory, should there be one.
 *
 * @param ma Machine address
 * @return Virtual address mapped to `ma`
 */
static inline void *maddr_to_virt(paddr_t ma)
{
    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
           (DIRECTMAP_SIZE >> PAGE_SHIFT));
    return (void *)(XENHEAP_VIRT_START -
                    (directmap_base_pdx << PAGE_SHIFT) +
                    maddr_to_directmapoff(ma));
}
#endif

/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *virt_to_page(const void *v)
{
    unsigned long va = (unsigned long)v;
    unsigned long pdx;

    ASSERT(va >= XENHEAP_VIRT_START);
    ASSERT(va < directmap_virt_end);

    pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
    pdx += mfn_to_pdx(directmap_mfn_start);
    return frame_table + pdx - frametable_base_pdx;
}

/*
 * Print a walk of a page table or p2m
 *
 * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2)
 * addr is the PA or IPA to translate
 * root_level is the starting level of the page table
 *   (e.g. TCR_EL2.SL0 or VTCR_EL2.SL0)
 * nr_root_tables is the number of concatenated tables at the root.
 *   This can only be != 1 for P2M walks starting at the first or
 *   subsequent level.
 */
void dump_pt_walk(paddr_t ttbr, paddr_t addr,
                  unsigned int root_level,
                  unsigned int nr_root_tables);
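
/*
 * Illustrative only, not part of the interface: a minimal sketch of how
 * the translation helpers above are expected to compose. For any machine
 * address covered by the xenheap/directmap mapping, translating to a
 * virtual address and back should be the identity. The helper name is
 * hypothetical.
 */
static inline bool maddr_translation_roundtrips(paddr_t ma)
{
    /* maddr_to_virt() ASSERTs that `ma` falls inside the mapped range. */
    void *va = maddr_to_virt(ma);

    /* virt_to_maddr() re-derives the machine address via va_to_par(). */
    return virt_to_maddr(va) == ma;
}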
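
/*
 * Illustrative only: virt_to_page() is expected to agree with the MFN
 * helpers from the common MM headers about which frame backs a given
 * xenheap mapping. The helper name is hypothetical.
 */
static inline bool virt_to_page_matches_translation(const void *va)
{
    struct page_info *pg = virt_to_page(va);

    /* Compare the frame's base machine address against the translated VA. */
    return mfn_to_maddr(page_to_mfn(pg)) == (virt_to_maddr(va) & PAGE_MASK);
}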
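
/*
 * Illustrative only: a sketch of driving dump_pt_walk() for a hypervisor
 * (stage-1) walk. READ_SYSREG64() and HYP_PT_ROOT_LEVEL are assumed to be
 * available from other Xen headers; nr_root_tables is 1 because only P2M
 * walks can start from concatenated root tables. The helper name is
 * hypothetical.
 */
static inline void dump_hyp_walk_sketch(vaddr_t addr)
{
    /* TTBR0_EL2 holds the root of the hypervisor's own page tables. */
    paddr_t ttbr = READ_SYSREG64(TTBR0_EL2);

    dump_pt_walk(ttbr, addr, HYP_PT_ROOT_LEVEL, 1);
}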

/* Switch to a new set of root page tables */
extern void switch_ttbr(uint64_t ttbr);
extern void relocate_and_switch_ttbr(uint64_t ttbr);

#endif /* __ARM_MMU_MM_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */