#ifndef __ARM_ARM64_PAGE_H__
#define __ARM_ARM64_PAGE_H__

#ifndef __ASSEMBLY__

#include <asm/alternative.h>

/* Write a pagetable entry */
static inline void write_pte(lpae_t *p, lpae_t pte)
{
    asm volatile (
        /* Ensure any writes have completed with the old mappings. */
        "dsb sy;"
        "str %0, [%1];"         /* Write the entry */
        "dsb sy;"
        : : "r" (pte.bits), "r" (p) : "memory");
}
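
/*
 * Illustrative use (a sketch, not part of this interface): a caller
 * builds the new entry first and relies on write_pte()'s barriers to
 * order it against earlier writes. mfn_to_xen_entry() is an assumed
 * helper here, and whether a TLB flush is needed afterwards depends
 * on the mapping being replaced.
 *
 *     lpae_t pte = mfn_to_xen_entry(mfn, attr);
 *     write_pte(entry_ptr, pte);
 *     flush_xen_data_tlb_local();
 */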

/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */
#define __invalidate_dcache_one(R) "dc ivac, %" #R ";"

/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
#define __clean_dcache_one(R)                   \
    ALTERNATIVE("dc cvac, %" #R ";",            \
                "dc civac, %" #R ";",           \
                ARM64_WORKAROUND_CLEAN_CACHE)

/* Inline ASM to clean and invalidate dcache on register R (may be an
 * inline asm operand) */
#define __clean_and_invalidate_dcache_one(R) "dc civac, %" #R ";"
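
/*
 * Illustrative expansion (a sketch; start, end and cacheline_bytes
 * are assumptions, not defined in this header): these fragments are
 * meant to be pasted into an asm statement inside a loop that walks
 * a buffer one cache line at a time, followed by a barrier to wait
 * for completion.
 *
 *     for ( p = start; p < end; p += cacheline_bytes )
 *         asm volatile (__clean_dcache_one(0) : : "r" (p));
 *     dsb(sy);
 */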

/* Invalidate all instruction caches in Inner Shareable domain to PoU */
static inline void invalidate_icache(void)
{
    asm volatile ("ic ialluis");
    dsb(ish);               /* Ensure completion of the I-cache flush */
    isb();
}

/*
 * Flush all hypervisor mappings from the TLB of the local processor.
 *
 * This is needed after changing Xen code mappings.
 *
 * The caller needs to issue the necessary DSB and D-cache flushes
 * before calling flush_xen_text_tlb_local.
 */
static inline void flush_xen_text_tlb_local(void)
{
    asm volatile (
        "isb;"       /* Ensure synchronization with previous changes to text */
        "tlbi   alle2;"                 /* Flush hypervisor TLB */
        "ic     iallu;"                 /* Flush I-cache */
        "dsb    sy;"                    /* Ensure completion of TLB flush */
        "isb;"
        : : : "memory");
}
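
/*
 * Illustrative sequence (a sketch; insn_ptr and new_insn are
 * assumptions): patching hypervisor text, per the comment above,
 * requires the caller to clean the D-cache and issue a DSB before
 * the TLB/I-cache flush.
 *
 *     *insn_ptr = new_insn;
 *     asm volatile (__clean_dcache_one(0) : : "r" (insn_ptr));
 *     dsb(sy);
 *     flush_xen_text_tlb_local();
 */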

/*
 * Flush all hypervisor mappings from the data TLB of the local
 * processor. This is not sufficient when changing code mappings or
 * for self-modifying code.
 */
static inline void flush_xen_data_tlb_local(void)
{
    asm volatile (
        "dsb    sy;"                    /* Ensure visibility of PTE writes */
        "tlbi   alle2;"                 /* Flush hypervisor TLB */
        "dsb    sy;"                    /* Ensure completion of TLB flush */
        "isb;"
        : : : "memory");
}

/* Flush TLB of local processor for address va. */
static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
{
    asm volatile("tlbi vae2, %0;" : : "r" (va >> PAGE_SHIFT) : "memory");
}

/* Flush TLB of all processors in the inner-shareable domain for
 * address va. */
static inline void __flush_xen_data_tlb_one(vaddr_t va)
{
    asm volatile("tlbi vae2is, %0;" : : "r" (va >> PAGE_SHIFT) : "memory");
}
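
/*
 * Illustrative caller-side loop (a sketch; start and end are
 * assumptions): the single-page flushes above compose into a range
 * flush, with a barrier to wait for completion.
 *
 *     for ( va = start; va < end; va += PAGE_SIZE )
 *         __flush_xen_data_tlb_one_local(va);
 *     dsb(sy);
 *     isb();
 */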

/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
    uint64_t par, tmp = READ_SYSREG64(PAR_EL1);

    asm volatile ("at s1e2r, %0;" : : "r" (va));
    isb();
    par = READ_SYSREG64(PAR_EL1);
    WRITE_SYSREG64(tmp, PAR_EL1);
    return par;
}
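
/*
 * Illustrative decode of the returned PAR (a sketch; PAR_F and
 * PADDR_MASK are assumed definitions, with the layout as documented
 * in the Arm ARM: bit 0 set means the translation aborted, otherwise
 * the upper bits hold the output address).
 *
 *     uint64_t par = __va_to_par(va);
 *
 *     if ( par & PAR_F )
 *         ... handle the failed translation ...
 *     else
 *         pa = (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
 */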

/* Ask the MMU to translate a Guest VA for us */
static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags)
{
    uint64_t par, tmp = READ_SYSREG64(PAR_EL1);

    if ( (flags & GV2M_WRITE) == GV2M_WRITE )
        asm volatile ("at s12e1w, %0;" : : "r" (va));
    else
        asm volatile ("at s12e1r, %0;" : : "r" (va));
    isb();
    par = READ_SYSREG64(PAR_EL1);
    WRITE_SYSREG64(tmp, PAR_EL1);
    return par;
}

/* Ask the MMU for a stage-1 only translation (guest VA to IPA) */
static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags)
{
    uint64_t par, tmp = READ_SYSREG64(PAR_EL1);

    if ( (flags & GV2M_WRITE) == GV2M_WRITE )
        asm volatile ("at s1e1w, %0;" : : "r" (va));
    else
        asm volatile ("at s1e1r, %0;" : : "r" (va));
    isb();
    par = READ_SYSREG64(PAR_EL1);
    WRITE_SYSREG64(tmp, PAR_EL1);
    return par;
}
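
/*
 * Illustrative wrapper (a sketch; PAR_F and PADDR_MASK are assumed
 * definitions): turning the stage-1 result into an IPA. Note the
 * split above: s12e1* walks both translation stages and yields a
 * machine address, while s1e1* stops at the intermediate physical
 * address.
 *
 *     static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr,
 *                                  unsigned int flags)
 *     {
 *         uint64_t par = gva_to_ipa_par(va, flags);
 *
 *         if ( par & PAR_F )
 *             return -EFAULT;
 *         *paddr = (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
 *         return 0;
 *     }
 */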

extern void clear_page(void *to);

#endif /* __ASSEMBLY__ */

#endif /* __ARM_ARM64_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */