#ifndef __ARM_ARM32_PAGE_H__
#define __ARM_ARM32_PAGE_H__

#ifndef __ASSEMBLY__

/* Write a pagetable entry.
 *
 * If the table entry is changing a text mapping, it is the responsibility
 * of the caller to issue an ISB after write_pte.
 */
static inline void write_pte(lpae_t *p, lpae_t pte)
{
    asm volatile (
        /* Ensure any writes have completed with the old mappings. */
        "dsb;"
        /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
        "strd %0, %H0, [%1];"
        "dsb;"
        : : "r" (pte.bits), "r" (p) : "memory");
}
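
/*
 * Illustrative sketch (not part of the original header): a caller that
 * updates an entry mapping Xen text is expected to follow write_pte()
 * with its own ISB. The helper name and arguments below are hypothetical.
 */
static inline void example_write_text_pte(lpae_t *entry, lpae_t pte)
{
    write_pte(entry, pte);
    isb();              /* Caller-supplied ISB for changed text mappings. */
}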

/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */
#define __invalidate_dcache_one(R) STORE_CP32(R, DCIMVAC)

/* Inline ASM to clean dcache on register R (may be an inline asm operand) */
#define __clean_dcache_one(R) STORE_CP32(R, DCCMVAC)

/* Inline ASM to clean and invalidate dcache on register R (may be an
 * inline asm operand) */
#define __clean_and_invalidate_dcache_one(R) STORE_CP32(R, DCCIMVAC)
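
/*
 * Illustrative sketch (not from the original header): these macros expand
 * to a coprocessor-write string and are meant to be pasted into an inline
 * asm statement with the virtual address as the given operand. The helper
 * name below is hypothetical; the real range helpers live elsewhere.
 */
static inline void example_clean_dcache_line(const void *va)
{
    asm volatile (__clean_dcache_one(0) : : "r" (va) : "memory");
}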

/*
 * Invalidate all instruction caches in Inner Shareable domain to PoU.
 * We also need to flush the branch predictor for ARMv7 as it may be
 * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b).
 */
static inline void invalidate_icache(void)
{
    asm volatile (
        CMD_CP32(ICIALLUIS)     /* Flush I-cache. */
        CMD_CP32(BPIALLIS)      /* Flush branch predictor. */
        : : : "memory");

    dsb(ish);                   /* Ensure completion of the I-cache flush. */
    isb();                      /* Synchronize fetched instruction stream. */
}

/*
 * Flush all hypervisor mappings from the TLB and branch predictor of
 * the local processor.
 *
 * This is needed after changing Xen code mappings.
 *
 * The caller needs to issue the necessary DSB and D-cache flushes
 * before calling flush_xen_text_tlb_local().
 */
static inline void flush_xen_text_tlb_local(void)
{
    asm volatile (
        "isb;"                        /* Ensure synchronization with previous changes to text */
        CMD_CP32(TLBIALLH)            /* Flush hypervisor TLB */
        CMD_CP32(ICIALLU)             /* Flush I-cache */
        CMD_CP32(BPIALL)              /* Flush branch predictor */
        "dsb;"                        /* Ensure completion of TLB+BP flush */
        "isb;"
        : : : "memory");
}
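
/*
 * Illustrative sketch (not part of the original header): a caller that has
 * just patched Xen text is expected to clean the modified data cache lines
 * and issue a DSB before invoking flush_xen_text_tlb_local(). The helper
 * name is hypothetical and only a single cache line is cleaned here.
 */
static inline void example_sync_patched_text(const void *va)
{
    asm volatile (__clean_dcache_one(0) : : "r" (va) : "memory");
    dsb(sy);                    /* Ensure the clean has completed. */
    flush_xen_text_tlb_local();
}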

/*
 * Flush all hypervisor mappings from the data TLB of the local
 * processor. This is not sufficient when changing code mappings or
 * for self-modifying code.
 */
static inline void flush_xen_data_tlb_local(void)
{
    asm volatile("dsb;" /* Ensure preceding writes are visible */
                 CMD_CP32(TLBIALLH)
                 "dsb;" /* Ensure completion of the TLB flush */
                 "isb;"
                 : : : "memory");
}

/* Flush TLB of local processor for address va. */
static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
{
    asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
}

/* Flush TLB of all processors in the inner-shareable domain for
 * address va. */
static inline void __flush_xen_data_tlb_one(vaddr_t va)
{
    asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
}
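
/*
 * Illustrative sketch (not from the original header): a range flush is
 * typically built by stepping through the range page by page with the
 * single-entry helper above and completing with a DSB. The helper name
 * below is hypothetical; it assumes va and size are page-aligned.
 */
static inline void example_flush_xen_data_tlb_range_local(vaddr_t va,
                                                          unsigned long size)
{
    vaddr_t end = va + size;

    dsb(sy);                    /* Ensure preceding page-table writes are visible. */
    while ( va < end )
    {
        __flush_xen_data_tlb_one_local(va);
        va += PAGE_SIZE;
    }
    dsb(sy);                    /* Ensure completion of the TLB flush. */
    isb();
}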

/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
    uint64_t par, tmp;
    tmp = READ_CP64(PAR);
    WRITE_CP32(va, ATS1HR);
    isb(); /* Ensure result is available. */
    par = READ_CP64(PAR);
    WRITE_CP64(tmp, PAR);
    return par;
}
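
/*
 * Illustrative sketch (not part of the original header): bit 0 of the
 * returned PAR is the fault flag; it is clear when the translation
 * succeeded and set otherwise. The mask is written out literally here
 * rather than using Xen's PAR_* definitions, and the helper name is
 * hypothetical.
 */
static inline int example_va_translates(vaddr_t va)
{
    return !(__va_to_par(va) & 1); /* PAR bit 0 is the fault flag. */
}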

/* Ask the MMU to translate a Guest VA for us */
static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags)
{
    uint64_t par, tmp;
    tmp = READ_CP64(PAR);
    if ( (flags & GV2M_WRITE) == GV2M_WRITE )
        WRITE_CP32(va, ATS12NSOPW);
    else
        WRITE_CP32(va, ATS12NSOPR);
    isb(); /* Ensure result is available. */
    par = READ_CP64(PAR);
    WRITE_CP64(tmp, PAR);
    return par;
}

static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags)
{
    uint64_t par, tmp;
    tmp = READ_CP64(PAR);
    if ( (flags & GV2M_WRITE) == GV2M_WRITE )
        WRITE_CP32(va, ATS1CPW);
    else
        WRITE_CP32(va, ATS1CPR);
    isb(); /* Ensure result is available. */
    par = READ_CP64(PAR);
    WRITE_CP64(tmp, PAR);
    return par;
}
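
/*
 * Illustrative sketch (not from the original header): callers request a
 * guest translation for read or write access by passing GV2M_WRITE in
 * flags; bit 0 of the returned PAR is again the fault flag. The helper
 * name is hypothetical.
 */
static inline int example_guest_va_is_writable(vaddr_t gva)
{
    return !(gva_to_ma_par(gva, GV2M_WRITE) & 1);
}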

#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)

#endif /* __ASSEMBLY__ */

#endif /* __ARM_ARM32_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */