#ifndef __ARM_PAGE_H__
#define __ARM_PAGE_H__

#include <public/xen.h>
#include <asm/processor.h>
#include <asm/lpae.h>

#ifdef CONFIG_ARM_64
#define PADDR_BITS 48
#else
#define PADDR_BITS 40
#endif
#define PADDR_MASK ((1ULL << PADDR_BITS)-1)

#define VADDR_BITS 32
#define VADDR_MASK (~0UL)

/* Shareability values for the LPAE entries */
#define LPAE_SH_NON_SHAREABLE 0x0
#define LPAE_SH_UNPREDICTABLE 0x1
#define LPAE_SH_OUTER 0x2
#define LPAE_SH_INNER 0x3

/*
 * Attribute Indexes.
 *
 * These are valid in the AttrIndx[2:0] field of an LPAE stage 1 page
 * table entry. They are indexes into the bytes of the MAIR*
 * registers, as defined below.
 */
#define MT_DEVICE_nGnRnE 0x0
#define MT_NORMAL_NC 0x1
#define MT_NORMAL_WT 0x2
#define MT_NORMAL_WB 0x3
#define MT_DEVICE_nGnRE 0x4
#define MT_NORMAL 0x7
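
/*
 * Worked example (illustrative, not part of the original interface):
 * AttrIndx is simply a byte selector into the 64-bit MAIR value built
 * below, so a stage 1 entry using MT_NORMAL_NC resolves to
 *
 *     attr = (MAIRVAL >> (MT_NORMAL_NC * 8)) & 0xff;    -- == 0x44
 *
 * i.e. index 1 selects MAIR byte 1 (bits [15:8]).
 */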

/*
 * LPAE Memory region attributes. Indexed by the AttrIndx bits of an
 * LPAE entry; the 8-bit fields are packed little-endian into MAIR0 and MAIR1.
 *
 * See section "Device memory" B2.7.2 in ARM DDI 0487B.a for more
 * details about the meaning of *G*R*E.
 *
 *                      ai    encoding
 *   MT_DEVICE_nGnRnE   000   0000 0000  -- Strongly Ordered/Device nGnRnE
 *   MT_NORMAL_NC       001   0100 0100  -- Non-Cacheable
 *   MT_NORMAL_WT       010   1010 1010  -- Write-through
 *   MT_NORMAL_WB       011   1110 1110  -- Write-back
 *   MT_DEVICE_nGnRE    100   0000 0100  -- Device nGnRE
 *   ??                 101
 *   reserved           110
 *   MT_NORMAL          111   1111 1111  -- Write-back write-allocate
 *
 * /!\ It is not possible to combine the definitions into a single
 * MAIRVAL and then split it, because that would produce a 64-bit
 * value that some assemblers do not understand.
 */
#define _MAIR0(attr, mt) (_AC(attr, ULL) << ((mt) * 8))
#define _MAIR1(attr, mt) (_AC(attr, ULL) << (((mt) * 8) - 32))

#define MAIR0VAL (_MAIR0(0x00, MT_DEVICE_nGnRnE)| \
                  _MAIR0(0x44, MT_NORMAL_NC)    | \
                  _MAIR0(0xaa, MT_NORMAL_WT)    | \
                  _MAIR0(0xee, MT_NORMAL_WB))

#define MAIR1VAL (_MAIR1(0x04, MT_DEVICE_nGnRE) | \
                  _MAIR1(0xff, MT_NORMAL))

#define MAIRVAL (MAIR1VAL << 32 | MAIR0VAL)
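
/*
 * Worked example (illustrative): expanding the macros above gives
 *
 *     MAIR0VAL == 0xee << 24 | 0xaa << 16 | 0x44 << 8 | 0x00
 *              == 0xeeaa4400
 *     MAIR1VAL == 0xff << (7 * 8 - 32) | 0x04 << (4 * 8 - 32)
 *              == 0xff000004
 *
 * so byte n of MAIRVAL holds the attribute byte for AttrIndx n.
 */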

/*
 * Layout of the flags used for updating the hypervisor page tables
 *
 * [0:2] Memory Attribute Index
 * [3:4] Permission flags
 */
#define PAGE_AI_MASK(x) ((x) & 0x7U)

#define _PAGE_XN_BIT 3
#define _PAGE_RO_BIT 4
#define _PAGE_XN (1U << _PAGE_XN_BIT)
#define _PAGE_RO (1U << _PAGE_RO_BIT)
#define PAGE_XN_MASK(x) (((x) >> _PAGE_XN_BIT) & 0x1U)
#define PAGE_RO_MASK(x) (((x) >> _PAGE_RO_BIT) & 0x1U)

/*
 * _PAGE_DEVICE and _PAGE_NORMAL are convenience defines. They are not
 * meant to be used outside of this header.
 */
#define _PAGE_DEVICE _PAGE_XN
#define _PAGE_NORMAL MT_NORMAL

#define PAGE_HYPERVISOR_RO (_PAGE_NORMAL|_PAGE_RO|_PAGE_XN)
#define PAGE_HYPERVISOR_RX (_PAGE_NORMAL|_PAGE_RO)
#define PAGE_HYPERVISOR_RW (_PAGE_NORMAL|_PAGE_XN)

#define PAGE_HYPERVISOR PAGE_HYPERVISOR_RW
#define PAGE_HYPERVISOR_NOCACHE (_PAGE_DEVICE|MT_DEVICE_nGnRE)
#define PAGE_HYPERVISOR_WC (_PAGE_DEVICE|MT_NORMAL_NC)
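
/*
 * Worked example (illustrative): PAGE_HYPERVISOR_RO expands to
 * MT_NORMAL | _PAGE_RO | _PAGE_XN == 0x7 | 0x10 | 0x8 == 0x1f, so
 *
 *     PAGE_AI_MASK(PAGE_HYPERVISOR_RO) == MT_NORMAL  (AttrIndx 7)
 *     PAGE_RO_MASK(PAGE_HYPERVISOR_RO) == 1          (read-only)
 *     PAGE_XN_MASK(PAGE_HYPERVISOR_RO) == 1          (execute-never)
 */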

/*
 * Stage 2 Memory Type.
 *
 * These are valid in the MemAttr[3:0] field of an LPAE stage 2 page
 * table entry.
 */
#define MATTR_DEV 0x1
#define MATTR_MEM_NC 0x5
#define MATTR_MEM 0xf
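
/*
 * Illustrative decode (see the Armv8 ARM for the authoritative
 * encoding): MATTR_DEV (0b0001) is Device-nGnRE, MATTR_MEM_NC
 * (0b0101) is Normal Inner/Outer Non-cacheable, and MATTR_MEM
 * (0b1111) is Normal Inner/Outer Write-Back Cacheable. The resulting
 * memory type is the stage 1 / stage 2 combination of this field
 * with the guest's own attributes.
 */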

/* Flags for get_page_from_gva, gvirt_to_maddr etc */
#define GV2M_READ (0u<<0)
#define GV2M_WRITE (1u<<0)
#define GV2M_EXEC (1u<<1)

#ifndef __ASSEMBLY__

#include <xen/errno.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <asm/system.h>

#if defined(CONFIG_ARM_32)
# include <asm/arm32/page.h>
#elif defined(CONFIG_ARM_64)
# include <asm/arm64/page.h>
#else
# error "unknown ARM variant"
#endif

/* Architectural minimum cacheline size is 4 32-bit words. */
#define MIN_CACHELINE_BYTES 16
/* Actual cacheline size on the boot CPU. */
extern size_t cacheline_bytes;

#define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE)

/*
 * Functions for flushing medium-sized areas.
 * If 'size' is large enough we might want to use model-specific
 * full-cache flushes.
 */

static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
{
    const void *end = p + size;
    size_t cacheline_mask = cacheline_bytes - 1;

    dsb(sy);           /* So the CPU issues all writes to the range */

    /*
     * A misaligned head or tail line may also hold unrelated data, so
     * such lines are cleaned & invalidated rather than just
     * invalidated, to avoid throwing the neighbouring data away.
     */
    if ( (uintptr_t)p & cacheline_mask )
    {
        p = (void *)((uintptr_t)p & ~cacheline_mask);
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
        p += cacheline_bytes;
    }
    if ( (uintptr_t)end & cacheline_mask )
    {
        end = (void *)((uintptr_t)end & ~cacheline_mask);
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end));
    }

    /* Lines fully covered by the range can simply be invalidated. */
    for ( ; p < end; p += cacheline_bytes )
        asm volatile (__invalidate_dcache_one(0) : : "r" (p));

    dsb(sy);           /* So we know the flushes happen before continuing */

    return 0;
}

static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
    const void *end = p + size;
    dsb(sy);           /* So the CPU issues all writes to the range */
    p = (void *)((uintptr_t)p & ~(cacheline_bytes - 1));
    for ( ; p < end; p += cacheline_bytes )
        asm volatile (__clean_dcache_one(0) : : "r" (p));
    dsb(sy);           /* So we know the flushes happen before continuing */
    /* ARM callers assume that dcache_* functions cannot fail. */
    return 0;
}

static inline int clean_and_invalidate_dcache_va_range
    (const void *p, unsigned long size)
{
    const void *end = p + size;
    dsb(sy);           /* So the CPU issues all writes to the range */
    p = (void *)((uintptr_t)p & ~(cacheline_bytes - 1));
    for ( ; p < end; p += cacheline_bytes )
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
    dsb(sy);           /* So we know the flushes happen before continuing */
    /* ARM callers assume that dcache_* functions cannot fail. */
    return 0;
}
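
/*
 * Usage sketch (illustrative, not part of the original header): a
 * typical pairing when sharing a buffer with a non-cache-coherent
 * observer. The function and its arguments are hypothetical.
 */
static inline void example_share_noncoherent_buffer(void *buf,
                                                    unsigned long len)
{
    /* Push any dirty lines out to RAM so the observer sees them. */
    clean_dcache_va_range(buf, len);

    /* ... observer reads and overwrites the buffer here ... */

    /* Discard stale cached copies before reading the new contents. */
    invalidate_dcache_va_range(buf, len);
}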

/*
 * Macros for flushing a single small item. The predicate is always
 * compile-time constant so this will compile down to 3 instructions in
 * the common case.
 */
#define clean_dcache(x) do {                                            \
    typeof(x) *_p = &(x);                                               \
    if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
        clean_dcache_va_range(_p, sizeof(x));                           \
    else                                                                \
        asm volatile (                                                  \
            "dsb sy;"   /* Finish all earlier writes */                 \
            __clean_dcache_one(0)                                       \
            "dsb sy;"   /* Finish flush before continuing */            \
            : : "r" (_p), "m" (*_p));                                   \
} while (0)

#define clean_and_invalidate_dcache(x) do {                             \
    typeof(x) *_p = &(x);                                               \
    if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
        clean_and_invalidate_dcache_va_range(_p, sizeof(x));            \
    else                                                                \
        asm volatile (                                                  \
            "dsb sy;"   /* Finish all earlier writes */                 \
            __clean_and_invalidate_dcache_one(0)                        \
            "dsb sy;"   /* Finish flush before continuing */            \
            : : "r" (_p), "m" (*_p));                                   \
} while (0)
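
/*
 * Usage sketch (illustrative): flushing a single, suitably aligned
 * object after writing it. 'slot' is a hypothetical descriptor field
 * shared with a non-coherent observer; since sizeof(*slot) fits in
 * one cacheline and matches its alignment, this takes the short
 * dsb/dc/dsb path.
 */
static inline void example_publish_u32(uint32_t *slot, uint32_t val)
{
    *slot = val;
    clean_dcache(*slot);
}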

/*
 * Flush a range of VA's hypervisor mappings from the data TLB of the
 * local processor. This is not sufficient when changing code mappings
 * or for self modifying code.
 */
static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
                                                     unsigned long size)
{
    unsigned long end = va + size;
    dsb(sy); /* Ensure preceding are visible */
    while ( va < end )
    {
        __flush_xen_data_tlb_one_local(va);
        va += PAGE_SIZE;
    }
    dsb(sy); /* Ensure completion of the TLB flush */
    isb();
}

/*
 * Flush a range of VA's hypervisor mappings from the data TLB of all
 * processors in the inner-shareable domain. This is not sufficient
 * when changing code mappings or for self modifying code.
 */
static inline void flush_xen_data_tlb_range_va(unsigned long va,
                                               unsigned long size)
{
    unsigned long end = va + size;
    dsb(sy); /* Ensure preceding are visible */
    while ( va < end )
    {
        __flush_xen_data_tlb_one(va);
        va += PAGE_SIZE;
    }
    dsb(sy); /* Ensure completion of the TLB flush */
    isb();
}
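
/*
 * Usage sketch (illustrative, hypothetical helper): after rewriting a
 * live hypervisor mapping, the stale translation must be evicted
 * before the new one can be relied upon. write_pte() is the arch
 * helper pulled in from <asm/arm32/page.h> or <asm/arm64/page.h>
 * above.
 */
static inline void example_update_xen_mapping(lpae_t *entry, lpae_t pte,
                                              unsigned long va)
{
    write_pte(entry, pte);
    flush_xen_data_tlb_range_va(va, PAGE_SIZE);
}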

/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn, bool sync_icache);

/*
 * Print a walk of a page table or p2m
 *
 * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2)
 * addr is the VA or IPA to translate
 * root_level is the starting level of the page table
 *   (e.g. derived from TCR_EL2.T0SZ for stage 1, or VTCR_EL2.SL0
 *   for the p2m)
 * nr_root_tables is the number of concatenated tables at the root.
 *   This can only be != 1 for P2M walks starting at the first or
 *   subsequent level.
 */
void dump_pt_walk(paddr_t ttbr, paddr_t addr,
                  unsigned int root_level,
                  unsigned int nr_root_tables);
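
/*
 * Example (illustrative): dump_hyp_walk() below amounts to
 *
 *     dump_pt_walk(READ_SYSREG64(TTBR0_EL2), addr, root_level, 1);
 *
 * with the hypervisor's fixed root level, while a p2m dump passes
 * VTTBR_EL2 and the domain's number of concatenated root tables.
 */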

/* Print a walk of the hypervisor's page tables for a virtual addr. */
extern void dump_hyp_walk(vaddr_t addr);
/* Print a walk of the p2m for a domain for a physical address. */
extern void dump_p2m_lookup(struct domain *d, paddr_t addr);

static inline uint64_t va_to_par(vaddr_t va)
{
    uint64_t par = __va_to_par(va);
    /* It is not OK to call this with an invalid VA */
    if ( par & PAR_F )
    {
        dump_hyp_walk(va);
        panic_PAR(par);
    }
    return par;
}

static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr, unsigned int flags)
{
    uint64_t par = gva_to_ipa_par(va, flags);
    if ( par & PAR_F )
        return -EFAULT;
    *paddr = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long)va & ~PAGE_MASK);
    return 0;
}
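
/*
 * Usage sketch (illustrative): probing whether a guest virtual
 * address is currently readable before copying from it. The function
 * name is hypothetical.
 */
static inline bool example_guest_va_readable(vaddr_t va)
{
    paddr_t ipa;

    return gva_to_ipa(va, &ipa, GV2M_READ) == 0;
}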

/* Bits in the PAR returned by va_to_par */
#define PAR_FAULT 0x1

#endif /* __ASSEMBLY__ */

#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

#endif /* __ARM_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */