#ifndef __ARM_PAGE_H__
#define __ARM_PAGE_H__

#include <public/xen.h>
#include <xen/page-size.h>
#include <asm/processor.h>
#include <asm/lpae.h>
#include <asm/sysregs.h>

/* Shareability values for the LPAE entries */
#define LPAE_SH_NON_SHAREABLE 0x0
#define LPAE_SH_UNPREDICTALE  0x1
#define LPAE_SH_OUTER         0x2
#define LPAE_SH_INNER         0x3

/*
 * Attribute Indexes.
 *
 * These are valid in the AttrIndx[2:0] field of an LPAE stage 1 page
 * table entry. They are indexes into the bytes of the MAIR*
 * registers, as defined below.
 */
#define MT_DEVICE_nGnRnE 0x0
#define MT_NORMAL_NC     0x1
#define MT_NORMAL_WT     0x2
#define MT_NORMAL_WB     0x3
#define MT_DEVICE_nGnRE  0x4
#define MT_NORMAL        0x7
/*
 * LPAE Memory region attributes. Indexed by the AttrIndex bits of a
 * LPAE entry; the 8-bit fields are packed little-endian into MAIR0 and MAIR1.
 *
 * See section "Device memory" B2.7.2 in ARM DDI 0487B.a for more
 * details about the meaning of *G*R*E.
 *
 *                        ai    encoding
 *   MT_DEVICE_nGnRnE     000   0000 0000  -- Strongly Ordered/Device nGnRnE
 *   MT_NORMAL_NC         001   0100 0100  -- Non-Cacheable
 *   MT_NORMAL_WT         010   1010 1010  -- Write-through
 *   MT_NORMAL_WB         011   1110 1110  -- Write-back
 *   MT_DEVICE_nGnRE      100   0000 0100  -- Device nGnRE
 *   ??                   101
 *   reserved             110
 *   MT_NORMAL            111   1111 1111  -- Write-back write-allocate
 *
 * /!\ It is not possible to define MAIRVAL as a single value and then
 * split it into MAIR0VAL and MAIR1VAL, because the resulting 64-bit
 * constant is not understood by some assemblers.
 */
#define _MAIR0(attr, mt) (_AC(attr, ULL) << ((mt) * 8))
#define _MAIR1(attr, mt) (_AC(attr, ULL) << (((mt) * 8) - 32))

#define MAIR0VAL (_MAIR0(0x00, MT_DEVICE_nGnRnE)| \
                  _MAIR0(0x44, MT_NORMAL_NC)    | \
                  _MAIR0(0xaa, MT_NORMAL_WT)    | \
                  _MAIR0(0xee, MT_NORMAL_WB))

#define MAIR1VAL (_MAIR1(0x04, MT_DEVICE_nGnRE) | \
                  _MAIR1(0xff, MT_NORMAL))

#define MAIRVAL (MAIR1VAL << 32 | MAIR0VAL)
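
/*
 * For illustration only, the values above combine as follows (this follows
 * directly from the MT_* indexes and the _MAIR0/_MAIR1 shifts defined in
 * this header):
 *
 *   MAIR0VAL = 0x00 << 0 | 0x44 << 8 | 0xaa << 16 | 0xee << 24 = 0xeeaa4400
 *   MAIR1VAL = 0x04 << 0 | 0xff << 24                          = 0xff000004
 *   MAIRVAL  = 0xff000004eeaa4400
 */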

/*
 * Layout of the flags used for updating the hypervisor page tables
 *
 * [0:2] Memory Attribute Index
 * [3:4] Permission flags
 * [5]   Page present
 * [6]   Only populate page tables
 * [7]   Superpage mappings are allowed
 * [8]   Set contiguous bit (internal flag)
 */
#define PAGE_AI_MASK(x) ((x) & 0x7U)

#define _PAGE_XN_BIT    3
#define _PAGE_RO_BIT    4
#define _PAGE_XN        (1U << _PAGE_XN_BIT)
#define _PAGE_RO        (1U << _PAGE_RO_BIT)
#define PAGE_XN_MASK(x) (((x) >> _PAGE_XN_BIT) & 0x1U)
#define PAGE_RO_MASK(x) (((x) >> _PAGE_RO_BIT) & 0x1U)

#define _PAGE_PRESENT  (1U << 5)
#define _PAGE_POPULATE (1U << 6)

#define _PAGE_BLOCK_BIT 7
#define _PAGE_BLOCK     (1U << _PAGE_BLOCK_BIT)

#define _PAGE_CONTIG_BIT 8
#define _PAGE_CONTIG     (1U << _PAGE_CONTIG_BIT)

/*
 * _PAGE_DEVICE and _PAGE_NORMAL are convenience defines. They are not
 * meant to be used outside of this header.
 */
#define _PAGE_DEVICE (_PAGE_XN|_PAGE_PRESENT)
#define _PAGE_NORMAL (MT_NORMAL|_PAGE_PRESENT)

#define PAGE_HYPERVISOR_RO      (_PAGE_NORMAL|_PAGE_RO|_PAGE_XN)
#define PAGE_HYPERVISOR_RX      (_PAGE_NORMAL|_PAGE_RO)
#define PAGE_HYPERVISOR_RW      (_PAGE_NORMAL|_PAGE_XN)

#define PAGE_HYPERVISOR         PAGE_HYPERVISOR_RW
#define PAGE_HYPERVISOR_NOCACHE (_PAGE_DEVICE|MT_DEVICE_nGnRE)
#define PAGE_HYPERVISOR_WC      (_PAGE_DEVICE|MT_NORMAL_NC)
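
/*
 * Worked example, for illustration only: PAGE_HYPERVISOR_RX expands to
 * MT_NORMAL (AttrIndx 0x7 in bits [0:2]) | _PAGE_PRESENT (bit 5) |
 * _PAGE_RO (bit 4), i.e. a present, read-only mapping of Normal Write-Back
 * memory with XN left clear, so it remains executable.
 */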

/*
 * Stage 2 Memory Type.
 *
 * These are valid in the MemAttr[3:0] field of an LPAE stage 2 page
 * table entry.
 */
#define MATTR_DEV    0x1
#define MATTR_MEM_NC 0x5
#define MATTR_MEM    0xf

/* Flags for get_page_from_gva, gvirt_to_maddr etc */
#define GV2M_READ  (0u<<0)
#define GV2M_WRITE (1u<<0)
#define GV2M_EXEC  (1u<<1)

#ifndef __ASSEMBLY__

#include <xen/errno.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <asm/atomic.h>
#include <asm/system.h>

#if defined(CONFIG_ARM_32)
# include <asm/arm32/page.h>
#elif defined(CONFIG_ARM_64)
# include <asm/arm64/page.h>
#else
# error "unknown ARM variant"
#endif

/* Architectural minimum cacheline size is 4 32-bit words. */
#define MIN_CACHELINE_BYTES 16
/* Min dcache line size on the boot CPU. */
extern size_t dcache_line_bytes;

#define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE)

#define clear_page_hot  clear_page
#define clear_page_cold clear_page

#define scrub_page_hot(page) memset(page, SCRUB_BYTE_PATTERN, PAGE_SIZE)
#define scrub_page_cold      scrub_page_hot

static inline size_t read_dcache_line_bytes(void)
{
    register_t ctr;

    /* Read CTR */
    ctr = READ_SYSREG(CTR_EL0);

    /* Bits 16-19 are the log2 number of words in the cacheline. */
    return (size_t) (4 << ((ctr >> 16) & 0xf));
}
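
/*
 * Worked example, for illustration only: CTR_EL0 bits [19:16] (DminLine)
 * encode log2 of the smallest D-cache line size in 4-byte words, so a
 * field value of 4 yields 4 << 4 = 64 bytes, a common line size on Armv8
 * cores.
 */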

/* Functions for flushing medium-sized areas.
 * If 'range' is large enough we might want to use model-specific
 * full-cache flushes. */

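/*
 * Note on invalidate_dcache_va_range() below: a misaligned head or tail
 * line is cleaned *and* invalidated, while whole lines in the middle are
 * only invalidated. Cleaning the partial lines first avoids discarding
 * unrelated data that happens to share those cache lines with the range.
 */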
static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
{
    size_t cacheline_mask = dcache_line_bytes - 1;
    unsigned long idx = 0;

    if ( !size )
        return 0;

    /* Passing a region that wraps around is illegal */
    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);

    dsb(sy);           /* So the CPU issues all writes to the range */

    if ( (uintptr_t)p & cacheline_mask )
    {
        size -= dcache_line_bytes - ((uintptr_t)p & cacheline_mask);
        p = (void *)((uintptr_t)p & ~cacheline_mask);
        asm_inline volatile (
            __clean_and_invalidate_dcache_one(0) :: "r" (p) );
        p += dcache_line_bytes;
    }

    for ( ; size >= dcache_line_bytes;
          idx += dcache_line_bytes, size -= dcache_line_bytes )
        asm volatile (__invalidate_dcache_one(0) : : "r" (p + idx));

    if ( size > 0 )
        asm_inline volatile (
            __clean_and_invalidate_dcache_one(0) :: "r" (p + idx) );

    dsb(sy);           /* So we know the flushes happen before continuing */

    return 0;
}

static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
    size_t cacheline_mask = dcache_line_bytes - 1;
    unsigned long idx = 0;

    if ( !size )
        return 0;

    /* Passing a region that wraps around is illegal */
    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);

    dsb(sy);           /* So the CPU issues all writes to the range */
    size += (uintptr_t)p & cacheline_mask;
    size = (size + cacheline_mask) & ~cacheline_mask;
    p = (void *)((uintptr_t)p & ~cacheline_mask);
    for ( ; size >= dcache_line_bytes;
          idx += dcache_line_bytes, size -= dcache_line_bytes )
        asm_inline volatile ( __clean_dcache_one(0) : : "r" (p + idx) );
    dsb(sy);           /* So we know the flushes happen before continuing */
    /* ARM callers assume that dcache_* functions cannot fail. */
    return 0;
}
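
/*
 * Worked example of the rounding above, for illustration only (the
 * addresses are made up): with 64-byte lines, p = 0x1010 and size = 0x30
 * become size = 0x30 + 0x10 = 0x40, rounded up to 0x40, with p aligned
 * down to 0x1000, so the single line 0x1000-0x103f covering the whole
 * range gets cleaned.
 */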

static inline int clean_and_invalidate_dcache_va_range
    (const void *p, unsigned long size)
{
    size_t cacheline_mask = dcache_line_bytes - 1;
    unsigned long idx = 0;

    if ( !size )
        return 0;

    /* Passing a region that wraps around is illegal */
    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);

    dsb(sy);           /* So the CPU issues all writes to the range */
    size += (uintptr_t)p & cacheline_mask;
    size = (size + cacheline_mask) & ~cacheline_mask;
    p = (void *)((uintptr_t)p & ~cacheline_mask);
    for ( ; size >= dcache_line_bytes;
          idx += dcache_line_bytes, size -= dcache_line_bytes )
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
    dsb(sy);           /* So we know the flushes happen before continuing */
    /* ARM callers assume that dcache_* functions cannot fail. */
    return 0;
}

/* Macros for flushing a single small item.  The predicate is always
 * compile-time constant so this will compile down to 3 instructions in
 * the common case. */
#define clean_dcache(x) do {                                            \
    typeof(x) *_p = &(x);                                               \
    if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
        clean_dcache_va_range(_p, sizeof(x));                           \
    else                                                                \
        asm_inline volatile (                                           \
            "dsb sy;"   /* Finish all earlier writes */                 \
            __clean_dcache_one(0)                                       \
            "dsb sy;"   /* Finish flush before continuing */            \
            : : "r" (_p), "m" (*_p));                                   \
} while (0)

#define clean_and_invalidate_dcache(x) do {                             \
    typeof(x) *_p = &(x);                                               \
    if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
        clean_and_invalidate_dcache_va_range(_p, sizeof(x));            \
    else                                                                \
        asm_inline volatile (                                           \
            "dsb sy;"   /* Finish all earlier writes */                 \
            __clean_and_invalidate_dcache_one(0)                        \
            "dsb sy;"   /* Finish flush before continuing */            \
            : : "r" (_p), "m" (*_p));                                   \
} while (0)
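
/*
 * Illustrative usage only; 'boot_desc' is a made-up variable, not part of
 * this header. For a small, suitably aligned object the macro compiles to
 * the inline dsb/clean-line/dsb sequence instead of calling the range
 * helper:
 *
 *     clean_dcache(boot_desc);
 */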

/*
 * Write a pagetable entry.
 *
 * It is the responsibility of the caller to issue an ISB (if a new entry)
 * or a TLB flush (if modified or removed) after write_pte().
 */
static inline void write_pte(lpae_t *p, lpae_t pte)
{
    /* Ensure any writes have completed with the old mappings. */
    dsb(sy);
    /* Safely write the entry. This should always be an atomic write. */
    write_atomic(p, pte);
    dsb(sy);
}
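
/*
 * Illustrative usage only, following the rule stated in the comment above;
 * 'entry' and 'new_pte' are made-up names. After installing a brand new
 * entry the caller still has to issue the required barrier, e.g.:
 *
 *     write_pte(entry, new_pte);
 *     isb();
 */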

/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn, bool sync_icache);

/* Print a walk of the hypervisor's page tables for a virtual addr. */
extern void dump_hyp_walk(vaddr_t addr);

static inline uint64_t va_to_par(vaddr_t va)
{
    uint64_t par = __va_to_par(va);
    /* It is not OK to call this with an invalid VA */
    if ( par & PAR_F )
    {
        dump_hyp_walk(va);
        panic_PAR(par);
    }
    return par;
}

static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr, unsigned int flags)
{
    uint64_t par = gva_to_ipa_par(va, flags);
    if ( par & PAR_F )
        return -EFAULT;
    *paddr = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long)va & ~PAGE_MASK);
    return 0;
}
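
/*
 * Illustrative usage only; 'va' and 'ipa' are made-up locals. Translate a
 * guest virtual address for a read access and bail out if the lookup
 * faults:
 *
 *     paddr_t ipa;
 *
 *     if ( gva_to_ipa(va, &ipa, GV2M_READ) )
 *         return -EFAULT;
 */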

/* Bits in the PAR returned by va_to_par */
#define PAR_FAULT 0x1

#endif /* __ASSEMBLY__ */

#endif /* __ARM_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */