#ifndef __ARCH_ARM_MM__
#define __ARCH_ARM_MM__

#include <xen/kernel.h>
#include <asm/page.h>
#include <public/xen.h>
#include <xen/pdx.h>

#if defined(CONFIG_ARM_32)
# include <asm/arm32/mm.h>
#elif defined(CONFIG_ARM_64)
# include <asm/arm64/mm.h>
#else
# error "unknown ARM variant"
#endif

/* Align Xen to a 2 MiB boundary. */
#define XEN_PADDR_ALIGN (1 << 21)

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 *  1. 'struct page_info' contains a 'struct page_list_entry list'.
 *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
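
/*
 * Illustrative sketch (not part of the interface): the buddy allocator keeps
 * the order in the head page of each free chunk, so splitting a free chunk of
 * order 'o' ('head' being its first struct page_info) would look roughly like:
 *
 *     PFN_ORDER(head) = o - 1;
 *     PFN_ORDER(head + (1UL << (o - 1))) = o - 1;
 *
 * The real bookkeeping lives in common/page_alloc.c.
 */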

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct page_list_entry list;

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {
        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;
        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        union {
            struct {
                /*
                 * Index of the first *possibly* unscrubbed page in the buddy.
                 * One more bit than maximum possible order to accommodate
                 * INVALID_DIRTY_IDX.
                 */
#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
                unsigned long first_dirty:MAX_ORDER + 1;

                /* Do TLBs need flushing for safety before next page use? */
                bool need_tlbflush:1;

#define BUDDY_NOT_SCRUBBING    0
#define BUDDY_SCRUBBING        1
#define BUDDY_SCRUB_ABORT      2
                unsigned long scrub_state:2;
            };

            unsigned long val;
        } free;

    } u;

    union {
        /* Page is in use, but not as a shadow. */
        struct {
            /* Owner of this page (zero if page is anonymous). */
            struct domain *domain;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            unsigned int order;
        } free;

    } v;

    union {
        /*
         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
         * Only valid for: a) free pages, and b) pages with zero type count
         */
        u32 tlbflush_timestamp;
    };
    u64 pad;
};
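
/*
 * Illustrative sketch of how the overlapping fields above are selected by a
 * frame's allocation state ('pg' assumed to be a valid frame_table entry):
 *
 *     if ( (pg->count_info & PGC_count_mask) != 0 )
 *         owner = pg->v.inuse.domain;   // allocated: the 'inuse' views apply
 *     else
 *         order = pg->v.free.order;     // free: the 'free' views apply
 */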

#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))

#define PGT_none          PG_mask(0, 1)  /* no special uses of this page   */
#define PGT_writable_page PG_mask(1, 1)  /* has writable mappings?         */
#define PGT_type_mask     PG_mask(1, 1)  /* Bits 31 or 63.                 */

 /* Count of uses of this frame as its current type. */
#define PGT_count_width   PG_shift(2)
#define PGT_count_mask    ((1UL<<PGT_count_width)-1)

 /* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated    PG_shift(1)
#define PGC_allocated     PG_mask(1, 1)
  /* Page is Xen heap? */
#define _PGC_xen_heap     PG_shift(2)
#define PGC_xen_heap      PG_mask(1, 2)
/* ... */
/* Page is broken? */
#define _PGC_broken       PG_shift(7)
#define PGC_broken        PG_mask(1, 7)
 /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
#define PGC_state         PG_mask(3, 9)
#define PGC_state_inuse   PG_mask(0, 9)
#define PGC_state_offlining PG_mask(1, 9)
#define PGC_state_offlined PG_mask(2, 9)
#define PGC_state_free    PG_mask(3, 9)
#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)

/* Count of references to this frame. */
#define PGC_count_width   PG_shift(9)
#define PGC_count_mask    ((1UL<<PGC_count_width)-1)
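
/*
 * Example (illustrative) of querying the state and reference count encoded
 * in count_info:
 *
 *     unsigned long refs = pg->count_info & PGC_count_mask;
 *
 *     if ( page_state_is(pg, free) )
 *         ASSERT(refs == 0);   // free pages hold no general references
 */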

/*
 * Page needs to be scrubbed. Since this bit can only be set on a page that is
 * free (i.e. in PGC_state_free), we can reuse the PGC_allocated bit.
 */
#define _PGC_need_scrub   _PGC_allocated
#define PGC_need_scrub    PGC_allocated

extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
extern vaddr_t xenheap_virt_end;
#ifdef CONFIG_ARM_64
extern vaddr_t xenheap_virt_start;
#endif

#ifdef CONFIG_ARM_32
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({                                 \
    unsigned long _mfn = (mfn);                                 \
    (_mfn >= mfn_x(xenheap_mfn_start) &&                        \
     _mfn < mfn_x(xenheap_mfn_end));                            \
})
#else
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
    (mfn_valid(_mfn(mfn)) && is_xen_heap_page(__mfn_to_page(mfn)))
#endif

#define is_xen_fixed_mfn(mfn)                                   \
    ((pfn_to_paddr(mfn) >= virt_to_maddr(&_start)) &&           \
     (pfn_to_paddr(mfn) <= virt_to_maddr(&_end)))

#define page_get_owner(_p)    (_p)->v.inuse.domain
#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d))

#define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))

#define XENSHARE_writable 0
#define XENSHARE_readonly 1
extern void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly);
extern void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly);

#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
/* PDX of the first page in the frame table. */
extern unsigned long frametable_base_pdx;

extern unsigned long max_page;
extern unsigned long total_pages;

#define PDX_GROUP_SHIFT SECOND_SHIFT

/* Boot-time pagetable setup */
extern void setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr);
/* Map FDT in boot pagetable */
extern void *early_fdt_map(paddr_t fdt_paddr);
/* Remove early mappings */
extern void remove_early_mappings(void);
/* Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to the
 * new page table */
extern int init_secondary_pagetables(int cpu);
/* Switch secondary CPUs to their own pagetables and finalise MMU setup */
extern void mmu_init_secondary_cpu(void);
/* Set up the xenheap: up to 1GB of contiguous, always-mapped memory.
 * Base must be 32MB aligned and size a multiple of 32MB. */
extern void setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns);
/* Map a frame table to cover physical addresses ps through pe */
extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
/* Map a 4k page in a fixmap entry */
extern void set_fixmap(unsigned map, mfn_t mfn, unsigned attributes);
/* Remove a mapping from a fixmap entry */
extern void clear_fixmap(unsigned map);
/* Map a physical range in virtual memory */
void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned attributes);

static inline void __iomem *ioremap_nocache(paddr_t start, size_t len)
{
    return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
}

static inline void __iomem *ioremap_cache(paddr_t start, size_t len)
{
    return ioremap_attr(start, len, PAGE_HYPERVISOR);
}

static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
{
    return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
}
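
/*
 * Example (illustrative): mapping a device's MMIO registers as uncached
 * device memory; the physical address and register offset below are
 * hypothetical, and the mapping is released with iounmap() when done.
 *
 *     void __iomem *regs = ioremap_nocache(0x1c090000, PAGE_SIZE);
 *
 *     if ( regs )
 *     {
 *         uint32_t id = readl(regs + 0xfe0);
 *         iounmap(regs);
 *     }
 */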

/* XXX -- account for base */
#define mfn_valid(mfn)        ({                                              \
    unsigned long __m_f_n = mfn_x(mfn);                                       \
    likely(pfn_to_pdx(__m_f_n) >= frametable_base_pdx && __mfn_valid(__m_f_n)); \
})

/* Convert between machine frame numbers and page-info structures. */
#define __mfn_to_page(mfn)  (frame_table + (pfn_to_pdx(mfn) - frametable_base_pdx))
#define __page_to_mfn(pg)   pdx_to_pfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)

/* Convert between machine addresses and page-info structures. */
#define maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
#define page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)

/* Convert between frame number and address formats.  */
#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
#define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
#define gfn_to_gaddr(gfn)   pfn_to_paddr(gfn_x(gfn))
#define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
#define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
#define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
#define vmap_to_mfn(va)     paddr_to_pfn(virt_to_maddr((vaddr_t)va))
#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
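
/*
 * Illustrative round trips between the representations above, assuming 'mfn'
 * is a raw frame number for RAM covered by the frame table:
 *
 *     struct page_info *pg = __mfn_to_page(mfn);
 *     paddr_t ma = pfn_to_paddr(mfn);
 *
 *     ASSERT(__page_to_mfn(pg) == mfn);
 *     ASSERT(maddr_to_page(ma) == pg && page_to_maddr(pg) == ma);
 */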

/* Page-align address and convert to frame number format */
#define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))

static inline paddr_t __virt_to_maddr(vaddr_t va)
{
    uint64_t par = va_to_par(va);
    return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
}
#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))

#ifdef CONFIG_ARM_32
static inline void *maddr_to_virt(paddr_t ma)
{
    ASSERT(is_xen_heap_mfn(ma >> PAGE_SHIFT));
    ma -= mfn_to_maddr(xenheap_mfn_start);
    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
}
#else
static inline void *maddr_to_virt(paddr_t ma)
{
    ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
    return (void *)(XENHEAP_VIRT_START -
                    mfn_to_maddr(xenheap_mfn_start) +
                    ((ma & ma_va_bottom_mask) |
                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
}
#endif

/*
 * Translate a guest virtual address to a machine address.
 * Return the fault information if the translation failed, otherwise 0.
 */
static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa,
                                      unsigned int flags)
{
    uint64_t par = gva_to_ma_par(va, flags);
    if ( par & PAR_F )
        return par;
    *pa = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK);
    return 0;
}
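
/*
 * Example (illustrative) of a caller translating a guest virtual address for
 * a read access, assuming the GV2M_* guest-access flags:
 *
 *     paddr_t maddr;
 *
 *     if ( gvirt_to_maddr(gva, &maddr, GV2M_READ) )
 *         return -EFAULT;   // the non-zero PAR carries the fault information
 */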

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x)             (virt_to_maddr(x))
#define __va(x)             (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))

/*
 * We define non-underscored wrappers for the above conversion functions.
 * These are overridden in various source files while the underscored versions
 * remain intact.
 */
#define mfn_to_page(mfn)    __mfn_to_page(mfn)
#define page_to_mfn(pg)     __page_to_mfn(pg)
#define virt_to_mfn(va)     __virt_to_mfn(va)
#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)

/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *virt_to_page(const void *v)
{
    unsigned long va = (unsigned long)v;
    unsigned long pdx;

    ASSERT(va >= XENHEAP_VIRT_START);
    ASSERT(va < xenheap_virt_end);

    pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
    pdx += pfn_to_pdx(mfn_x(xenheap_mfn_start));
    return frame_table + pdx - frametable_base_pdx;
}

static inline void *page_to_virt(const struct page_info *pg)
{
    return mfn_to_virt(page_to_mfn(pg));
}
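
/*
 * Illustrative round trip, valid only for xenheap memory since virt_to_page()
 * asserts that the address lies within the always-mapped xenheap region:
 *
 *     void *p = alloc_xenheap_page();
 *
 *     if ( p )
 *     {
 *         ASSERT(page_to_virt(virt_to_page(p)) == p);
 *         free_xenheap_page(p);
 *     }
 */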

struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
                                    unsigned long flags);

/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef  machine_to_phys_mapping
#define machine_to_phys_mapping  ((unsigned long *)RDWR_MPT_VIRT_START)
#define INVALID_M2P_ENTRY        (~0UL)
#define VALID_M2P(_e)            (!((_e) & (1UL<<(BITS_PER_LONG-1))))
#define SHARED_M2P_ENTRY         (~0UL - 1UL)
#define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)

#define _set_gpfn_from_mfn(mfn, pfn) ({                        \
    struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
    if(d && (d == dom_cow))                                    \
        machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY;     \
    else                                                       \
        machine_to_phys_mapping[(mfn)] = (pfn);                \
    })

static inline void put_gfn(struct domain *d, unsigned long gfn) {}
static inline int relinquish_shared_pages(struct domain *d)
{
    return 0;
}

/* Xen always owns P2M on ARM */
#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0)
#define mfn_to_gmfn(_d, mfn)  (mfn)


/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);

#define domain_set_alloc_bitsize(d) ((void)0)
#define domain_clamp_alloc_bitsize(d, b) (b)

unsigned long domain_get_maximum_gpfn(struct domain *d);

extern struct domain *dom_xen, *dom_io, *dom_cow;

#define memguard_guard_stack(_p)       ((void)0)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)

/* Release all __init and __initdata ranges to be reused */
void free_init_memory(void);

int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
                                          unsigned int order);

extern void put_page_type(struct page_info *page);
static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}
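
/*
 * Example (illustrative): a typical consumer takes a reference on a guest
 * frame via get_page_from_gva() and drops it with put_page() when done;
 * put_page_and_type() is the variant used when a type reference was also
 * acquired.
 *
 *     struct page_info *page = get_page_from_gva(v, gva, GV2M_READ);
 *
 *     if ( page )
 *     {
 *         // ... access the frame ...
 *         put_page(page);
 *     }
 */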

void clear_and_clean_page(struct page_info *page);

#endif /*  __ARCH_ARM_MM__ */
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */