#ifndef _XEN_P2M_H
#define _XEN_P2M_H

#include <xen/mm.h>
#include <xen/radix-tree.h>
#include <xen/rwlock.h>
#include <xen/mem_access.h>

#include <asm/current.h>
#include <asm/hsr.h>

#define paddr_bits PADDR_BITS

/* Holds the bit size of IPAs in p2m tables. */
extern unsigned int p2m_ipa_bits;

#define MAX_VMID_8_BIT  (1UL << 8)
#define MAX_VMID_16_BIT (1UL << 16)

#define INVALID_VMID 0 /* VMID 0 is reserved */

#ifdef CONFIG_ARM_64
extern unsigned int max_vmid;
/* VMIDs are 8-bit wide by default on AArch64 */
#define MAX_VMID       max_vmid
#else
/* VMIDs are always 8-bit wide on AArch32 */
#define MAX_VMID        MAX_VMID_8_BIT
#endif

struct domain;

extern void memory_type_changed(struct domain *d);

/* Per-p2m-table state */
struct p2m_domain {
    /*
     * Lock that protects updates to the p2m.
     */
    rwlock_t lock;

    /* Pages used to construct the p2m */
    struct page_list_head pages;

    /* The root of the p2m tree. May be concatenated */
    struct page_info *root;

    /* Current VMID in use */
    uint16_t vmid;

    /* Current Translation Table Base Register for the p2m */
    uint64_t vttbr;

    /* Highest guest frame that's ever been mapped in the p2m */
    gfn_t max_mapped_gfn;

    /*
     * Lowest mapped gfn in the p2m. When releasing mapped gfn's in a
     * preemptible manner this is updated to track where to resume the
     * search. Apart from during teardown this can only decrease.
     */
    gfn_t lowest_mapped_gfn;

    /* Indicate if it is required to clean the cache when writing an entry */
    bool clean_pte;

    /*
     * P2M updates may require TLBs to be flushed (invalidated).
     *
     * Flushes may be deferred by setting 'need_flush' and then flushing
     * when the p2m write lock is released.
     *
     * If an immediate flush is required (e.g. if a superpage is
     * shattered), call p2m_tlb_flush_sync(); see the sketch below.
     */
    bool need_flush;
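
    /*
     * Illustrative sketch of the deferred-flush pattern described above
     * (not part of the interface; the exact signature of
     * p2m_tlb_flush_sync() is an assumption here):
     *
     *     p2m_write_lock(p2m);
     *     ... update entries, possibly setting p2m->need_flush ...
     *     p2m_tlb_flush_sync(p2m);   // only if the update must be
     *                                // visible immediately
     *     p2m_write_unlock(p2m);     // otherwise any deferred flush
     *                                // happens here
     */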

    /* Gather some statistics for information purposes only */
    struct {
        /* Number of mappings at each p2m tree level */
        unsigned long mappings[4];
        /*
         * Number of times we have shattered a mapping
         * at each p2m tree level.
         */
        unsigned long shattered[4];
    } stats;

    /*
     * If true, and an access fault comes in and there is no vm_event listener,
     * pause the domain. Otherwise, remove access restrictions.
     */
    bool access_required;

    /* Defines if mem_access is in use for the domain. */
    bool mem_access_enabled;

    /*
     * Default P2M access type for each page in the domain: new pages,
     * swapped-in pages, cleared pages, and pages that are ambiguously
     * retyped get this access type. See definition of p2m_access_t.
     */
    p2m_access_t default_access;

    /*
     * Radix tree to store the p2m_access_t settings as the PTEs don't have
     * enough available bits to store this information.
     */
    struct radix_tree_root mem_access_settings;

    /* back pointer to domain */
    struct domain *domain;

    /* Track, for each pCPU, the last vCPU of this domain that ran on it */
    uint8_t last_vcpu_ran[NR_CPUS];
};

/*
 * List of possible types for each page in the p2m entry.
 * Only 4 bits are available per page in the PTE for this purpose, so at
 * most 16 types can be stored in the p2m entry itself. If we run out of
 * values in the future, higher values can be used for pseudo-types that
 * are not stored in the p2m entry.
 */
typedef enum {
    p2m_invalid = 0,    /* Nothing mapped here */
    p2m_ram_rw,         /* Normal read/write guest RAM */
    p2m_ram_ro,         /* Read-only; writes are silently dropped */
    p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */
    p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */
    p2m_mmio_direct_c,  /* Read/write mapping of genuine MMIO area cacheable */
    p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */
    p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */
    p2m_grant_map_rw,   /* Read/write grant mapping */
    p2m_grant_map_ro,   /* Read-only grant mapping */
    /* The types below are only used to decide the page attribute in the P2M */
    p2m_iommu_map_rw,   /* Read/write iommu mapping */
    p2m_iommu_map_ro,   /* Read-only iommu mapping */
    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
} p2m_type_t;

/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))

/* RAM types, which map to real machine frames */
#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) |        \
                       p2m_to_mask(p2m_ram_ro))

/* Grant mapping types, which map to a real frame in another VM */
#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) |  \
                         p2m_to_mask(p2m_grant_map_ro))

/* Foreign mapping types */
#define P2M_FOREIGN_TYPES (p2m_to_mask(p2m_map_foreign_rw) | \
                           p2m_to_mask(p2m_map_foreign_ro))

/* Useful predicates */
#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_foreign(_t) (p2m_to_mask(_t) & P2M_FOREIGN_TYPES)
#define p2m_is_any_ram(_t) (p2m_to_mask(_t) &                   \
                            (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
                             P2M_FOREIGN_TYPES))
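
/*
 * Illustrative use of the predicates above (sketch only, not part of the
 * interface):
 *
 *     p2m_type_t t;
 *     mfn_t mfn = p2m_lookup(d, gfn, &t);
 *
 *     if ( !mfn_valid(mfn) || !p2m_is_ram(t) )
 *         return -EINVAL;    // only accept plain guest RAM
 */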

/* All common type definitions should live ahead of this inclusion. */
#ifdef _XEN_P2M_COMMON_H
# error "xen/p2m-common.h should not be included directly"
#endif
#include <xen/p2m-common.h>

#if defined(CONFIG_MMU)
# include <asm/mmu/p2m.h>
#else
# include <asm/mpu/p2m.h>
#endif

static inline bool arch_acquire_resource_check(struct domain *d)
{
    /*
     * The reference counting of foreign entries in set_foreign_p2m_entry()
     * is supported on Arm.
     */
    return true;
}

static inline
void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
{
    /* Not supported on ARM. */
}

/*
 * Helper to restrict "p2m_ipa_bits" according to the external entity's
 * (e.g. IOMMU) requirements.
 *
 * Each corresponding driver should report the maximum IPA bits
 * (Stage-2 input size) it can support.
 */
void p2m_restrict_ipa_bits(unsigned int ipa_bits);

void p2m_vmid_allocator_init(void);
int p2m_alloc_vmid(struct domain *d);
void p2m_free_vmid(struct domain *d);

/* Second stage paging setup, to be called on all CPUs */
void setup_virt_paging(void);

/* Init the data structures for later use by the p2m code */
int p2m_init(struct domain *d);

/*
 * The P2M resources are freed in two parts:
 *  - p2m_teardown() will be called when relinquishing the resources. It
 *    will free large resources (e.g. intermediate page-tables) whose
 *    removal requires preemption.
 *  - p2m_final_teardown() will be called when the domain struct is
 *    freed. This *cannot* be preempted and therefore only small
 *    resources should be freed here.
 */
int p2m_teardown(struct domain *d);
void p2m_final_teardown(struct domain *d);
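
/*
 * Illustrative call order (sketch only; the -ERESTART convention for a
 * preempted p2m_teardown() is an assumption, not a guarantee made here):
 *
 *     do {
 *         rc = p2m_teardown(d);
 *         // on -ERESTART, arrange a continuation and call again later
 *     } while ( rc == -ERESTART );
 *
 *     ...
 *     p2m_final_teardown(d);  // once the domain struct is being freed
 */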

/*
 * Drop the mapping refcount on each mapped page in the p2m.
 *
 * TODO: For the moment only foreign mappings are handled
 */
int relinquish_p2m_mapping(struct domain *d);

/* Context switch */
void p2m_save_state(struct vcpu *p);
void p2m_restore_state(struct vcpu *n);

/* Print debugging/statistical info about a domain's p2m */
void p2m_dump_info(struct domain *d);

int p2m_set_allocation(struct domain *d, unsigned long pages, bool *preempted);
int p2m_teardown_allocation(struct domain *d);
static inline void p2m_write_lock(struct p2m_domain *p2m)
{
    write_lock(&p2m->lock);
}

void p2m_write_unlock(struct p2m_domain *p2m);

static inline void p2m_read_lock(struct p2m_domain *p2m)
{
    read_lock(&p2m->lock);
}

static inline void p2m_read_unlock(struct p2m_domain *p2m)
{
    read_unlock(&p2m->lock);
}

static inline int p2m_is_locked(struct p2m_domain *p2m)
{
    return rw_is_locked(&p2m->lock);
}

static inline int p2m_is_write_locked(struct p2m_domain *p2m)
{
    return rw_is_write_locked(&p2m->lock);
}
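
/*
 * Illustrative locking sketch (not part of the interface): updates to the
 * p2m are expected to run under the write lock, e.g.
 *
 *     p2m_write_lock(p2m);
 *     ASSERT(p2m_is_write_locked(p2m));
 *     rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
 *     p2m_write_unlock(p2m);
 */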

/* Look up the MFN corresponding to a domain's GFN. */
mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t);

/*
 * Get details of a given gfn.
 * The P2M lock should be taken by the caller.
 */
mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
                    p2m_type_t *t, p2m_access_t *a,
                    unsigned int *page_order,
                    bool *valid);
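
/*
 * Illustrative sketch (it is assumed that output pointers which are not
 * needed may be passed as NULL):
 *
 *     p2m_read_lock(p2m);
 *     mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL, NULL);
 *     p2m_read_unlock(p2m);
 */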

/*
 * Directly set a p2m entry: only for use by the P2M code.
 * The P2M write lock should be taken.
 * TODO: Add a check in __p2m_set_entry() to avoid creating a mapping in
 * arch_domain_create() that requires p2m_put_l3_page() to be called.
 */
int p2m_set_entry(struct p2m_domain *p2m,
                  gfn_t sgfn,
                  unsigned long nr,
                  mfn_t smfn,
                  p2m_type_t t,
                  p2m_access_t a);

bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);

void p2m_domain_creation_finished(struct domain *d);

/*
 * Clean & invalidate caches corresponding to a region [start,end) of guest
 * address space.
 *
 * *pstart will get updated if the function is preempted.
 */
int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end);
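
/*
 * Illustrative preemption loop (sketch only; the exact return value used
 * to signal preemption is an assumption):
 *
 *     gfn_t start = gfn;
 *
 *     while ( (rc = p2m_cache_flush_range(d, &start, end)) == -ERESTART )
 *     {
 *         // "start" has been advanced; arrange a continuation here
 *     }
 */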

void p2m_set_way_flush(struct vcpu *v, struct cpu_user_regs *regs);

void p2m_toggle_cache(struct vcpu *v, bool was_enabled);

void p2m_flush_vm(struct vcpu *v);

/*
 * Map a region in the guest p2m with a specific p2m type.
 * The memory attributes will be derived from the p2m type.
 */
int map_regions_p2mt(struct domain *d,
                     gfn_t gfn,
                     unsigned long nr,
                     mfn_t mfn,
                     p2m_type_t p2mt);
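
/*
 * Illustrative sketch: giving a guest access to a device MMIO range
 * (gfn, mfn and nr are hypothetical values):
 *
 *     rc = map_regions_p2mt(d, gfn, nr, mfn, p2m_mmio_direct_dev);
 *     if ( rc )
 *         return rc;
 */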

int unmap_regions_p2mt(struct domain *d,
                       gfn_t gfn,
                       unsigned long nr,
                       mfn_t mfn);

int map_dev_mmio_page(struct domain *d, gfn_t gfn, mfn_t mfn);

int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, unsigned long nr,
                       mfn_t mfn, p2m_type_t t);

int guest_physmap_add_entry(struct domain *d,
                            gfn_t gfn,
                            mfn_t mfn,
                            unsigned long page_order,
                            p2m_type_t t);

/* Untyped version for RAM only, for compatibility */
static inline int __must_check
guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
                       unsigned int page_order)
{
    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}

static inline int guest_physmap_add_pages(struct domain *d,
                                          gfn_t gfn,
                                          mfn_t mfn,
                                          unsigned int nr_pages)
{
    return p2m_insert_mapping(d, gfn, nr_pages, mfn, p2m_ram_rw);
}
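
/*
 * Illustrative sketch: populating a contiguous block of guest RAM
 * (using an order-9 mapping, i.e. 2MB with 4KB pages, is an assumption
 * about the chosen granularity):
 *
 *     rc = guest_physmap_add_page(d, gfn, mfn, 9);
 *     if ( rc )
 *         return rc;
 */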

mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);

/* Look up a GFN and take a reference count on the backing page. */
typedef unsigned int p2m_query_t;
#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */

struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn,
                                        p2m_type_t *t);

static inline struct page_info *get_page_from_gfn(
    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
{
    mfn_t mfn;
    p2m_type_t _t;
    struct page_info *page;

    /*
     * Special case for DOMID_XEN as it is the only domain so far that is
     * not auto-translated.
     */
    if ( likely(d != dom_xen) )
        return p2m_get_page_from_gfn(d, _gfn(gfn), t);

    if ( !t )
        t = &_t;

    *t = p2m_invalid;

    /*
     * DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the
     * page.
     */
    mfn = _mfn(gfn);
    page = mfn_to_page(mfn);

    if ( !mfn_valid(mfn) || !get_page(page, d) )
        return NULL;

    if ( page->u.inuse.type_info & PGT_writable_page )
        *t = p2m_ram_rw;
    else
        *t = p2m_ram_ro;

    return page;
}
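
/*
 * Illustrative sketch: the reference taken above must be dropped with
 * put_page() once the caller is done with the page:
 *
 *     page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
 *     if ( !page )
 *         return -EINVAL;
 *     ... use the page ...
 *     put_page(page);
 */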

int get_page_type(struct page_info *page, unsigned long type);
bool is_iomem_page(mfn_t mfn);
static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
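
/*
 * Illustrative sketch (PGT_writable_page, as used above, is the only type
 * bit this example assumes; a failed call means no reference was taken):
 *
 *     if ( !get_page_and_type(page, d, PGT_writable_page) )
 *         return -EINVAL;
 */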

/* get host p2m table */
#define p2m_get_hostp2m(d) (&(d)->arch.p2m)

static inline bool p2m_vm_event_sanity_check(struct domain *d)
{
    return true;
}

/*
 * Return the start of the next mapping based on the order of the
 * current one.
 */
static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
{
    /*
     * The order corresponds to the order of the mapping (or invalid
     * range) in the page table. So we need to align the GFN before
     * incrementing.
     */
    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));

    return gfn_add(gfn, 1UL << order);
}
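
/*
 * Worked example: with an order-9 mapping (2MB with 4KB pages) and
 * gfn 0x403, the gfn is first aligned down to 0x400 and the function
 * returns 0x600, the first gfn of the next 2MB-aligned block.
 */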

/*
 * A vCPU has cache enabled only when the MMU is enabled and data cache
 * is enabled.
 */
static inline bool vcpu_has_cache_enabled(struct vcpu *v)
{
    const register_t mask = SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_M;

    /* Only works with the current vCPU */
    ASSERT(current == v);

    return (READ_SYSREG(SCTLR_EL1) & mask) == mask;
}

#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */