#ifndef _XEN_P2M_H
#define _XEN_P2M_H

#include <xen/mm.h>
#include <xen/radix-tree.h>
#include <xen/rwlock.h>
#include <xen/mem_access.h>
#include <public/vm_event.h> /* for vm_event_response_t */
#include <public/memory.h>
#include <xen/p2m-common.h>

#define paddr_bits PADDR_BITS

/* Holds the bit size of IPAs in p2m tables. */
extern unsigned int p2m_ipa_bits;

struct domain;

extern void memory_type_changed(struct domain *);

/* Per-p2m-table state */
struct p2m_domain {
    /*
     * Lock that protects updates to the p2m.
     *
     * Please note that we use this lock in a nested way by calling
     * access_guest_memory_by_ipa in guest_walk_(sd|ld). This must be
     * considered in any future implementation.
     */
    rwlock_t lock;

    /* Pages used to construct the p2m */
    struct page_list_head pages;

    /* The root of the p2m tree. May be concatenated */
    struct page_info *root;

    /* Current VMID in use */
    uint16_t vmid;

    /* Current Translation Table Base Register for the p2m */
    uint64_t vttbr;

    /*
     * Highest guest frame that's ever been mapped in the p2m.
     * Only takes into account RAM and foreign mappings.
     */
    gfn_t max_mapped_gfn;

    /*
     * Lowest mapped gfn in the p2m. When releasing mapped gfns in a
     * preemptible manner this is updated to record where to resume the
     * search. Apart from during teardown this can only decrease.
     */
    gfn_t lowest_mapped_gfn;

    /* Indicates whether the cache needs to be cleaned when writing an entry */
    bool clean_pte;

    /*
     * P2M updates may require TLBs to be flushed (invalidated).
     *
     * Flushes may be deferred by setting 'need_flush' and then flushing
     * when the p2m write lock is released.
     *
     * If an immediate flush is required (e.g. if a super page is
     * shattered), call p2m_tlb_flush_sync().
     */
    bool need_flush;

    /* Gather some statistics for information purposes only */
    struct {
        /* Number of mappings at each p2m tree level */
        unsigned long mappings[4];
        /* Number of times we have shattered a mapping
         * at each p2m tree level. */
        unsigned long shattered[4];
    } stats;

    /*
     * If true, and an access fault comes in and there is no vm_event
     * listener, pause the domain. Otherwise, remove access restrictions.
     */
    bool access_required;

    /* Defines whether mem_access is in use for the domain. */
    bool mem_access_enabled;

    /*
     * Default P2M access type for each page in the domain: new pages,
     * swapped in pages, cleared pages, and pages that are ambiguously
     * retyped get this access type. See definition of p2m_access_t.
     */
    p2m_access_t default_access;

    /*
     * Radix tree to store the p2m_access_t settings as the PTEs don't have
     * enough available bits to store this information.
     */
    struct radix_tree_root mem_access_settings;

    /* Back pointer to the domain */
    struct domain *domain;

    /* Keep track of which CPUs this p2m was used on and for which vCPU */
    uint8_t last_vcpu_ran[NR_CPUS];
};

/*
 * List of possible types for each page in the p2m entry.
 * The number of bits available per page in the PTE for this purpose is 4,
 * so only 16 types are possible. If we run out of values in the future,
 * it's possible to use higher values for pseudo-types and not store them
 * in the p2m entry.
 */
typedef enum {
    p2m_invalid = 0,    /* Nothing mapped here */
    p2m_ram_rw,         /* Normal read/write guest RAM */
    p2m_ram_ro,         /* Read-only; writes are silently dropped */
    p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */
    p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */
    p2m_mmio_direct_c,  /* Read/write mapping of genuine MMIO area cacheable */
    p2m_map_foreign,    /* RAM pages from a foreign domain */
    p2m_grant_map_rw,   /* Read/write grant mapping */
    p2m_grant_map_ro,   /* Read-only grant mapping */
    /* The types below are only used to decide the page attribute in the P2M */
    p2m_iommu_map_rw,   /* Read/write IOMMU mapping */
    p2m_iommu_map_ro,   /* Read-only IOMMU mapping */
    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
} p2m_type_t;

/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))

/* RAM types, which map to real machine frames */
#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) |        \
                       p2m_to_mask(p2m_ram_ro))

/* Grant mapping types, which map to a real frame in another VM */
#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) |  \
                         p2m_to_mask(p2m_grant_map_ro))

/* Useful predicates */
#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_foreign(_t) (p2m_to_mask(_t) & p2m_to_mask(p2m_map_foreign))
#define p2m_is_any_ram(_t) (p2m_to_mask(_t) &                   \
                            (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
                             p2m_to_mask(p2m_map_foreign)))
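
/*
 * Illustrative sketch (not part of the interface): the masks and predicates
 * above are typically applied to a type returned by a lookup. 'd' and 'gfn'
 * are assumed to be in scope:
 *
 *     p2m_type_t t;
 *     mfn_t mfn = p2m_lookup(d, gfn, &t);
 *
 *     if ( p2m_is_any_ram(t) )
 *         ... gfn is backed by RAM, a grant mapping or a foreign mapping ...
 */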

static inline
void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
{
    /* Not supported on ARM. */
}

/* Second stage paging setup, to be called on all CPUs */
void setup_virt_paging(void);

/* Init the data structures for later use by the p2m code */
int p2m_init(struct domain *d);

/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);

/*
 * Remove the mapping refcount on each mapped page in the p2m
 *
 * TODO: For the moment only foreign mappings are handled
 */
int relinquish_p2m_mapping(struct domain *d);

/* Context switch */
void p2m_save_state(struct vcpu *p);
void p2m_restore_state(struct vcpu *n);

/* Print debugging/statistical info about a domain's p2m */
void p2m_dump_info(struct domain *d);

static inline void p2m_write_lock(struct p2m_domain *p2m)
{
    write_lock(&p2m->lock);
}

void p2m_write_unlock(struct p2m_domain *p2m);

static inline void p2m_read_lock(struct p2m_domain *p2m)
{
    read_lock(&p2m->lock);
}

static inline void p2m_read_unlock(struct p2m_domain *p2m)
{
    read_unlock(&p2m->lock);
}

static inline int p2m_is_locked(struct p2m_domain *p2m)
{
    return rw_is_locked(&p2m->lock);
}

static inline int p2m_is_write_locked(struct p2m_domain *p2m)
{
    return rw_is_write_locked(&p2m->lock);
}

/* Look up the MFN corresponding to a domain's GFN. */
mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t);

/*
 * Get details of a given gfn.
 * The P2M lock should be taken by the caller.
 */
mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
                    p2m_type_t *t, p2m_access_t *a,
                    unsigned int *page_order);
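
/*
 * Illustrative sketch only: a typical read-side caller takes the p2m read
 * lock around the lookup using the helpers above. 'd' and 'gfn' are assumed
 * to be in scope:
 *
 *     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 *     p2m_type_t t;
 *     p2m_access_t a;
 *     unsigned int order;
 *     mfn_t mfn;
 *
 *     p2m_read_lock(p2m);
 *     mfn = p2m_get_entry(p2m, gfn, &t, &a, &order);
 *     p2m_read_unlock(p2m);
 */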

/*
 * Directly set a p2m entry: only for use by the P2M code.
 * The P2M write lock should be taken.
 */
int p2m_set_entry(struct p2m_domain *p2m,
                  gfn_t sgfn,
                  unsigned long nr,
                  mfn_t smfn,
                  p2m_type_t t,
                  p2m_access_t a);
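
/*
 * Illustrative sketch only: a caller updating the p2m directly is expected
 * to hold the write lock for the duration of the update, e.g.:
 *
 *     p2m_write_lock(p2m);
 *     rc = p2m_set_entry(p2m, sgfn, nr, smfn, t, a);
 *     p2m_write_unlock(p2m);
 *
 * Any TLB flush deferred via 'need_flush' is then performed when the write
 * lock is released.
 */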

/* Clean & invalidate caches corresponding to a region of guest address space */
int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr);

/*
 * Map a region in the guest p2m with a specific p2m type.
 * The memory attributes will be derived from the p2m type.
 */
int map_regions_p2mt(struct domain *d,
                     gfn_t gfn,
                     unsigned long nr,
                     mfn_t mfn,
                     p2m_type_t p2mt);
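
/*
 * Illustrative sketch only: e.g. exposing a device MMIO range to the guest
 * with the Device memory attribute, assuming 'd', 'gfn', 'nr' and 'mfn' are
 * in scope:
 *
 *     rc = map_regions_p2mt(d, gfn, nr, mfn, p2m_mmio_direct_dev);
 */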

int unmap_regions_p2mt(struct domain *d,
                       gfn_t gfn,
                       unsigned long nr,
                       mfn_t mfn);

int map_dev_mmio_region(struct domain *d,
                        gfn_t gfn,
                        unsigned long nr,
                        mfn_t mfn);

int guest_physmap_add_entry(struct domain *d,
                            gfn_t gfn,
                            mfn_t mfn,
                            unsigned long page_order,
                            p2m_type_t t);

/* Untyped version for RAM only, for compatibility */
static inline int guest_physmap_add_page(struct domain *d,
                                         gfn_t gfn,
                                         mfn_t mfn,
                                         unsigned int page_order)
{
    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}
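
/*
 * Illustrative sketch only: mapping a single page of RAM (order 0) at gfn,
 * assuming 'd', 'gfn' and 'mfn' are in scope:
 *
 *     rc = guest_physmap_add_page(d, gfn, mfn, 0);
 */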

mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);

/* Look up a GFN and take a reference count on the backing page. */
typedef unsigned int p2m_query_t;
#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */

static inline struct page_info *get_page_from_gfn(
    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
{
    struct page_info *page;
    p2m_type_t p2mt;
    unsigned long mfn = mfn_x(p2m_lookup(d, _gfn(gfn), &p2mt));

    if ( t )
        *t = p2mt;

    if ( !p2m_is_any_ram(p2mt) )
        return NULL;

    if ( !mfn_valid(_mfn(mfn)) )
        return NULL;
    page = mfn_to_page(mfn);

    /*
     * get_page won't work on foreign mappings because the page doesn't
     * belong to the current domain.
     */
    if ( p2m_is_foreign(p2mt) )
    {
        struct domain *fdom = page_get_owner_and_reference(page);
        ASSERT(fdom != NULL);
        ASSERT(fdom != d);
        return page;
    }

    if ( !get_page(page, d) )
        return NULL;
    return page;
}
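
/*
 * Illustrative sketch only: the reference taken by get_page_from_gfn() must
 * be dropped with put_page() once the caller is done with the page, e.g.:
 *
 *     struct page_info *page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
 *
 *     if ( page )
 *     {
 *         ... use the page ...
 *         put_page(page);
 *     }
 */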

int get_page_type(struct page_info *page, unsigned long type);
bool is_iomem_page(mfn_t mfn);

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}

/* Get the host p2m table */
#define p2m_get_hostp2m(d) (&(d)->arch.p2m)

static inline bool p2m_vm_event_sanity_check(struct domain *d)
{
    return true;
}

/*
 * Return the start of the next mapping based on the order of the
 * current one.
 */
static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
{
    /*
     * The order corresponds to the order of the mapping (or invalid
     * range) in the page table. So we need to align the GFN before
     * incrementing.
     */
    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));

    return gfn_add(gfn, 1UL << order);
}
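
/*
 * Worked example (illustrative): with a 4K granule, order 9 corresponds to
 * a 2MB mapping. For gfn 0x80123 and order 9, the gfn is first aligned down
 * to 0x80000 and the function returns 0x80200, i.e. the first gfn of the
 * next 2MB-aligned region.
 */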

#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */