
#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/rwlock.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/x86_emulate.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct page_list_entry list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)

#ifndef CONFIG_BIGMEM
/*
 * This definition is solely for use in struct page_info (and
 * struct page_list_head), intended to allow easy adjustment once x86-64
 * wants to support more than 16TB.
 * 'unsigned long' should be used for MFNs everywhere else.
 */
#define __pdx_t unsigned int

#undef page_list_entry
struct page_list_entry
{
    __pdx_t next, prev;
};
#else
#define __pdx_t unsigned long
#endif

struct page_sharing_info;

struct page_info
{
    union {
        /* Each frame can be threaded onto a doubly-linked list.
         *
         * For unused shadow pages, a list of free shadow pages;
         * for multi-page shadows, links to the other pages in this shadow;
         * for pinnable shadows, if pinned, a list of all pinned shadows
         * (see sh_type_is_pinnable() for the definition of "pinnable"
         * shadow types). N.B. a shadow may be both pinnable and multi-page.
         * In that case the pages are inserted in order in the list of
         * pinned shadows and walkers of that list must be prepared
         * to keep them all together during updates.
         */
        struct page_list_entry list;
        /* For non-pinnable single-page shadows, a higher entry that points
         * at us. */
        paddr_t up;
        /* For shared/sharable pages, we use a doubly-linked list
         * of all the {pfn,domain} pairs that map this page. We also include
         * an opaque handle, which is effectively a version, so that clients
         * of the sharing code share the version of the page they expect.
         * This list is allocated and freed when a page is shared/unshared.
         */
        struct page_sharing_info *sharing;
    };

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;

        /* Page is in use as a shadow: count_info == 0. */
        struct {
            unsigned long type:5;   /* What kind of shadow is this? */
            unsigned long pinned:1; /* Is the shadow pinned? */
            unsigned long head:1;   /* Is this the first page of the shadow? */
#define PAGE_SH_REFCOUNT_WIDTH 25
            unsigned long count:PAGE_SH_REFCOUNT_WIDTH; /* Reference count */
        } sh;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        union {
            struct {
                /*
                 * Index of the first *possibly* unscrubbed page in the buddy.
                 * One more bit than maximum possible order to accommodate
                 * INVALID_DIRTY_IDX.
                 */
#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
                unsigned int first_dirty;

                /* Do TLBs need flushing for safety before next page use? */
                bool need_tlbflush;

#define BUDDY_NOT_SCRUBBING    0
#define BUDDY_SCRUBBING        1
#define BUDDY_SCRUB_ABORT      2
                uint8_t scrub_state;
            };

            unsigned long val;
        } free;

    } u;

    union {

        /* Page is in use, but not as a shadow. */
        struct {
            /* Owner of this page (zero if page is anonymous). */
            __pdx_t _domain;
        } inuse;

        /* Page is in use as a shadow. */
        struct {
            /* GMFN of guest page we're a shadow of. */
            __pdx_t back;
        } sh;

        /* Page is on a free list. */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            unsigned int order;
        } free;

    } v;

    union {
        /*
         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
         * Only valid for: a) free pages, and b) pages with zero type count
         * (except page table pages when the guest is in shadow mode).
         */
        u32 tlbflush_timestamp;

        /*
         * When PGT_partial is true then the first two fields are valid and
         * indicate that PTEs in the range [0, @nr_validated_ptes) have been
         * validated. An extra page reference must be acquired (or not dropped)
         * whenever PGT_partial gets set, and it must be dropped when the flag
         * gets cleared. This is so that a get() leaving a page in partially
         * validated state (where the caller would drop the reference acquired
         * due to the getting of the type [apparently] failing [-ERESTART])
         * would not accidentally result in a page left with zero general
         * reference count, but non-zero type reference count (possible when
         * the partial get() is followed immediately by domain destruction).
         * Likewise, the ownership of the single type reference for partially
         * (in-)validated pages is tied to this flag, i.e. the instance
         * setting the flag must not drop that reference, whereas the instance
         * clearing it will have to.
         *
         * If @partial_pte is positive then PTE at @nr_validated_ptes+1 has
         * been partially validated. This implies that the general reference
         * to the page (acquired from get_page_from_lNe()) would be dropped
         * (again due to the apparent failure) and hence must be re-acquired
         * when resuming the validation, but must not be dropped when picking
         * up the page for invalidation.
         *
         * If @partial_pte is negative then PTE at @nr_validated_ptes+1 has
         * been partially invalidated. This is basically the opposite case of
         * above, i.e. the general reference to the page was not dropped in
         * put_page_from_lNe() (due to the apparent failure), and hence it
         * must be dropped when the put operation is resumed (and completes),
         * but it must not be acquired if picking up the page for validation.
         *
         * The 3rd field, @linear_pt_count, indicates
         * - by a positive value, how many same-level page table entries a
         *   page table has,
         * - by a negative value, in how many same-level page tables a page
         *   is in use.
         */
        struct {
            u16 nr_validated_ptes:PAGETABLE_ORDER + 1;
            u16 :16 - PAGETABLE_ORDER - 1 - 2;
            s16 partial_pte:2;
            s16 linear_pt_count;
        };

        /*
         * Guest pages with a shadow. This does not conflict with
         * tlbflush_timestamp since page table pages are explicitly not
         * tracked for TLB-flush avoidance when a guest runs in shadow mode.
         */
        u32 shadow_flags;

        /* When in use as a shadow, next shadow in this hash chain. */
        __pdx_t next_shadow;
    };
};

#undef __pdx_t

#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
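
/*
 * Illustration (assuming BITS_PER_LONG == 64): PG_shift(3) == 61, so
 * PG_mask(7, 3) == 7UL << 61, i.e. the top three bits of the word -- which
 * is why PGT_type_mask below is annotated as "Bits 61-63".
 */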

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none          PG_mask(0, 3)  /* no special uses of this page  */
#define PGT_l1_page_table PG_mask(1, 3)  /* using as an L1 page table?    */
#define PGT_l2_page_table PG_mask(2, 3)  /* using as an L2 page table?    */
#define PGT_l3_page_table PG_mask(3, 3)  /* using as an L3 page table?    */
#define PGT_l4_page_table PG_mask(4, 3)  /* using as an L4 page table?    */
#define PGT_seg_desc_page PG_mask(5, 3)  /* using this page in a GDT/LDT? */
#define PGT_shared_page   PG_mask(6, 3)  /* CoW sharable page             */
#define PGT_writable_page PG_mask(7, 3)  /* has writable mappings?        */
#define PGT_type_mask     PG_mask(7, 3)  /* Bits 61-63.                   */

/* Page is locked? */
#define _PGT_locked       PG_shift(4)
#define PGT_locked        PG_mask(1, 4)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned       PG_shift(5)
#define PGT_pinned        PG_mask(1, 5)
/* Has this page been validated for use as its current type? */
#define _PGT_validated    PG_shift(6)
#define PGT_validated     PG_mask(1, 6)
/* PAE only: is this an L2 page directory containing Xen-private mappings? */
#define _PGT_pae_xen_l2   PG_shift(7)
#define PGT_pae_xen_l2    PG_mask(1, 7)
/* Has this page been *partially* validated for use as its current type? */
#define _PGT_partial      PG_shift(8)
#define PGT_partial       PG_mask(1, 8)

/* Count of uses of this frame as its current type. */
#define PGT_count_width   PG_shift(8)
#define PGT_count_mask    ((1UL<<PGT_count_width)-1)

/* Are the 'type mask' bits identical? */
#define PGT_type_equal(x, y) (!(((x) ^ (y)) & PGT_type_mask))

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated    PG_shift(1)
#define PGC_allocated     PG_mask(1, 1)
/* Page is Xen heap? */
#define _PGC_xen_heap     PG_shift(2)
#define PGC_xen_heap      PG_mask(1, 2)
/* Set when a page is in use as a page table. */
#define _PGC_page_table   PG_shift(3)
#define PGC_page_table    PG_mask(1, 3)
/* 3-bit PAT/PCD/PWT cache-attribute hint. */
#define PGC_cacheattr_base PG_shift(6)
#define PGC_cacheattr_mask PG_mask(7, 6)
/* Page is broken? */
#define _PGC_broken       PG_shift(7)
#define PGC_broken        PG_mask(1, 7)
/* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
#define PGC_state         PG_mask(3, 9)
#define PGC_state_inuse   PG_mask(0, 9)
#define PGC_state_offlining PG_mask(1, 9)
#define PGC_state_offlined PG_mask(2, 9)
#define PGC_state_free    PG_mask(3, 9)
#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
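/*
 * For example, page_state_is(pg, offlined) expands to
 * (((pg)->count_info & PGC_state) == PGC_state_offlined).
 */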

/* Count of references to this frame. */
#define PGC_count_width   PG_shift(9)
#define PGC_count_mask    ((1UL<<PGC_count_width)-1)

/*
 * Page needs to be scrubbed. Since this bit can only be set on a page that is
 * free (i.e. in PGC_state_free), we can reuse the PGC_allocated bit.
 */
#define _PGC_need_scrub   _PGC_allocated
#define PGC_need_scrub    PGC_allocated

#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
    (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
#define is_xen_fixed_mfn(mfn)                     \
    ((((mfn) << PAGE_SHIFT) >= __pa(&_stext)) &&  \
     (((mfn) << PAGE_SHIFT) <= __pa(&__2M_rwdata_end)))

#define PRtype_info "016lx" /* should only be used for printk's */

/* The number of out-of-sync shadows we allow per vcpu (prime, please) */
#define SHADOW_OOS_PAGES 3

/* OOS fixup entries */
#define SHADOW_OOS_FIXUPS 2

#define page_get_owner(_p)                                              \
    ((struct domain *)((_p)->v.inuse._domain ?                          \
                       pdx_to_virt((_p)->v.inuse._domain) : NULL))
#define page_set_owner(_p,_d)                                           \
    ((_p)->v.inuse._domain = (_d) ? virt_to_pdx(_d) : 0)

#define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))

#define XENSHARE_writable 0
#define XENSHARE_readonly 1
extern void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly);
extern int unshare_xen_page_with_guest(struct page_info *page,
                                       struct domain *d);
extern void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly);
extern void free_shared_domheap_page(struct page_info *page);

#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);

#define PDX_GROUP_SHIFT L2_PAGETABLE_SHIFT

/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *__virt_to_page(const void *v)
{
    unsigned long va = (unsigned long)v;

    ASSERT(va >= XEN_VIRT_START);
    ASSERT(va < DIRECTMAP_VIRT_END);
    if ( va < XEN_VIRT_END )
        va += DIRECTMAP_VIRT_START - XEN_VIRT_START + xen_phys_start;
    else
        ASSERT(va >= DIRECTMAP_VIRT_START);
    return frame_table + ((va - DIRECTMAP_VIRT_START) >> PAGE_SHIFT);
}

static inline void *__page_to_virt(const struct page_info *pg)
{
    ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_SIZE);
    /*
     * (sizeof(*pg) & -sizeof(*pg)) selects the LS bit of sizeof(*pg). The
     * division and re-multiplication avoids one shift when sizeof(*pg) is a
     * power of two (otherwise there would be a right shift followed by a
     * left shift, which the compiler can't know it can fold into one).
     */
    return (void *)(DIRECTMAP_VIRT_START +
                    ((unsigned long)pg - FRAMETABLE_VIRT_START) /
                    (sizeof(*pg) / (sizeof(*pg) & -sizeof(*pg))) *
                    (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg))));
}
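
/*
 * Worked example of the trick above (illustrative; assumes sizeof(*pg) is a
 * power of two, e.g. 32): then sizeof(*pg) & -sizeof(*pg) == sizeof(*pg), so
 * the expression reduces to offset / 1 * (PAGE_SIZE / sizeof(*pg)), i.e. a
 * single multiplication instead of a right shift followed by a left shift.
 */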

int free_page_type(struct page_info *page, unsigned long type,
                   int preemptible);

void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt);
bool fill_ro_mpt(mfn_t mfn);
void zap_ro_mpt(mfn_t mfn);

bool is_iomem_page(mfn_t mfn);

const unsigned long *get_platform_badpages(unsigned int *array_size);
/* Per page locks:
 * page_lock() is used for two purposes: pte serialization, and memory sharing.
 *
 * All users of page lock for pte serialization live in mm.c, use it
 * to lock a page table page during pte updates, do not take other locks
 * within the critical section delimited by page_lock/unlock, and perform no
 * nesting.
 *
 * All users of page lock for memory sharing live in mm/mem_sharing.c.
 * page_lock is used in memory sharing to protect addition (share) and removal
 * (unshare) of (gfn,domain) tuples to/from the list of gfns that the shared
 * page is currently backing. Nesting may happen when sharing (and locking)
 * two pages -- deadlock is avoided by locking pages in increasing order.
 * All memory sharing code paths take the p2m lock of the affected gfn before
 * taking the lock for the underlying page. We enforce ordering between
 * page_lock and p2m_lock using an mm-locks.h construct.
 *
 * These two users (pte serialization and memory sharing) do not collide,
 * since sharing is only supported for hvm guests, which do not perform pv
 * pte updates.
 */
int page_lock(struct page_info *page);
void page_unlock(struct page_info *page);
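
/*
 * Illustrative pattern only (not a real caller; pt_page is a hypothetical
 * page-table page): the pte-serialization use described above boils down to
 *
 *     if ( page_lock(pt_page) )
 *     {
 *         ... update the PTE, taking no further locks ...
 *         page_unlock(pt_page);
 *     }
 *
 * with all such callers living in mm.c.
 */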

void put_page_type(struct page_info *page);
int get_page_type(struct page_info *page, unsigned long type);
int put_page_type_preemptible(struct page_info *page);
int get_page_type_preemptible(struct page_info *page, unsigned long type);
int put_old_guest_table(struct vcpu *);
int get_page_from_l1e(
    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);

static inline bool get_page_from_mfn(mfn_t mfn, struct domain *d)
{
    struct page_info *page = __mfn_to_page(mfn_x(mfn));

    if ( unlikely(!mfn_valid(mfn)) || unlikely(!get_page(page, d)) )
    {
        gdprintk(XENLOG_WARNING,
                 "Could not get page ref for mfn %"PRI_mfn"\n", mfn_x(mfn));
        return false;
    }

    return true;
}

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int put_page_and_type_preemptible(struct page_info *page)
{
    int rc = put_page_type_preemptible(page);

    if ( likely(rc == 0) )
        put_page(page);
    return rc;
}

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
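
/*
 * Typical pairing (sketch only; page and d are hypothetical locals): a
 * successful get_page_and_type() takes both a general and a type reference,
 * which the caller must later drop with put_page_and_type():
 *
 *     if ( get_page_and_type(page, d, PGT_writable_page) )
 *     {
 *         ... use the page ...
 *         put_page_and_type(page);
 *     }
 */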

#define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                          \
    ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
    ASSERT(page_get_owner(_p) == (_d))

int check_descriptor(const struct domain *, struct desc_struct *d);

extern paddr_t mem_hotplug;

/******************************************************************************
 * With shadow pagetables, the different kinds of address start
 * to get confusing.
 *
 * Virtual addresses are what they usually are: the addresses that are used
 * to access memory while the guest is running. The MMU translates from
 * virtual addresses to machine addresses.
 *
 * (Pseudo-)physical addresses are the abstraction of physical memory the
 * guest uses for allocation and so forth. For the purposes of this code,
 * we can largely ignore them.
 *
 * Guest frame numbers (gfns) are the entries that the guest puts in its
 * pagetables. For normal paravirtual guests, they are actual frame numbers,
 * with the translation done by the guest.
 *
 * Machine frame numbers (mfns) are the entries that the hypervisor puts
 * in the shadow page tables.
 *
 * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
 * to a "machine frame number, from the guest's perspective", or in other
 * words, pseudo-physical frame numbers. However, in the shadow code, the
 * term "gmfn" means "the mfn of a guest page"; this combines naturally with
 * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of
 * a guest L2 page), etc...
 */

/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef  machine_to_phys_mapping
#define machine_to_phys_mapping  ((unsigned long *)RDWR_MPT_VIRT_START)
#define INVALID_M2P_ENTRY        (~0UL)
#define VALID_M2P(_e)            (!((_e) & (1UL<<(BITS_PER_LONG-1))))
#define SHARED_M2P_ENTRY         (~0UL - 1UL)
#define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)
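
/*
 * Illustration of the encodings above: both INVALID_M2P_ENTRY (~0UL) and
 * SHARED_M2P_ENTRY (~0UL - 1) have the top bit set, so VALID_M2P() rejects
 * either of them while accepting any genuine PFN.
 */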

#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
#define _set_gpfn_from_mfn(mfn, pfn) ({                        \
    struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
    unsigned long entry = (d && (d == dom_cow)) ?              \
        SHARED_M2P_ENTRY : (pfn);                              \
    ((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
            (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
     machine_to_phys_mapping[(mfn)] = (entry));                \
    })
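
/*
 * In words (for the macro above): pages owned by dom_cow record
 * SHARED_M2P_ENTRY rather than the caller's pfn, the 32-bit compat M2P is
 * only written for MFNs that actually fall within its range, and the
 * full-width M2P is always updated.
 */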

/*
 * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until
 * the machine_to_phys_mapping is actually set up.
 */
extern bool machine_to_phys_mapping_valid;
#define set_gpfn_from_mfn(mfn, pfn) do {        \
    if ( machine_to_phys_mapping_valid )        \
        _set_gpfn_from_mfn(mfn, pfn);           \
} while (0)

extern struct rangeset *mmio_ro_ranges;

#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])

#define mfn_to_gmfn(_d, mfn)                    \
    ( (paging_mode_translate(_d))               \
      ? get_gpfn_from_mfn(mfn)                  \
      : (mfn) )

#define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
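
/*
 * Note: these two are 32-bit rotations by 12 bits (each the other's inverse),
 * so compat_cr3_to_pfn(compat_pfn_to_cr3(pfn)) == pfn for any 32-bit pfn.
 */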

#ifdef MEMORY_GUARD
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#endif

void memguard_guard_stack(void *p);
void memguard_unguard_stack(void *p);

struct mmio_ro_emulate_ctxt {
    unsigned long cr2;
    unsigned int seg, bdf;
};

extern int mmio_ro_emulated_write(enum x86_segment seg,
                                  unsigned long offset,
                                  void *p_data,
                                  unsigned int bytes,
                                  struct x86_emulate_ctxt *ctxt);
extern int mmcfg_intercept_write(enum x86_segment seg,
                                 unsigned long offset,
                                 void *p_data,
                                 unsigned int bytes,
                                 struct x86_emulate_ctxt *ctxt);
int pv_emul_cpuid(uint32_t leaf, uint32_t subleaf,
                  struct cpuid_leaf *res, struct x86_emulate_ctxt *ctxt);

int audit_adjust_pgtables(struct domain *d, int dir, int noisy);

extern int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs);
extern int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs);

#ifndef NDEBUG

#define AUDIT_SHADOW_ALREADY_LOCKED ( 1u << 0 )
#define AUDIT_ERRORS_OK             ( 1u << 1 )
#define AUDIT_QUIET                 ( 1u << 2 )

void _audit_domain(struct domain *d, int flags);
#define audit_domain(_d) _audit_domain((_d), AUDIT_ERRORS_OK)
void audit_domains(void);

#else

#define _audit_domain(_d, _f) ((void)0)
#define audit_domain(_d)      ((void)0)
#define audit_domains()       ((void)0)

#endif

void make_cr3(struct vcpu *v, mfn_t mfn);
void update_cr3(struct vcpu *v);
int vcpu_destroy_pagetables(struct vcpu *);
void *do_page_walk(struct vcpu *v, unsigned long addr);

int __sync_local_execstate(void);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));

#define NIL(type) ((type *)-sizeof(type))
#define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))

int create_perdomain_mapping(struct domain *, unsigned long va,
                             unsigned int nr, l1_pgentry_t **,
                             struct page_info **);
void destroy_perdomain_mapping(struct domain *, unsigned long va,
                               unsigned int nr);
void free_perdomain_mappings(struct domain *);

extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);

void domain_set_alloc_bitsize(struct domain *d);
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);

unsigned long domain_get_maximum_gpfn(struct domain *d);

extern struct domain *dom_xen, *dom_io, *dom_cow; /* for vmcoreinfo */

/* Definition of an mm lock: spinlock with extra fields for debugging */
typedef struct mm_lock {
    spinlock_t lock;
    int unlock_level;
    int locker;                  /* processor which holds the lock */
    const char *locker_function; /* func that took it */
} mm_lock_t;

typedef struct mm_rwlock {
    percpu_rwlock_t lock;
    int unlock_level;
    int recurse_count;
    int locker;                  /* CPU that holds the write lock */
    const char *locker_function; /* func that took it */
} mm_rwlock_t;

#define arch_free_heap_page(d, pg)                              \
    page_list_del2(pg, is_xen_heap_page(pg) ?                   \
                       &(d)->xenpage_list : &(d)->page_list,    \
                   &(d)->arch.relmem_list)

extern const char zero_page[];

/* Build a 32bit PSE page table using 4MB pages. */
void write_32bit_pse_identmap(uint32_t *l2);

/*
 * x86 maps part of physical memory via the directmap region.
 * Return whether the input MFN falls in that range.
 */
static inline bool arch_mfn_in_directmap(unsigned long mfn)
{
    unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);

    return mfn <= (virt_to_mfn(eva - 1) + 1);
}

#endif /* __ASM_X86_MM_H__ */