#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/mm.h>
#include <xen/radix-tree.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/vpmu.h>
#include <asm/x86_emulate.h>
#include <public/vcpu.h>
#include <public/hvm/hvm_info_table.h>

#define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
#define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))

#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
        (d)->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector)
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
#define is_domain_direct_mapped(d) ((void)(d), 0)

#define VCPU_TRAP_NMI          1
#define VCPU_TRAP_MCE          2
#define VCPU_TRAP_LAST         VCPU_TRAP_MCE

#define nmi_state              async_exception_state(VCPU_TRAP_NMI)
#define mce_state              async_exception_state(VCPU_TRAP_MCE)

#define nmi_pending            nmi_state.pending
#define mce_pending            mce_state.pending

struct trap_bounce {
    uint32_t      error_code;
    uint8_t       flags; /* TBF_ */
    uint16_t      cs;
    unsigned long eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u32)~0U)
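/*
 * Illustrative only: MAPHASH_HASHFN() just keeps the low bits of the frame
 * number, so with MAPHASH_ENTRIES == 8 a pfn of 0x1234 lands in bucket
 * (0x1234 & 7) == 4, and frames differing only above bit 2 collide into the
 * same per-vCPU hash slot.
 */
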
struct mapcache_vcpu {
    /* Shadow of mapcache_domain.epoch. */
    unsigned int shadow_epoch;

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash_entry {
        unsigned long mfn;
        uint32_t      idx;
        uint32_t      refcnt;
    } hash[MAPHASH_ENTRIES];
};

struct mapcache_domain {
    /* The number of array entries, and a cursor into the array. */
    unsigned int entries;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch;
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long *inuse;
    unsigned long *garbage;
};

int mapcache_domain_init(struct domain *);
int mapcache_vcpu_init(struct vcpu *);
void mapcache_override_current(struct vcpu *);

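/*
 * A minimal usage sketch (not part of this header): the mapcache backs
 * map_domain_page(), declared in xen/domain_page.h, so a typical consumer
 * looks like
 *
 *     void *p = map_domain_page(mfn);
 *     ... access the frame through p ...
 *     unmap_domain_page(p);
 *
 * with the lock-free per-vCPU hash above letting repeat mappings of hot
 * frames avoid taking the domain-wide lock.
 */
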
/* x86/64: toggle guest between kernel and user modes. */
void toggle_guest_mode(struct vcpu *);
/* x86/64: toggle guest page tables between kernel and user modes. */
void toggle_guest_pt(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
void hypercall_page_initialise(struct domain *d, void *);

/************************************************/
/*          shadow paging extension             */
/************************************************/
struct shadow_domain {
#ifdef CONFIG_SHADOW_PAGING
    unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
    struct page_list_head pinned_shadows;

    /* Memory allocation */
    struct page_list_head freelist;
    unsigned int      total_pages;  /* number of pages allocated */
    unsigned int      free_pages;   /* number of pages on freelists */
    unsigned int      p2m_pages;    /* number of pages allocated to p2m */

    /* 1-to-1 map for use when HVM vcpus have paging disabled */
    pagetable_t unpaged_pagetable;

    /* Reflects guest page table dirty status; incremented by write
     * emulation and by removal of write permissions. */
    atomic_t gtable_dirty_version;

    /* Shadow hashtable */
    struct page_info **hash_table;
    bool_t hash_walking;  /* Some function is walking the hash table */

    /* Fast MMIO path heuristic */
    bool_t has_fast_mmio_entries;

    /* OOS */
    bool_t oos_active;
    bool_t oos_off;

    /* Has this domain ever used HVMOP_pagetable_dying? */
    bool_t pagetable_dying_op;
#endif
};

struct shadow_vcpu {
#ifdef CONFIG_SHADOW_PAGING
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
    /* PAE guests: per-vcpu cache of the top-level *guest* entries */
    l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
    /* Non-PAE guests: pointer to guest top-level pagetable */
    void *guest_vtable;
    /* Last MFN that we emulated a write to as unshadow heuristics. */
    unsigned long last_emulated_mfn_for_unshadow;
    /* MFN of the last shadow that we shot a writeable mapping in */
    unsigned long last_writeable_pte_smfn;
    /* Last frame number that we emulated a write to. */
    unsigned long last_emulated_frame;
    /* Last MFN that we emulated a write successfully */
    unsigned long last_emulated_mfn;

    /* Shadow out-of-sync: pages that this vcpu has let go out of sync */
    mfn_t oos[SHADOW_OOS_PAGES];
    mfn_t oos_snapshot[SHADOW_OOS_PAGES];
    struct oos_fixup {
        int next;
        mfn_t smfn[SHADOW_OOS_FIXUPS];
        unsigned long off[SHADOW_OOS_FIXUPS];
    } oos_fixup[SHADOW_OOS_PAGES];

    bool_t pagetable_dying;
#endif
};

/************************************************/
/*            hardware assisted paging          */
/************************************************/
struct hap_domain {
    struct page_list_head freelist;
    unsigned int      total_pages;  /* number of pages allocated */
    unsigned int      free_pages;   /* number of pages on freelists */
    unsigned int      p2m_pages;    /* number of pages allocated to p2m */
};

/************************************************/
/*       common paging data structure           */
/************************************************/
struct log_dirty_domain {
    /* log-dirty radix tree to record dirty pages */
    mfn_t          top;
    unsigned int   allocs;
    unsigned int   failed_allocs;

    /* log-dirty mode stats */
    unsigned int   fault_count;
    unsigned int   dirty_count;

    /* functions which are paging mode specific */
    const struct log_dirty_ops {
        int        (*enable  )(struct domain *d, bool log_global);
        int        (*disable )(struct domain *d);
        void       (*clean   )(struct domain *d);
    } *ops;
};
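
/*
 * The ops table above is how common paging code reaches the mode-specific
 * (shadow vs. HAP) log-dirty handlers.  A hedged sketch of the intended
 * call pattern (the real call sites live in the common paging code, not
 * here):
 *
 *     d->arch.paging.log_dirty.ops->enable(d, true);
 *     ...collect dirty-page information...
 *     d->arch.paging.log_dirty.ops->clean(d);
 *     d->arch.paging.log_dirty.ops->disable(d);
 */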

struct paging_domain {
    /* paging lock */
    mm_lock_t lock;

    /* flags to control paging operation */
    u32                     mode;
    /* Has that pool ever run out of memory? */
    bool_t                  p2m_alloc_failed;
    /* extension for shadow paging support */
    struct shadow_domain    shadow;
    /* extension for hardware-assisted paging */
    struct hap_domain       hap;
    /* log dirty support */
    struct log_dirty_domain log_dirty;

    /* preemption handling */
    struct {
        const struct domain *dom;
        unsigned int op;
        union {
            struct {
                unsigned long done:PADDR_BITS - PAGE_SHIFT;
                unsigned long i4:PAGETABLE_ORDER;
                unsigned long i3:PAGETABLE_ORDER;
            } log_dirty;
        };
    } preempt;

    /* alloc/free pages from the pool for paging-assistance structures
     * (used by p2m and log-dirty code for their tries) */
    struct page_info * (*alloc_page)(struct domain *d);
    void (*free_page)(struct domain *d, struct page_info *pg);
};
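
/*
 * Hedged sketch of how the two hooks above are meant to be consumed by the
 * p2m and log-dirty code (the surrounding caller is illustrative, not
 * defined here):
 *
 *     struct page_info *pg = d->arch.paging.alloc_page(d);
 *
 *     if ( pg == NULL )
 *         return -ENOMEM;
 *     ...use pg for a paging-assistance structure...
 *     d->arch.paging.free_page(d, pg);
 */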

struct paging_vcpu {
    /* Pointers to mode-specific entry points. */
    const struct paging_mode *mode;
    /* Nested Virtualization: paging mode of nested guest */
    const struct paging_mode *nestedmode;
    /* HVM guest: last emulate was to a pagetable */
    unsigned int last_write_was_pt:1;
    /* HVM guest: last write emulation succeeds */
    unsigned int last_write_emul_ok:1;
    /* Translated guest: virtual TLB */
    struct shadow_vtlb *vtlb;
    spinlock_t          vtlb_lock;

    /* paging support extension */
    struct shadow_vcpu shadow;
};

#define MAX_NESTEDP2M 10

#define MAX_ALTP2M      10 /* arbitrary */
#define INVALID_ALTP2M  0xffff
#define MAX_EPTP        (PAGE_SIZE / sizeof(uint64_t))
struct p2m_domain;
struct time_scale {
    int shift;
    u32 mul_frac;
};

struct pv_domain
{
    l1_pgentry_t **gdt_ldt_l1tab;

    atomic_t nr_l4_pages;

    /* map_domain_page() mapping cache. */
    struct mapcache_domain mapcache;

    struct cpuidmasks *cpuidmasks;
};

struct monitor_write_data {
    struct {
        unsigned int msr : 1;
        unsigned int cr0 : 1;
        unsigned int cr3 : 1;
        unsigned int cr4 : 1;
    } do_write;

    uint32_t msr;
    uint64_t value;
    uint64_t cr0;
    uint64_t cr3;
    uint64_t cr4;
};
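
/*
 * Illustrative sketch only: each do_write bit marks which of the deferred
 * values above still has to be committed to the vCPU, along the lines of
 *
 *     if ( w->do_write.cr3 )
 *     {
 *         ...apply w->cr3 to the vcpu...
 *         w->do_write.cr3 = 0;
 *     }
 *
 * where 'w' is the vCPU's struct monitor_write_data.
 */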

struct arch_domain
{
    struct page_info *perdomain_l3_pg;

    unsigned int hv_compat_vstart;

    /* Maximum physical-address bitwidth supported by this guest. */
    unsigned int physaddr_bitsize;

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;
    uint32_t pci_cf8;
    uint8_t cmos_idx;

    bool_t s3_integrity;

    struct list_head pdev_list;

    union {
        struct pv_domain pv_domain;
        struct hvm_domain hvm_domain;
    };

    struct paging_domain paging;
    struct p2m_domain *p2m;
    /* To enforce lock ordering in the PoD code with respect to the
     * page_alloc lock */
    int page_alloc_unlock_level;

    /* Continuable domain_relinquish_resources(). */
    enum {
        RELMEM_not_started,
        RELMEM_shared,
        RELMEM_xen,
        RELMEM_l4,
        RELMEM_l3,
        RELMEM_l2,
        RELMEM_done,
    } relmem;
    struct page_list_head relmem_list;

    const struct arch_csw {
        void (*from)(struct vcpu *);
        void (*to)(struct vcpu *);
        void (*tail)(struct vcpu *);
    } *ctxt_switch;

    /* nestedhvm: translate l2 guest physical to host physical */
    struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
    mm_lock_t nested_p2m_lock;

    /* altp2m: allow multiple copies of host p2m */
    bool_t altp2m_active;
    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
    mm_lock_t altp2m_list_lock;
    uint64_t *altp2m_eptp;

    /* NB. protected by d->event_lock and by irq_desc[irq].lock */
    struct radix_tree_root irq_pirq;

    /* Is a 32-bit PV (non-HVM) guest? */
    bool_t is_32bit_pv;
    /* Is shared-info page in 32-bit format? */
    bool_t has_32bit_shinfo;

    /* Domain cannot handle spurious page faults? */
    bool_t suppress_spurious_page_faults;

    /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
    bool_t auto_unmask;

    /*
     * The width of the FIP/FDP register in the FPU that needs to be
     * saved/restored during a context switch.  This is needed because
     * the FPU can either: a) restore the 64-bit FIP/FDP and clear FCS
     * and FDS; or b) restore the 32-bit FIP/FDP (clearing the upper
     * 32-bits of FIP/FDP) and restore FCS/FDS.
     *
     * Which one is needed depends on the guest.
     *
     * This can be either: 8, 4 or 0.  0 means auto-detect the size
     * based on the width of FIP/FDP values that are written by the
     * guest.
     */
    uint8_t x87_fip_width;

    /* CPUID and MSR policy objects. */
    struct cpuid_policy *cpuid;
    struct msr_domain_policy *msr;

    struct PITState vpit;

    /* TSC management (emulation, pv, scaling, stats) */
    int tsc_mode;            /* see include/asm-x86/time.h */
    bool_t vtsc;             /* tsc is emulated (may change after migrate) */
    s_time_t vtsc_last;      /* previous TSC value (guarantee monotonicity) */
    spinlock_t vtsc_lock;
    uint64_t vtsc_offset;    /* adjustment for save/restore/migrate */
    uint32_t tsc_khz;        /* cached guest khz for certain emulated or
                                hardware TSC scaling cases */
    struct time_scale vtsc_to_ns; /* scaling for certain emulated or
                                     hardware TSC scaling cases */
    struct time_scale ns_to_vtsc; /* scaling for certain emulated or
                                     hardware TSC scaling cases */
    uint32_t incarnation;    /* incremented every restore or live migrate
                                (possibly other cases in the future) */
#if !defined(NDEBUG) || defined(CONFIG_PERF_COUNTERS)
    uint64_t vtsc_kerncount;
    uint64_t vtsc_usercount;
#endif

    /* Pseudophysical e820 map (XENMEM_memory_map).  */
    spinlock_t e820_lock;
    struct e820entry *e820;
    unsigned int nr_e820;

    /* RMID assigned to the domain for CMT */
    unsigned int psr_rmid;
    /* COS assigned to the domain for each socket */
    unsigned int *psr_cos_ids;

    /* Shared page for notifying that explicit PIRQ EOI is required. */
    unsigned long *pirq_eoi_map;
    unsigned long pirq_eoi_map_mfn;

    /* Arch-specific monitor options */
    struct {
        unsigned int write_ctrlreg_enabled                                 : 4;
        unsigned int write_ctrlreg_sync                                    : 4;
        unsigned int write_ctrlreg_onchangeonly                            : 4;
        unsigned int singlestep_enabled                                    : 1;
        unsigned int software_breakpoint_enabled                           : 1;
        unsigned int debug_exception_enabled                               : 1;
        unsigned int debug_exception_sync                                  : 1;
        unsigned int cpuid_enabled                                         : 1;
        unsigned int descriptor_access_enabled                             : 1;
        unsigned int guest_request_userspace_enabled                       : 1;
        unsigned int emul_unimplemented_enabled                            : 1;
        struct monitor_msr_bitmap *msr_bitmap;
        uint64_t write_ctrlreg_mask[4];
    } monitor;

    /* Mem_access emulation control */
    bool_t mem_access_emulate_each_rep;

    /* Emulated devices enabled bitmap. */
    uint32_t emulation_flags;
} __cacheline_aligned;

#define has_vlapic(d)      (!!((d)->arch.emulation_flags & XEN_X86_EMU_LAPIC))
#define has_vhpet(d)       (!!((d)->arch.emulation_flags & XEN_X86_EMU_HPET))
#define has_vpm(d)         (!!((d)->arch.emulation_flags & XEN_X86_EMU_PM))
#define has_vrtc(d)        (!!((d)->arch.emulation_flags & XEN_X86_EMU_RTC))
#define has_vioapic(d)     (!!((d)->arch.emulation_flags & XEN_X86_EMU_IOAPIC))
#define has_vpic(d)        (!!((d)->arch.emulation_flags & XEN_X86_EMU_PIC))
#define has_vvga(d)        (!!((d)->arch.emulation_flags & XEN_X86_EMU_VGA))
#define has_viommu(d)      (!!((d)->arch.emulation_flags & XEN_X86_EMU_IOMMU))
#define has_vpit(d)        (!!((d)->arch.emulation_flags & XEN_X86_EMU_PIT))
#define has_pirq(d)        (!!((d)->arch.emulation_flags & \
                            XEN_X86_EMU_USE_PIRQ))

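/*
 * These predicates are the intended way to test whether a given emulated
 * device is enabled for a domain, e.g. (illustrative):
 *
 *     if ( has_vlapic(d) )
 *         ...the domain has an emulated local APIC...
 */
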
#define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))

#define gdt_ldt_pt_idx(v) \
      ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
#define pv_gdt_ptes(v) \
    ((v)->domain->arch.pv_domain.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
     (((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)))
#define pv_ldt_ptes(v) (pv_gdt_ptes(v) + 16)
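/*
 * Reading the three macros above together: each vCPU owns a run of
 * (1 << GDT_LDT_VCPU_SHIFT) consecutive L1 entries in the per-domain
 * GDT/LDT mapping area.  gdt_ldt_pt_idx() picks which L1 table holds a
 * vCPU's run, pv_gdt_ptes() points at the first (GDT) entry of that run,
 * and the LDT entries start 16 slots further on.
 */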

struct pv_vcpu
{
    /* map_domain_page() mapping cache. */
    struct mapcache_vcpu mapcache;

    struct trap_info *trap_ctxt;

    unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE];
    unsigned long ldt_base;
    unsigned int gdt_ents, ldt_ents;

    unsigned long kernel_ss, kernel_sp;
    unsigned long ctrlreg[8];

    unsigned long event_callback_eip;
    unsigned long failsafe_callback_eip;
    union {
        unsigned long syscall_callback_eip;
        struct {
            unsigned int event_callback_cs;
            unsigned int failsafe_callback_cs;
        };
    };

    unsigned long syscall32_callback_eip;
    unsigned long sysenter_callback_eip;
    unsigned short syscall32_callback_cs;
    unsigned short sysenter_callback_cs;
    bool_t syscall32_disables_events;
    bool_t sysenter_disables_events;

    /* Segment base addresses. */
    unsigned long fs_base;
    unsigned long gs_base_kernel;
    unsigned long gs_base_user;

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;
    struct trap_bounce int80_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8) iobmp; /* Guest kernel vaddr of the bitmap. */
    unsigned int iobmp_limit; /* Number of ports represented in the bitmap. */
#define IOPL(val) MASK_INSR(val, X86_EFLAGS_IOPL)
    unsigned int iopl;        /* Current IOPL for this VCPU, shifted left by
                               * 12 to match the eflags register. */
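    /*
     * For illustration: X86_EFLAGS_IOPL covers EFLAGS bits 12-13, so
     * IOPL(3) evaluates to 0x3000, i.e. the stored value is already in the
     * position it occupies in a guest EFLAGS image.
     */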

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;
    spinlock_t shadow_ldt_lock;

    /* data breakpoint extension MSRs */
    uint32_t dr_mask[4];

    /* Deferred VA-based update state. */
    bool_t need_update_runstate_area;
    struct vcpu_time_info pending_system_time;
};

typedef enum __packed {
    SMAP_CHECK_HONOR_CPL_AC,    /* honor the guest's CPL and AC */
    SMAP_CHECK_ENABLED,         /* enable the check */
    SMAP_CHECK_DISABLED,        /* disable the check */
} smap_check_policy_t;

struct arch_vcpu
{
    /*
     * guest context (mirroring struct vcpu_guest_context) common
     * between pv and hvm guests
     */

    void              *fpu_ctxt;
    unsigned long      vgc_flags;
    struct cpu_user_regs user_regs;
    unsigned long      debugreg[8];

    /* other state */

    unsigned long      flags; /* TF_ */

    struct vpmu_struct vpmu;

    /* Virtual Machine Extensions */
    union {
        struct pv_vcpu pv_vcpu;
        struct hvm_vcpu hvm_vcpu;
    };

    pagetable_t guest_table_user;       /* (MFN) x86/64 user-space pagetable */
    pagetable_t guest_table;            /* (MFN) guest notion of cr3 */
    struct page_info *old_guest_table;  /* partially destructed pagetable */
    struct page_info *old_guest_ptpg;   /* containing page table of the */
                                        /* former, if any */
    /* guest_table holds a ref to the page, and also a type-count unless
     * shadow refcounts are in use */
    pagetable_t shadow_table[4];        /* (MFN) shadow(s) of guest */
    pagetable_t monitor_table;          /* (MFN) hypervisor PT (for HVM) */
    unsigned long cr3;                  /* (MA) value to install in HW CR3 */

    /*
     * The save area for Processor Extended States and the bitmask of the
     * XSAVE/XRSTOR features. They are used by: 1) when a vcpu (which has
     * dirtied FPU/SSE) is scheduled out we XSAVE the states here; 2) in
     * #NM handler, we XRSTOR the states we XSAVE-ed;
     */
    struct xsave_struct *xsave_area;
    uint64_t xcr0;
    /* Accumulated mask of eXtended features enabled via XSAVE/XRSTOR by Xen
     * itself, as we can never know whether the guest OS depends on content
     * preservation whenever the guest OS clears one feature flag (for
     * example, temporarily).
     * However, the processor should not be able to touch eXtended states
     * before they are explicitly enabled via xcr0.
     */
    uint64_t xcr0_accum;
    /* This variable determines whether nonlazy extended state has been used,
     * and thus should be saved/restored. */
    bool_t nonlazy_xstate_used;

    /*
     * The SMAP check policy when updating runstate_guest(v) and the
     * secondary system time.
     */
    smap_check_policy_t smap_check_policy;

    struct vmce vmce;

    struct paging_vcpu paging;

    uint32_t gdbsx_vcpu_event;

    /* A secondary copy of the vcpu time info. */
    XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;

    struct arch_vm_event *vm_event;

    struct msr_vcpu_policy *msr;

    struct {
        bool next_interrupt_enabled;
    } monitor;
};

struct guest_memory_policy
{
    smap_check_policy_t smap_policy;
    bool nested_guest_mode;
};

void update_guest_memory_policy(struct vcpu *v,
                                struct guest_memory_policy *policy);

/* Shorthands to improve code legibility. */
#define hvm_vmx         hvm_vcpu.u.vmx
#define hvm_svm         hvm_vcpu.u.svm

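/*
 * i.e. v->arch.hvm_vmx.<field> is shorthand for
 * v->arch.hvm_vcpu.u.vmx.<field>, and likewise for hvm_svm.
 */
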
bool update_runstate_area(struct vcpu *);
bool update_secondary_system_time(struct vcpu *,
                                  struct vcpu_time_info *);

void vcpu_show_execution_state(struct vcpu *);
void vcpu_show_registers(const struct vcpu *);

/* Clean up CR4 bits that are not under guest control. */
unsigned long pv_guest_cr4_fixup(const struct vcpu *, unsigned long guest_cr4);

/* Convert between guest-visible and real CR4 values. */
#define pv_guest_cr4_to_real_cr4(v)                         \
    (((v)->arch.pv_vcpu.ctrlreg[4]                          \
      | (mmu_cr4_features                                   \
         & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP |      \
            X86_CR4_SMAP | X86_CR4_OSXSAVE |                \
            X86_CR4_FSGSBASE))                              \
      | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0))         \
     & ~X86_CR4_DE)
#define real_cr4_to_pv_guest_cr4(c)                         \
    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD |      \
             X86_CR4_OSXSAVE | X86_CR4_SMEP |               \
             X86_CR4_FSGSBASE | X86_CR4_SMAP))
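/*
 * Hedged usage sketch: the value loaded into hardware CR4 on behalf of a PV
 * vCPU would be pv_guest_cr4_to_real_cr4(v), while
 * real_cr4_to_pv_guest_cr4(read_cr4()) strips the Xen-controlled bits back
 * out of a hardware CR4 value; read_cr4() is assumed to come from
 * asm/processor.h.
 */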

#define domain_max_vcpus(d) (is_hvm_domain(d) ? HVM_MAX_VCPUS : MAX_VIRT_CPUS)

static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
{
    return vmalloc(sizeof(struct vcpu_guest_context));
}

static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
{
    vfree(vgc);
}
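
/*
 * A minimal usage sketch for the pair above (error handling pared down to
 * the essentials; the surrounding caller is illustrative):
 *
 *     struct vcpu_guest_context *ctxt = alloc_vcpu_guest_context();
 *
 *     if ( ctxt == NULL )
 *         return -ENOMEM;
 *     ...copy the guest-supplied context into ctxt and consume it...
 *     free_vcpu_guest_context(ctxt);
 */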

struct vcpu_hvm_context;
int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx);

void pv_inject_event(const struct x86_event *event);

static inline void pv_inject_hw_exception(unsigned int vector, int errcode)
{
    const struct x86_event event = {
        .vector = vector,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
    };

    pv_inject_event(&event);
}

static inline void pv_inject_page_fault(int errcode, unsigned long cr2)
{
    const struct x86_event event = {
        .vector = TRAP_page_fault,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
        .cr2 = cr2,
    };

    pv_inject_event(&event);
}

static inline void pv_inject_sw_interrupt(unsigned int vector)
{
    const struct x86_event event = {
        .vector = vector,
        .type = X86_EVENTTYPE_SW_INTERRUPT,
        .error_code = X86_EVENT_NO_EC,
    };

    pv_inject_event(&event);
}
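
/*
 * Typical (illustrative) uses of the helpers above from PV emulation paths,
 * assuming TRAP_gp_fault from asm/processor.h alongside the TRAP_page_fault
 * already used above:
 *
 *     pv_inject_hw_exception(TRAP_gp_fault, 0);    inject #GP(0)
 *     pv_inject_page_fault(error_code, addr);      inject #PF for 'addr'
 */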

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */