#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/mm.h>
#include <xen/radix-tree.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/vpmu.h>
#include <asm/x86_emulate.h>
#include <public/vcpu.h>
#include <public/hvm/hvm_info_table.h>

#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)

/*
 * Set to true if either the global vector-type callback or per-vCPU
 * LAPIC vectors are used. Assume all vCPUs will use
 * HVMOP_set_evtchn_upcall_vector as long as the initial vCPU does.
 */
#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
        ((d)->arch.hvm.irq->callback_via_type == HVMIRQ_callback_vector || \
         (d)->vcpu[0]->arch.hvm.evtchn_upcall_vector))

#define VCPU_TRAP_NONE         0
#define VCPU_TRAP_NMI          1
#define VCPU_TRAP_MCE          2
#define VCPU_TRAP_LAST         VCPU_TRAP_MCE

#define nmi_state              async_exception_state(VCPU_TRAP_NMI)
#define mce_state              async_exception_state(VCPU_TRAP_MCE)

#define nmi_pending            nmi_state.pending
#define mce_pending            mce_state.pending

struct trap_bounce {
    uint32_t      error_code;
    uint8_t       flags; /* TBF_ */
    uint16_t      cs;
    unsigned long eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u32)~0U)
struct mapcache_vcpu {
    /* Shadow of mapcache_domain.epoch. */
    unsigned int shadow_epoch;

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash_entry {
        unsigned long mfn;
        uint32_t      idx;
        uint32_t      refcnt;
    } hash[MAPHASH_ENTRIES];
};
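
/*
 * Illustrative note (not upstream documentation): with MAPHASH_ENTRIES
 * being 8, MAPHASH_HASHFN() simply keeps the low three bits of the frame
 * number, e.g.
 *
 *     MAPHASH_HASHFN(0x1234) == (0x1234 & 7) == 4
 *
 * so a recently used mapping of pfn 0x1234 is cached in hash[4], with
 * MAPHASHENT_NOTINUSE marking an idx slot that currently holds no mapping.
 */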

struct mapcache_domain {
    /* The number of array entries, and a cursor into the array. */
    unsigned int entries;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch;
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long *inuse;
    unsigned long *garbage;
};

int mapcache_domain_init(struct domain *d);
int mapcache_vcpu_init(struct vcpu *v);
void mapcache_override_current(struct vcpu *v);

/* x86/64: toggle guest between kernel and user modes. */
void toggle_guest_mode(struct vcpu *v);
/* x86/64: toggle guest page tables between kernel and user modes. */
void toggle_guest_pt(struct vcpu *v);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
void init_hypercall_page(struct domain *d, void *ptr);
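
/*
 * Illustrative sketch only (not the actual call site): a caller would
 * typically map the guest frame chosen for the hypercall page into Xen's
 * address space before handing it to init_hypercall_page(), e.g.
 *
 *     void *p = __map_domain_page(pg);   <- assuming pg is a struct page_info
 *     init_hypercall_page(d, p);
 *     unmap_domain_page(p);
 *
 * since the function itself performs no validation of the mapping.
 */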

/************************************************/
/*           shadow paging extension            */
/************************************************/
struct shadow_domain {
#ifdef CONFIG_SHADOW_PAGING
    unsigned int opt_flags; /* runtime tunable optimizations on/off */
    struct page_list_head pinned_shadows;

    /* 1-to-1 map for use when HVM vcpus have paging disabled */
    pagetable_t unpaged_pagetable;

    /* Reflects guest page table dirty status; incremented by write
     * emulation and by write-permission removal. */
    atomic_t gtable_dirty_version;

    /* Shadow hashtable */
    struct page_info **hash_table;
    bool hash_walking; /* Some function is walking the hash table */

    /* Fast MMIO path heuristic */
    bool has_fast_mmio_entries;

#ifdef CONFIG_HVM
    /* OOS */
    bool oos_active;

    /* Has this domain ever used HVMOP_pagetable_dying? */
    bool pagetable_dying_op;
#endif

#ifdef CONFIG_PV
    /* PV L1 Terminal Fault mitigation. */
    struct tasklet pv_l1tf_tasklet;
#endif /* CONFIG_PV */
#endif
};

struct shadow_vcpu {
#ifdef CONFIG_SHADOW_PAGING
#ifdef CONFIG_HVM
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
    /* PAE guests: per-vcpu cache of the top-level *guest* entries */
    l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));

    /* shadow(s) of guest (MFN) */
    pagetable_t shadow_table[4];
#else
    /* shadow of guest (MFN) */
    pagetable_t shadow_table[1];
#endif

    /* Last MFN that we emulated a write to, as part of the unshadow heuristic. */
    unsigned long last_emulated_mfn_for_unshadow;
    /* MFN of the last shadow in which we shot down a writeable mapping. */
    unsigned long last_writeable_pte_smfn;
#ifdef CONFIG_HVM
    /* Last frame number that we emulated a write to. */
    unsigned long last_emulated_frame;
    /* Last MFN to which we successfully emulated a write. */
    unsigned long last_emulated_mfn;

    /* Shadow out-of-sync: pages that this vcpu has let go out of sync */
    mfn_t oos[SHADOW_OOS_PAGES];
    mfn_t oos_snapshot[SHADOW_OOS_PAGES];
    struct oos_fixup {
        int next;
        mfn_t smfn[SHADOW_OOS_FIXUPS];
        unsigned long off[SHADOW_OOS_FIXUPS];
    } oos_fixup[SHADOW_OOS_PAGES];

    bool pagetable_dying;
#endif
#endif
};

/************************************************/
/*          hardware assisted paging            */
/************************************************/
struct hap_domain {
};

/************************************************/
/*        common paging data structure          */
/************************************************/
struct log_dirty_domain {
    /* log-dirty radix tree to record dirty pages */
    mfn_t          top;
    unsigned int   allocs;
    unsigned int   failed_allocs;

    /* log-dirty mode stats */
    unsigned long  fault_count;
    unsigned long  dirty_count;

    /* functions which are paging mode specific */
    const struct log_dirty_ops {
        int  (*enable )(struct domain *d);
        int  (*disable)(struct domain *d);
        void (*clean  )(struct domain *d);
    } *ops;
};

struct paging_domain {
    /* paging lock */
    mm_lock_t lock;

    /* flags to control paging operation */
    u32 mode;
    /* Has that pool ever run out of memory? */
    bool p2m_alloc_failed;
    /* extension for shadow paging support */
    struct shadow_domain shadow;
    /* extension for hardware-assisted paging */
    struct hap_domain hap;

    /* Memory allocation (common to shadow and HAP) */
    struct page_list_head freelist;
    unsigned int total_pages; /* number of pages allocated */
    unsigned int free_pages;  /* number of pages on freelists */
    unsigned int p2m_pages;   /* number of pages allocated to p2m */

    /* log dirty support */
    struct log_dirty_domain log_dirty;

    /* preemption handling */
    struct {
        const struct domain *dom;
        unsigned int op;
        union {
            struct {
                unsigned long done:PADDR_BITS - PAGE_SHIFT;
                unsigned long i4:PAGETABLE_ORDER;
                unsigned long i3:PAGETABLE_ORDER;
            } log_dirty;
        };
    } preempt;
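
    /*
     * Illustrative sizing note (assuming PADDR_BITS is 52, PAGE_SHIFT 12 and
     * PAGETABLE_ORDER 9, as on current x86 builds): the 'done' cursor is
     * 52 - 12 = 40 bits wide, enough to count every 4KiB frame below the
     * paddr limit, and 40 + 9 + 9 = 58 bits in total still fit in a single
     * unsigned long, so the preemption state costs no extra space.
     */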

    /* alloc/free pages from the pool for paging-assistance structures
     * (used by p2m and log-dirty code for their tries) */
    struct page_info * (*alloc_page)(struct domain *d);
    void (*free_page)(struct domain *d, struct page_info *pg);

    void (*update_paging_modes)(struct vcpu *v);

#ifdef CONFIG_HVM
    /* Flush the TLBs of the selected vCPUs; NULL means all vCPUs. */
    bool __must_check (*flush_tlb)(const unsigned long *vcpu_bitmap);
#endif
};

struct paging_vcpu {
    /* Pointers to mode-specific entry points. */
    const struct paging_mode *mode;
    /* Nested Virtualization: paging mode of nested guest */
    const struct paging_mode *nestedmode;
#ifdef CONFIG_HVM
    /* HVM guest: last emulated write was to a pagetable */
    unsigned int last_write_was_pt:1;
    /* HVM guest: last write emulation succeeded */
    unsigned int last_write_emul_ok:1;
#endif
    /* Translated guest: virtual TLB */
    struct shadow_vtlb *vtlb;
    spinlock_t          vtlb_lock;

    /* paging support extension */
    struct shadow_vcpu shadow;
};

#define MAX_NESTEDP2M 10

#define MAX_ALTP2M     10 /* arbitrary */
#define INVALID_ALTP2M 0xffff
#define MAX_EPTP       (PAGE_SIZE / sizeof(uint64_t))
struct p2m_domain;
struct time_scale {
    int shift;
    u32 mul_frac;
};

struct pv_domain
{
    l1_pgentry_t **gdt_ldt_l1tab;

    atomic_t nr_l4_pages;

    /* Is a 32-bit PV guest? */
    bool is_32bit;
    /* XPTI active? */
    bool xpti;
    /* Use PCID feature? */
    bool pcid;
    /* Mitigate L1TF with shadow/crashing? */
    bool check_l1tf;

    /* map_domain_page() mapping cache. */
    struct mapcache_domain mapcache;

    struct cpuidmasks *cpuidmasks;
};

struct monitor_write_data {
    struct {
        unsigned int msr : 1;
        unsigned int cr0 : 1;
        unsigned int cr3 : 1;
        unsigned int cr4 : 1;
    } do_write;

    bool cr3_noflush;

    uint32_t msr;
    uint64_t value;
    uint64_t cr0;
    uint64_t cr3;
    uint64_t cr4;
};

struct arch_domain
{
    struct page_info *perdomain_l3_pg;

#ifdef CONFIG_PV32
    unsigned int hv_compat_vstart;
#endif

    /* Maximum physical-address bitwidth supported by this guest. */
    unsigned int physaddr_bitsize;

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;
    uint32_t pci_cf8;
    uint8_t cmos_idx;

    uint8_t scf; /* See SCF_DOM_MASK */

    union {
        struct pv_domain pv;
        struct hvm_domain hvm;
    };

    struct paging_domain paging;
    struct p2m_domain *p2m;
    /* To enforce lock ordering in the PoD code w.r.t. the page_alloc lock. */
    int page_alloc_unlock_level;

    /* Continuable domain_relinquish_resources(). */
    unsigned int rel_priv;
    struct page_list_head relmem_list;

    const struct arch_csw {
        void (*from)(struct vcpu *v);
        void (*to)(struct vcpu *v);
        void noreturn (*tail)(void);
    } *ctxt_switch;

#ifdef CONFIG_HVM
    /* nestedhvm: translate l2 guest physical to host physical */
    struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
    mm_lock_t nested_p2m_lock;

    /* altp2m: allow multiple copies of host p2m */
    bool altp2m_active;
    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
    mm_lock_t altp2m_list_lock;
    uint64_t *altp2m_eptp;
    uint64_t *altp2m_visible_eptp;
#endif

    /* NB. protected by d->event_lock and by irq_desc[irq].lock */
    struct radix_tree_root irq_pirq;

    /* Is shared-info page in 32-bit format? */
    bool has_32bit_shinfo;

    /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
    bool auto_unmask;

    /*
     * The width of the FIP/FDP register in the FPU that needs to be
     * saved/restored during a context switch. This is needed because
     * the FPU can either: a) restore the 64-bit FIP/FDP and clear FCS
     * and FDS; or b) restore the 32-bit FIP/FDP (clearing the upper
     * 32-bits of FIP/FDP) and restore FCS/FDS.
     *
     * Which one is needed depends on the guest.
     *
     * This can be either: 8, 4 or 0. 0 means auto-detect the size
     * based on the width of FIP/FDP values that are written by the
     * guest.
     */
    uint8_t x87_fip_width;

    /*
     * The domain's CPU Policy. "cpu_policy" is considered the canonical
     * pointer, but the "cpuid" and "msr" aliases exist so the most
     * appropriate one can be used for local code clarity.
     */
    union {
        struct cpu_policy *cpu_policy;
        struct cpu_policy *cpuid;
        struct cpu_policy *msr;
    };

    struct PITState vpit;

    /* TSC management (emulation, pv, scaling, stats) */
    int tsc_mode;         /* see asm/time.h */
    bool vtsc;            /* tsc is emulated (may change after migrate) */
    s_time_t vtsc_last;   /* previous TSC value (guarantee monotonicity) */
    uint64_t vtsc_offset; /* adjustment for save/restore/migrate */
    uint32_t tsc_khz;     /* cached guest khz for certain emulated or
                             hardware TSC scaling cases */
    struct time_scale vtsc_to_ns; /* scaling for certain emulated or
                                     hardware TSC scaling cases */
    struct time_scale ns_to_vtsc; /* scaling for certain emulated or
                                     hardware TSC scaling cases */
    uint32_t incarnation; /* incremented every restore or live migrate
                             (possibly other cases in the future) */

    /* Pseudophysical e820 map (XENMEM_memory_map). */
    spinlock_t e820_lock;
    struct e820entry *e820;
    unsigned int nr_e820;

    /* RMID assigned to the domain for CMT */
    unsigned int psr_rmid;
    /* COS assigned to the domain for each socket */
    unsigned int *psr_cos_ids;

    /* Shared page for notifying that explicit PIRQ EOI is required. */
    unsigned long *pirq_eoi_map;
    unsigned long pirq_eoi_map_mfn;

    /* Arch-specific monitor options */
    struct {
        unsigned int write_ctrlreg_enabled       : 4;
        unsigned int write_ctrlreg_sync          : 4;
        unsigned int write_ctrlreg_onchangeonly  : 4;
        unsigned int singlestep_enabled          : 1;
        unsigned int software_breakpoint_enabled : 1;
        unsigned int debug_exception_enabled     : 1;
        unsigned int debug_exception_sync        : 1;
        unsigned int cpuid_enabled               : 1;
        unsigned int descriptor_access_enabled   : 1;
        unsigned int guest_request_userspace_enabled : 1;
        unsigned int emul_unimplemented_enabled  : 1;
        unsigned int io_enabled                  : 1;
        /*
         * By default all events are sent.
         * This is used to filter out pagefaults.
         */
        unsigned int inguest_pagefault_disabled  : 1;
        unsigned int control_register_values     : 1;
        unsigned int vmexit_enabled              : 1;
        unsigned int vmexit_sync                 : 1;
        struct monitor_msr_bitmap *msr_bitmap;
        uint64_t write_ctrlreg_mask[4];
    } monitor;

    /* Mem_access emulation control */
    bool mem_access_emulate_each_rep;

    /* Don't unconditionally inject #GP for unhandled MSRs. */
    bool msr_relaxed;

    /* Emulated devices enabled bitmap. */
    uint32_t emulation_flags;
} __cacheline_aligned;

#ifdef CONFIG_HVM
#define X86_EMU_LAPIC    XEN_X86_EMU_LAPIC
#define X86_EMU_HPET     XEN_X86_EMU_HPET
#define X86_EMU_PM       XEN_X86_EMU_PM
#define X86_EMU_RTC      XEN_X86_EMU_RTC
#define X86_EMU_IOAPIC   XEN_X86_EMU_IOAPIC
#define X86_EMU_PIC      XEN_X86_EMU_PIC
#define X86_EMU_VGA      XEN_X86_EMU_VGA
#define X86_EMU_IOMMU    XEN_X86_EMU_IOMMU
#define X86_EMU_USE_PIRQ XEN_X86_EMU_USE_PIRQ
#define X86_EMU_VPCI     XEN_X86_EMU_VPCI
#else
#define X86_EMU_LAPIC    0
#define X86_EMU_HPET     0
#define X86_EMU_PM       0
#define X86_EMU_RTC      0
#define X86_EMU_IOAPIC   0
#define X86_EMU_PIC      0
#define X86_EMU_VGA      0
#define X86_EMU_IOMMU    0
#define X86_EMU_USE_PIRQ 0
#define X86_EMU_VPCI     0
#endif

#define X86_EMU_PIT      XEN_X86_EMU_PIT

/* This must match XEN_X86_EMU_ALL in xen.h */
#define X86_EMU_ALL      (X86_EMU_LAPIC | X86_EMU_HPET |   \
                          X86_EMU_PM | X86_EMU_RTC |       \
                          X86_EMU_IOAPIC | X86_EMU_PIC |   \
                          X86_EMU_VGA | X86_EMU_IOMMU |    \
                          X86_EMU_PIT | X86_EMU_USE_PIRQ | \
                          X86_EMU_VPCI)

#define has_vlapic(d)  (!!((d)->arch.emulation_flags & X86_EMU_LAPIC))
#define has_vhpet(d)   (!!((d)->arch.emulation_flags & X86_EMU_HPET))
#define has_vpm(d)     (!!((d)->arch.emulation_flags & X86_EMU_PM))
#define has_vrtc(d)    (!!((d)->arch.emulation_flags & X86_EMU_RTC))
#define has_vioapic(d) (!!((d)->arch.emulation_flags & X86_EMU_IOAPIC))
#define has_vpic(d)    (!!((d)->arch.emulation_flags & X86_EMU_PIC))
#define has_vvga(d)    (!!((d)->arch.emulation_flags & X86_EMU_VGA))
#define has_viommu(d)  (!!((d)->arch.emulation_flags & X86_EMU_IOMMU))
#define has_vpit(d)    (!!((d)->arch.emulation_flags & X86_EMU_PIT))
#define has_pirq(d)    (!!((d)->arch.emulation_flags & X86_EMU_USE_PIRQ))
#define has_vpci(d)    (!!((d)->arch.emulation_flags & X86_EMU_VPCI))

#define gdt_ldt_pt_idx(v) \
    ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
#define pv_gdt_ptes(v) \
    ((v)->domain->arch.pv.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
     (((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)))
#define pv_ldt_ptes(v) (pv_gdt_ptes(v) + 16)
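
/*
 * Illustrative arithmetic (assuming PAGETABLE_ORDER is 9 and
 * GDT_LDT_VCPU_SHIFT is 5): each vCPU owns 2^5 = 32 consecutive L1 slots,
 * 16 for the GDT followed by 16 for the LDT (hence the "+ 16" above), and
 * each L1 page covers 2^(9-5) = 16 vCPUs, so e.g. vCPU 17 uses
 * gdt_ldt_l1tab[1] starting at slot (17 << 5) & 511 == 32.
 */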

struct pv_vcpu
{
    /* map_domain_page() mapping cache. */
    struct mapcache_vcpu mapcache;

    unsigned int vgc_flags;

    struct trap_info *trap_ctxt;

    unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE];
    unsigned long ldt_base;
    unsigned int gdt_ents, ldt_ents;

    unsigned long kernel_ss, kernel_sp;
    unsigned long ctrlreg[8];

    unsigned long event_callback_eip;
    unsigned long failsafe_callback_eip;
    union {
        unsigned long syscall_callback_eip;
        struct {
            unsigned int event_callback_cs;
            unsigned int failsafe_callback_cs;
        };
    };

    unsigned long syscall32_callback_eip;
    unsigned long sysenter_callback_eip;
    unsigned short syscall32_callback_cs;
    unsigned short sysenter_callback_cs;
    bool syscall32_disables_events;
    bool sysenter_disables_events;

    uint16_t ds, es, fs, gs;

    /*
     * 64bit segment bases.
     *
     * FS and the active GS are always stale when the vCPU is in context, as
     * the guest can change them behind Xen's back with MOV SREG, or
     * WR{FS,GS}BASE on capable hardware.
     *
     * The inactive GS base is never stale, as guests can't use SWAPGS to
     * access it - all modification is performed by Xen either directly
     * (hypercall, #GP emulation), or indirectly (toggle_guest_mode()).
     *
     * The vCPU context switch path is optimised based on this fact, so any
     * path updating or swapping the inactive base must update the cached
     * value as well.
     *
     * Which GS base is active and inactive depends on whether the vCPU is in
     * user or kernel context.
     */
    unsigned long fs_base;
    unsigned long gs_base_kernel;
    unsigned long gs_base_user;
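
    /*
     * Illustrative restatement of the comment above (not additional upstream
     * documentation): while the vCPU runs in kernel mode, gs_base_kernel is
     * the active (and hence possibly stale) base and gs_base_user the
     * inactive, trustworthy one; when the guest transitions between kernel
     * and user context via toggle_guest_mode(), the two roles swap.
     */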

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8) iobmp; /* Guest kernel vaddr of the bitmap. */
    unsigned int iobmp_nr; /* Number of ports represented in the bitmap. */
#define IOPL(val) MASK_INSR(val, X86_EFLAGS_IOPL)
    unsigned int iopl;     /* Current IOPL for this VCPU, shifted left by
                            * 12 to match the eflags register. */

    /*
     * %dr7 bits the guest has set, but aren't loaded into hardware, and are
     * completely emulated.
     */
    uint32_t dr7_emul;

    /* Deferred VA-based update state. */
    bool need_update_runstate_area;
    struct vcpu_time_info pending_system_time;
};

struct arch_vcpu
{
    struct cpu_user_regs user_regs;

    /* Debug registers. */
    unsigned long dr[4];
    unsigned int dr6;
    unsigned int dr7;

    /* other state */

    unsigned long flags; /* TF_ */

    struct vpmu_struct vpmu;

    struct {
        bool pending;
        uint8_t old_mask;
    } async_exception_state[VCPU_TRAP_LAST];
#define async_exception_state(t) async_exception_state[(t)-1]
    uint8_t async_exception_mask;
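
    /*
     * Illustrative expansion (derived from the macros above and the
     * nmi_state/mce_state helpers near the top of this header):
     * v->arch.nmi_pending ends up as
     * v->arch.async_exception_state[VCPU_TRAP_NMI - 1].pending, i.e. the
     * VCPU_TRAP_* values are 1-based indices into this 2-entry array.
     */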

    /* Virtual Machine Extensions */
    union {
        struct pv_vcpu pv;
        struct hvm_vcpu hvm;
    };

    /*
     * guest_table{,_user} hold a ref to the page, and also a type-count
     * unless shadow refcounts are in use
     */
    pagetable_t guest_table_user;      /* (MFN) x86/64 user-space pagetable */
    pagetable_t guest_table;           /* (MFN) guest notion of cr3 */
    struct page_info *old_guest_table; /* partially destructed pagetable */
    struct page_info *old_guest_ptpg;  /* containing page table of the */
                                       /* former, if any */
    bool old_guest_table_partial;      /* Are we dropping a type ref, or just
                                        * finishing up a partial de-validation? */

    unsigned long cr3;                 /* (MA) value to install in HW CR3 */

    /*
     * The save area for Processor Extended States and the bitmask of the
     * XSAVE/XRSTOR features. They are used as follows: 1) when a vcpu (which
     * has dirtied FPU/SSE state) is scheduled out, we XSAVE its state here;
     * 2) in the #NM handler, we XRSTOR the state we previously XSAVE-ed.
     */
    struct xsave_struct *xsave_area;
    uint64_t xcr0;
    /* Accumulated mask of eXtended features used for Xen's own XSAVE/XRSTOR
     * operations, as we can never know whether the guest OS depends on
     * content preservation when it clears a feature flag (for example,
     * temporarily).
     * However, the processor cannot touch eXtended states before the guest
     * explicitly enables them via XCR0.
     */
    uint64_t xcr0_accum;
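
    /*
     * Illustrative example (derived from the comment above, not extra
     * upstream documentation): if the guest once set the AVX bit in XCR0 and
     * later cleared it, the bit stays set in xcr0_accum even though it is
     * clear in xcr0, so Xen keeps saving/restoring the YMM state and any
     * content the guest may rely on survives; xcr0 is therefore always a
     * subset of xcr0_accum.
     */
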
    /* This variable determines whether nonlazy extended state has been used,
     * and thus should be saved/restored. */
    bool nonlazy_xstate_used;

    /* Restore all FPU state (lazy and non-lazy state) on context switch? */
    bool fully_eager_fpu;

    struct vmce vmce;

    struct paging_vcpu paging;

    uint32_t gdbsx_vcpu_event;

    /* A secondary copy of the vcpu time info. */
    XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
    struct guest_area time_guest_area;

    struct arch_vm_event *vm_event;

    struct vcpu_msrs *msrs;

    struct {
        bool next_interrupt_enabled;
    } monitor;
};

struct guest_memory_policy
{
    bool nested_guest_mode;
};

void update_guest_memory_policy(struct vcpu *v,
                                struct guest_memory_policy *policy);

void domain_cpu_policy_changed(struct domain *d);

bool update_secondary_system_time(struct vcpu *v,
                                  struct vcpu_time_info *u);
void force_update_secondary_system_time(struct vcpu *v,
                                        struct vcpu_time_info *map);

void vcpu_show_registers(struct vcpu *v);

static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
{
    return vmalloc(sizeof(struct vcpu_guest_context));
}

static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
{
    vfree(vgc);
}

void arch_vcpu_regs_init(struct vcpu *v);

struct vcpu_hvm_context;
int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx);

#ifdef CONFIG_PV
void pv_inject_event(const struct x86_event *event);
#else
static inline void pv_inject_event(const struct x86_event *event)
{
    ASSERT_UNREACHABLE();
}
#endif
static inline void pv_inject_hw_exception(unsigned int vector, int errcode)
{
    const struct x86_event event = {
        .vector = vector,
        .type = X86_ET_HW_EXC,
        .error_code = errcode,
    };

    pv_inject_event(&event);
}

static inline void pv_inject_DB(unsigned long pending_dbg)
{
    struct x86_event event = {
        .vector = X86_EXC_DB,
        .type = X86_ET_HW_EXC,
        .error_code = X86_EVENT_NO_EC,
    };

    event.pending_dbg = pending_dbg;

    pv_inject_event(&event);
}

static inline void pv_inject_page_fault(int errcode, unsigned long cr2)
{
    struct x86_event event = {
        .vector = X86_EXC_PF,
        .type = X86_ET_HW_EXC,
        .error_code = errcode,
    };

    event.cr2 = cr2;

    pv_inject_event(&event);
}

static inline void pv_inject_sw_interrupt(unsigned int vector)
{
    const struct x86_event event = {
        .vector = vector,
        .type = X86_ET_SW_INT,
        .error_code = X86_EVENT_NO_EC,
    };

    pv_inject_event(&event);
}
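
/*
 * Illustrative usage only (the real call sites live in the PV trap and
 * emulation code, not in this header): reflecting a fault into the guest
 * kernel is a one-liner with the helpers above, e.g.
 *
 *     pv_inject_hw_exception(X86_EXC_GP, 0);
 *     pv_inject_page_fault(error_code, cr2);
 *
 * with pv_inject_event() filling in the vCPU's trap_bounce so the event is
 * delivered on the next return to guest context.
 */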

#define PV32_VM_ASSIST_MASK ((1UL << VMASST_TYPE_4gb_segments)        | \
                             (1UL << VMASST_TYPE_4gb_segments_notify) | \
                             (1UL << VMASST_TYPE_writable_pagetables) | \
                             (1UL << VMASST_TYPE_pae_extended_cr3)    | \
                             (1UL << VMASST_TYPE_architectural_iopl)  | \
                             (1UL << VMASST_TYPE_runstate_update_flag))
/*
 * Several of the assists in PV32_VM_ASSIST_MASK aren't really applicable to
 * 64-bit guests, but we can't suddenly make such requests start failing.
 */
#define PV64_VM_ASSIST_MASK (PV32_VM_ASSIST_MASK | \
                             (1UL << VMASST_TYPE_m2p_strict))
#define HVM_VM_ASSIST_MASK  (1UL << VMASST_TYPE_runstate_update_flag)

#define arch_vm_assist_valid_mask(d) \
    (is_hvm_domain(d) ? HVM_VM_ASSIST_MASK \
                      : is_pv_32bit_domain(d) ? PV32_VM_ASSIST_MASK \
                                              : PV64_VM_ASSIST_MASK)
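
/*
 * Illustrative consequence of the masks above (not extra upstream
 * documentation): VMASST_TYPE_m2p_strict only appears in PV64_VM_ASSIST_MASK,
 * so a 32-bit PV or HVM guest asking for that assist falls outside
 * arch_vm_assist_valid_mask(d) and the request is rejected, while e.g.
 * VMASST_TYPE_runstate_update_flag is accepted for every guest type.
 */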

struct arch_vcpu_io {
};

/* Maxphysaddr supportable by the paging infrastructure. */
unsigned int domain_max_paddr_bits(const struct domain *d);

#define arch_init_idle_domain arch_init_idle_domain
void arch_init_idle_domain(struct domain *d);

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */