/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * vmcs.h: VMCS related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 */
#ifndef __ASM_X86_HVM_VMX_VMCS_H__
#define __ASM_X86_HVM_VMX_VMCS_H__

#include <xen/mm.h>

extern void vmcs_dump_vcpu(struct vcpu *v);
extern int vmx_vmcs_init(void);
int cf_check vmx_cpu_up_prepare(unsigned int cpu);
void cf_check vmx_cpu_dead(unsigned int cpu);
int cf_check vmx_cpu_up(void);
void cf_check vmx_cpu_down(void);

struct vmcs_struct {
    uint32_t revision_id;
    unsigned char data[0]; /* vmcs size is read from MSR */
};

struct vmx_msr_entry {
    u32 index;
    u32 mbz;
    u64 data;
};

#define EPT_DEFAULT_MT      X86_MT_WB

struct ept_data {
    union {
        struct {
            uint64_t mt:3,   /* Memory Type. */
                     wl:3,   /* Walk length -1. */
                     ad:1,   /* Enable EPT A/D bits. */
                     :5,     /* rsvd. */
                     mfn:52;
        };
        u64 eptp;
    };
    /* Set of PCPUs needing an INVEPT before a VMENTER. */
    cpumask_var_t invalidate;
};
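
/*
 * Illustrative sketch (not part of the interface): a 4-level EPT with A/D
 * bits enabled, write-back memory type and a root table at machine frame
 * 'mfn' could be encoded as
 *
 *     ept->mt  = EPT_DEFAULT_MT;
 *     ept->wl  = 3;               // 4 levels, minus one
 *     ept->ad  = 1;
 *     ept->mfn = mfn_x(mfn);
 *
 * after which ept->eptp holds the value suitable for the EPT_POINTER VMCS
 * field.  'ept' and 'mfn' are hypothetical locals; the real setup lives in
 * the p2m/EPT code.
 */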

#define _VMX_DOMAIN_PML_ENABLED    0
#define VMX_DOMAIN_PML_ENABLED     (1ul << _VMX_DOMAIN_PML_ENABLED)
struct vmx_domain {
    mfn_t apic_access_mfn;
    /* VMX_DOMAIN_* */
    unsigned int status;

    /*
     * Domain permitted to use Executable EPT Superpages?  Cleared to work
     * around CVE-2018-12207 as appropriate.
     */
    bool exec_sp;
};

/*
 * Layout of the MSR bitmap, as interpreted by hardware:
 *  - *_low  covers MSRs 0 to 0x1fff
 *  - *_high covers MSRs 0xc0000000 to 0xc0001fff
 */
struct vmx_msr_bitmap {
    unsigned long read_low  [0x2000 / BITS_PER_LONG];
    unsigned long read_high [0x2000 / BITS_PER_LONG];
    unsigned long write_low [0x2000 / BITS_PER_LONG];
    unsigned long write_high[0x2000 / BITS_PER_LONG];
};
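
/*
 * Illustrative note (an assumption drawn from the layout above): an MSR in
 * the low range is controlled by bit 'msr' of read_low/write_low, while an
 * MSR in the high range is controlled by bit 'msr - 0xc0000000' of
 * read_high/write_high; a set bit means the access is intercepted.  The
 * real accessors are vmx_set_msr_intercept() and friends, declared later
 * in this header.
 */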

struct pi_desc {
    DECLARE_BITMAP(pir, X86_IDT_VECTORS);
    union {
        struct {
            u16     on     : 1,  /* bit 256 - Outstanding Notification */
                    sn     : 1,  /* bit 257 - Suppress Notification */
                    rsvd_1 : 14; /* bit 271:258 - Reserved */
            u8      nv;          /* bit 279:272 - Notification Vector */
            u8      rsvd_2;      /* bit 287:280 - Reserved */
            u32     ndst;        /* bit 319:288 - Notification Destination */
        };
        u64 control;
    };
    u32 rsvd[6];
} __attribute__ ((aligned (64)));
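
/*
 * Illustrative posting sequence (a sketch only; the real helpers live
 * elsewhere in the VMX code): to post an interrupt, set the vector's bit in
 * pir[], set 'on' to mark the notification outstanding, then send the
 * notification vector 'nv' as a physical IPI to the destination in 'ndst'.
 */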

#define NR_PML_ENTRIES   512

struct pi_blocking_vcpu {
    struct list_head     list;
    spinlock_t           *lock;
};

struct vmx_vcpu {
    /* Physical address of VMCS. */
    paddr_t              vmcs_pa;
    /* VMCS shadow machine address. */
    paddr_t              vmcs_shadow_maddr;

    /* Protects remote usage of VMCS (VMPTRLD/VMCLEAR). */
    spinlock_t           vmcs_lock;

    /*
     * Activation and launch status of this VMCS.
     *  - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR.
     *  - Launched on active CPU by VMLAUNCH when current VMCS.
     */
    struct list_head     active_list;
    int                  active_cpu;
    int                  launched;

    /* Cache of cpu execution control. */
    u32                  exec_control;
    u32                  secondary_exec_control;
    uint64_t             tertiary_exec_control;
    u32                  exception_bitmap;

    uint64_t             shadow_gs;
    uint64_t             star;
    uint64_t             lstar;
    uint64_t             cstar;
    uint64_t             sfmask;

    struct vmx_msr_bitmap *msr_bitmap;

    /*
     * Most accesses to the MSR host/guest load/save lists are in current
     * context.  However, the data can be modified by toolstack/migration
     * actions.  Remote access is only permitted for paused vcpus, and is
     * protected under the domctl lock.
     */
    struct vmx_msr_entry *msr_area;
    struct vmx_msr_entry *host_msr_area;
    unsigned int         msr_load_count;
    unsigned int         msr_save_count;
    unsigned int         host_msr_count;

    unsigned long        eoi_exitmap_changed;
    DECLARE_BITMAP(eoi_exit_bitmap, X86_IDT_VECTORS);
    struct pi_desc       pi_desc;

    unsigned long        host_cr0;

    /* Do we need to tolerate a spurious EPT_MISCONFIG VM exit? */
    bool                 ept_spurious_misconfig;

    /* Processor Trace configured and enabled for the vcpu. */
    bool                 ipt_active;

    /* Is the guest in real mode? */
    uint8_t              vmx_realmode;
    /* Are we emulating rather than VMENTERing? */
    uint8_t              vmx_emulate;

    uint8_t              lbr_flags;

    /* Bitmask of segments that we can't safely use in virtual 8086 mode */
    uint16_t             vm86_segment_mask;
    /* Shadow CS, SS, DS, ES, FS, GS, TR while in virtual 8086 mode */
    struct segment_register vm86_saved_seg[x86_seg_tr + 1];
    /* Remember EFLAGS while in virtual 8086 mode */
    uint32_t             vm86_saved_eflags;
    int                  hostenv_migrated;

    /* Bitmap to control vmexit policy for Non-root VMREAD/VMWRITE */
    struct page_info     *vmread_bitmap;
    struct page_info     *vmwrite_bitmap;

    struct page_info     *pml_pg;

    /* Bitmask of trapped CR4 bits. */
    unsigned long        cr4_host_mask;

    /*
     * Before it blocks, the vCPU is added to a per-pCPU list so that the
     * VT-d engine can send a wakeup notification event to that pCPU and
     * wake the related vCPU.
     */
    struct pi_blocking_vcpu pi_blocking;
};

int vmx_create_vmcs(struct vcpu *v);
void vmx_destroy_vmcs(struct vcpu *v);
void vmx_vmcs_enter(struct vcpu *v);
bool __must_check vmx_vmcs_try_enter(struct vcpu *v);
void vmx_vmcs_exit(struct vcpu *v);
void vmx_vmcs_reload(struct vcpu *v);
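
/*
 * Illustrative usage sketch (hypothetical call site): updating another
 * vcpu's VMCS from a foreign pCPU.  vmx_vmcs_enter() makes v's VMCS current
 * under vmcs_lock; __vmwrite() is the usual VMCS accessor used elsewhere in
 * Xen.  'threshold' is a made-up local for the example.
 *
 *     vmx_vmcs_enter(v);
 *     __vmwrite(TPR_THRESHOLD, threshold);
 *     vmx_vmcs_exit(v);
 */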

#define CPU_BASED_VIRTUAL_INTR_PENDING        0x00000004U
#define CPU_BASED_USE_TSC_OFFSETING           0x00000008U
#define CPU_BASED_HLT_EXITING                 0x00000080U
#define CPU_BASED_INVLPG_EXITING              0x00000200U
#define CPU_BASED_MWAIT_EXITING               0x00000400U
#define CPU_BASED_RDPMC_EXITING               0x00000800U
#define CPU_BASED_RDTSC_EXITING               0x00001000U
#define CPU_BASED_CR3_LOAD_EXITING            0x00008000U
#define CPU_BASED_CR3_STORE_EXITING           0x00010000U
#define CPU_BASED_ACTIVATE_TERTIARY_CONTROLS  0x00020000U
#define CPU_BASED_CR8_LOAD_EXITING            0x00080000U
#define CPU_BASED_CR8_STORE_EXITING           0x00100000U
#define CPU_BASED_TPR_SHADOW                  0x00200000U
#define CPU_BASED_VIRTUAL_NMI_PENDING         0x00400000U
#define CPU_BASED_MOV_DR_EXITING              0x00800000U
#define CPU_BASED_UNCOND_IO_EXITING           0x01000000U
#define CPU_BASED_ACTIVATE_IO_BITMAP          0x02000000U
#define CPU_BASED_MONITOR_TRAP_FLAG           0x08000000U
#define CPU_BASED_ACTIVATE_MSR_BITMAP         0x10000000U
#define CPU_BASED_MONITOR_EXITING             0x20000000U
#define CPU_BASED_PAUSE_EXITING               0x40000000U
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000U

#define PIN_BASED_EXT_INTR_MASK         0x00000001
#define PIN_BASED_NMI_EXITING           0x00000008
#define PIN_BASED_VIRTUAL_NMIS          0x00000020
#define PIN_BASED_PREEMPT_TIMER         0x00000040
#define PIN_BASED_POSTED_INTERRUPT      0x00000080

#define VM_EXIT_SAVE_DEBUG_CNTRLS       0x00000004
#define VM_EXIT_IA32E_MODE              0x00000200
#define VM_EXIT_LOAD_PERF_GLOBAL_CTRL   0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT        0x00008000
#define VM_EXIT_SAVE_GUEST_PAT          0x00040000
#define VM_EXIT_LOAD_HOST_PAT           0x00080000
#define VM_EXIT_SAVE_GUEST_EFER         0x00100000
#define VM_EXIT_LOAD_HOST_EFER          0x00200000
#define VM_EXIT_SAVE_PREEMPT_TIMER      0x00400000
#define VM_EXIT_CLEAR_BNDCFGS           0x00800000

#define VM_ENTRY_IA32E_MODE             0x00000200
#define VM_ENTRY_SMM                    0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR     0x00000800
#define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL  0x00002000
#define VM_ENTRY_LOAD_GUEST_PAT         0x00004000
#define VM_ENTRY_LOAD_GUEST_EFER        0x00008000
#define VM_ENTRY_LOAD_BNDCFGS           0x00010000

#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001U
#define SECONDARY_EXEC_ENABLE_EPT               0x00000002U
#define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004U
#define SECONDARY_EXEC_ENABLE_RDTSCP            0x00000008U
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE   0x00000010U
#define SECONDARY_EXEC_ENABLE_VPID              0x00000020U
#define SECONDARY_EXEC_WBINVD_EXITING           0x00000040U
#define SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080U
#define SECONDARY_EXEC_APIC_REGISTER_VIRT       0x00000100U
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    0x00000200U
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING       0x00000400U
#define SECONDARY_EXEC_ENABLE_INVPCID           0x00001000U
#define SECONDARY_EXEC_ENABLE_VM_FUNCTIONS      0x00002000U
#define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING    0x00004000U
#define SECONDARY_EXEC_ENABLE_PML               0x00020000U
#define SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS   0x00040000U
#define SECONDARY_EXEC_XSAVES                   0x00100000U
#define SECONDARY_EXEC_TSC_SCALING              0x02000000U
#define SECONDARY_EXEC_BUS_LOCK_DETECTION       0x40000000U
#define SECONDARY_EXEC_NOTIFY_VM_EXITING        0x80000000U

#define TERTIARY_EXEC_LOADIWKEY_EXITING         BIT(0, UL)
#define TERTIARY_EXEC_ENABLE_HLAT               BIT(1, UL)
#define TERTIARY_EXEC_EPT_PAGING_WRITE          BIT(2, UL)
#define TERTIARY_EXEC_GUEST_PAGING_VERIFY       BIT(3, UL)
#define TERTIARY_EXEC_IPI_VIRT                  BIT(4, UL)
#define TERTIARY_EXEC_VIRT_SPEC_CTRL            BIT(7, UL)

#define cpu_has_vmx_virt_spec_ctrl \
     (vmx_caps.tertiary_exec_control & TERTIARY_EXEC_VIRT_SPEC_CTRL)

#define cpu_has_vmx_ept_paging_write \
     (vmx_caps.tertiary_exec_control & TERTIARY_EXEC_EPT_PAGING_WRITE)

#define VMX_EPT_EXEC_ONLY_SUPPORTED                         0x00000001
#define VMX_EPT_WALK_LENGTH_4_SUPPORTED                     0x00000040
#define VMX_EPT_MEMORY_TYPE_UC                              0x00000100
#define VMX_EPT_MEMORY_TYPE_WB                              0x00004000
#define VMX_EPT_SUPERPAGE_2MB                               0x00010000
#define VMX_EPT_SUPERPAGE_1GB                               0x00020000
#define VMX_EPT_INVEPT_INSTRUCTION                          0x00100000
#define VMX_EPT_AD_BIT                                      0x00200000
#define VMX_EPT_INVEPT_SINGLE_CONTEXT                       0x02000000
#define VMX_EPT_INVEPT_ALL_CONTEXT                          0x04000000
#define VMX_VPID_INVVPID_INSTRUCTION                        0x00000001
#define VMX_VPID_INVVPID_INDIVIDUAL_ADDR                    0x00000100
#define VMX_VPID_INVVPID_SINGLE_CONTEXT                     0x00000200
#define VMX_VPID_INVVPID_ALL_CONTEXT                        0x00000400
#define VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL    0x00000800

#define VMX_MISC_ACTIVITY_MASK                  0x000001c0
#define VMX_MISC_PROC_TRACE                     0x00004000
#define VMX_MISC_CR3_TARGET                     0x01ff0000
#define VMX_MISC_VMWRITE_ALL                    0x20000000

#define VMX_TSC_MULTIPLIER_MAX                  0xffffffffffffffffULL

/* Capabilities and dynamic (run-time adjusted) execution control flags. */
struct vmx_caps {
    uint64_t basic_msr;
    uint32_t pin_based_exec_control;
    uint32_t cpu_based_exec_control;
    uint32_t secondary_exec_control;
    uint64_t tertiary_exec_control;
    uint32_t vmexit_control;
    uint32_t vmentry_control;
    uint32_t ept;
    uint32_t vpid;
    uint64_t vmfunc;
};
extern struct vmx_caps vmx_caps;

#define cpu_has_wbinvd_exiting \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING))
#define cpu_has_vmx_virtualize_apic_accesses \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
#define cpu_has_vmx_tpr_shadow \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
#define cpu_has_vmx_vnmi \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS))
#define cpu_has_vmx_msr_bitmap \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP))
#define cpu_has_vmx_secondary_exec_control \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
#define cpu_has_vmx_tertiary_exec_control \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS))
#define cpu_has_vmx_ept \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
#define cpu_has_vmx_dt_exiting \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING))
#define cpu_has_vmx_rdtscp \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_ENABLE_RDTSCP))
#define cpu_has_vmx_vpid \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID))
#define cpu_has_monitor_trap_flag \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.cpu_based_exec_control & CPU_BASED_MONITOR_TRAP_FLAG))
#define cpu_has_vmx_pat \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.vmentry_control & VM_ENTRY_LOAD_GUEST_PAT))
#define cpu_has_vmx_efer \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.vmentry_control & VM_ENTRY_LOAD_GUEST_EFER))
#define cpu_has_vmx_unrestricted_guest \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_UNRESTRICTED_GUEST))
#define vmx_unrestricted_guest(v)               \
    ((v)->arch.hvm.vmx.secondary_exec_control & \
     SECONDARY_EXEC_UNRESTRICTED_GUEST)
#define cpu_has_vmx_ple \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING))
#define cpu_has_vmx_invpcid \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_ENABLE_INVPCID))
#define cpu_has_vmx_apic_reg_virt \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT))
#define cpu_has_vmx_virtual_intr_delivery \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
#define cpu_has_vmx_virtualize_x2apic_mode \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE))
#define cpu_has_vmx_posted_intr_processing \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT))
#define cpu_has_vmx_vmcs_shadowing \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_ENABLE_VMCS_SHADOWING))
#define cpu_has_vmx_vmfunc \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_ENABLE_VM_FUNCTIONS))
#define cpu_has_vmx_virt_exceptions \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS))
#define cpu_has_vmx_pml \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_ENABLE_PML))
#define cpu_has_vmx_mpx \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
     (vmx_caps.vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
#define cpu_has_vmx_xsaves \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_XSAVES))
#define cpu_has_vmx_tsc_scaling \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_TSC_SCALING))
#define cpu_has_vmx_bus_lock_detection \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_BUS_LOCK_DETECTION))
#define cpu_has_vmx_notify_vm_exiting \
    (IS_ENABLED(CONFIG_INTEL_VMX) && \
     (vmx_caps.secondary_exec_control & SECONDARY_EXEC_NOTIFY_VM_EXITING))

#define VMCS_RID_TYPE_MASK              0x80000000U

/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define VMX_INTR_SHADOW_STI             0x00000001
#define VMX_INTR_SHADOW_MOV_SS          0x00000002
#define VMX_INTR_SHADOW_SMI             0x00000004
#define VMX_INTR_SHADOW_NMI             0x00000008

#define VMX_BASIC_REVISION_MASK         0x7fffffff
#define VMX_BASIC_VMCS_SIZE_MASK        (0x1fffULL << 32)
#define VMX_BASIC_32BIT_ADDRESSES       (1ULL << 48)
#define VMX_BASIC_DUAL_MONITOR          (1ULL << 49)
#define VMX_BASIC_MEMORY_TYPE_MASK      (0xfULL << 50)
#define VMX_BASIC_INS_OUT_INFO          (1ULL << 54)
/*
 * bit 55 of IA32_VMX_BASIC MSR, indicating whether any VMX controls that
 * default to 1 may be cleared to 0.
 */
#define VMX_BASIC_DEFAULT1_ZERO         (1ULL << 55)

#define cpu_has_vmx_ins_outs_instr_info \
    (!!(vmx_caps.basic_msr & VMX_BASIC_INS_OUT_INFO))

/* Guest interrupt status */
#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK  0x0FF
#define VMX_GUEST_INTR_STATUS_SVI_OFFSET        8

/* VMFUNC leaf definitions */
#define VMX_VMFUNC_EPTP_SWITCHING   (1ULL << 0)

/* VMCS field encodings. */
#define VMCS_HIGH(x) ((x) | 1)
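/*
 * Explanatory note: VMCS_HIGH(field) names the upper 32 bits of a 64-bit
 * field for accesses made as two 32-bit halves, e.g. VMCS_HIGH(TSC_OFFSET);
 * 64-bit code normally accesses such fields whole.
 */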
enum vmcs_field {
    /* 16-bit fields. */
    VIRTUAL_PROCESSOR_ID            = 0x00000000,
    POSTED_INTR_NOTIFICATION_VECTOR = 0x00000002,
    EPTP_INDEX                      = 0x00000004,
#define GUEST_SEG_SELECTOR(sel) (GUEST_ES_SELECTOR + (sel) * 2) /* ES ... GS */
    GUEST_ES_SELECTOR               = 0x00000800,
    GUEST_CS_SELECTOR               = 0x00000802,
    GUEST_SS_SELECTOR               = 0x00000804,
    GUEST_DS_SELECTOR               = 0x00000806,
    GUEST_FS_SELECTOR               = 0x00000808,
    GUEST_GS_SELECTOR               = 0x0000080a,
    GUEST_LDTR_SELECTOR             = 0x0000080c,
    GUEST_TR_SELECTOR               = 0x0000080e,
    GUEST_INTR_STATUS               = 0x00000810,
    GUEST_PML_INDEX                 = 0x00000812,
    HOST_ES_SELECTOR                = 0x00000c00,
    HOST_CS_SELECTOR                = 0x00000c02,
    HOST_SS_SELECTOR                = 0x00000c04,
    HOST_DS_SELECTOR                = 0x00000c06,
    HOST_FS_SELECTOR                = 0x00000c08,
    HOST_GS_SELECTOR                = 0x00000c0a,
    HOST_TR_SELECTOR                = 0x00000c0c,

    /* 64-bit fields. */
    IO_BITMAP_A                     = 0x00002000,
    IO_BITMAP_B                     = 0x00002002,
    MSR_BITMAP                      = 0x00002004,
    VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
    VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
    VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
    PML_ADDRESS                     = 0x0000200e,
    TSC_OFFSET                      = 0x00002010,
    VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
    APIC_ACCESS_ADDR                = 0x00002014,
    PI_DESC_ADDR                    = 0x00002016,
    VM_FUNCTION_CONTROL             = 0x00002018,
    EPT_POINTER                     = 0x0000201a,
    EOI_EXIT_BITMAP0                = 0x0000201c,
#define EOI_EXIT_BITMAP(n) (EOI_EXIT_BITMAP0 + (n) * 2) /* n = 0...3 */
    EPTP_LIST_ADDR                  = 0x00002024,
    VMREAD_BITMAP                   = 0x00002026,
    VMWRITE_BITMAP                  = 0x00002028,
    VIRT_EXCEPTION_INFO             = 0x0000202a,
    XSS_EXIT_BITMAP                 = 0x0000202c,
    TSC_MULTIPLIER                  = 0x00002032,
    TERTIARY_VM_EXEC_CONTROL        = 0x00002034,
    SPEC_CTRL_MASK                  = 0x0000204a,
    SPEC_CTRL_SHADOW                = 0x0000204c,
    GUEST_PHYSICAL_ADDRESS          = 0x00002400,
    VMCS_LINK_POINTER               = 0x00002800,
    GUEST_IA32_DEBUGCTL             = 0x00002802,
    GUEST_PAT                       = 0x00002804,
    GUEST_EFER                      = 0x00002806,
    GUEST_PERF_GLOBAL_CTRL          = 0x00002808,
    GUEST_PDPTE0                    = 0x0000280a,
#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */
    GUEST_BNDCFGS                   = 0x00002812,
    HOST_PAT                        = 0x00002c00,
    HOST_EFER                       = 0x00002c02,
    HOST_PERF_GLOBAL_CTRL           = 0x00002c04,

    /* 32-bit fields. */
    PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
    CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
    EXCEPTION_BITMAP                = 0x00004004,
    PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
    PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
    CR3_TARGET_COUNT                = 0x0000400a,
    VM_EXIT_CONTROLS                = 0x0000400c,
    VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
    VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
    VM_ENTRY_CONTROLS               = 0x00004012,
    VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
    VM_ENTRY_INTR_INFO              = 0x00004016,
    VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
    VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
    TPR_THRESHOLD                   = 0x0000401c,
    SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
    PLE_GAP                         = 0x00004020,
    PLE_WINDOW                      = 0x00004022,
    NOTIFY_WINDOW                   = 0x00004024,
    VM_INSTRUCTION_ERROR            = 0x00004400,
    VM_EXIT_REASON                  = 0x00004402,
    VM_EXIT_INTR_INFO               = 0x00004404,
    VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
    IDT_VECTORING_INFO              = 0x00004408,
    IDT_VECTORING_ERROR_CODE        = 0x0000440a,
    VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
    VMX_INSTRUCTION_INFO            = 0x0000440e,
#define GUEST_SEG_LIMIT(sel) (GUEST_ES_LIMIT + (sel) * 2) /* ES ... GS */
    GUEST_ES_LIMIT                  = 0x00004800,
    GUEST_CS_LIMIT                  = 0x00004802,
    GUEST_SS_LIMIT                  = 0x00004804,
    GUEST_DS_LIMIT                  = 0x00004806,
    GUEST_FS_LIMIT                  = 0x00004808,
    GUEST_GS_LIMIT                  = 0x0000480a,
    GUEST_LDTR_LIMIT                = 0x0000480c,
    GUEST_TR_LIMIT                  = 0x0000480e,
    GUEST_GDTR_LIMIT                = 0x00004810,
    GUEST_IDTR_LIMIT                = 0x00004812,
#define GUEST_SEG_AR_BYTES(sel) (GUEST_ES_AR_BYTES + (sel) * 2) /* ES ... GS */
    GUEST_ES_AR_BYTES               = 0x00004814,
    GUEST_CS_AR_BYTES               = 0x00004816,
    GUEST_SS_AR_BYTES               = 0x00004818,
    GUEST_DS_AR_BYTES               = 0x0000481a,
    GUEST_FS_AR_BYTES               = 0x0000481c,
    GUEST_GS_AR_BYTES               = 0x0000481e,
    GUEST_LDTR_AR_BYTES             = 0x00004820,
    GUEST_TR_AR_BYTES               = 0x00004822,
    GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
    GUEST_ACTIVITY_STATE            = 0x00004826,
    GUEST_SMBASE                    = 0x00004828,
    GUEST_SYSENTER_CS               = 0x0000482a,
    GUEST_PREEMPTION_TIMER          = 0x0000482e,
    HOST_SYSENTER_CS                = 0x00004c00,

    /* Natural-width fields. */
    CR0_GUEST_HOST_MASK             = 0x00006000,
    CR4_GUEST_HOST_MASK             = 0x00006002,
    CR0_READ_SHADOW                 = 0x00006004,
    CR4_READ_SHADOW                 = 0x00006006,
    CR3_TARGET_VALUE0               = 0x00006008,
#define CR3_TARGET_VALUE(n) (CR3_TARGET_VALUE0 + (n) * 2) /* n < CR3_TARGET_COUNT */
    EXIT_QUALIFICATION              = 0x00006400,
    GUEST_LINEAR_ADDRESS            = 0x0000640a,
    GUEST_CR0                       = 0x00006800,
    GUEST_CR3                       = 0x00006802,
    GUEST_CR4                       = 0x00006804,
#define GUEST_SEG_BASE(sel) (GUEST_ES_BASE + (sel) * 2) /* ES ... GS */
    GUEST_ES_BASE                   = 0x00006806,
    GUEST_CS_BASE                   = 0x00006808,
    GUEST_SS_BASE                   = 0x0000680a,
    GUEST_DS_BASE                   = 0x0000680c,
    GUEST_FS_BASE                   = 0x0000680e,
    GUEST_GS_BASE                   = 0x00006810,
    GUEST_LDTR_BASE                 = 0x00006812,
    GUEST_TR_BASE                   = 0x00006814,
    GUEST_GDTR_BASE                 = 0x00006816,
    GUEST_IDTR_BASE                 = 0x00006818,
    GUEST_DR7                       = 0x0000681a,
    GUEST_RSP                       = 0x0000681c,
    GUEST_RIP                       = 0x0000681e,
    GUEST_RFLAGS                    = 0x00006820,
    GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
    GUEST_SYSENTER_ESP              = 0x00006824,
    GUEST_SYSENTER_EIP              = 0x00006826,
    HOST_CR0                        = 0x00006c00,
    HOST_CR3                        = 0x00006c02,
    HOST_CR4                        = 0x00006c04,
    HOST_FS_BASE                    = 0x00006c06,
    HOST_GS_BASE                    = 0x00006c08,
    HOST_TR_BASE                    = 0x00006c0a,
    HOST_GDTR_BASE                  = 0x00006c0c,
    HOST_IDTR_BASE                  = 0x00006c0e,
    HOST_SYSENTER_ESP               = 0x00006c10,
    HOST_SYSENTER_EIP               = 0x00006c12,
    HOST_RSP                        = 0x00006c14,
    HOST_RIP                        = 0x00006c16,
};

#define VMCS_VPID_WIDTH 16

/* VM Instruction error numbers */
enum vmx_insn_errno
{
    VMX_INSN_SUCCEED                       = 0,
    VMX_INSN_VMCLEAR_INVALID_PHYADDR       = 2,
    VMX_INSN_VMCLEAR_WITH_VMXON_PTR        = 3,
    VMX_INSN_VMLAUNCH_NONCLEAR_VMCS        = 4,
    VMX_INSN_VMRESUME_NONLAUNCHED_VMCS     = 5,
    VMX_INSN_INVALID_CONTROL_STATE         = 7,
    VMX_INSN_INVALID_HOST_STATE            = 8,
    VMX_INSN_VMPTRLD_INVALID_PHYADDR       = 9,
    VMX_INSN_VMPTRLD_WITH_VMXON_PTR        = 10,
    VMX_INSN_VMPTRLD_INCORRECT_VMCS_ID     = 11,
    VMX_INSN_UNSUPPORTED_VMCS_COMPONENT    = 12,
    VMX_INSN_VMXON_IN_VMX_ROOT             = 15,
    VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS     = 26,
    VMX_INSN_INVEPT_INVVPID_INVALID_OP     = 28,
    VMX_INSN_FAIL_INVALID                  = ~0,
};

/* MSR load/save list infrastructure. */
enum vmx_msr_list_type {
    VMX_MSR_HOST,           /* MSRs loaded on VMExit.                   */
    VMX_MSR_GUEST,          /* MSRs saved on VMExit, loaded on VMEntry. */
    VMX_MSR_GUEST_LOADONLY, /* MSRs loaded on VMEntry only.             */
};

/**
 * Add an MSR to an MSR list (inserting space for the entry if necessary), and
 * set the MSR's value.
 *
 * It is undefined behaviour to try to insert the same MSR into both the
 * GUEST and GUEST_LOADONLY lists.
 *
 * May fail if unable to allocate memory for the list, or if the total number
 * of entries would exceed the memory allocated.
 */
int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
                enum vmx_msr_list_type type);

/**
 * Remove an MSR entry from an MSR list.  Returns -ESRCH if the MSR was not
 * found in the list.
 */
int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);

static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr, uint64_t val)
{
    return vmx_add_msr(v, msr, val, VMX_MSR_GUEST);
}
static inline int vmx_add_host_load_msr(struct vcpu *v, uint32_t msr,
                                        uint64_t val)
{
    return vmx_add_msr(v, msr, val, VMX_MSR_HOST);
}

struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
                                   enum vmx_msr_list_type type);

static inline int vmx_read_guest_msr(const struct vcpu *v, uint32_t msr,
                                     uint64_t *val)
{
    const struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST);

    if ( !ent )
    {
        *val = 0;
        return -ESRCH;
    }

    *val = ent->data;

    return 0;
}

static inline int vmx_read_guest_loadonly_msr(
    const struct vcpu *v, uint32_t msr, uint64_t *val)
{
    const struct vmx_msr_entry *ent =
        vmx_find_msr(v, msr, VMX_MSR_GUEST_LOADONLY);

    if ( !ent )
    {
        *val = 0;
        return -ESRCH;
    }

    *val = ent->data;

    return 0;
}

static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
                                      uint64_t val)
{
    struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST);

    if ( !ent )
        return -ESRCH;

    ent->data = val;

    return 0;
}
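
/*
 * Illustrative usage sketch (hypothetical call site; the MSR is only an
 * example): populate the guest load/save list and update the value later.
 *
 *     rc = vmx_add_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0);
 *     if ( !rc )
 *         rc = vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, val);
 *
 * Entries on the GUEST list are saved on VMExit and loaded on VMEntry, so
 * vmx_read_guest_msr() returns the value most recently saved for the guest.
 */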


/* MSR intercept bitmap infrastructure. */
enum vmx_msr_intercept_type {
    VMX_MSR_R  = 1,
    VMX_MSR_W  = 2,
    VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
};

void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
                             enum vmx_msr_intercept_type type);
void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
                           enum vmx_msr_intercept_type type);
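
/*
 * Illustrative sketch: give the guest direct (non-intercepted) access to an
 * MSR by clearing both intercepts, e.g.
 *
 *     vmx_clear_msr_intercept(v, MSR_FS_BASE, VMX_MSR_RW);
 *
 * (hypothetical call site; MSR_FS_BASE is just an example).
 */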
void vmx_vmcs_switch(paddr_t from, paddr_t to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap,
                            unsigned int msr, bool is_write) __nonnull(1);
void virtual_vmcs_enter(const struct vcpu *);
void virtual_vmcs_exit(const struct vcpu *);
u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding);
enum vmx_insn_errno virtual_vmcs_vmread_safe(const struct vcpu *v,
                                             u32 vmcs_encoding, u64 *val);
void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val);
enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v,
                                              u32 vmcs_encoding, u64 val);

DECLARE_PER_CPU(bool, vmxon);

bool vmx_vcpu_pml_enabled(const struct vcpu *v);
int vmx_vcpu_enable_pml(struct vcpu *v);
void vmx_vcpu_disable_pml(struct vcpu *v);
bool vmx_domain_pml_enabled(const struct domain *d);
int vmx_domain_enable_pml(struct domain *d);
void vmx_domain_disable_pml(struct domain *d);
void vmx_domain_flush_pml_buffers(struct domain *d);

void vmx_domain_update_eptp(struct domain *d);

#endif /* __ASM_X86_HVM_VMX_VMCS_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */