/*
 * vmcs.h: VMCS related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 */
#ifndef __ASM_X86_HVM_VMX_VMCS_H__
#define __ASM_X86_HVM_VMX_VMCS_H__

#include <asm/hvm/io.h>
#include <irq_vectors.h>

extern void vmcs_dump_vcpu(struct vcpu *v);
extern void setup_vmcs_dump(void);
extern int vmx_cpu_up_prepare(unsigned int cpu);
extern void vmx_cpu_dead(unsigned int cpu);
extern int vmx_cpu_up(void);
extern int _vmx_cpu_up(bool bsp);
extern void vmx_cpu_down(void);

struct vmcs_struct {
    u32 vmcs_revision_id;
    unsigned char data [0]; /* vmcs size is read from MSR */
};

struct vmx_msr_entry {
    u32 index;
    u32 mbz;
    u64 data;
};

#define EPT_DEFAULT_MT      MTRR_TYPE_WRBACK

struct ept_data {
    union {
        struct {
            uint64_t mt:3,   /* Memory Type. */
                     wl:3,   /* Walk length -1. */
                     ad:1,   /* Enable EPT A/D bits. */
                     :5,     /* rsvd. */
                     mfn:52;
        };
        u64 eptp;
    };
    /* Set of PCPUs needing an INVEPT before a VMENTER. */
    cpumask_var_t invalidate;
};
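
/*
 * Illustrative sketch (not part of this interface): how the bitfields above
 * compose the 64-bit EPT pointer.  Assumes a 4-level page walk
 * (wl = walk length - 1 = 3), the default write-back memory type, and that
 * the MFN of the top-level EPT table is available as top_mfn:
 *
 *     struct ept_data ept = {};
 *
 *     ept.mt  = EPT_DEFAULT_MT;
 *     ept.wl  = 3;
 *     ept.mfn = top_mfn;
 *
 * After this, ept.eptp holds the value to load into the EPT_POINTER VMCS
 * field.
 */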

#define _VMX_DOMAIN_PML_ENABLED    0
#define VMX_DOMAIN_PML_ENABLED     (1ul << _VMX_DOMAIN_PML_ENABLED)
struct vmx_domain {
    unsigned long apic_access_mfn;
    /* VMX_DOMAIN_* */
    unsigned int status;
};

/*
 * Layout of the MSR bitmap, as interpreted by hardware:
 * - *_low  covers MSRs 0 to 0x1fff
 * - *_high covers MSRs 0xc0000000 to 0xc0001fff
 */
struct vmx_msr_bitmap {
    unsigned long read_low  [0x2000 / BITS_PER_LONG];
    unsigned long read_high [0x2000 / BITS_PER_LONG];
    unsigned long write_low [0x2000 / BITS_PER_LONG];
    unsigned long write_high[0x2000 / BITS_PER_LONG];
};
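
/*
 * Sketch of how an MSR index selects a bit in the bitmap above (this mirrors
 * the hardware interpretation; vmx_msr_is_intercepted() below implements the
 * real lookup).  Reads are shown; writes use write_low/write_high:
 *
 *     const struct vmx_msr_bitmap *bitmap = v->arch.hvm_vmx.msr_bitmap;
 *
 *     if ( msr <= 0x1fff )
 *         intercepted = test_bit(msr, bitmap->read_low);
 *     else if ( (msr >= 0xc0000000U) && (msr <= 0xc0001fffU) )
 *         intercepted = test_bit(msr & 0x1fff, bitmap->read_high);
 *     else
 *         intercepted = true;
 *
 * MSRs outside both ranges are always intercepted, regardless of the bitmap
 * contents.
 */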

struct pi_desc {
    DECLARE_BITMAP(pir, NR_VECTORS);
    union {
        struct {
            u16 on     : 1,  /* bit 256 - Outstanding Notification */
                sn     : 1,  /* bit 257 - Suppress Notification */
                rsvd_1 : 14; /* bit 271:258 - Reserved */
            u8  nv;          /* bit 279:272 - Notification Vector */
            u8  rsvd_2;      /* bit 287:280 - Reserved */
            u32 ndst;        /* bit 319:288 - Notification Destination */
        };
        u64 control;
    };
    u32 rsvd[6];
} __attribute__ ((aligned (64)));
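
/*
 * The pir[] bitmap occupies descriptor bits 0-255, which is why the control
 * fields above are annotated starting at bit 256 (bit 0 of 'control' is
 * therefore the ON bit).  A simplified sketch of posting vector 'vec' to a
 * vCPU; the real code uses the atomic helpers in vmx.h and then sends the
 * notification vector to the destination pCPU:
 *
 *     struct pi_desc *pi = &v->arch.hvm_vmx.pi_desc;
 *
 *     set_bit(vec, pi->pir);
 *     if ( !test_and_set_bit(0, &pi->control) )
 *         ...notify the target pCPU using pi->nv...
 */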

#define NR_PML_ENTRIES   512

struct pi_blocking_vcpu {
    struct list_head     list;
    spinlock_t           *lock;
};

struct arch_vmx_struct {
    /* Physical address of VMCS. */
    paddr_t              vmcs_pa;
    /* VMCS shadow machine address. */
    paddr_t              vmcs_shadow_maddr;

    /* Protects remote usage of VMCS (VMPTRLD/VMCLEAR). */
    spinlock_t           vmcs_lock;

    /*
     * Activation and launch status of this VMCS.
     *  - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR.
     *  - Launched on active CPU by VMLAUNCH when current VMCS.
     */
    struct list_head     active_list;
    int                  active_cpu;
    int                  launched;

    /* Cache of cpu execution control. */
    u32                  exec_control;
    u32                  secondary_exec_control;
    u32                  exception_bitmap;

    uint64_t             shadow_gs;
    uint64_t             star;
    uint64_t             lstar;
    uint64_t             cstar;
    uint64_t             sfmask;

    struct vmx_msr_bitmap *msr_bitmap;
    unsigned int         msr_count;
    struct vmx_msr_entry *msr_area;
    unsigned int         host_msr_count;
    struct vmx_msr_entry *host_msr_area;

    unsigned long        eoi_exitmap_changed;
    DECLARE_BITMAP(eoi_exit_bitmap, NR_VECTORS);
    struct pi_desc       pi_desc;

    unsigned long        host_cr0;

    /* Do we need to tolerate a spurious EPT_MISCONFIG VM exit? */
    bool_t               ept_spurious_misconfig;

    /* Is the guest in real mode? */
    uint8_t              vmx_realmode;
    /* Are we emulating rather than VMENTERing? */
    uint8_t              vmx_emulate;

    uint8_t              lbr_fixup_enabled;

    /* Bitmask of segments that we can't safely use in virtual 8086 mode */
    uint16_t             vm86_segment_mask;
    /* Shadow CS, SS, DS, ES, FS, GS, TR while in virtual 8086 mode */
    struct segment_register vm86_saved_seg[x86_seg_tr + 1];
    /* Remember EFLAGS while in virtual 8086 mode */
    uint32_t             vm86_saved_eflags;
    int                  hostenv_migrated;

    /* Bitmap to control vmexit policy for Non-root VMREAD/VMWRITE */
    struct page_info     *vmread_bitmap;
    struct page_info     *vmwrite_bitmap;

    struct page_info     *pml_pg;

    /*
     * Before a vCPU blocks, it is added to the per-pCPU blocking list so
     * that the VT-d engine can send a wakeup notification event to that
     * pCPU and wake up the blocked vCPU.
     */
    struct pi_blocking_vcpu pi_blocking;
};

int vmx_create_vmcs(struct vcpu *v);
void vmx_destroy_vmcs(struct vcpu *v);
void vmx_vmcs_enter(struct vcpu *v);
bool_t __must_check vmx_vmcs_try_enter(struct vcpu *v);
void vmx_vmcs_exit(struct vcpu *v);
void vmx_vmcs_reload(struct vcpu *v);
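
/*
 * Typical usage (illustrative): VMCS fields of a vCPU that may be loaded on
 * another pCPU must only be accessed between vmx_vmcs_enter() and
 * vmx_vmcs_exit(), which make that VMCS current on this pCPU and restore the
 * previous one afterwards.  With __vmwrite() as declared in vmx.h:
 *
 *     vmx_vmcs_enter(v);
 *     __vmwrite(TPR_THRESHOLD, threshold);
 *     vmx_vmcs_exit(v);
 */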

#define CPU_BASED_VIRTUAL_INTR_PENDING        0x00000004
#define CPU_BASED_USE_TSC_OFFSETING           0x00000008
#define CPU_BASED_HLT_EXITING                 0x00000080
#define CPU_BASED_INVLPG_EXITING              0x00000200
#define CPU_BASED_MWAIT_EXITING               0x00000400
#define CPU_BASED_RDPMC_EXITING               0x00000800
#define CPU_BASED_RDTSC_EXITING               0x00001000
#define CPU_BASED_CR3_LOAD_EXITING            0x00008000
#define CPU_BASED_CR3_STORE_EXITING           0x00010000
#define CPU_BASED_CR8_LOAD_EXITING            0x00080000
#define CPU_BASED_CR8_STORE_EXITING           0x00100000
#define CPU_BASED_TPR_SHADOW                  0x00200000
#define CPU_BASED_VIRTUAL_NMI_PENDING         0x00400000
#define CPU_BASED_MOV_DR_EXITING              0x00800000
#define CPU_BASED_UNCOND_IO_EXITING           0x01000000
#define CPU_BASED_ACTIVATE_IO_BITMAP          0x02000000
#define CPU_BASED_MONITOR_TRAP_FLAG           0x08000000
#define CPU_BASED_ACTIVATE_MSR_BITMAP         0x10000000
#define CPU_BASED_MONITOR_EXITING             0x20000000
#define CPU_BASED_PAUSE_EXITING               0x40000000
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
extern u32 vmx_cpu_based_exec_control;

#define PIN_BASED_EXT_INTR_MASK         0x00000001
#define PIN_BASED_NMI_EXITING           0x00000008
#define PIN_BASED_VIRTUAL_NMIS          0x00000020
#define PIN_BASED_PREEMPT_TIMER         0x00000040
#define PIN_BASED_POSTED_INTERRUPT      0x00000080
extern u32 vmx_pin_based_exec_control;

#define VM_EXIT_SAVE_DEBUG_CNTRLS       0x00000004
#define VM_EXIT_IA32E_MODE              0x00000200
#define VM_EXIT_LOAD_PERF_GLOBAL_CTRL   0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT        0x00008000
#define VM_EXIT_SAVE_GUEST_PAT          0x00040000
#define VM_EXIT_LOAD_HOST_PAT           0x00080000
#define VM_EXIT_SAVE_GUEST_EFER         0x00100000
#define VM_EXIT_LOAD_HOST_EFER          0x00200000
#define VM_EXIT_SAVE_PREEMPT_TIMER      0x00400000
#define VM_EXIT_CLEAR_BNDCFGS           0x00800000
extern u32 vmx_vmexit_control;

#define VM_ENTRY_IA32E_MODE             0x00000200
#define VM_ENTRY_SMM                    0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR     0x00000800
#define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL  0x00002000
#define VM_ENTRY_LOAD_GUEST_PAT         0x00004000
#define VM_ENTRY_LOAD_GUEST_EFER        0x00008000
#define VM_ENTRY_LOAD_BNDCFGS           0x00010000
extern u32 vmx_vmentry_control;

#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT               0x00000002
#define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004
#define SECONDARY_EXEC_ENABLE_RDTSCP            0x00000008
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE   0x00000010
#define SECONDARY_EXEC_ENABLE_VPID              0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080
#define SECONDARY_EXEC_APIC_REGISTER_VIRT       0x00000100
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    0x00000200
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING       0x00000400
#define SECONDARY_EXEC_ENABLE_INVPCID           0x00001000
#define SECONDARY_EXEC_ENABLE_VM_FUNCTIONS      0x00002000
#define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING    0x00004000
#define SECONDARY_EXEC_ENABLE_PML               0x00020000
#define SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS   0x00040000
#define SECONDARY_EXEC_XSAVES                   0x00100000
#define SECONDARY_EXEC_TSC_SCALING              0x02000000
extern u32 vmx_secondary_exec_control;

#define VMX_EPT_EXEC_ONLY_SUPPORTED                         0x00000001
#define VMX_EPT_WALK_LENGTH_4_SUPPORTED                     0x00000040
#define VMX_EPT_MEMORY_TYPE_UC                              0x00000100
#define VMX_EPT_MEMORY_TYPE_WB                              0x00004000
#define VMX_EPT_SUPERPAGE_2MB                               0x00010000
#define VMX_EPT_SUPERPAGE_1GB                               0x00020000
#define VMX_EPT_INVEPT_INSTRUCTION                          0x00100000
#define VMX_EPT_AD_BIT                                      0x00200000
#define VMX_EPT_INVEPT_SINGLE_CONTEXT                       0x02000000
#define VMX_EPT_INVEPT_ALL_CONTEXT                          0x04000000
#define VMX_VPID_INVVPID_INSTRUCTION                     0x00100000000ULL
#define VMX_VPID_INVVPID_INDIVIDUAL_ADDR                 0x10000000000ULL
#define VMX_VPID_INVVPID_SINGLE_CONTEXT                  0x20000000000ULL
#define VMX_VPID_INVVPID_ALL_CONTEXT                     0x40000000000ULL
#define VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 0x80000000000ULL
extern u64 vmx_ept_vpid_cap;

#define VMX_MISC_CR3_TARGET             0x01ff0000
#define VMX_MISC_VMWRITE_ALL            0x20000000

#define VMX_TSC_MULTIPLIER_MAX          0xffffffffffffffffULL

#define cpu_has_wbinvd_exiting \
    (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
#define cpu_has_vmx_virtualize_apic_accesses \
    (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
#define cpu_has_vmx_tpr_shadow \
    (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
#define cpu_has_vmx_vnmi \
    (vmx_pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS)
#define cpu_has_vmx_msr_bitmap \
    (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
#define cpu_has_vmx_secondary_exec_control \
    (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
#define cpu_has_vmx_ept \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)
#define cpu_has_vmx_dt_exiting \
    (vmx_secondary_exec_control & SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING)
#define cpu_has_vmx_vpid \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
#define cpu_has_monitor_trap_flag \
    (vmx_cpu_based_exec_control & CPU_BASED_MONITOR_TRAP_FLAG)
#define cpu_has_vmx_pat \
    (vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_PAT)
#define cpu_has_vmx_unrestricted_guest \
    (vmx_secondary_exec_control & SECONDARY_EXEC_UNRESTRICTED_GUEST)
#define vmx_unrestricted_guest(v)               \
    ((v)->arch.hvm_vmx.secondary_exec_control & \
     SECONDARY_EXEC_UNRESTRICTED_GUEST)
#define cpu_has_vmx_ple \
    (vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define cpu_has_vmx_apic_reg_virt \
    (vmx_secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT)
#define cpu_has_vmx_virtual_intr_delivery \
    (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
#define cpu_has_vmx_virtualize_x2apic_mode \
    (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)
#define cpu_has_vmx_posted_intr_processing \
    (vmx_pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT)
#define cpu_has_vmx_vmcs_shadowing \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VMCS_SHADOWING)
#define cpu_has_vmx_vmfunc \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VM_FUNCTIONS)
#define cpu_has_vmx_virt_exceptions \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
#define cpu_has_vmx_pml \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
#define cpu_has_vmx_mpx \
    ((vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
     (vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
#define cpu_has_vmx_xsaves \
    (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
#define cpu_has_vmx_tsc_scaling \
    (vmx_secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
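
/*
 * The cpu_has_* predicates above are typically used to gate optional
 * per-domain or per-vCPU setup, e.g. (illustrative only):
 *
 *     if ( cpu_has_vmx_pml )
 *         rc = vmx_domain_enable_pml(d);
 */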

#define VMCS_RID_TYPE_MASK              0x80000000

/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define VMX_INTR_SHADOW_STI             0x00000001
#define VMX_INTR_SHADOW_MOV_SS          0x00000002
#define VMX_INTR_SHADOW_SMI             0x00000004
#define VMX_INTR_SHADOW_NMI             0x00000008

#define VMX_BASIC_REVISION_MASK         0x7fffffff
#define VMX_BASIC_VMCS_SIZE_MASK        (0x1fffULL << 32)
#define VMX_BASIC_32BIT_ADDRESSES       (1ULL << 48)
#define VMX_BASIC_DUAL_MONITOR          (1ULL << 49)
#define VMX_BASIC_MEMORY_TYPE_MASK      (0xfULL << 50)
#define VMX_BASIC_INS_OUT_INFO          (1ULL << 54)
/*
 * bit 55 of IA32_VMX_BASIC MSR, indicating whether any VMX controls that
 * default to 1 may be cleared to 0.
 */
#define VMX_BASIC_DEFAULT1_ZERO         (1ULL << 55)

extern u64 vmx_basic_msr;
#define cpu_has_vmx_ins_outs_instr_info \
    (!!(vmx_basic_msr & VMX_BASIC_INS_OUT_INFO))
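
/*
 * Example of decoding vmx_basic_msr with the masks above (sketch):
 *
 *     uint32_t rev  = vmx_basic_msr & VMX_BASIC_REVISION_MASK;
 *     size_t   size = (vmx_basic_msr & VMX_BASIC_VMCS_SIZE_MASK) >> 32;
 *
 * 'rev' is the value written into vmcs_revision_id of struct vmcs_struct,
 * and 'size' is the VMCS region size that the "read from MSR" comment in
 * that structure refers to.
 */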

/* Guest interrupt status */
#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK  0x0FF
#define VMX_GUEST_INTR_STATUS_SVI_OFFSET        8

/* VMFUNC leaf definitions */
#define VMX_VMFUNC_EPTP_SWITCHING   (1ULL << 0)

/* VMCS field encodings. */
#define VMCS_HIGH(x) ((x) | 1)
enum vmcs_field {
    VIRTUAL_PROCESSOR_ID            = 0x00000000,
    POSTED_INTR_NOTIFICATION_VECTOR = 0x00000002,
    EPTP_INDEX                      = 0x00000004,
#define GUEST_SEG_SELECTOR(sel) (GUEST_ES_SELECTOR + (sel) * 2) /* ES ... GS */
    GUEST_ES_SELECTOR               = 0x00000800,
    GUEST_CS_SELECTOR               = 0x00000802,
    GUEST_SS_SELECTOR               = 0x00000804,
    GUEST_DS_SELECTOR               = 0x00000806,
    GUEST_FS_SELECTOR               = 0x00000808,
    GUEST_GS_SELECTOR               = 0x0000080a,
    GUEST_LDTR_SELECTOR             = 0x0000080c,
    GUEST_TR_SELECTOR               = 0x0000080e,
    GUEST_INTR_STATUS               = 0x00000810,
    GUEST_PML_INDEX                 = 0x00000812,
    HOST_ES_SELECTOR                = 0x00000c00,
    HOST_CS_SELECTOR                = 0x00000c02,
    HOST_SS_SELECTOR                = 0x00000c04,
    HOST_DS_SELECTOR                = 0x00000c06,
    HOST_FS_SELECTOR                = 0x00000c08,
    HOST_GS_SELECTOR                = 0x00000c0a,
    HOST_TR_SELECTOR                = 0x00000c0c,
    IO_BITMAP_A                     = 0x00002000,
    IO_BITMAP_B                     = 0x00002002,
    MSR_BITMAP                      = 0x00002004,
    VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
    VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
    VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
    PML_ADDRESS                     = 0x0000200e,
    TSC_OFFSET                      = 0x00002010,
    VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
    APIC_ACCESS_ADDR                = 0x00002014,
    PI_DESC_ADDR                    = 0x00002016,
    VM_FUNCTION_CONTROL             = 0x00002018,
    EPT_POINTER                     = 0x0000201a,
    EOI_EXIT_BITMAP0                = 0x0000201c,
#define EOI_EXIT_BITMAP(n) (EOI_EXIT_BITMAP0 + (n) * 2) /* n = 0...3 */
    EPTP_LIST_ADDR                  = 0x00002024,
    VMREAD_BITMAP                   = 0x00002026,
    VMWRITE_BITMAP                  = 0x00002028,
    VIRT_EXCEPTION_INFO             = 0x0000202a,
    XSS_EXIT_BITMAP                 = 0x0000202c,
    TSC_MULTIPLIER                  = 0x00002032,
    GUEST_PHYSICAL_ADDRESS          = 0x00002400,
    VMCS_LINK_POINTER               = 0x00002800,
    GUEST_IA32_DEBUGCTL             = 0x00002802,
    GUEST_PAT                       = 0x00002804,
    GUEST_EFER                      = 0x00002806,
    GUEST_PERF_GLOBAL_CTRL          = 0x00002808,
    GUEST_PDPTE0                    = 0x0000280a,
#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */
    GUEST_BNDCFGS                   = 0x00002812,
    HOST_PAT                        = 0x00002c00,
    HOST_EFER                       = 0x00002c02,
    HOST_PERF_GLOBAL_CTRL           = 0x00002c04,
    PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
    CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
    EXCEPTION_BITMAP                = 0x00004004,
    PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
    PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
    CR3_TARGET_COUNT                = 0x0000400a,
    VM_EXIT_CONTROLS                = 0x0000400c,
    VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
    VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
    VM_ENTRY_CONTROLS               = 0x00004012,
    VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
    VM_ENTRY_INTR_INFO              = 0x00004016,
    VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
    VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
    TPR_THRESHOLD                   = 0x0000401c,
    SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
    PLE_GAP                         = 0x00004020,
    PLE_WINDOW                      = 0x00004022,
    VM_INSTRUCTION_ERROR            = 0x00004400,
    VM_EXIT_REASON                  = 0x00004402,
    VM_EXIT_INTR_INFO               = 0x00004404,
    VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
    IDT_VECTORING_INFO              = 0x00004408,
    IDT_VECTORING_ERROR_CODE        = 0x0000440a,
    VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
    VMX_INSTRUCTION_INFO            = 0x0000440e,
#define GUEST_SEG_LIMIT(sel) (GUEST_ES_LIMIT + (sel) * 2) /* ES ... GS */
    GUEST_ES_LIMIT                  = 0x00004800,
    GUEST_CS_LIMIT                  = 0x00004802,
    GUEST_SS_LIMIT                  = 0x00004804,
    GUEST_DS_LIMIT                  = 0x00004806,
    GUEST_FS_LIMIT                  = 0x00004808,
    GUEST_GS_LIMIT                  = 0x0000480a,
    GUEST_LDTR_LIMIT                = 0x0000480c,
    GUEST_TR_LIMIT                  = 0x0000480e,
    GUEST_GDTR_LIMIT                = 0x00004810,
    GUEST_IDTR_LIMIT                = 0x00004812,
#define GUEST_SEG_AR_BYTES(sel) (GUEST_ES_AR_BYTES + (sel) * 2) /* ES ... GS */
    GUEST_ES_AR_BYTES               = 0x00004814,
    GUEST_CS_AR_BYTES               = 0x00004816,
    GUEST_SS_AR_BYTES               = 0x00004818,
    GUEST_DS_AR_BYTES               = 0x0000481a,
    GUEST_FS_AR_BYTES               = 0x0000481c,
    GUEST_GS_AR_BYTES               = 0x0000481e,
    GUEST_LDTR_AR_BYTES             = 0x00004820,
    GUEST_TR_AR_BYTES               = 0x00004822,
    GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
    GUEST_ACTIVITY_STATE            = 0x00004826,
    GUEST_SMBASE                    = 0x00004828,
    GUEST_SYSENTER_CS               = 0x0000482a,
    GUEST_PREEMPTION_TIMER          = 0x0000482e,
    HOST_SYSENTER_CS                = 0x00004c00,
    CR0_GUEST_HOST_MASK             = 0x00006000,
    CR4_GUEST_HOST_MASK             = 0x00006002,
    CR0_READ_SHADOW                 = 0x00006004,
    CR4_READ_SHADOW                 = 0x00006006,
    CR3_TARGET_VALUE0               = 0x00006008,
#define CR3_TARGET_VALUE(n) (CR3_TARGET_VALUE0 + (n) * 2) /* n < CR3_TARGET_COUNT */
    EXIT_QUALIFICATION              = 0x00006400,
    GUEST_LINEAR_ADDRESS            = 0x0000640a,
    GUEST_CR0                       = 0x00006800,
    GUEST_CR3                       = 0x00006802,
    GUEST_CR4                       = 0x00006804,
#define GUEST_SEG_BASE(sel) (GUEST_ES_BASE + (sel) * 2) /* ES ... GS */
    GUEST_ES_BASE                   = 0x00006806,
    GUEST_CS_BASE                   = 0x00006808,
    GUEST_SS_BASE                   = 0x0000680a,
    GUEST_DS_BASE                   = 0x0000680c,
    GUEST_FS_BASE                   = 0x0000680e,
    GUEST_GS_BASE                   = 0x00006810,
    GUEST_LDTR_BASE                 = 0x00006812,
    GUEST_TR_BASE                   = 0x00006814,
    GUEST_GDTR_BASE                 = 0x00006816,
    GUEST_IDTR_BASE                 = 0x00006818,
    GUEST_DR7                       = 0x0000681a,
    GUEST_RSP                       = 0x0000681c,
    GUEST_RIP                       = 0x0000681e,
    GUEST_RFLAGS                    = 0x00006820,
    GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
    GUEST_SYSENTER_ESP              = 0x00006824,
    GUEST_SYSENTER_EIP              = 0x00006826,
    HOST_CR0                        = 0x00006c00,
    HOST_CR3                        = 0x00006c02,
    HOST_CR4                        = 0x00006c04,
    HOST_FS_BASE                    = 0x00006c06,
    HOST_GS_BASE                    = 0x00006c08,
    HOST_TR_BASE                    = 0x00006c0a,
    HOST_GDTR_BASE                  = 0x00006c0c,
    HOST_IDTR_BASE                  = 0x00006c0e,
    HOST_SYSENTER_ESP               = 0x00006c10,
    HOST_SYSENTER_EIP               = 0x00006c12,
    HOST_RSP                        = 0x00006c14,
    HOST_RIP                        = 0x00006c16,
};
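
/*
 * The GUEST_SEG_*() helpers in the enum above are indexed by enum x86_segment
 * (defined elsewhere), relying on x86_seg_es .. x86_seg_gs being 0 .. 5 and
 * on consecutive per-segment encodings differing by 2.  For example (sketch,
 * using __vmread() from vmx.h):
 *
 *     unsigned long base, limit;
 *
 *     __vmread(GUEST_SEG_BASE(x86_seg_cs), &base);
 *     __vmread(GUEST_SEG_LIMIT(x86_seg_cs), &limit);
 */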

#define VMCS_VPID_WIDTH 16

#define VMX_GUEST_MSR 0
#define VMX_HOST_MSR  1

/* VM Instruction error numbers */
enum vmx_insn_errno
{
    VMX_INSN_SUCCEED                       = 0,
    VMX_INSN_VMCLEAR_INVALID_PHYADDR       = 2,
    VMX_INSN_VMLAUNCH_NONCLEAR_VMCS        = 4,
    VMX_INSN_VMRESUME_NONLAUNCHED_VMCS     = 5,
    VMX_INSN_INVALID_CONTROL_STATE         = 7,
    VMX_INSN_INVALID_HOST_STATE            = 8,
    VMX_INSN_VMPTRLD_INVALID_PHYADDR       = 9,
    VMX_INSN_VMPTRLD_INCORRECT_VMCS_ID     = 11,
    VMX_INSN_UNSUPPORTED_VMCS_COMPONENT    = 12,
    VMX_INSN_VMXON_IN_VMX_ROOT             = 15,
    VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS     = 26,
    VMX_INSN_FAIL_INVALID                  = ~0,
};

enum vmx_msr_intercept_type {
    VMX_MSR_R  = 1,
    VMX_MSR_W  = 2,
    VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
};

void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
                             enum vmx_msr_intercept_type type);
void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
                           enum vmx_msr_intercept_type type);
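
/*
 * Example (sketch): let the guest access MSR_IA32_SYSENTER_CS directly for
 * reads while still intercepting writes.  The MSR is used purely as an
 * illustration here:
 *
 *     vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, VMX_MSR_R);
 *     vmx_set_msr_intercept(v, MSR_IA32_SYSENTER_CS, VMX_MSR_W);
 */
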
int vmx_read_guest_msr(u32 msr, u64 *val);
int vmx_write_guest_msr(u32 msr, u64 val);
struct vmx_msr_entry *vmx_find_msr(u32 msr, int type);
int vmx_add_msr(u32 msr, int type);
void vmx_vmcs_switch(paddr_t from, paddr_t to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap,
                            unsigned int msr, bool is_write) __nonnull(1);
void virtual_vmcs_enter(const struct vcpu *);
void virtual_vmcs_exit(const struct vcpu *);
u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding);
enum vmx_insn_errno virtual_vmcs_vmread_safe(const struct vcpu *v,
                                             u32 vmcs_encoding, u64 *val);
void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val);
enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v,
                                              u32 vmcs_encoding, u64 val);

static inline int vmx_add_guest_msr(u32 msr)
{
    return vmx_add_msr(msr, VMX_GUEST_MSR);
}
static inline int vmx_add_host_load_msr(u32 msr)
{
    return vmx_add_msr(msr, VMX_HOST_MSR);
}
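
/*
 * Typical usage (sketch): register an MSR in the guest load/save area once,
 * then manipulate its saved value without touching the physical MSR.
 * MSR_IA32_LASTINTFROMIP is used purely as an illustration:
 *
 *     int rc = vmx_add_guest_msr(MSR_IA32_LASTINTFROMIP);
 *
 *     if ( rc == 0 )
 *         rc = vmx_write_guest_msr(MSR_IA32_LASTINTFROMIP, 0);
 */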

DECLARE_PER_CPU(bool_t, vmxon);

bool_t vmx_vcpu_pml_enabled(const struct vcpu *v);
int vmx_vcpu_enable_pml(struct vcpu *v);
void vmx_vcpu_disable_pml(struct vcpu *v);
void vmx_vcpu_flush_pml_buffer(struct vcpu *v);
bool_t vmx_domain_pml_enabled(const struct domain *d);
int vmx_domain_enable_pml(struct domain *d);
void vmx_domain_disable_pml(struct domain *d);
void vmx_domain_flush_pml_buffers(struct domain *d);

void vmx_domain_update_eptp(struct domain *d);

#endif /* __ASM_X86_HVM_VMX_VMCS_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */