1 /*
2  * vmx.h: VMX Architecture related definitions
3  * Copyright (c) 2004, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; If not, see <http://www.gnu.org/licenses/>.
16  *
17  */
18 #ifndef __ASM_X86_HVM_VMX_VMX_H__
19 #define __ASM_X86_HVM_VMX_VMX_H__
20 
21 #include <xen/sched.h>
22 #include <asm/types.h>
23 #include <asm/regs.h>
24 #include <asm/asm_defns.h>
25 #include <asm/processor.h>
26 #include <asm/i387.h>
27 #include <asm/hvm/support.h>
28 #include <asm/hvm/trace.h>
29 #include <asm/hvm/vmx/vmcs.h>
30 
/*
 * EPT (Extended Page Table) entry, viewable either as the raw 64-bit value
 * (epte) or as individual fields.  Bits 0-11 and 52-63 include both
 * architectural bits and software-available bits used by Xen and by
 * shared EPT/VT-d page tables.
 */
typedef union {
    struct {
        u64 r       :   1,  /* bit 0 - Read permission */
        w           :   1,  /* bit 1 - Write permission */
        x           :   1,  /* bit 2 - Execute permission */
        emt         :   3,  /* bits 5:3 - EPT Memory type */
        ipat        :   1,  /* bit 6 - Ignore PAT memory type */
        sp          :   1,  /* bit 7 - Is this a superpage? */
        a           :   1,  /* bit 8 - Access bit */
        d           :   1,  /* bit 9 - Dirty bit */
        recalc      :   1,  /* bit 10 - Software available 1 */
        snp         :   1,  /* bit 11 - VT-d snoop control in shared
                               EPT/VT-d usage */
        mfn         :   40, /* bits 51:12 - Machine physical frame number */
        sa_p2mt     :   6,  /* bits 57:52 - Software available 2 */
        access      :   4,  /* bits 61:58 - p2m_access_t */
        tm          :   1,  /* bit 62 - VT-d transient-mapping hint in
                               shared EPT/VT-d usage */
        suppress_ve :   1;  /* bit 63 - suppress #VE */
    };
    u64 epte;               /* Raw view of the whole entry. */
} ept_entry_t;
53 
/* Result of an EPT table walk: one entry captured per level. */
typedef struct {
    /* lxe[0] saves the final (leaf) result of the walk. */
    ept_entry_t lxe[5];
} ept_walk_t;
58 
/*
 * EPT access permissions, encoded as an R/W/X bitmask
 * (bit 0 = read, bit 1 = write, bit 2 = execute).
 */
typedef enum {
    ept_access_n     = 0, /* No access permissions allowed */
    ept_access_r     = 1, /* Read only */
    ept_access_w     = 2, /* Write only */
    ept_access_rw    = 3, /* Read & Write */
    ept_access_x     = 4, /* Exec Only */
    ept_access_rx    = 5, /* Read & Exec */
    ept_access_wx    = 6, /* Write & Exec */
    ept_access_all   = 7, /* Full permissions */
} ept_access_t;
69 
/* Masks and shifts over the raw 64-bit EPT entry (see ept_entry_t above). */
#define EPT_TABLE_ORDER         9
#define EPTE_SUPER_PAGE_MASK    0x80
#define EPTE_MFN_MASK           0xffffffffff000ULL
#define EPTE_AVAIL1_MASK        0xF00
#define EPTE_EMT_MASK           0x38
#define EPTE_IGMT_MASK          0x40
#define EPTE_AVAIL1_SHIFT       8
#define EPTE_EMT_SHIFT          3
#define EPTE_IGMT_SHIFT         6
#define EPTE_RWX_MASK           0x7
#define EPTE_FLAG_MASK          0x7f

/* EPT memory types (same numeric encoding as the IA-32 PAT/MTRR types). */
#define EPT_EMT_UC              0
#define EPT_EMT_WC              1
#define EPT_EMT_RSV0            2
#define EPT_EMT_RSV1            3
#define EPT_EMT_WT              4
#define EPT_EMT_WP              5
#define EPT_EMT_WB              6
#define EPT_EMT_RSV2            7

/* Posted-interrupt notification destination (NDST) mask, xAPIC mode. */
#define PI_xAPIC_NDST_MASK      0xFF00
92 
/* VMX entry/exit handlers and per-vCPU control update helpers. */
void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_asm_do_vmentry(void);
void vmx_intr_assist(void);
void noreturn vmx_do_resume(struct vcpu *);
void vmx_vlapic_msr_changed(struct vcpu *v);
void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
void vmx_realmode(struct cpu_user_regs *regs);
void vmx_update_debug_state(struct vcpu *v);
void vmx_update_exception_bitmap(struct vcpu *v);
void vmx_update_cpu_exec_control(struct vcpu *v);
void vmx_update_secondary_exec_control(struct vcpu *v);
104 
105 #define POSTED_INTR_ON  0
106 #define POSTED_INTR_SN  1
pi_test_and_set_pir(uint8_t vector,struct pi_desc * pi_desc)107 static inline int pi_test_and_set_pir(uint8_t vector, struct pi_desc *pi_desc)
108 {
109     return test_and_set_bit(vector, pi_desc->pir);
110 }
111 
/* Non-atomically check whether @vector is currently set in the PIR. */
static inline int pi_test_pir(uint8_t vector, const struct pi_desc *pi_desc)
{
    int pending = test_bit(vector, pi_desc->pir);

    return pending;
}
116 
pi_test_and_set_on(struct pi_desc * pi_desc)117 static inline int pi_test_and_set_on(struct pi_desc *pi_desc)
118 {
119     return test_and_set_bit(POSTED_INTR_ON, &pi_desc->control);
120 }
121 
/* Atomically set the ON (outstanding notification) bit. */
static inline void pi_set_on(struct pi_desc *pi_desc)
{
    set_bit(POSTED_INTR_ON, &pi_desc->control);
}
126 
pi_test_and_clear_on(struct pi_desc * pi_desc)127 static inline int pi_test_and_clear_on(struct pi_desc *pi_desc)
128 {
129     return test_and_clear_bit(POSTED_INTR_ON, &pi_desc->control);
130 }
131 
/* Non-atomic read of the ON bit via the descriptor's bitfield. */
static inline int pi_test_on(struct pi_desc *pi_desc)
{
    return pi_desc->on;
}
136 
/*
 * Atomically read and clear one word ("group") of the PIR, returning the
 * pending-interrupt bits that were set in it.
 */
static inline unsigned long pi_get_pir(struct pi_desc *pi_desc, int group)
{
    return xchg(&pi_desc->pir[group], 0);
}
141 
/* Non-atomic read of the SN (suppress notification) bit. */
static inline int pi_test_sn(struct pi_desc *pi_desc)
{
    return pi_desc->sn;
}
146 
/* Atomically set the SN (suppress notification) bit. */
static inline void pi_set_sn(struct pi_desc *pi_desc)
{
    set_bit(POSTED_INTR_SN, &pi_desc->control);
}
151 
/* Atomically clear the SN (suppress notification) bit. */
static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
    clear_bit(POSTED_INTR_SN, &pi_desc->control);
}
156 
/*
 * Exit Reasons
 *
 * Basic exit reason numbers as found in the VM-exit reason VMCS field.
 * Gaps in the numbering (35, 38, 42, 57, 60, 61, ...) are reasons this
 * code does not define.
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1
#define EXIT_REASON_TRIPLE_FAULT        2
#define EXIT_REASON_INIT                3
#define EXIT_REASON_SIPI                4
#define EXIT_REASON_IO_SMI              5
#define EXIT_REASON_OTHER_SMI           6
#define EXIT_REASON_PENDING_VIRT_INTR   7
#define EXIT_REASON_PENDING_VIRT_NMI    8
#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_GETSEC              11
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVD                13
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_RSM                 17
#define EXIT_REASON_VMCALL              18
#define EXIT_REASON_VMCLEAR             19
#define EXIT_REASON_VMLAUNCH            20
#define EXIT_REASON_VMPTRLD             21
#define EXIT_REASON_VMPTRST             22
#define EXIT_REASON_VMREAD              23
#define EXIT_REASON_VMRESUME            24
#define EXIT_REASON_VMWRITE             25
#define EXIT_REASON_VMXOFF              26
#define EXIT_REASON_VMXON               27
#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_INVALID_GUEST_STATE 33
#define EXIT_REASON_MSR_LOADING         34
#define EXIT_REASON_MWAIT_INSTRUCTION   36
#define EXIT_REASON_MONITOR_TRAP_FLAG   37
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION   40
#define EXIT_REASON_MCE_DURING_VMENTRY  41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS         44
#define EXIT_REASON_EOI_INDUCED         45
#define EXIT_REASON_ACCESS_GDTR_OR_IDTR 46
#define EXIT_REASON_ACCESS_LDTR_OR_TR   47
#define EXIT_REASON_EPT_VIOLATION       48
#define EXIT_REASON_EPT_MISCONFIG       49
#define EXIT_REASON_INVEPT              50
#define EXIT_REASON_RDTSCP              51
#define EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED 52
#define EXIT_REASON_INVVPID             53
#define EXIT_REASON_WBINVD              54
#define EXIT_REASON_XSETBV              55
#define EXIT_REASON_APIC_WRITE          56
#define EXIT_REASON_INVPCID             58
#define EXIT_REASON_VMFUNC              59
#define EXIT_REASON_PML_FULL            62
#define EXIT_REASON_XSAVES              63
#define EXIT_REASON_XRSTORS             64

/*
 * Interruption-information format
 *
 * Shared layout of the VM-entry/VM-exit interruption-information fields.
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x1000          /* 12 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
#define INTR_INFO_RESVD_BITS_MASK       0x7ffff000
231 
/*
 * Exit Qualifications for MOV for Control Register Access
 *
 * Field extractors over the exit qualification reported for
 * EXIT_REASON_CR_ACCESS.
 */
 /* 3:0 - control register number (CRn) */
#define VMX_CONTROL_REG_ACCESS_NUM(eq)  ((eq) & 0xf)
 /* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
#define VMX_CONTROL_REG_ACCESS_TYPE(eq) (((eq) >> 4) & 0x3)
# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   0
# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR 1
# define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        2
# define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        3
 /* 11:8 - general purpose register operand */
#define VMX_CONTROL_REG_ACCESS_GPR(eq)  (((eq) >> 8) & 0xf)
 /* 31:16 - LMSW source data */
#define VMX_CONTROL_REG_ACCESS_DATA(eq)  ((uint32_t)(eq) >> 16)
247 
/*
 * Access Rights
 *
 * Bits of the segment access-rights fields as held in the VMCS.
 */
#define X86_SEG_AR_SEG_TYPE     0xf        /* 3:0, segment type */
#define X86_SEG_AR_DESC_TYPE    (1u << 4)  /* 4, descriptor type */
#define X86_SEG_AR_DPL          0x60       /* 6:5, descriptor privilege level */
#define X86_SEG_AR_SEG_PRESENT  (1u << 7)  /* 7, segment present */
#define X86_SEG_AR_AVL          (1u << 12) /* 12, available for system software */
#define X86_SEG_AR_CS_LM_ACTIVE (1u << 13) /* 13, long mode active (CS only) */
#define X86_SEG_AR_DEF_OP_SIZE  (1u << 14) /* 14, default operation size */
#define X86_SEG_AR_GRANULARITY  (1u << 15) /* 15, granularity */
#define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */

/*
 * Raw opcode byte sequences for the VMX instructions, used when the
 * assembler does not know the mnemonics (see GAS_VMX_OP below).
 */
#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define INVEPT_OPCODE   ".byte 0x66,0x0f,0x38,0x80\n"   /* m128,r64/32 */
#define INVVPID_OPCODE  ".byte 0x66,0x0f,0x38,0x81\n"   /* m128,r64/32 */
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"

/* ModRM bytes pairing the above opcodes with their register operands. */
#define MODRM_EAX_08    ".byte 0x08\n" /* ECX, [EAX] */
#define MODRM_EAX_06    ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n" /* EAX, ECX */
278 
/* Vector used for posted-interrupt notifications. */
extern uint8_t posted_intr_vector;

/*
 * EPT capability predicates over vmx_ept_vpid_cap (presumably the cached
 * IA32_VMX_EPT_VPID_CAP MSR value; see vmcs.h for the bit definitions).
 */
#define cpu_has_vmx_ept_exec_only_supported        \
    (vmx_ept_vpid_cap & VMX_EPT_EXEC_ONLY_SUPPORTED)

#define cpu_has_vmx_ept_wl4_supported           \
    (vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED)
#define cpu_has_vmx_ept_mt_uc (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_UC)
#define cpu_has_vmx_ept_mt_wb (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB)
#define cpu_has_vmx_ept_2mb   (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB)
#define cpu_has_vmx_ept_1gb   (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_1GB)
#define cpu_has_vmx_ept_ad    (vmx_ept_vpid_cap & VMX_EPT_AD_BIT)
#define cpu_has_vmx_ept_invept_single_context   \
    (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT)
293 
/*
 * Superpage-capability bit positions within an EPT capability word, and
 * predicates testing them.  The macro argument is fully parenthesised so
 * that compound expressions (e.g. ept_has_2mb(a | b)) evaluate correctly;
 * without the parentheses, operators of lower precedence than ">>" would
 * bind wrongly.
 */
#define EPT_2MB_SHIFT     16
#define EPT_1GB_SHIFT     17
#define ept_has_2mb(c)    (((c) >> EPT_2MB_SHIFT) & 1)
#define ept_has_1gb(c)    (((c) >> EPT_1GB_SHIFT) & 1)

/* INVEPT instruction types. */
#define INVEPT_SINGLE_CONTEXT   1
#define INVEPT_ALL_CONTEXT      2
301 
/* VPID/INVVPID capability predicates over vmx_ept_vpid_cap. */
#define cpu_has_vmx_vpid_invvpid_individual_addr                    \
    (vmx_ept_vpid_cap & VMX_VPID_INVVPID_INDIVIDUAL_ADDR)
#define cpu_has_vmx_vpid_invvpid_single_context                     \
    (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT)
#define cpu_has_vmx_vpid_invvpid_single_context_retaining_global    \
    (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL)

/* INVVPID instruction types. */
#define INVVPID_INDIVIDUAL_ADDR                 0
#define INVVPID_SINGLE_CONTEXT                  1
#define INVVPID_ALL_CONTEXT                     2
#define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3

/* Select VMX mnemonics vs raw opcode bytes based on assembler support. */
#ifdef HAVE_GAS_VMX
# define GAS_VMX_OP(yes, no) yes
#else
# define GAS_VMX_OP(yes, no) no
#endif
319 
/*
 * Make the VMCS at machine physical address @addr current.  Failure
 * (CF or ZF set after the instruction) is fatal: BUG().
 */
static always_inline void __vmptrld(u64 addr)
{
    asm volatile (
#ifdef HAVE_GAS_VMX
                   "vmptrld %0\n"
#else
                   VMPTRLD_OPCODE MODRM_EAX_06
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, vmptrld)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
                   :
#ifdef HAVE_GAS_VMX
                   : "m" (addr),
#else
                   : "a" (&addr),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                   : "memory");
}
341 
/*
 * VMCLEAR the VMCS at machine physical address @addr, making it inactive
 * and not-current.  Failure (CF or ZF set) is fatal: BUG().
 */
static always_inline void __vmpclear(u64 addr)
{
    asm volatile (
#ifdef HAVE_GAS_VMX
                   "vmclear %0\n"
#else
                   VMCLEAR_OPCODE MODRM_EAX_06
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, vmclear)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
                   :
#ifdef HAVE_GAS_VMX
                   : "m" (addr),
#else
                   : "a" (&addr),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                   : "memory");
}
363 
/*
 * Read VMCS field @field into *@value.  Failure (CF or ZF set) is fatal:
 * BUG().  Use vmread_safe() when failure must be tolerated.
 */
static always_inline void __vmread(unsigned long field, unsigned long *value)
{
    asm volatile (
#ifdef HAVE_GAS_VMX
                   "vmread %1, %0\n\t"
#else
                   VMREAD_OPCODE MODRM_EAX_ECX
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, vmread)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
#ifdef HAVE_GAS_VMX
                   : "=rm" (*value)
                   : "r" (field),
#else
                   : "=c" (*value)
                   : "a" (field),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
        );
}
386 
/*
 * Write @value to VMCS field @field.  Failure (CF or ZF set) is fatal:
 * BUG().  Use vmwrite_safe() when failure must be tolerated.
 */
static always_inline void __vmwrite(unsigned long field, unsigned long value)
{
    asm volatile (
#ifdef HAVE_GAS_VMX
                   "vmwrite %1, %0\n"
#else
                   VMWRITE_OPCODE MODRM_EAX_ECX
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, vmwrite)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
                   :
#ifdef HAVE_GAS_VMX
                   : "r" (field) , "rm" (value),
#else
                   : "a" (field) , "c" (value),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
        );
}
408 
/*
 * VMREAD with error reporting instead of BUG().  Returns VMX_INSN_SUCCEED
 * and fills *@value on success, VMX_INSN_FAIL_INVALID for VMfailInvalid
 * (CF set), or the VM-instruction error number for VMfailValid (ZF set).
 */
static inline enum vmx_insn_errno vmread_safe(unsigned long field,
                                              unsigned long *value)
{
    unsigned long ret = VMX_INSN_SUCCEED;
    bool fail_invalid, fail_valid;

    asm volatile ( GAS_VMX_OP("vmread %[field], %[value]\n\t",
                              VMREAD_OPCODE MODRM_EAX_ECX)
                   /* Without flag-output constraints, capture CF/ZF via setcc. */
                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid),
                     [value] GAS_VMX_OP("=rm", "=c") (*value)
                   : [field] GAS_VMX_OP("r", "a") (field));

    if ( unlikely(fail_invalid) )
        ret = VMX_INSN_FAIL_INVALID;
    else if ( unlikely(fail_valid) )
        /* VMfailValid: fetch the error code from the current VMCS. */
        __vmread(VM_INSTRUCTION_ERROR, &ret);

    return ret;
}
431 
/*
 * VMWRITE with error reporting instead of BUG().  Returns VMX_INSN_SUCCEED
 * on success, VMX_INSN_FAIL_INVALID for VMfailInvalid (CF set), or the
 * VM-instruction error number for VMfailValid (ZF set).
 */
static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
                                               unsigned long value)
{
    unsigned long ret = VMX_INSN_SUCCEED;
    bool fail_invalid, fail_valid;

    asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
                              VMWRITE_OPCODE MODRM_EAX_ECX)
                   /* Without flag-output constraints, capture CF/ZF via setcc. */
                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
                   : [field] GAS_VMX_OP("r", "a") (field),
                     [value] GAS_VMX_OP("rm", "c") (value));

    if ( unlikely(fail_invalid) )
        ret = VMX_INSN_FAIL_INVALID;
    else if ( unlikely(fail_valid) )
        /* VMfailValid: fetch the error code from the current VMCS. */
        __vmread(VM_INSTRUCTION_ERROR, &ret);

    return ret;
}
454 
/*
 * Invalidate EPT-derived translations.  @type selects single-context
 * (scoped by @eptp) or all-context invalidation; @gpa is part of the
 * architectural descriptor but unused by current invalidation types.
 * Failure (CF or ZF set) is fatal: BUG().
 */
static always_inline void __invept(unsigned long type, u64 eptp, u64 gpa)
{
    struct {
        u64 eptp, gpa;
    } operand = {eptp, gpa};

    /*
     * If single context invalidation is not supported, we escalate to
     * use all context invalidation.
     */
    if ( (type == INVEPT_SINGLE_CONTEXT) &&
         !cpu_has_vmx_ept_invept_single_context )
        type = INVEPT_ALL_CONTEXT;

    asm volatile (
#ifdef HAVE_GAS_EPT
                   "invept %0, %1\n"
#else
                   INVEPT_OPCODE MODRM_EAX_08
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, invept)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
                   :
#ifdef HAVE_GAS_EPT
                   : "m" (operand), "r" (type),
#else
                   : "a" (&operand), "c" (type),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                   : "memory" );
}
488 
/*
 * Invalidate VPID-tagged TLB entries.  @type selects the invalidation
 * scope; @vpid and @gva fill the architectural 128-bit descriptor.
 * Failure (CF or ZF set) is fatal: BUG(), except that a fault on the
 * instruction itself is tolerated via the exception table (see below).
 */
static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva)
{
    struct __packed {
        u64 vpid:16;
        u64 rsvd:48;
        u64 gva;
    }  operand = {vpid, 0, gva};

    /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. */
    asm volatile ( "1: "
#ifdef HAVE_GAS_EPT
                   "invvpid %0, %1\n"
#else
                   INVVPID_OPCODE MODRM_EAX_08
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, invvpid)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION "\n"
                   "2:"
                   _ASM_EXTABLE(1b, 2b)
                   :
#ifdef HAVE_GAS_EPT
                   : "m" (operand), "r" (type),
#else
                   : "a" (&operand), "c" (type),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                   : "memory" );
}
519 
ept_sync_all(void)520 static inline void ept_sync_all(void)
521 {
522     __invept(INVEPT_ALL_CONTEXT, 0, 0);
523 }
524 
525 void ept_sync_domain(struct p2m_domain *p2m);
526 
vpid_sync_vcpu_gva(struct vcpu * v,unsigned long gva)527 static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
528 {
529     int type = INVVPID_INDIVIDUAL_ADDR;
530 
531     /*
532      * If individual address invalidation is not supported, we escalate to
533      * use single context invalidation.
534      */
535     if ( likely(cpu_has_vmx_vpid_invvpid_individual_addr) )
536         goto execute_invvpid;
537 
538     type = INVVPID_SINGLE_CONTEXT;
539 
540     /*
541      * If single context invalidation is not supported, we escalate to
542      * use all context invalidation.
543      */
544     if ( !cpu_has_vmx_vpid_invvpid_single_context )
545         type = INVVPID_ALL_CONTEXT;
546 
547 execute_invvpid:
548     __invvpid(type, v->arch.hvm_vcpu.n1asid.asid, (u64)gva);
549 }
550 
vpid_sync_all(void)551 static inline void vpid_sync_all(void)
552 {
553     __invvpid(INVVPID_ALL_CONTEXT, 0, 0);
554 }
555 
/* Leave VMX root operation on this CPU. */
static inline void __vmxoff(void)
{
    asm volatile (
        VMXOFF_OPCODE
        : : : "memory" );
}
562 
/*
 * Enter VMX root operation, with @addr the machine physical address of
 * the VMXON region.  Returns 0 on success, -1 if the instruction fails
 * (CF==1 or ZF==1), or -2 if it faults (handled via the fixup section).
 */
static inline int __vmxon(u64 addr)
{
    int rc;

    asm volatile (
        "1: " VMXON_OPCODE MODRM_EAX_06 "\n"
        "   setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "3: sub $2,%0 ; jmp 2b\n"    /* #UD or #GP --> rc = -2 */
        ".previous\n"
        _ASM_EXTABLE(1b, 3b)
        : "=q" (rc)
        : "0" (0), "a" (&addr)
        : "memory");

    return rc;
}
581 
/* Guest state queries. */
int vmx_guest_x86_mode(struct vcpu *v);
unsigned int vmx_get_cpl(void);

/* Event injection. */
void vmx_inject_extint(int trap, uint8_t source);
void vmx_inject_nmi(void);

/* EPT/p2m lifecycle. */
int ept_p2m_init(struct p2m_domain *p2m);
void ept_p2m_uninit(struct p2m_domain *p2m);

void ept_walk_table(struct domain *d, unsigned long gfn);
bool_t ept_handle_misconfig(uint64_t gpa);
void setup_ept_dump(void);
void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
/* Locate an alternate p2m by its EPTP */
unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);

void update_guest_eip(void);

int alloc_p2m_hap_data(struct p2m_domain *p2m);
void free_p2m_hap_data(struct p2m_domain *p2m);
void p2m_init_hap_data(struct p2m_domain *p2m);

/* Posted-interrupt (PI) management helpers. */
void vmx_pi_per_cpu_init(unsigned int cpu);
void vmx_pi_desc_fixup(unsigned int cpu);

void vmx_pi_hooks_assign(struct domain *d);
void vmx_pi_hooks_deassign(struct domain *d);

#define APIC_INVALID_DEST           0xffffffff
611 
/* EPT violation qualifications definitions */
typedef union ept_qual {
    unsigned long raw;
    struct {
        /* Bits 0-2: type of the access that faulted. */
        bool read:1, write:1, fetch:1,
            /* Bits 3-5: effective permissions of the translation. */
            eff_read:1, eff_write:1, eff_exec:1, /* eff_user_exec */:1,
            gla_valid:1,
            gla_fault:1; /* Valid iff gla_valid. */
        unsigned long /* pad */:55;
    };
} __transparent__ ept_qual_t;
623 
#define EPT_L4_PAGETABLE_SHIFT      39
#define EPT_PAGETABLE_ENTRIES       512

/*
 * #VE information page, filled by hardware on a virtualization exception
 * (layout matches the architectural #VE information area).
 */
typedef struct {
    u32 exit_reason;
    u32 semaphore;          /* Non-zero suppresses further #VE delivery. */
    u64 exit_qualification;
    u64 gla;
    u64 gpa;
    u16 eptp_index;
} ve_info_t;
636 
/* VM-Exit instruction info for LIDT, LGDT, SIDT, SGDT */
typedef union idt_or_gdt_instr_info {
    unsigned long raw;
    struct {
        unsigned long scaling   :2,  /* bits 0:1 - Scaling */
                                :5,  /* bits 6:2 - Undefined */
        addr_size               :3,  /* bits 9:7 - Address size */
                                :1,  /* bit 10 - Cleared to 0 */
        operand_size            :1,  /* bit 11 - Operand size */
                                :3,  /* bits 14:12 - Undefined */
        segment_reg             :3,  /* bits 17:15 - Segment register */
        index_reg               :4,  /* bits 21:18 - Index register */
        index_reg_invalid       :1,  /* bit 22 - Index register invalid */
        base_reg                :4,  /* bits 26:23 - Base register */
        base_reg_invalid        :1,  /* bit 27 - Base register invalid */
        instr_identity          :1,  /* bit 28 - 0:GDT, 1:IDT */
        instr_write             :1,  /* bit 29 - 0:store, 1:load */
                                :34; /* bits 30:63 - Undefined */
    };
} idt_or_gdt_instr_info_t;
657 
/* VM-Exit instruction info for LLDT, LTR, SLDT, STR */
typedef union ldt_or_tr_instr_info {
    unsigned long raw;
    struct {
        unsigned long scaling   :2,  /* bits 0:1 - Scaling */
                                :1,  /* bit 2 - Undefined */
        reg1                    :4,  /* bits 6:3 - Reg1 */
        addr_size               :3,  /* bits 9:7 - Address size */
        mem_reg                 :1,  /* bit 10 - Mem/Reg */
                                :4,  /* bits 14:11 - Undefined */
        segment_reg             :3,  /* bits 17:15 - Segment register */
        index_reg               :4,  /* bits 21:18 - Index register */
        index_reg_invalid       :1,  /* bit 22 - Index register invalid */
        base_reg                :4,  /* bits 26:23 - Base register */
        base_reg_invalid        :1,  /* bit 27 - Base register invalid */
        instr_identity          :1,  /* bit 28 - 0:LDT, 1:TR */
        instr_write             :1,  /* bit 29 - 0:store, 1:load */
                                :34; /* bits 30:63 - Undefined (34 bits
                                        starting at bit 30, matching
                                        idt_or_gdt_instr_info above) */
    };
} ldt_or_tr_instr_info_t;
678 
679 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
680