/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * This file contains pure VMX operations.
 */

#include <types.h>
#include <asm/msr.h>
#include <asm/per_cpu.h>
#include <asm/pgtable.h>
#include <asm/vmx.h>

/**
 * @pre addr != NULL && addr is 4KB-aligned
 * rev[31:0]: the 32 bits located at the VMXON region's physical address
 * @pre rev[30:0] == VMCS revision && rev[31] == 0
 */
static inline void exec_vmxon(void *addr)
{
	/* Turn VMX on. The pre-conditions rule out VMfailInvalid, and
	 * there is no need to check RFLAGS here since any outcome other
	 * than VMsuccess raises #GP or #UD. SDM 30.3
	 */
	asm volatile (
		"vmxon (%%rax)\n"
		:
		: "a"(addr)
		: "cc", "memory");
}

/* Per-cpu data holds the vmxon_region for each pcpu. The region is
 * reused when a pcpu is started again after it was taken down,
 * e.g. across S3 enter/exit.
 * Only run on the current pcpu.
 */
void vmx_on(void)
{
	uint64_t tmp64;
	uint32_t tmp32;
	void *vmxon_region_va = (void *)get_cpu_var(vmxon_region);
	uint64_t vmxon_region_pa;

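	/* Bits 30:0 of IA32_VMX_BASIC hold the VMCS revision identifier,
	 * and bit 31 of the MSR's low dword reads as 0, so truncating the
	 * MSR to 32 bits meets exec_vmxon()'s rev[31] == 0 pre-condition.
	 */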
	/* Initialize vmxon page with revision id from IA32 VMX BASIC MSR */
	tmp32 = (uint32_t)msr_read(MSR_IA32_VMX_BASIC);
	(void)memcpy_s(vmxon_region_va, 4U, (void *)&tmp32, 4U);

	/* Turn on CR0.NE and CR4.VMXE: CR4.VMXE must be set before VMXON
	 * (the instruction raises #UD otherwise), and CR0.NE is a fixed-1
	 * bit while in VMX operation.
	 */
	CPU_CR_READ(cr0, &tmp64);
	CPU_CR_WRITE(cr0, tmp64 | CR0_NE);
	CPU_CR_READ(cr4, &tmp64);
	CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);

	/* Read the IA32_FEATURE_CONTROL MSR */
	tmp64 = msr_read(MSR_IA32_FEATURE_CONTROL);

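	/* VMXON outside SMX operation requires IA32_FEATURE_CONTROL to be
	 * locked with the VMX-outside-SMX enable bit set; otherwise VMXON
	 * raises #GP. If firmware left the MSR unlocked, enable and lock
	 * it here.
	 */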
	/* Check if feature control is locked */
	if ((tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) == 0U) {
		/* Lock and enable VMX support */
		tmp64 |= (MSR_IA32_FEATURE_CONTROL_LOCK |
			  MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX);
		msr_write(MSR_IA32_FEATURE_CONTROL, tmp64);
	}

	/* Turn ON VMX: the VMXON operand is a memory location holding the
	 * 64-bit physical address of the VMXON region, so the address of
	 * the local vmxon_region_pa variable is passed here.
	 */
	vmxon_region_pa = hva2hpa(vmxon_region_va);
	exec_vmxon(&vmxon_region_pa);
}
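/**
 * Execute VMXOFF to leave VMX operation on the current pcpu. Only
 * valid after a prior successful VMXON.
 */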
static inline void exec_vmxoff(void)
{
	asm volatile ("vmxoff" : : : "memory");
}

/**
 * @pre addr != NULL && addr is 4KB-aligned
 * @pre addr != VMXON pointer
 */
void exec_vmclear(void *addr)
{
	/* The pre-conditions rule out VMfail, and there is no need to
	 * check RFLAGS here since any outcome other than VMsuccess
	 * raises #GP or #UD. SDM 30.3
	 */
	asm volatile (
		"vmclear (%%rax)\n"
		:
		: "a"(addr)
		: "cc", "memory");
}

/**
 * @pre addr != NULL && addr is 4KB-aligned
 * @pre addr != VMXON pointer
 */
void exec_vmptrld(void *addr)
{
	/* The pre-conditions rule out VMfail, and there is no need to
	 * check RFLAGS here since any outcome other than VMsuccess
	 * raises #GP or #UD. SDM 30.3
	 */
	asm volatile (
		"vmptrld (%%rax)\n"
		:
		: "a"(addr)
		: "cc", "memory");
}

/*
 * Translate the host virtual address of a VMCS to its physical
 * address and make that VMCS current via VMPTRLD.
 * @pre vmcs_va != NULL
 */
void load_va_vmcs(const uint8_t *vmcs_va)
{
	uint64_t vmcs_pa;

	vmcs_pa = hva2hpa(vmcs_va);
	exec_vmptrld((void *)&vmcs_pa);
}

/*
 * Translate the host virtual address of a VMCS to its physical
 * address and clear that VMCS via VMCLEAR.
 * @pre vmcs_va != NULL
 */
void clear_va_vmcs(const uint8_t *vmcs_va)
{
	uint64_t vmcs_pa;

	vmcs_pa = hva2hpa(vmcs_va);
	exec_vmclear((void *)&vmcs_pa);
}

/**
 * Leave VMX operation, clearing the current VMCS first so that its
 * cached state is flushed back to memory.
 * Only run on the current pcpu.
 */
void vmx_off(void)
{
	void **vmcs_ptr = &get_cpu_var(vmcs_run);

	if (*vmcs_ptr != NULL) {
		clear_va_vmcs(*vmcs_ptr);
		*vmcs_ptr = NULL;
	}

	exec_vmxoff();
}
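
/*
 * Illustrative pairing of the helpers above (a sketch; the actual
 * call sites live in the pcpu bring-up, tear-down and S3 paths
 * elsewhere in the hypervisor):
 *
 *   vmx_on();                enter VMX operation on this pcpu
 *   load_va_vmcs(vmcs_va);   make a VMCS current
 *   ... VM entries and exits ...
 *   vmx_off();               clear the VMCS, leave VMX operation
 */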
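/**
 * Read a natural-width field from the current VMCS. VMREAD takes the
 * field encoding in its register source operand (RDX here) and writes
 * the field's value to the destination operand (RAX here).
 */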
uint64_t exec_vmread64(uint32_t field_full)
{
	uint64_t value;

	asm volatile (
		"vmread %%rdx, %%rax "
		: "=a" (value)
		: "d"(field_full)
		: "cc");

	return value;
}
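/**
 * Narrowing wrappers: VMREAD always returns a full natural-width
 * value, so 32-bit and 16-bit fields are read via exec_vmread64()
 * and truncated.
 */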
uint32_t exec_vmread32(uint32_t field)
{
	uint64_t value;

	value = exec_vmread64(field);

	return (uint32_t)value;
}

uint16_t exec_vmread16(uint32_t field)
{
	uint64_t value;

	value = exec_vmread64(field);

	return (uint16_t)value;
}
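/**
 * Write a natural-width field of the current VMCS. VMWRITE copies the
 * value in its source operand (RAX here) into the VMCS field whose
 * encoding is held in the register operand (RDX here).
 */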
void exec_vmwrite64(uint32_t field_full, uint64_t value)
{
	asm volatile (
		"vmwrite %%rax, %%rdx "
		: : "a" (value), "d"(field_full)
		: "cc");
}
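/**
 * Widening wrappers: 32-bit and 16-bit fields are written through
 * exec_vmwrite64() with the value zero-extended; only the field's
 * defined width is stored in the VMCS.
 */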
void exec_vmwrite32(uint32_t field, uint32_t value)
{
	exec_vmwrite64(field, (uint64_t)value);
}

void exec_vmwrite16(uint32_t field, uint16_t value)
{
	exec_vmwrite64(field, (uint64_t)value);
}