// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/smp.h>

#include "../cpuid.h"
#include "hyperv.h"
#include "nested.h"
#include "vmcs.h"
#include "vmx.h"
#include "trace.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

DEFINE_STATIC_KEY_FALSE(enable_evmcs);

#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
#define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
		{EVMCS1_OFFSET(name), clean_field}

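/*
 * The table below maps VMCS field encodings to eVMCS offsets and clean-field
 * bits; it is indexed by the field encoding rotated left by 6 bits (ROL16),
 * the same indexing scheme used for the vmcs12 field offset table.
 */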
const struct evmcs_field vmcs_field_to_evmcs_1[] = {
	/* 64 bit rw */
	EVMCS1_FIELD(GUEST_RIP, guest_rip,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(GUEST_RSP, guest_rsp,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
	EVMCS1_FIELD(GUEST_RFLAGS, guest_rflags,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
	EVMCS1_FIELD(HOST_IA32_PAT, host_ia32_pat,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_IA32_EFER, host_ia32_efer,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_CR0, host_cr0,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_CR3, host_cr3,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_CR4, host_cr4,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_RIP, host_rip,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(IO_BITMAP_A, io_bitmap_a,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP),
	EVMCS1_FIELD(IO_BITMAP_B, io_bitmap_b,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP),
	EVMCS1_FIELD(MSR_BITMAP, msr_bitmap,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP),
	EVMCS1_FIELD(GUEST_ES_BASE, guest_es_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_CS_BASE, guest_cs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_SS_BASE, guest_ss_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_DS_BASE, guest_ds_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_FS_BASE, guest_fs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GS_BASE, guest_gs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_LDTR_BASE, guest_ldtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_TR_BASE, guest_tr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GDTR_BASE, guest_gdtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_IDTR_BASE, guest_idtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(TSC_OFFSET, tsc_offset,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	EVMCS1_FIELD(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	EVMCS1_FIELD(VMCS_LINK_POINTER, vmcs_link_pointer,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_IA32_PAT, guest_ia32_pat,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_IA32_EFER, guest_ia32_efer,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PDPTR0, guest_pdptr0,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PDPTR1, guest_pdptr1,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PDPTR2, guest_pdptr2,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PDPTR3, guest_pdptr3,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(CR0_READ_SHADOW, cr0_read_shadow,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(CR4_READ_SHADOW, cr4_read_shadow,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(GUEST_CR0, guest_cr0,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(GUEST_CR3, guest_cr3,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(GUEST_CR4, guest_cr4,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(GUEST_DR7, guest_dr7,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(HOST_FS_BASE, host_fs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_GS_BASE, host_gs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_TR_BASE, host_tr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_GDTR_BASE, host_gdtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_IDTR_BASE, host_idtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_RSP, host_rsp,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(EPT_POINTER, ept_pointer,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
	EVMCS1_FIELD(GUEST_BNDCFGS, guest_bndcfgs,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(XSS_EXIT_BITMAP, xss_exit_bitmap,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	EVMCS1_FIELD(ENCLS_EXITING_BITMAP, encls_exiting_bitmap,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	EVMCS1_FIELD(TSC_MULTIPLIER, tsc_multiplier,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	/*
	 * Not used by KVM:
	 *
	 * EVMCS1_FIELD(0x00006828, guest_ia32_s_cet,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	 * EVMCS1_FIELD(0x0000682A, guest_ssp,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
	 * EVMCS1_FIELD(0x0000682C, guest_ia32_int_ssp_table_addr,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	 * EVMCS1_FIELD(0x00002816, guest_ia32_lbr_ctl,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	 * EVMCS1_FIELD(0x00006C18, host_ia32_s_cet,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	 * EVMCS1_FIELD(0x00006C1A, host_ssp,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	 * EVMCS1_FIELD(0x00006C1C, host_ia32_int_ssp_table_addr,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	 */

	/* 64 bit read only */
	EVMCS1_FIELD(GUEST_PHYSICAL_ADDRESS, guest_physical_address,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(EXIT_QUALIFICATION, exit_qualification,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	/*
	 * Not defined in KVM:
	 *
	 * EVMCS1_FIELD(0x00006402, exit_io_instruction_ecx,
	 *		HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
	 * EVMCS1_FIELD(0x00006404, exit_io_instruction_esi,
	 *		HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
	 * EVMCS1_FIELD(0x00006406, exit_io_instruction_esi,
	 *		HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
	 * EVMCS1_FIELD(0x00006408, exit_io_instruction_eip,
	 *		HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
	 */
	EVMCS1_FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),

	/*
	 * No mask defined in the spec as Hyper-V doesn't currently support
	 * these. Future proof by resetting the whole clean field mask on
	 * access.
	 */
	EVMCS1_FIELD(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),

	/* 32 bit rw */
	EVMCS1_FIELD(TPR_THRESHOLD, tpr_threshold,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
	EVMCS1_FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC),
	EVMCS1_FIELD(EXCEPTION_BITMAP, exception_bitmap,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN),
	EVMCS1_FIELD(VM_ENTRY_CONTROLS, vm_entry_controls,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY),
	EVMCS1_FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
	EVMCS1_FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE,
		     vm_entry_exception_error_code,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
	EVMCS1_FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
	EVMCS1_FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
	EVMCS1_FIELD(VM_EXIT_CONTROLS, vm_exit_controls,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
	EVMCS1_FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
	EVMCS1_FIELD(GUEST_ES_LIMIT, guest_es_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_CS_LIMIT, guest_cs_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_SS_LIMIT, guest_ss_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_DS_LIMIT, guest_ds_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_FS_LIMIT, guest_fs_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GS_LIMIT, guest_gs_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_TR_LIMIT, guest_tr_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_ACTIVITY_STATE, guest_activity_state,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),

	/* 32 bit read only */
	EVMCS1_FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VM_EXIT_REASON, vm_exit_reason,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),

	/* No mask defined in the spec (not used) */
	EVMCS1_FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(CR3_TARGET_COUNT, cr3_target_count,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),

	/* 16 bit rw */
	EVMCS1_FIELD(HOST_ES_SELECTOR, host_es_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_CS_SELECTOR, host_cs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_SS_SELECTOR, host_ss_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_DS_SELECTOR, host_ds_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_FS_SELECTOR, host_fs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_GS_SELECTOR, host_gs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_TR_SELECTOR, host_tr_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(GUEST_ES_SELECTOR, guest_es_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_CS_SELECTOR, guest_cs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_SS_SELECTOR, guest_ss_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_DS_SELECTOR, guest_ds_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_FS_SELECTOR, guest_fs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GS_SELECTOR, guest_gs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_TR_SELECTOR, guest_tr_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
};
const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);

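/*
 * Fetch the GPA of the currently loaded eVMCS from the vCPU's VP assist page,
 * or EVMPTR_INVALID if the assist page can't be read or enlightened VM-entry
 * is not enabled.
 */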
u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(kvm_hv_get_assist_page(vcpu)))
		return EVMPTR_INVALID;

	if (unlikely(!hv_vcpu->vp_assist_page.enlighten_vmentry))
		return EVMPTR_INVALID;

	return hv_vcpu->vp_assist_page.current_nested_vmcs;
}

uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
{
	/*
	 * vmcs_version represents the range of supported Enlightened VMCS
	 * versions: the lower 8 bits are the minimal version, the higher 8
	 * bits are the maximum supported version. KVM supports versions from
	 * 1 to KVM_EVMCS_VERSION.
	 *
	 * Note, do not check that Hyper-V is fully enabled in guest CPUID;
	 * this helper is used to _get_ the vCPU's supported CPUID.
	 */
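	/* E.g., with KVM_EVMCS_VERSION == 1 the range [1, 1] encodes as 0x0101. */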
	if (kvm_cpu_cap_get(X86_FEATURE_VMX) &&
	    (!vcpu || to_vmx(vcpu)->nested.enlightened_vmcs_enabled))
		return (KVM_EVMCS_VERSION << 8) | 1;

	return 0;
}

enum evmcs_revision {
	EVMCSv1_LEGACY,
	NR_EVMCS_REVISIONS,
};

enum evmcs_ctrl_type {
	EVMCS_EXIT_CTRLS,
	EVMCS_ENTRY_CTRLS,
	EVMCS_EXEC_CTRL,
	EVMCS_2NDEXEC,
	EVMCS_3RDEXEC,
	EVMCS_PINCTRL,
	EVMCS_VMFUNC,
	NR_EVMCS_CTRLS,
};

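/*
 * Per-revision masks of the VMX controls that have eVMCS counterparts and are
 * therefore safe to expose to an eVMCS-enabled L1.
 */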
static const u32 evmcs_supported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = {
	[EVMCS_EXIT_CTRLS] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMEXIT_CTRL,
	},
	[EVMCS_ENTRY_CTRLS] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMENTRY_CTRL,
	},
	[EVMCS_EXEC_CTRL] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_EXEC_CTRL,
	},
	[EVMCS_2NDEXEC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_2NDEXEC & ~SECONDARY_EXEC_TSC_SCALING,
	},
	[EVMCS_3RDEXEC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_3RDEXEC,
	},
	[EVMCS_PINCTRL] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_PINCTRL,
	},
	[EVMCS_VMFUNC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMFUNC,
	},
};

static u32 evmcs_get_supported_ctls(enum evmcs_ctrl_type ctrl_type)
{
	enum evmcs_revision evmcs_rev = EVMCSv1_LEGACY;

	return evmcs_supported_ctrls[ctrl_type][evmcs_rev];
}

static bool evmcs_has_perf_global_ctrl(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * PERF_GLOBAL_CTRL has a quirk where some Windows guests may fail to
	 * boot if a PV CPUID feature flag is not also set.  Treat the fields
	 * as unsupported if the flag is not set in guest CPUID.  This should
	 * be called only for guest accesses, and all guest accesses should be
	 * gated on Hyper-V being enabled and initialized.
	 */
	if (WARN_ON_ONCE(!hv_vcpu))
		return false;

	return hv_vcpu->cpuid_cache.nested_ebx & HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
}

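/*
 * Mask off the VMX control bits that have no eVMCSv1 counterpart when an
 * eVMCS-enabled L1 reads the VMX capability MSRs.
 */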
void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u32 ctl_low = (u32)*pdata;
	u32 ctl_high = (u32)(*pdata >> 32);
	u32 supported_ctrls;

	/*
	 * Hyper-V 2016 and 2019 try using these features even when eVMCS
	 * is enabled but there are no corresponding fields.
	 */
	switch (msr_index) {
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		supported_ctrls = evmcs_get_supported_ctls(EVMCS_EXIT_CTRLS);
		if (!evmcs_has_perf_global_ctrl(vcpu))
			supported_ctrls &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
		ctl_high &= supported_ctrls;
		break;
	case MSR_IA32_VMX_ENTRY_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		supported_ctrls = evmcs_get_supported_ctls(EVMCS_ENTRY_CTRLS);
		if (!evmcs_has_perf_global_ctrl(vcpu))
			supported_ctrls &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		ctl_high &= supported_ctrls;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_EXEC_CTRL);
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_2NDEXEC);
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_PINCTRL);
		break;
	case MSR_IA32_VMX_VMFUNC:
		ctl_low &= evmcs_get_supported_ctls(EVMCS_VMFUNC);
		break;
	}

	*pdata = ctl_low | ((u64)ctl_high << 32);
}

static bool nested_evmcs_is_valid_controls(enum evmcs_ctrl_type ctrl_type,
					   u32 val)
{
	return !(val & ~evmcs_get_supported_ctls(ctrl_type));
}

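/*
 * Consistency-check the control fields of a vmcs12 loaded via eVMCS against
 * the eVMCSv1-supported sets; any bit without an eVMCS counterpart fails the
 * nested VM-entry consistency check.
 */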
int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
{
	if (CC(!nested_evmcs_is_valid_controls(EVMCS_PINCTRL,
					       vmcs12->pin_based_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXEC_CTRL,
					       vmcs12->cpu_based_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_2NDEXEC,
					       vmcs12->secondary_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXIT_CTRLS,
					       vmcs12->vm_exit_controls)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_ENTRY_CTRLS,
					       vmcs12->vm_entry_controls)))
		return -EINVAL;

	/*
	 * VM-Func controls are 64-bit, but KVM currently doesn't support any
	 * controls in bits 63:32, i.e. dropping those bits on the consistency
	 * check is intentional.
	 */
	if (WARN_ON_ONCE(vmcs12->vm_function_control >> 32))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_VMFUNC,
					       vmcs12->vm_function_control)))
		return -EINVAL;

	return 0;
}

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * KVM on Hyper-V always uses the latest known eVMCSv1 revision; the
 * assumption is that if a feature has corresponding eVMCS fields and is
 * exposed in the VMX feature MSRs, KVM is free to use it. Warn if KVM
 * encounters a feature which has no corresponding eVMCS field, as this
 * likely means that KVM needs to be updated.
 */
#define evmcs_check_vmcs_conf(field, ctrl)					\
	do {									\
		typeof(vmcs_conf->field) unsupported;				\
										\
		unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl;	\
		if (unsupported) {						\
			pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\
				     (u64)unsupported);				\
			vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl;		\
		}								\
	}									\
	while (0)

void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{
	evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
	evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);
	evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC);
	evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC);
	evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL);
	evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL);
}
#endif

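/*
 * Enable eVMCS support for the vCPU (KVM_CAP_HYPERV_ENLIGHTENED_VMCS) and
 * report the supported version range back to userspace via @vmcs_version.
 */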
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->nested.enlightened_vmcs_enabled = true;

	if (vmcs_version)
		*vmcs_version = nested_get_evmcs_version(vcpu);

	return 0;
}

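/*
 * L2 TLB flush via Hyper-V hypercall is enabled only when both the eVMCS
 * 'nested_flush_hypercall' control and the VP assist page's 'directhypercall'
 * feature are set.
 */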
bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	if (!hv_vcpu || !evmcs)
		return false;

	if (!evmcs->hv_enlightenments_control.nested_flush_hypercall)
		return false;

	return hv_vcpu->vp_assist_page.nested_control.features.directhypercall;
}
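/*
 * Reflect a synthetic "trap after flush" VM-exit to L1 after KVM has handled
 * an L2 TLB flush hypercall on L1's behalf.
 */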
void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu)
{
	nested_vmx_vmexit(vcpu, HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH, 0, 0);
}