1 /*
2  * Copyright (C) 2018-2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <types.h>
8 #include <errno.h>
9 #include <asm/vmx.h>
10 #include <asm/guest/virq.h>
11 #include <asm/mmu.h>
12 #include <asm/guest/vcpu.h>
13 #include <asm/guest/vm.h>
14 #include <asm/guest/vmexit.h>
15 #include <asm/guest/vm_reset.h>
16 #include <asm/guest/vmx_io.h>
17 #include <asm/guest/lock_instr_emul.h>
18 #include <asm/guest/ept.h>
19 #include <asm/guest/vept.h>
20 #include <asm/vtd.h>
21 #include <asm/cpuid.h>
22 #include <asm/guest/vcpuid.h>
23 #include <trace.h>
24 #include <asm/rtcm.h>
25 #include <debug/console.h>
26 
27 /*
28  * According to "SDM APPENDIX C VMX BASIC EXIT REASONS",
29  * there are 65 Basic Exit Reasons.
30  */
31 #define NR_VMX_EXIT_REASONS	70U
32 
33 static int32_t triple_fault_vmexit_handler(struct acrn_vcpu *vcpu);
34 static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
35 static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
36 static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu);
37 static int32_t undefined_vmexit_handler(struct acrn_vcpu *vcpu);
38 static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu);
39 static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu);
40 static int32_t mtf_vmexit_handler(struct acrn_vcpu *vcpu);
41 static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu);
42 static int32_t init_signal_vmexit_handler(__unused struct acrn_vcpu *vcpu);
43 static int32_t mwait_monitor_vmexit_handler (struct acrn_vcpu *vcpu);
44 
45 /* VM Dispatch table for Exit condition handling */
/*
 * Per-exit-reason dispatch table, indexed by the basic exit reason
 * (low 16 bits of the VM-exit reason).  vmexit_handler() bounds-checks
 * the index against this array before dispatching.  Reasons without a
 * designated initializer get a zeroed entry (NULL handler) — presumably
 * reserved reasons that hardware never delivers; TODO confirm.
 * need_exit_qualification = 1 makes the dispatcher read
 * VMX_EXIT_QUALIFICATION into vcpu->arch.exit_qualification before
 * invoking the handler.
 */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
	[VMX_EXIT_REASON_EXCEPTION_OR_NMI] = {
		.handler = exception_vmexit_handler},
	[VMX_EXIT_REASON_EXTERNAL_INTERRUPT] = {
		.handler = external_interrupt_vmexit_handler},
	[VMX_EXIT_REASON_TRIPLE_FAULT] = {
		.handler = triple_fault_vmexit_handler},
	[VMX_EXIT_REASON_INIT_SIGNAL] = {
		.handler = init_signal_vmexit_handler},
	[VMX_EXIT_REASON_STARTUP_IPI] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_IO_SMI] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_OTHER_SMI] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_INTERRUPT_WINDOW] = {
		.handler = interrupt_window_vmexit_handler},
	[VMX_EXIT_REASON_NMI_WINDOW] = {
		.handler = nmi_window_vmexit_handler},
	[VMX_EXIT_REASON_TASK_SWITCH] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_CPUID] = {
		.handler = cpuid_vmexit_handler},
	[VMX_EXIT_REASON_GETSEC] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_HLT] = {
		.handler = hlt_vmexit_handler},
	[VMX_EXIT_REASON_INVD] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_INVLPG] = {
		.handler = unhandled_vmexit_handler,},
	[VMX_EXIT_REASON_RDPMC] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_RDTSC] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_RSM] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_VMCALL] = {
		.handler = vmcall_vmexit_handler},
	[VMX_EXIT_REASON_VMPTRST] = {
		.handler = undefined_vmexit_handler},
#ifndef CONFIG_NVMX_ENABLED
	/* Without nested VMX, the VMX instructions are presented to the guest
	 * as undefined opcodes (#UD). */
	[VMX_EXIT_REASON_VMLAUNCH] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_VMRESUME] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_VMCLEAR] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_VMPTRLD] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_VMREAD] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_VMWRITE] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_VMXOFF] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_VMXON] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_INVEPT] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_INVVPID] = {
		.handler = undefined_vmexit_handler},
#else
	/* Nested VMX enabled: dispatch VMX instructions to the emulation
	 * handlers. */
	[VMX_EXIT_REASON_VMLAUNCH] = {
		.handler = vmlaunch_vmexit_handler},
	[VMX_EXIT_REASON_VMRESUME] = {
		.handler = vmresume_vmexit_handler},
	[VMX_EXIT_REASON_VMCLEAR] = {
		.handler = vmclear_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_VMPTRLD] = {
		.handler = vmptrld_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_VMREAD] = {
		.handler = vmread_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_VMWRITE] = {
		.handler = vmwrite_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_VMXOFF] = {
		.handler = vmxoff_vmexit_handler},
	[VMX_EXIT_REASON_VMXON] = {
		.handler = vmxon_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_INVEPT] = {
		.handler = invept_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_INVVPID] = {
		.handler = invvpid_vmexit_handler,
		.need_exit_qualification = 1},
#endif
	[VMX_EXIT_REASON_CR_ACCESS] = {
		.handler = cr_access_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_DR_ACCESS] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_IO_INSTRUCTION] = {
		.handler = pio_instr_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_RDMSR] = {
		.handler = rdmsr_vmexit_handler},
	[VMX_EXIT_REASON_WRMSR] = {
		.handler = wrmsr_vmexit_handler},
	[VMX_EXIT_REASON_ENTRY_FAILURE_INVALID_GUEST_STATE] = {
		.handler = unhandled_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_ENTRY_FAILURE_MSR_LOADING] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_MWAIT] = {
		.handler = mwait_monitor_vmexit_handler},
	[VMX_EXIT_REASON_MONITOR_TRAP] = {
		.handler = mtf_vmexit_handler},
	[VMX_EXIT_REASON_MONITOR] = {
		.handler = mwait_monitor_vmexit_handler},
	[VMX_EXIT_REASON_PAUSE] = {
		.handler = pause_vmexit_handler},
	[VMX_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_TPR_BELOW_THRESHOLD] = {
		.handler = tpr_below_threshold_vmexit_handler},
	[VMX_EXIT_REASON_APIC_ACCESS] = {
		.handler = apic_access_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_VIRTUALIZED_EOI] = {
		.handler = veoi_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_GDTR_IDTR_ACCESS] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_LDTR_TR_ACCESS] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_EPT_VIOLATION] = {
		.handler = ept_violation_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_EPT_MISCONFIGURATION] = {
		.handler = ept_misconfig_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_RDTSCP] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_WBINVD] = {
		.handler = wbinvd_vmexit_handler},
	[VMX_EXIT_REASON_XSETBV] = {
		.handler = xsetbv_vmexit_handler},
	[VMX_EXIT_REASON_APIC_WRITE] = {
		.handler = apic_write_vmexit_handler,
		.need_exit_qualification = 1},
	[VMX_EXIT_REASON_RDRAND] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_INVPCID] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_VMFUNC] = {
		.handler = undefined_vmexit_handler},
	[VMX_EXIT_REASON_ENCLS] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_RDSEED] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_PAGE_MODIFICATION_LOG_FULL] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_XSAVES] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_XRSTORS] = {
		.handler = unhandled_vmexit_handler},
	[VMX_EXIT_REASON_LOADIWKEY] = {
		.handler = loadiwkey_vmexit_handler}
};
212 
/*
 * Top-level VM-exit dispatcher.
 *
 * Verifies the vcpu is running on its own pcpu, routes L2-guest exits to the
 * nested-VMX path, re-queues any event that was being delivered when the exit
 * occurred, then dispatches on the basic exit reason via dispatch_table.
 *
 * @param vcpu the vcpu that took the VM exit
 * @return handler's return value, or -EINVAL on pcpu mismatch / out-of-range
 *         exit reason
 */
int32_t vmexit_handler(struct acrn_vcpu *vcpu)
{
	struct vm_exit_dispatch *dispatch = NULL;
	uint16_t basic_exit_reason;
	int32_t ret;

	if (get_pcpu_id() != pcpuid_from_vcpu(vcpu)) {
		pr_fatal("vcpu is not running on its pcpu!");
		ret = -EINVAL;
	} else if (is_vcpu_in_l2_guest(vcpu)) {
		/* Exits from an L2 guest are handled by the nested-VMX layer. */
		ret = nested_vmexit_handler(vcpu);
	} else {
		/* Obtain interrupt info */
		vcpu->arch.idt_vectoring_info = exec_vmread32(VMX_IDT_VEC_INFO_FIELD);
		/* Filter out HW exception & NMI */
		if ((vcpu->arch.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
			uint32_t vector_info = vcpu->arch.idt_vectoring_info;
			uint32_t vector = vector_info & 0xffU;
			uint32_t type = (vector_info & VMX_INT_TYPE_MASK) >> 8U;
			uint32_t err_code = 0U;

			if (type == VMX_INT_TYPE_HW_EXP) {
				if ((vector_info & VMX_INT_INFO_ERR_CODE_VALID) != 0U) {
					err_code = exec_vmread32(VMX_IDT_VEC_ERROR_CODE);
				}
				/* Re-queue the interrupted exception delivery so it is
				 * re-injected on the next VM entry. */
				(void)vcpu_queue_exception(vcpu, vector, err_code);
				vcpu->arch.idt_vectoring_info = 0U;
			} else if (type == VMX_INT_TYPE_NMI) {
				/* Re-request the NMI whose delivery was interrupted. */
				vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
				vcpu->arch.idt_vectoring_info = 0U;
			} else {
				/* No action on EXT_INT or SW exception. */
			}
		}

		/* Calculate basic exit reason (low 16-bits) */
		basic_exit_reason = (uint16_t)(vcpu->arch.exit_reason & 0xFFFFU);

		/* Log details for exit */
		pr_dbg("Exit Reason: 0x%016lx ", vcpu->arch.exit_reason);

		/* Ensure exit reason is within dispatch table */
		if (basic_exit_reason >= ARRAY_SIZE(dispatch_table)) {
			pr_err("Invalid Exit Reason: 0x%016lx ", vcpu->arch.exit_reason);
			ret = -EINVAL;
		} else {
			/* Calculate dispatch table entry */
			dispatch = (struct vm_exit_dispatch *)(dispatch_table + basic_exit_reason);

			/* See if an exit qualification is necessary for this exit handler */
			if (dispatch->need_exit_qualification != 0U) {
				/* Get exit qualification */
				vcpu->arch.exit_qualification = exec_vmread(VMX_EXIT_QUALIFICATION);
			}

			/* exit dispatch handling */
			if (basic_exit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) {
				/* Handling external_interrupt should disable intr */
				if (!is_lapic_pt_enabled(vcpu)) {
					CPU_IRQ_DISABLE_ON_CONFIG();
				}

				ret = dispatch->handler(vcpu);

				if (!is_lapic_pt_enabled(vcpu)) {
					CPU_IRQ_ENABLE_ON_CONFIG();
				}
			} else {
				ret = dispatch->handler(vcpu);
			}
		}
	}

	/* Give the console a chance to poll on every exit, regardless of result. */
	console_vmexit_callback(vcpu);

	return ret;
}
290 
mwait_monitor_vmexit_handler(struct acrn_vcpu * vcpu)291 static int32_t mwait_monitor_vmexit_handler (struct acrn_vcpu *vcpu)
292 {
293 	pr_fatal("Error: Unsupported mwait option from guest at 0x%016lx ",
294 		exec_vmread(VMX_GUEST_RIP));
295 
296 	vcpu_inject_ud(vcpu);
297 
298 	return 0;
299 }
300 
unhandled_vmexit_handler(struct acrn_vcpu * vcpu)301 static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
302 {
303 	pr_fatal("Error: Unhandled VM exit condition from guest at 0x%016lx ",
304 			exec_vmread(VMX_GUEST_RIP));
305 
306 	pr_fatal("Exit Reason: 0x%016lx ", vcpu->arch.exit_reason);
307 
308 	pr_err("Exit qualification: 0x%016lx ",
309 			exec_vmread(VMX_EXIT_QUALIFICATION));
310 
311 	TRACE_2L(TRACE_VMEXIT_UNHANDLED, vcpu->arch.exit_reason, 0UL);
312 
313 	return 0;
314 }
315 
316 /* MTF is currently only used for split-lock emulation */
mtf_vmexit_handler(struct acrn_vcpu * vcpu)317 static int32_t mtf_vmexit_handler(struct acrn_vcpu *vcpu)
318 {
319 	vcpu->arch.proc_vm_exec_ctrls &= ~(VMX_PROCBASED_CTLS_MON_TRAP);
320 	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
321 
322 	vcpu_retain_rip(vcpu);
323 
324 	if (vcpu->arch.emulating_lock) {
325 		vcpu->arch.emulating_lock = false;
326 		vcpu_complete_lock_instr_emulation(vcpu);
327 	}
328 
329 	return 0;
330 }
331 
triple_fault_vmexit_handler(struct acrn_vcpu * vcpu)332 static int32_t triple_fault_vmexit_handler(struct acrn_vcpu *vcpu)
333 {
334 	pr_fatal("VM%d: triple fault @ guest RIP 0x%016lx, exit qualification: 0x%016lx",
335 		vcpu->vm->vm_id, exec_vmread(VMX_GUEST_RIP), exec_vmread(VMX_EXIT_QUALIFICATION));
336 	triple_fault_shutdown_vm(vcpu);
337 
338 	return 0;
339 }
340 
/* Guest executed PAUSE (spin-wait hint): yield the pcpu to other runnable threads. */
static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu)
{
	yield_current();
	return 0;
}
346 
hlt_vmexit_handler(struct acrn_vcpu * vcpu)347 static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu)
348 {
349 	if ((vcpu->arch.pending_req == 0UL) && (!vlapic_has_pending_intr(vcpu))) {
350 		wait_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
351 	}
352 	return 0;
353 }
354 
cpuid_vmexit_handler(struct acrn_vcpu * vcpu)355 int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
356 {
357 	uint32_t eax, ebx, ecx, edx;
358 
359 	eax = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RAX);
360 	ebx = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RBX);
361 	ecx = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RCX);
362 	edx = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RDX);
363 	TRACE_2L(TRACE_VMEXIT_CPUID, (uint64_t)eax, (uint64_t)ecx);
364 	guest_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
365 	vcpu_set_gpreg(vcpu, CPU_REG_RAX, (uint64_t)eax);
366 	vcpu_set_gpreg(vcpu, CPU_REG_RBX, (uint64_t)ebx);
367 	vcpu_set_gpreg(vcpu, CPU_REG_RCX, (uint64_t)ecx);
368 	vcpu_set_gpreg(vcpu, CPU_REG_RDX, (uint64_t)edx);
369 
370 	return 0;
371 }
372 
373 /*
374  * XSETBV instruction set's the XCR0 that is used to tell for which
375  * components states can be saved on a context switch using xsave.
376  *
377  * According to SDM vol3 25.1.1:
378  * Invalid-opcode exception (UD) and faults based on privilege level (include
379  * virtual-8086 mode previleged instructions are not recognized) have higher
380  * priority than VM exit.
381  *
382  * According to SDM vol2 - XSETBV instruction description:
383  * If CR4.OSXSAVE[bit 18] = 0,
384  * execute "XSETBV" instruction will generate #UD exception.
385  * So VM exit won't happen with VMX_GUEST_CR4.CR4_OSXSAVE = 0.
386  * CR4_OSXSAVE bit is controlled by guest (CR4_OSXSAVE bit
387  * is set as guest expect to see).
388  *
389  * We don't need to handle those case here because we depends on VMX to handle
390  * them.
391  */
xsetbv_vmexit_handler(struct acrn_vcpu * vcpu)392 static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
393 {
394 	int32_t idx, ret = -1;	/* ret < 0 call vcpu_inject_gp(vcpu, 0U) */
395 	uint32_t cpl;
396 	uint64_t val64;
397 
398 	if (vcpu->arch.xsave_enabled && ((vcpu_get_cr4(vcpu) & CR4_OSXSAVE) != 0UL)) {
399 		idx = vcpu->arch.cur_context;
400 		/* get current privilege level */
401 		cpl = exec_vmread32(VMX_GUEST_CS_ATTR);
402 		cpl = (cpl >> 5U) & 3U;
403 
404 		if ((idx < NR_WORLD) && (cpl == 0U)) {
405 			/* to access XCR0,'ecx' should be 0 */
406 			if ((vcpu_get_gpreg(vcpu, CPU_REG_RCX) & 0xffffffffUL) == 0UL) {
407 				val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) |
408 						(vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U);
409 
410 				/* bit 0(x87 state) of XCR0 can't be cleared */
411 				if (((val64 & 0x01UL) != 0UL) && ((val64 & XCR0_RESERVED_BITS) == 0UL)) {
412 					/*
413 					 * XCR0[2:1] (SSE state & AVX state) can't not be
414 					 * set to 10b as it is necessary to set both bits
415 					 * to use AVX instructions.
416 					 */
417 					if ((val64 & (XCR0_SSE | XCR0_AVX)) != XCR0_AVX) {
418 						/*
419 						 * SDM Vol.1 13-4, XCR0[4:3] are associated with MPX state,
420 						 * Guest should not set these two bits without MPX support.
421 						 */
422 						if ((val64 & (XCR0_BNDREGS | XCR0_BNDCSR)) == 0UL) {
423 							write_xcr(0, val64);
424 							ret = 0;
425 						}
426 					}
427 				}
428 			}
429 		}
430 	} else {
431 		/* CPUID.01H:ECX.XSAVE[bit 26] = 0 */
432 		vcpu_inject_ud(vcpu);
433 		ret = 0;
434 	}
435 
436 	return ret;
437 }
438 
/*
 * Handle guest WBINVD.
 *
 * If software SRAM is disabled and no RT VM exists, simply flush and
 * invalidate all caches.  Otherwise the flush is done by walking the VM's
 * EPT and flushing per leaf page (ept_flush_leaf_page), avoiding a global
 * WBINVD.  For a non-RT VM, the other vcpus of the same VM are asked to
 * wait (ACRN_REQUEST_WAIT_WBINVD) while the walk runs and are released via
 * their VCPU_EVENT_SYNC_WBINVD event afterwards; vm->wbinvd_lock serializes
 * concurrent WBINVD exits within the VM.
 */
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
{
	uint16_t i;
	struct acrn_vcpu *other;

	/* GUEST_FLAG_RT has not set in post-launched RTVM before it has been created */
	if ((!is_software_sram_enabled()) && (!has_rt_vm())) {
		flush_invalidate_all_cache();
	} else {
		if (is_rt_vm(vcpu->vm)) {
			/* RT VM: flush only this VM's pages, without pausing its vcpus. */
			walk_ept_table(vcpu->vm, ept_flush_leaf_page);
		} else {
			spinlock_obtain(&vcpu->vm->wbinvd_lock);
			/* Pause other vcpus and let them wait for the wbinvd completion */
			foreach_vcpu(i, vcpu->vm, other) {
				if (other != vcpu) {
					vcpu_make_request(other, ACRN_REQUEST_WAIT_WBINVD);
				}
			}

			walk_ept_table(vcpu->vm, ept_flush_leaf_page);

			/* Release the waiting vcpus now that the flush is complete. */
			foreach_vcpu(i, vcpu->vm, other) {
				if (other != vcpu) {
					signal_event(&other->events[VCPU_EVENT_SYNC_WBINVD]);
				}
			}
			spinlock_release(&vcpu->vm->wbinvd_lock);
		}
	}

	return 0;
}
472 
loadiwkey_vmexit_handler(struct acrn_vcpu * vcpu)473 static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu)
474 {
475 	uint64_t xmm[6] = {0};
476 
477 	/* Wrapping key nobackup and randomization are not supported */
478 	if ((vcpu_get_gpreg(vcpu, CPU_REG_RAX) != 0UL)) {
479 		vcpu_inject_gp(vcpu, 0);
480 	} else {
481 		read_xmm_0_2(&xmm[0], &xmm[2], &xmm[4]);
482 		vcpu->arch.IWKey.encryption_key[0] = xmm[2];
483 		vcpu->arch.IWKey.encryption_key[1] = xmm[3];
484 		vcpu->arch.IWKey.encryption_key[2] = xmm[4];
485 		vcpu->arch.IWKey.encryption_key[3] = xmm[5];
486 		vcpu->arch.IWKey.integrity_key[0] = xmm[0];
487 		vcpu->arch.IWKey.integrity_key[1] = xmm[1];
488 
489 		asm_loadiwkey(0);
490 		get_cpu_var(whose_iwkey) = vcpu;
491 	}
492 
493 	return 0;
494 }
495 
496 /*
497  * This handler is only triggered by INIT signal when poweroff from inside of RTVM
498  */
init_signal_vmexit_handler(__unused struct acrn_vcpu * vcpu)499 static int32_t init_signal_vmexit_handler(__unused struct acrn_vcpu *vcpu)
500 {
501 	/*
502 	 * Intel SDM Volume 3, 25.2:
503 	 *   INIT signals. INIT signals cause VM exits. A logical processer performs none
504 	 *   of the operations normally associated with these events. Such exits do not modify
505 	 *   register state or clear pending events as they would outside of VMX operation (If
506 	 *   a logical processor is the wait-for-SIPI state, INIT signals are blocked. They do
507 	 *   not cause VM exits in this case).
508 	 *
509 	 * So, it is safe to ignore the signal but need retain its RIP.
510 	 */
511 	vcpu_retain_rip(vcpu);
512 	return 0;
513 }
514 
515 /*
516  * vmexit handler for just injecting a #UD exception
517  * ACRN doesn't enable VMFUNC, VMFUNC treated as undefined.
518  */
undefined_vmexit_handler(struct acrn_vcpu * vcpu)519 static int32_t undefined_vmexit_handler(struct acrn_vcpu *vcpu)
520 {
521 	vcpu_inject_ud(vcpu);
522 	return 0;
523 }
524