/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <errno.h>
#include <asm/lib/bits.h>
#include <asm/guest/virq.h>
#include <asm/lapic.h>
#include <asm/mmu.h>
#include <asm/vmx.h>
#include <asm/guest/vcpu.h>
#include <asm/guest/vmcs.h>
#include <asm/guest/vm.h>
#include <asm/guest/lock_instr_emul.h>
#include <trace.h>
#include <logmsg.h>
#include <asm/irq.h>

#define EXCEPTION_ERROR_CODE_VALID	8U

#define DBG_LEVEL_INTR	6U

#define EXCEPTION_CLASS_BENIGN	1
#define EXCEPTION_CLASS_CONT	2
#define EXCEPTION_CLASS_PF	3

/* Exception types */
#define EXCEPTION_FAULT		0U
#define EXCEPTION_TRAP		1U
#define EXCEPTION_ABORT		2U
#define EXCEPTION_INTERRUPT	3U

/* RFLAGS */
#define HV_ARCH_VCPU_RFLAGS_TF	(1UL << 8U)
#define HV_ARCH_VCPU_RFLAGS_IF	(1UL << 9U)
#define HV_ARCH_VCPU_RFLAGS_RF	(1UL << 16U)

/* Interruptibility state info */

#define HV_ARCH_VCPU_BLOCKED_BY_NMI	(1UL << 3U)
#define HV_ARCH_VCPU_BLOCKED_BY_MOVSS	(1UL << 1U)
#define HV_ARCH_VCPU_BLOCKED_BY_STI	(1UL << 0U)

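/*
 * VM-entry interruption type for each of the 32 architectural exception
 * vectors. Per the Intel SDM, vectors 8 (#DF), 10 (#TS), 11 (#NP),
 * 12 (#SS), 13 (#GP), 14 (#PF) and 17 (#AC) also push an error code,
 * so they carry EXCEPTION_ERROR_CODE_VALID.
 */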
static const uint16_t exception_type[32] = {
	[0] = VMX_INT_TYPE_HW_EXP,
	[1] = VMX_INT_TYPE_HW_EXP,
	[2] = VMX_INT_TYPE_HW_EXP,
	[3] = VMX_INT_TYPE_HW_EXP,
	[4] = VMX_INT_TYPE_HW_EXP,
	[5] = VMX_INT_TYPE_HW_EXP,
	[6] = VMX_INT_TYPE_HW_EXP,
	[7] = VMX_INT_TYPE_HW_EXP,
	[8] = VMX_INT_TYPE_HW_EXP | EXCEPTION_ERROR_CODE_VALID,
	[9] = VMX_INT_TYPE_HW_EXP,
	[10] = VMX_INT_TYPE_HW_EXP | EXCEPTION_ERROR_CODE_VALID,
	[11] = VMX_INT_TYPE_HW_EXP | EXCEPTION_ERROR_CODE_VALID,
	[12] = VMX_INT_TYPE_HW_EXP | EXCEPTION_ERROR_CODE_VALID,
	[13] = VMX_INT_TYPE_HW_EXP | EXCEPTION_ERROR_CODE_VALID,
	[14] = VMX_INT_TYPE_HW_EXP | EXCEPTION_ERROR_CODE_VALID,
	[15] = VMX_INT_TYPE_HW_EXP,
	[16] = VMX_INT_TYPE_HW_EXP,
	[17] = VMX_INT_TYPE_HW_EXP | EXCEPTION_ERROR_CODE_VALID,
	[18] = VMX_INT_TYPE_HW_EXP,
	[19] = VMX_INT_TYPE_HW_EXP,
	[20] = VMX_INT_TYPE_HW_EXP,
	[21] = VMX_INT_TYPE_HW_EXP,
	[22] = VMX_INT_TYPE_HW_EXP,
	[23] = VMX_INT_TYPE_HW_EXP,
	[24] = VMX_INT_TYPE_HW_EXP,
	[25] = VMX_INT_TYPE_HW_EXP,
	[26] = VMX_INT_TYPE_HW_EXP,
	[27] = VMX_INT_TYPE_HW_EXP,
	[28] = VMX_INT_TYPE_HW_EXP,
	[29] = VMX_INT_TYPE_HW_EXP,
	[30] = VMX_INT_TYPE_HW_EXP,
	[31] = VMX_INT_TYPE_HW_EXP
};

static uint8_t get_exception_type(uint32_t vector)
{
	uint8_t type;

	/* Treat #DB as a trap until we decide to support debug registers */
	if ((vector > 31U) || (vector == IDT_NMI)) {
		type = EXCEPTION_INTERRUPT;
	} else if ((vector == IDT_DB) || (vector == IDT_BP) || (vector == IDT_OF)) {
		type = EXCEPTION_TRAP;
	} else if ((vector == IDT_DF) || (vector == IDT_MC)) {
		type = EXCEPTION_ABORT;
	} else {
		type = EXCEPTION_FAULT;
	}

	return type;
}

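/*
 * A guest can accept a maskable interrupt only when RFLAGS.IF is set and
 * the interruptibility state reports no blocking by STI or MOV SS
 * (SDM Vol. 3, guest interruptibility-state field).
 */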
static bool is_guest_irq_enabled(struct acrn_vcpu *vcpu)
{
	uint64_t guest_rflags, guest_state;
	bool status = false;

	/* Read the RFLAGS of the guest */
	guest_rflags = vcpu_get_rflags(vcpu);
	/* Check the RFLAGS[IF] bit first */
	if ((guest_rflags & HV_ARCH_VCPU_RFLAGS_IF) != 0UL) {
		/* Interrupts are allowed */
		/* Check for temporarily disabled interrupts */
		guest_state = exec_vmread32(VMX_GUEST_INTERRUPTIBILITY_INFO);

		if ((guest_state & (HV_ARCH_VCPU_BLOCKED_BY_STI |
				HV_ARCH_VCPU_BLOCKED_BY_MOVSS)) == 0UL) {
			status = true;
		}
	}
	return status;
}

static inline bool is_nmi_injectable(void)
{
	uint64_t guest_state;

	guest_state = exec_vmread32(VMX_GUEST_INTERRUPTIBILITY_INFO);

	return ((guest_state & (HV_ARCH_VCPU_BLOCKED_BY_STI |
		HV_ARCH_VCPU_BLOCKED_BY_MOVSS | HV_ARCH_VCPU_BLOCKED_BY_NMI)) == 0UL);
}
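
/*
 * Record an event in the vcpu's pending-request bitmap and kick the vcpu
 * so it takes a VM exit; the request is then serviced by
 * acrn_handle_pending_request() before the next VM entry.
 */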
void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
{
	bitmap_set_lock(eventid, &vcpu->arch.pending_req);
	kick_vcpu(vcpu);
}

135
136 /*
137 * @retval true when INT is injected to guest.
138 * @retval false when otherwise
139 */
static bool vcpu_do_pending_extint(const struct acrn_vcpu *vcpu)
{
	struct acrn_vm *vm;
	struct acrn_vcpu *primary;
	uint32_t vector;
	bool ret = false;

	vm = vcpu->vm;

	/* Check if there is a valid interrupt from the vPIC; if yes, inject it.
	 * The vPIC is only connected to the primary vCPU.
	 */
	primary = vcpu_from_vid(vm, BSP_CPU_ID);
	if (vcpu == primary) {

		vpic_pending_intr(vm_pic(vcpu->vm), &vector);
		if (vector <= NR_MAX_VECTOR) {
			dev_dbg(DBG_LEVEL_INTR, "VPIC: to inject PIC vector %u\n",
					vector & 0xFFU);
			exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
					VMX_INT_INFO_VALID |
					(vector & 0xFFU));
			vpic_intr_accepted(vm_pic(vcpu->vm), vector);
			ret = true;
		}
	}

	return ret;
}

/* SDM Vol. 3, Section 6.15, Table 6-4: interrupt and exception classes */
static int32_t get_excep_class(uint32_t vector)
{
	int32_t ret;

	if ((vector == IDT_DE) || (vector == IDT_TS) || (vector == IDT_NP) ||
		(vector == IDT_SS) || (vector == IDT_GP)) {
		ret = EXCEPTION_CLASS_CONT;
	} else if ((vector == IDT_PF) || (vector == IDT_VE)) {
		ret = EXCEPTION_CLASS_PF;
	} else {
		ret = EXCEPTION_CLASS_BENIGN;
	}

	return ret;
}

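/*
 * Example, per SDM Vol. 3, Table 6-5: a contributory exception (e.g. #GP)
 * raised while delivering another contributory exception, or any
 * non-benign exception raised while delivering a #PF, escalates to #DF;
 * any non-benign exception raised while delivering #DF escalates to a
 * triple fault, which shuts the guest down.
 */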
int32_t vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector_arg, uint32_t err_code_arg)
{
	struct acrn_vcpu_arch *arch = &vcpu->arch;
	uint32_t vector = vector_arg;
	uint32_t err_code = err_code_arg;
	int32_t ret = 0;

	/* VECTOR_INVALID is also greater than 32 */
	if (vector >= 32U) {
		pr_err("invalid exception vector %u", vector);
		ret = -EINVAL;
	} else {

		uint32_t prev_vector = arch->exception_info.exception;
		int32_t new_class, prev_class;

		/* SDM Vol. 3, Section 6.15, Table 6-5: conditions for generating
		 * a double fault */
		prev_class = get_excep_class(prev_vector);
		new_class = get_excep_class(vector);
		if ((prev_vector == IDT_DF) && (new_class != EXCEPTION_CLASS_BENIGN)) {
			/* Triple fault happened - request shutdown */
			vcpu_make_request(vcpu, ACRN_REQUEST_TRP_FAULT);
		} else {
			if (((prev_class == EXCEPTION_CLASS_CONT) && (new_class == EXCEPTION_CLASS_CONT)) ||
				((prev_class == EXCEPTION_CLASS_PF) && (new_class != EXCEPTION_CLASS_BENIGN))) {
				/* generate a double fault */
				vector = IDT_DF;
				err_code = 0U;
			} else {
				/* Trigger the given exception instead of overriding it
				 * with a double/triple fault. */
			}

			arch->exception_info.exception = vector;

			if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
				arch->exception_info.error = err_code;
			} else {
				arch->exception_info.error = 0U;
			}

			vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
		}
	}

	return ret;
}

/*
 * @pre vcpu->arch.exception_info.exception < 0x20U
 */
static void vcpu_inject_exception(struct acrn_vcpu *vcpu)
{
	uint32_t vector = vcpu->arch.exception_info.exception;

	if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
		exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
				vcpu->arch.exception_info.error);
	}

	exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
			(exception_type[vector] << 8U) | (vector & 0xFFU));

	vcpu->arch.exception_info.exception = VECTOR_INVALID;

	if (get_exception_type(vector) == EXCEPTION_FAULT) {
		/* If this is a fault, we should retain the RIP */
		vcpu_retain_rip(vcpu);

		/* SDM 17.3.1.1: for any fault-class exception except a debug
		 * exception generated in response to an instruction breakpoint,
		 * the value pushed for RF is 1. #DB is treated as a trap in
		 * get_exception_type(), so RF is not set for instruction
		 * breakpoints.
		 */
		vcpu_set_rflags(vcpu, vcpu_get_rflags(vcpu) | HV_ARCH_VCPU_RFLAGS_RF);
	}
}

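/*
 * The helpers below request injection of specific events into the guest.
 * A typical use (sketch): an emulation path that detects a privilege
 * violation calls vcpu_inject_gp(vcpu, 0U); the queued #GP is then
 * delivered by vcpu_inject_exception() on the next VM entry.
 */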
/* Inject an external interrupt into the guest */
void vcpu_inject_extint(struct acrn_vcpu *vcpu)
{
	vcpu_make_request(vcpu, ACRN_REQUEST_EXTINT);
	signal_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
}

/* Inject an NMI into the guest */
void vcpu_inject_nmi(struct acrn_vcpu *vcpu)
{
	vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
	signal_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
}

/* Inject a general protection exception (#GP) into the guest */
void vcpu_inject_gp(struct acrn_vcpu *vcpu, uint32_t err_code)
{
	(void)vcpu_queue_exception(vcpu, IDT_GP, err_code);
}

/* Inject a page fault exception (#PF) into the guest */
void vcpu_inject_pf(struct acrn_vcpu *vcpu, uint64_t addr, uint32_t err_code)
{
	vcpu_set_cr2(vcpu, addr);
	(void)vcpu_queue_exception(vcpu, IDT_PF, err_code);
}

/* Inject an invalid opcode exception (#UD) into the guest */
void vcpu_inject_ud(struct acrn_vcpu *vcpu)
{
	(void)vcpu_queue_exception(vcpu, IDT_UD, 0U);
}

/* Inject a stack fault exception (#SS) into the guest */
void vcpu_inject_ss(struct acrn_vcpu *vcpu)
{
	(void)vcpu_queue_exception(vcpu, IDT_SS, 0U);
}

/* Inject a thermal sensor interrupt into the guest */
void vcpu_inject_thermal_interrupt(struct acrn_vcpu *vcpu)
{
	if (is_vtm_configured(vcpu->vm)) {
		(void)vlapic_set_local_intr(vcpu->vm, vcpu->vcpu_id, APIC_LVT_THERMAL);
	}
}

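/*
 * Interrupt-window exiting requests a VM exit as soon as the guest is able
 * to accept maskable interrupts (RFLAGS.IF set, no STI/MOV SS blocking).
 * It is enabled in acrn_handle_pending_request() when an event is pending
 * but cannot be injected yet; once the window opens, this handler disables
 * it and lets the VM-entry path inject the pending event.
 */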
int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
{
	TRACE_2L(TRACE_VMEXIT_INTERRUPT_WINDOW, 0UL, 0UL);

	/* Disable interrupt-window exiting first;
	 * acrn_handle_pending_request() will continue handling requests
	 * for this vcpu.
	 */
	vcpu->arch.irq_window_enabled = false;
	vcpu->arch.proc_vm_exec_ctrls &= ~(VMX_PROCBASED_CTLS_IRQ_WIN);
	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);

	vcpu_retain_rip(vcpu);
	return 0;
}

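/*
 * Handle VM exits caused by physical external interrupts: snapshot the
 * interrupted guest context and dispatch the vector to the hypervisor's
 * own interrupt handling.
 */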
int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
{
	uint32_t intr_info;
	struct intr_excp_ctx ctx;
	int32_t ret;

	intr_info = exec_vmread32(VMX_EXIT_INT_INFO);
	if (((intr_info & VMX_INT_INFO_VALID) == 0U) ||
		(((intr_info & VMX_INT_TYPE_MASK) >> 8U)
		!= VMX_INT_TYPE_EXT_INT)) {
		pr_err("Invalid VM exit interrupt info:%x", intr_info);
		vcpu_retain_rip(vcpu);
		ret = -EINVAL;
	} else {
		ctx.vector = intr_info & 0xFFU;
		ctx.rip = vcpu_get_rip(vcpu);
		ctx.rflags = vcpu_get_rflags(vcpu);
		ctx.cs = exec_vmread32(VMX_GUEST_CS_SEL);

		dispatch_interrupt(&ctx);
		vcpu_retain_rip(vcpu);

		TRACE_2L(TRACE_VMEXIT_EXTERNAL_INTERRUPT, ctx.vector, 0UL);
		ret = 0;
	}

	return ret;
}

static inline void acrn_inject_pending_intr(struct acrn_vcpu *vcpu,
		uint64_t *pending_req_bits, bool injected);

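/*
 * Service all requests pending on this vcpu before the next VM entry.
 * Ordering matters: ACRN_REQUEST_INIT_VMCS must run first and a triple
 * fault aborts further handling; for injection, the priority is exception,
 * then NMI, then re-injection of a faulted IDT-vectoring event, then
 * maskable external interrupts.
 */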
int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
{
	bool injected = false;
	int32_t ret = 0;
	struct acrn_vcpu_arch *arch = &vcpu->arch;
	uint64_t *pending_req_bits = &arch->pending_req;

	if (*pending_req_bits != 0UL) {
		/* Make sure the ACRN_REQUEST_INIT_VMCS handler runs first */
		if (bitmap_test_and_clear_lock(ACRN_REQUEST_INIT_VMCS, pending_req_bits)) {
			init_vmcs(vcpu);
		}

		if (bitmap_test_and_clear_lock(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
			pr_fatal("Triple fault happened -> shutdown!");
			ret = -EFAULT;
		} else {
			if (bitmap_test_and_clear_lock(ACRN_REQUEST_WAIT_WBINVD, pending_req_bits)) {
				wait_event(&vcpu->events[VCPU_EVENT_SYNC_WBINVD]);
			}

			if (bitmap_test_and_clear_lock(ACRN_REQUEST_SPLIT_LOCK, pending_req_bits)) {
				wait_event(&vcpu->events[VCPU_EVENT_SPLIT_LOCK]);
			}

			if (bitmap_test_and_clear_lock(ACRN_REQUEST_EPT_FLUSH, pending_req_bits)) {
				invept(vcpu->vm->arch_vm.nworld_eptp);
				if (vcpu->vm->sworld_control.flag.active != 0UL) {
					invept(vcpu->vm->arch_vm.sworld_eptp);
				}
			}

			if (bitmap_test_and_clear_lock(ACRN_REQUEST_VPID_FLUSH, pending_req_bits)) {
				flush_vpid_single(arch->vpid);
			}

			if (bitmap_test_and_clear_lock(ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE, pending_req_bits)) {
				vcpu_set_vmcs_eoi_exit(vcpu);
			}

			if (bitmap_test_and_clear_lock(ACRN_REQUEST_SMP_CALL, pending_req_bits)) {
				handle_smp_call();
			}

		}
	}

	if (ret == 0) {
		/*
		 * Inject a pending exception prior to pending interrupts, so the
		 * previous instruction can complete.
		 */
		if ((*pending_req_bits != 0UL) && bitmap_test_and_clear_lock(ACRN_REQUEST_EXCP, pending_req_bits)) {
			vcpu_inject_exception(vcpu);
			injected = true;
		} else {
			/* Inject NMI before maskable hardware interrupts */

			if ((*pending_req_bits != 0UL) &&
				bitmap_test_and_clear_lock(ACRN_REQUEST_NMI, pending_req_bits)) {
				if (is_nmi_injectable()) {
					/* Inject NMI, vector = 2 */
					exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
						VMX_INT_INFO_VALID | (VMX_INT_TYPE_NMI << 8U) | IDT_NMI);
					injected = true;
				} else {
					/* keep the NMI request for the next VM exit */
					bitmap_set_lock(ACRN_REQUEST_NMI, pending_req_bits);
				}
			} else {
				/* Handle pending vector injection: the previous
				 * injection may have failed for many reasons and must
				 * be retried here. Cases to take care of:
				 * - SW exceptions (not maskable by IF)
				 * - external interrupts: if IF is clear, does the event
				 *   remain in IDT_VEC_INFO_FIELD at the next VM exit?
				 */
				if ((arch->idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
					exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, arch->idt_vectoring_info);
					arch->idt_vectoring_info = 0U;
					injected = true;
				}
			}
		}

		/*
		 * Defer interrupt injection until after the MTF VM exit when
		 * emulating a split-lock access.
		 */
		if (!is_lapic_pt_enabled(vcpu) && !vcpu->arch.emulating_lock) {
			acrn_inject_pending_intr(vcpu, pending_req_bits, injected);
		}

		/*
		 * If "virtual-interrupt delivery" is enabled, the CPU evaluates
		 * and automatically injects pending virtual interrupts at the
		 * appropriate time. Per SDM Vol. 3, Section 29.2.1, APICv only
		 * triggers evaluation of pending virtual interrupts when
		 * "interrupt-window exiting" is 0.
		 *
		 * External interrupts (from the vPIC) can't be delivered by
		 * "virtual-interrupt delivery"; it only delivers interrupts
		 * from the vlapic.
		 *
		 * So "interrupt-window exiting" must be enabled when there is a
		 * pending ExtInt, or when there is a pending lapic interrupt
		 * and virtual-interrupt delivery is disabled.
		 */
		if (!is_lapic_pt_enabled(vcpu) && !arch->irq_window_enabled) {
			/*
			 * TODO: Currently, NMI exiting and virtual NMIs are not
			 * enabled, so the interrupt window is used to inject NMIs.
			 * Once virtual NMIs are enabled, NMI-window exiting can
			 * be used instead.
			 */
			if (bitmap_test(ACRN_REQUEST_EXTINT, pending_req_bits) ||
				bitmap_test(ACRN_REQUEST_NMI, pending_req_bits) ||
				vlapic_has_pending_delivery_intr(vcpu)) {
				vcpu->arch.proc_vm_exec_ctrls |= VMX_PROCBASED_CTLS_IRQ_WIN;
				exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
				arch->irq_window_enabled = true;
			}
		}
	}

	return ret;
}

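/*
 * Inject pending maskable interrupts: try the vPIC ExtInt first if the
 * guest can accept interrupts and nothing has been injected yet, then let
 * the vlapic evaluate its own pending interrupts.
 */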
static inline void acrn_inject_pending_intr(struct acrn_vcpu *vcpu,
		uint64_t *pending_req_bits, bool injected)
{
	bool ret = injected;
	bool guest_irq_enabled = is_guest_irq_enabled(vcpu);

	if (guest_irq_enabled && (!ret)) {
		/* Inject external interrupt first */
		if (bitmap_test_and_clear_lock(ACRN_REQUEST_EXTINT, pending_req_bits)) {
			/* has pending external interrupts */
			ret = vcpu_do_pending_extint(vcpu);
		}
	}

	if (bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT, pending_req_bits)) {
		vlapic_inject_intr(vcpu_vlapic(vcpu), guest_irq_enabled, ret);
	}
}

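/*
 * Handle VM exits caused by guest exceptions: fetch the faulting vector
 * and error code, give the lock-instruction (split-lock) emulation path
 * the first look, and re-queue the exception into the guest when the
 * emulation does not consume it.
 */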
/*
 * @pre vcpu != NULL
 */
int32_t exception_vmexit_handler(struct acrn_vcpu *vcpu)
{
	uint32_t intinfo, int_err_code = 0U;
	uint32_t exception_vector = VECTOR_INVALID;
	uint32_t cpl;
	int32_t status = 0;
	bool queue_exception;

	pr_dbg(" Handling guest exception");

	/* Obtain the VM-exit interruption information (SDM Vol. 3, VM-exit
	 * information fields) */
	intinfo = exec_vmread32(VMX_EXIT_INT_INFO);
	if ((intinfo & VMX_INT_INFO_VALID) != 0U) {
		exception_vector = intinfo & 0xFFU;
		/* Check if the exception caused by the guest is a HW exception.
		 * If the exit occurred due to a HW exception, obtain the error
		 * code that would otherwise have been pushed on the stack.
		 */
		if ((intinfo & VMX_INT_INFO_ERR_CODE_VALID) != 0U) {
			int_err_code = exec_vmread32(VMX_EXIT_INT_ERROR_CODE);

			/* Get the guest's current privilege level from the CS
			 * access rights (DPL, bits 6:5) and reflect it in bit 2
			 * of the error code (e.g. the U/S bit of a #PF error
			 * code).
			 */
			cpl = exec_vmread32(VMX_GUEST_CS_ATTR);
			cpl = (cpl >> 5U) & 3U;

			if (cpl < 3U) {
				int_err_code &= ~4U;
			} else {
				int_err_code |= 4U;
			}
		}
	}

	status = emulate_lock_instr(vcpu, exception_vector, &queue_exception);
	if ((status == 0) && queue_exception) {
		vcpu_retain_rip(vcpu);
		status = vcpu_queue_exception(vcpu, exception_vector, int_err_code);
	}

	if (exception_vector == IDT_MC) {
		/* Just print an error message for #MC; it will then be injected
		 * back into the guest. */
		pr_fatal("Exception #MC got from guest!");
	}

	TRACE_4I(TRACE_VMEXIT_EXCEPTION_OR_NMI,
			exception_vector, int_err_code, 2U, 0U);

	return status;
}

int32_t nmi_window_vmexit_handler(struct acrn_vcpu *vcpu)
{
	/*
	 * Disable NMI-window exiting here; the pending request is processed
	 * in acrn_handle_pending_request() later.
	 */
	vcpu->arch.proc_vm_exec_ctrls &= ~VMX_PROCBASED_CTLS_NMI_WINEXIT;
	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);

	vcpu_retain_rip(vcpu);

	return 0;
}
