1 /*
2  * Copyright (C) 2021-2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <types.h>
8 #include <asm/guest/vcpu.h>
9 #include <asm/guest/vm.h>
10 #include <asm/guest/virq.h>
11 #include <event.h>
12 #include <asm/cpu_caps.h>
13 #include <logmsg.h>
14 #include <errno.h>
15 #include <asm/guest/lock_instr_emul.h>
16 
is_guest_ac_enabled(struct acrn_vcpu * vcpu)17 static bool is_guest_ac_enabled(struct acrn_vcpu *vcpu)
18 {
19 	bool ret = false;
20 
21 	if ((vcpu_get_guest_msr(vcpu, MSR_TEST_CTL) & MSR_TEST_CTL_AC_SPLITLOCK) != 0UL) {
22 		ret = true;
23 	}
24 
25 	return ret;
26 }
27 
is_guest_gp_enabled(struct acrn_vcpu * vcpu)28 static bool is_guest_gp_enabled(struct acrn_vcpu *vcpu)
29 {
30 	bool ret = false;
31 
32 	if ((vcpu_get_guest_msr(vcpu, MSR_TEST_CTL) & MSR_TEST_CTL_GP_UCLOCK) != 0UL) {
33 		ret = true;
34 	}
35 
36 	return ret;
37 }
38 
vcpu_kick_lock_instr_emulation(struct acrn_vcpu * cur_vcpu)39 void vcpu_kick_lock_instr_emulation(struct acrn_vcpu *cur_vcpu)
40 {
41 	struct acrn_vcpu *other;
42 	uint16_t i;
43 
44 	if (cur_vcpu->vm->hw.created_vcpus > 1U) {
45 		get_vm_lock(cur_vcpu->vm);
46 
47 		foreach_vcpu(i, cur_vcpu->vm, other) {
48 			if ((other != cur_vcpu) && (other->state == VCPU_RUNNING)) {
49 				vcpu_make_request(other, ACRN_REQUEST_SPLIT_LOCK);
50 			}
51 		}
52 	}
53 }
54 
vcpu_complete_lock_instr_emulation(struct acrn_vcpu * cur_vcpu)55 void vcpu_complete_lock_instr_emulation(struct acrn_vcpu *cur_vcpu)
56 {
57 	struct acrn_vcpu *other;
58 	uint16_t i;
59 
60 	if (cur_vcpu->vm->hw.created_vcpus > 1U) {
61 		foreach_vcpu(i, cur_vcpu->vm, other) {
62 			if ((other != cur_vcpu) && (other->state == VCPU_RUNNING)) {
63 				signal_event(&other->events[VCPU_EVENT_SPLIT_LOCK]);
64 			}
65 		}
66 
67 		put_vm_lock(cur_vcpu->vm);
68 	}
69 }
70 
emulate_lock_instr(struct acrn_vcpu * vcpu,uint32_t exception_vector,bool * queue_exception)71 int32_t emulate_lock_instr(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception)
72 {
73 	int32_t status = 0;
74 	uint8_t inst[1];
75 	uint32_t err_code = 0U;
76 	uint64_t fault_addr;
77 
78 	/* Queue the exception by default if the exception cannot be handled. */
79 	*queue_exception = true;
80 
81 	/*
82 	 * The split-lock/uc-lock detection is enabled by default if the platform supports it.
83 	 * Here, we check if the split-lock detection is really enabled or not. If the
84 	 * split-lock/uc-lock detection is enabled in the platform but not enabled in the guest
85 	 * then we try to emulate it, otherwise, inject the exception back.
86 	 */
87 	if ((is_ac_enabled() && !is_guest_ac_enabled(vcpu)) || (is_gp_enabled() && !is_guest_gp_enabled(vcpu))){
88 		switch (exception_vector) {
89 		case IDT_AC:
90 		case IDT_GP:
91 			status = copy_from_gva(vcpu, inst, vcpu_get_rip(vcpu), 1U, &err_code, &fault_addr);
92 			if (status < 0) {
93 				pr_err("Error copy instruction from Guest!");
94 				if (status == -EFAULT) {
95 					vcpu_inject_pf(vcpu, fault_addr, err_code);
96 					status = 0;
97 					/* For this case, inject #PF, not to queue #AC */
98 					*queue_exception = false;
99 				}
100 			} else {
101 				/*
102 				 * If #AC/#GP is caused by instruction with LOCK prefix or xchg, then emulate it,
103 				 * otherwise, inject it back.
104 				 */
105 				if (inst[0] == 0xf0U) {  /* This is LOCK prefix */
106 					/*
107 					 * Kick other vcpus of the guest to stop execution
108 					 * until the split-lock/uc-lock emulation being completed.
109 					 */
110 					vcpu_kick_lock_instr_emulation(vcpu);
111 
112 					/*
113 					 * Skip the LOCK prefix and re-execute the instruction.
114 					 */
115 					vcpu->arch.inst_len = 1U;
116 					if (vcpu->vm->hw.created_vcpus > 1U) {
117 						/* Enable MTF to start single-stepping execution */
118 						vcpu->arch.proc_vm_exec_ctrls |= VMX_PROCBASED_CTLS_MON_TRAP;
119 						exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
120 						vcpu->arch.emulating_lock = true;
121 					}
122 
123 					/* Skip the #AC/#GP, we have emulated it. */
124 					*queue_exception = false;
125 				} else {
126 					status = decode_instruction(vcpu, false);
127 					if (status >= 0) {
128 						/*
129 						 * If this is the xchg, then emulate it, otherwise,
130 						 * inject it back.
131 						 */
132 						if (is_current_opcode_xchg(vcpu)) {
133 							/*
134 							 * Kick other vcpus of the guest to stop execution
135 							 * until the split-lock/uc-lock emulation being completed.
136 							 */
137 							vcpu_kick_lock_instr_emulation(vcpu);
138 
139 							/*
140 							 * Using emulating_lock to make sure xchg emulation
141 							 * is only called by split-lock/uc-lock emulation.
142 							 */
143 							vcpu->arch.emulating_lock = true;
144 							status = emulate_instruction(vcpu);
145 							vcpu->arch.emulating_lock = false;
146 							if (status < 0) {
147 								if (status == -EFAULT) {
148 									pr_info("page fault happen during emulate_instruction");
149 									status = 0;
150 								}
151 							}
152 
153 							/*
154 							 * Notify other vcpus of the guest to restart execution.
155 							 */
156 							vcpu_complete_lock_instr_emulation(vcpu);
157 
158 							/* Do not inject #AC/#GP, we have emulated it */
159 							*queue_exception = false;
160 						}
161 					} else {
162 						if (status == -EFAULT) {
163 							pr_info("page fault happen during decode_instruction");
164 							/* For this case, Inject #PF, not to queue #AC/#GP */
165 							*queue_exception = false;
166 						}
167 
168 						/* if decode_instruction(full_decode = false) return -1, that means this is an unknown instruction,
169 						 * and has skipped #UD injection. Just keep queue_exception = true to inject #AC back */
170 						status = 0;
171 					}
172 				}
173 			}
174 
175 			break;
176 		default:
177 			break;
178 		}
179 	}
180 
181 	return status;
182 }
183