1 /*
2  * Copyright (C) 2018-2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <types.h>
8 #include <errno.h>
9 #include <asm/lib/spinlock.h>
10 #include <asm/guest/vcpu.h>
11 #include <asm/guest/vm.h>
12 #include <asm/guest/virq.h>
13 #include <asm/guest/optee.h>
14 #include <acrn_hv_defs.h>
15 #include <hypercall.h>
16 #include <trace.h>
17 #include <logmsg.h>
18 
/* Protects the find-and-claim of a free dynamic VM id in allocate_dynamical_vmid(). */
static spinlock_t vm_id_lock = { .head = 0U, .tail = 0U };

/* One entry of the hypercall dispatch table: the handler to invoke plus the
 * permission policy deciding which VMs may invoke it.
 */
struct hc_dispatch {
	/* Handler invoked with the calling vCPU, the resolved target VM and the two raw guest parameters. */
	int32_t (*handler)(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);

	/* The permission_flags is a bitmap of guest flags indicating whether a VM can invoke this hypercall:
	 *
	 * - If permission_flags == 0UL (which is the default value), this hypercall can only be invoked by the
	 *   Service VM.
	 * - Otherwise, this hypercall can only be invoked by a VM whose guest flags have ALL set bits in
	 *   permission_flags.
	 */
	uint64_t permission_flags;
};
32 
/* Hypercall dispatch table, indexed by HC_IDX(hypercall id).
 * Entries that leave permission_flags at 0 are Service VM management
 * hypercalls; the trailing entries require the listed guest flags
 * (secure world / TEE / REE) instead.
 */
static const struct hc_dispatch hc_dispatch_table[] = {
	[HC_IDX(HC_GET_API_VERSION)] = {
		.handler = hcall_get_api_version},
	[HC_IDX(HC_SERVICE_VM_OFFLINE_CPU)] = {
		.handler = hcall_service_vm_offline_cpu},
	[HC_IDX(HC_SET_CALLBACK_VECTOR)] = {
		.handler = hcall_set_callback_vector},
	[HC_IDX(HC_CREATE_VM)] = {
		.handler = hcall_create_vm},
	[HC_IDX(HC_DESTROY_VM)] = {
		.handler = hcall_destroy_vm},
	[HC_IDX(HC_START_VM)] = {
		.handler = hcall_start_vm},
	[HC_IDX(HC_RESET_VM)] = {
		.handler = hcall_reset_vm},
	[HC_IDX(HC_PAUSE_VM)] = {
		.handler = hcall_pause_vm},
	[HC_IDX(HC_SET_VCPU_REGS)] = {
		.handler = hcall_set_vcpu_regs},
	[HC_IDX(HC_CREATE_VCPU)] = {
		.handler = hcall_create_vcpu},
	[HC_IDX(HC_SET_IRQLINE)] = {
		.handler = hcall_set_irqline},
	[HC_IDX(HC_INJECT_MSI)] = {
		.handler = hcall_inject_msi},
	[HC_IDX(HC_SET_IOREQ_BUFFER)] = {
		.handler = hcall_set_ioreq_buffer},
	[HC_IDX(HC_ASYNCIO_ASSIGN)] = {
		.handler = hcall_asyncio_assign},
	[HC_IDX(HC_ASYNCIO_DEASSIGN)] = {
		.handler = hcall_asyncio_deassign},
	[HC_IDX(HC_NOTIFY_REQUEST_FINISH)] = {
		.handler = hcall_notify_ioreq_finish},
	[HC_IDX(HC_VM_SET_MEMORY_REGIONS)] = {
		.handler = hcall_set_vm_memory_regions},
	[HC_IDX(HC_VM_WRITE_PROTECT_PAGE)] = {
		.handler = hcall_write_protect_page},
	[HC_IDX(HC_VM_GPA2HPA)] = {
		.handler = hcall_gpa_to_hpa},
	[HC_IDX(HC_ASSIGN_PCIDEV)] = {
		.handler = hcall_assign_pcidev},
	[HC_IDX(HC_DEASSIGN_PCIDEV)] = {
		.handler = hcall_deassign_pcidev},
	[HC_IDX(HC_ASSIGN_MMIODEV)] = {
		.handler = hcall_assign_mmiodev},
	[HC_IDX(HC_DEASSIGN_MMIODEV)] = {
		.handler = hcall_deassign_mmiodev},
	[HC_IDX(HC_ADD_VDEV)] = {
		.handler = hcall_add_vdev},
	[HC_IDX(HC_REMOVE_VDEV)] = {
		.handler = hcall_remove_vdev},
	[HC_IDX(HC_SET_PTDEV_INTR_INFO)] = {
		.handler = hcall_set_ptdev_intr_info},
	[HC_IDX(HC_RESET_PTDEV_INTR_INFO)] = {
		.handler = hcall_reset_ptdev_intr_info},
	[HC_IDX(HC_PM_GET_CPU_STATE)] = {
		.handler = hcall_get_cpu_pm_state},
	[HC_IDX(HC_VM_INTR_MONITOR)] = {
		.handler = hcall_vm_intr_monitor},
	[HC_IDX(HC_SETUP_SBUF)] = {
		.handler = hcall_setup_sbuf},
	[HC_IDX(HC_SETUP_HV_NPK_LOG)] = {
		.handler = hcall_setup_hv_npk_log},
	[HC_IDX(HC_PROFILING_OPS)] = {
		.handler = hcall_profiling_ops},
	[HC_IDX(HC_GET_HW_INFO)] = {
		.handler = hcall_get_hw_info},
	/* Trusty / secure-world hypercalls: caller must have GUEST_FLAG_SECURE_WORLD_ENABLED. */
	[HC_IDX(HC_INITIALIZE_TRUSTY)] = {
		.handler = hcall_initialize_trusty,
		.permission_flags = GUEST_FLAG_SECURE_WORLD_ENABLED},
	[HC_IDX(HC_WORLD_SWITCH)] = {
		.handler = hcall_world_switch,
		.permission_flags = GUEST_FLAG_SECURE_WORLD_ENABLED},
	[HC_IDX(HC_SAVE_RESTORE_SWORLD_CTX)] = {
		.handler = hcall_save_restore_sworld_ctx,
		.permission_flags = GUEST_FLAG_SECURE_WORLD_ENABLED},
	/* TEE/REE hypercalls: caller must have the corresponding guest flag(s). */
	[HC_IDX(HC_TEE_VCPU_BOOT_DONE)] = {
		.handler = hcall_handle_tee_vcpu_boot_done,
		.permission_flags = GUEST_FLAG_TEE},
	[HC_IDX(HC_SWITCH_EE)] = {
		.handler = hcall_switch_ee,
		.permission_flags = (GUEST_FLAG_TEE | GUEST_FLAG_REE)},
};
117 
allocate_dynamical_vmid(struct acrn_vm_creation * cv)118 uint16_t allocate_dynamical_vmid(struct acrn_vm_creation *cv)
119 {
120 	uint16_t vm_id;
121 	struct acrn_vm_config *vm_config;
122 
123 	spinlock_obtain(&vm_id_lock);
124 	vm_id = get_unused_vmid();
125 	if (vm_id != ACRN_INVALID_VMID) {
126 		vm_config = get_vm_config(vm_id);
127 		memcpy_s(vm_config->name, MAX_VM_NAME_LEN, cv->name, MAX_VM_NAME_LEN);
128 		vm_config->cpu_affinity = cv->cpu_affinity;
129 	}
130 	spinlock_release(&vm_id_lock);
131 	return vm_id;
132 }
133 
134 #define GUEST_FLAGS_ALLOWING_HYPERCALLS GUEST_FLAG_SECURE_WORLD_ENABLED
is_guest_hypercall(struct acrn_vm * vm)135 static bool is_guest_hypercall(struct acrn_vm *vm)
136 {
137 	uint64_t guest_flags = get_vm_config(vm->vm_id)->guest_flags;
138 	bool ret = true;
139 
140 	if ((guest_flags & (GUEST_FLAG_SECURE_WORLD_ENABLED |
141 		GUEST_FLAG_TEE | GUEST_FLAG_REE)) == 0UL) {
142 		ret = false;
143 	}
144 
145 	return ret;
146 }
147 
/*
 * Resolve which VM a Service-VM hypercall operates on.
 *
 * Depending on @hcall_id the target is encoded differently: embedded in a
 * copied-in structure (HC_CREATE_VM, HC_VM_SET_MEMORY_REGIONS), packed in a
 * command bitfield (HC_PM_GET_CPU_STATE), implicitly the Service VM itself,
 * or — by default — a relative vm id passed directly in @param1. Relative
 * ids are converted to absolute ids via rel_vmid_2_vmid().
 *
 * @return the resolved VM, or NULL when no valid target could be determined.
 */
struct acrn_vm *parse_target_vm(struct acrn_vm *service_vm, uint64_t hcall_id, uint64_t param1, __unused uint64_t param2)
{
	struct acrn_vm *target_vm = NULL;
	uint16_t vm_id = ACRN_INVALID_VMID;
	struct acrn_vm_creation create_req;
	struct set_regions region_req;

	switch (hcall_id) {
	case HC_CREATE_VM:
		if (copy_from_gpa(service_vm, &create_req, param1, sizeof(create_req)) == 0) {
			vm_id = get_vmid_by_name((char *)create_req.name);
			if (vm_id == ACRN_INVALID_VMID) {
				/* Name not in the pre-defined vm_list: try to claim a free dynamic slot
				 * so the VM can still be started on user request.
				 */
				vm_id = allocate_dynamical_vmid(&create_req);
				if (vm_id == ACRN_INVALID_VMID) {
					/* No free slot either; CONFIG_MAX_VM_NUM may be too small for this VM. */
					pr_err("The VM name provided (%s) is invalid, cannot create VM", create_req.name);
				}
			}
		}
		break;

	case HC_PM_GET_CPU_STATE:
		/* The PM command packs a relative vm id into a bitfield of param1. */
		vm_id = rel_vmid_2_vmid(service_vm->vm_id, (uint16_t)((param1 & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT));
		break;

	case HC_VM_SET_MEMORY_REGIONS:
		if (copy_from_gpa(service_vm, &region_req, param1, sizeof(region_req)) == 0) {
			/* regions.vmid is relative to the Service VM; convert to an absolute id. */
			vm_id = rel_vmid_2_vmid(service_vm->vm_id, region_req.vmid);
		}
		break;

	case HC_GET_API_VERSION:
	case HC_SERVICE_VM_OFFLINE_CPU:
	case HC_SET_CALLBACK_VECTOR:
	case HC_SETUP_HV_NPK_LOG:
	case HC_PROFILING_OPS:
	case HC_GET_HW_INFO:
		/* These hypercalls act on the Service VM itself. */
		target_vm = service_vm;
		break;

	default:
		/* By convention param1 carries a relative vm id directly. */
		vm_id = rel_vmid_2_vmid(service_vm->vm_id, (uint16_t)param1);
		break;
	}

	if ((target_vm == NULL) && (vm_id < CONFIG_MAX_VM_NUM)) {
		target_vm = get_vm_from_vmid(vm_id);
		if (hcall_id == HC_CREATE_VM) {
			target_vm->vm_id = vm_id;
		}
	}

	return target_vm;
}
208 
/* Look up and invoke the handler for the hypercall requested by @vcpu.
 *
 * Permission model (see struct hc_dispatch):
 * - permission_flags == 0: Service VM management hypercall; the caller must be
 *   the Service VM (and not an REE VM) and the handler runs against a resolved,
 *   non-prelaunched target VM under its VM lock.
 * - permission_flags != 0: the caller's guest flags must intersect
 *   permission_flags; the handler runs against the calling VM itself.
 *
 * @return the handler's return value, or -ENOTTY when the hypercall id is
 *         unknown, has no handler, or the caller lacks permission.
 */
static int32_t dispatch_hypercall(struct acrn_vcpu *vcpu)
{
	int32_t ret = -ENOTTY;
	struct acrn_vm *vm = vcpu->vm;
	uint64_t guest_flags = get_vm_config(vm->vm_id)->guest_flags;  /* static guest flags of the calling VM */
	uint64_t hcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8);  /* hypercall ID from guest */

	if (HC_IDX(hcall_id) < ARRAY_SIZE(hc_dispatch_table)) {
		const struct hc_dispatch *dispatch = &(hc_dispatch_table[HC_IDX(hcall_id)]);
		uint64_t permission_flags = dispatch->permission_flags;

		if (dispatch->handler != NULL) {
			uint64_t param1 = vcpu_get_gpreg(vcpu, CPU_REG_RDI);  /* hypercall param1 from guest */
			uint64_t param2 = vcpu_get_gpreg(vcpu, CPU_REG_RSI);  /* hypercall param2 from guest */

			if ((permission_flags == 0UL) && is_service_vm(vm) && !is_ree_vm(vm)) {
				/* A permission_flags of 0 indicates that this hypercall is for Service VM to manage
				 * post-launched VMs.
				 *
				 * Though REE VM has its load order to be Service_VM, it does not offer services as
				 * Service VM does. The only hypercalls allowed for REE are the ones with permission flag
				 * GUEST_FLAG_REE.
				 */
				struct acrn_vm *target_vm = parse_target_vm(vm, hcall_id, param1, param2);

				if ((target_vm != NULL) && !is_prelaunched_vm(target_vm)) {
					get_vm_lock(target_vm);
					ret = dispatch->handler(vcpu, target_vm, param1, param2);
					put_vm_lock(target_vm);
				}
			} else if ((permission_flags != 0UL) &&
					((guest_flags & permission_flags) != 0UL)) {
				/* Guest-invokable hypercall: runs against the calling VM itself. */
				ret = dispatch->handler(vcpu, vcpu->vm, param1, param2);
			} else {
				/* The vCPU is not allowed to invoke the given hypercall. Keep `ret` as -ENOTTY and no
				 * further actions required.
				 */
			}
		}
	}

	return ret;
}
252 
/*
 * VM-exit handler for the `vmcall` instruction (hypercall entry point).
 *
 * Pass return value to Service VM by register rax.
 * This function should always return 0 since we shouldn't
 * deal with hypercall error in hypervisor.
 */
int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
{
	int32_t ret = -EACCES;
	struct acrn_vm *vm = vcpu->vm;
	/* hypercall ID from guest*/
	uint64_t hypcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8);

	/*
	 * The following permission checks are applied to hypercalls.
	 *
	 * 1. Only Service VM and VMs with specific guest flags (referred to as 'allowed VMs' hereinafter) can invoke
	 *    hypercalls by executing the `vmcall` instruction. Attempts to execute the `vmcall` instruction in the
	 *    other VMs will trigger #UD.
	 * 2. Attempts to execute the `vmcall` instruction from ring 1, 2 or 3 in an allowed VM will trigger #GP(0).
	 * 3. An allowed VM is permitted to only invoke some of the supported hypercalls depending on its load order and
	 *    guest flags. Attempts to invoke an unpermitted hypercall will make a vCPU see -ENOTTY as the return
	 *    value (see dispatch_hypercall()). No exception is triggered in this case.
	 */
	if (!is_service_vm(vm) && !is_guest_hypercall(vm)) {
		vcpu_inject_ud(vcpu);
	} else if (!is_hypercall_from_ring0()) {
		vcpu_inject_gp(vcpu, 0U);
	} else {
		/* Guest RAX carries the hypercall result; it is only written when the
		 * call was actually dispatched (exception-injection paths leave it alone).
		 */
		ret = dispatch_hypercall(vcpu);
		vcpu_set_gpreg(vcpu, CPU_REG_RAX, (uint64_t)ret);
	}

	/* `ret` stays -EACCES on the injection paths above, so those are logged too. */
	if (ret < 0) {
		pr_err("ret=%d hypercall=0x%lx failed in %s\n", ret, hypcall_id, __func__);
	}
	TRACE_2L(TRACE_VMEXIT_VMCALL, vm->vm_id, hypcall_id);

	return 0;
}
292