1 /*
2 * Copyright (C) 2018-2022 Intel Corporation.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <asm/guest/vm.h>
8 #include <asm/vmx.h>
9 #include <schedule.h>
10 #include <version.h>
11 #include <reloc.h>
12 #include <asm/vtd.h>
13 #include <asm/per_cpu.h>
14 #include <asm/lapic.h>
15 #include <asm/guest/assign.h>
16 #include <asm/guest/ept.h>
17 #include <asm/guest/vm.h>
18 #include <asm/mmu.h>
19 #include <hypercall.h>
20 #include <errno.h>
21 #include <logmsg.h>
22 #include <asm/ioapic.h>
23 #include <mmio_dev.h>
24 #include <ivshmem.h>
25 #include <vmcs9900.h>
26 #include <asm/rtcm.h>
27 #include <asm/irq.h>
28 #include <ticks.h>
29 #include <asm/cpuid.h>
30 #include <vroot_port.h>
31
32 #define DBG_LEVEL_HYCALL 6U
33
34 typedef int32_t (*emul_dev_create) (struct acrn_vm *vm, struct acrn_vdev *dev);
35 typedef int32_t (*emul_dev_destroy) (struct pci_vdev *vdev);
36 struct emul_dev_ops {
37 /*
38	 * The low 32 bits represent the vendor id and device id of a PCI device,
39	 * and the high 32 bits represent the device number of a legacy device
40 */
41 uint64_t dev_id;
42 /* TODO: to re-use vdev_init/vdev_deinit directly in hypercall */
43 emul_dev_create create;
44 emul_dev_destroy destroy;
45 };
46
47 static struct emul_dev_ops emul_dev_ops_tbl[] = {
48 #ifdef CONFIG_IVSHMEM_ENABLED
49	{(IVSHMEM_VENDOR_ID | (IVSHMEM_DEVICE_ID << 16U)), create_ivshmem_vdev, destroy_ivshmem_vdev},
50 #else
51 {(IVSHMEM_VENDOR_ID | (IVSHMEM_DEVICE_ID << 16U)), NULL, NULL},
52 #endif
53 {(MCS9900_VENDOR | (MCS9900_DEV << 16U)), create_vmcs9900_vdev, destroy_vmcs9900_vdev},
54 {(VRP_VENDOR | (VRP_DEVICE << 16U)), create_vrp, destroy_vrp},
55 };
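/*
 * Illustrative example (editor's note, not part of the original source): for the
 * vmcs9900 entry above, dev_id == (MCS9900_VENDOR | (MCS9900_DEV << 16U)), i.e.
 * the vendor id occupies bits [15:0] and the device id bits [31:16] of the low
 * 32 bits, matching the layout described in struct emul_dev_ops.
 */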
56
57 bool is_hypercall_from_ring0(void)
58 {
59 uint16_t cs_sel;
60 bool ret;
61
62 cs_sel = exec_vmread16(VMX_GUEST_CS_SEL);
63 /* cs_selector[1:0] is CPL */
64 if ((cs_sel & 0x3U) == 0U) {
65 ret = true;
66 } else {
67 ret = false;
68 }
69
70 return ret;
71 }
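/*
 * Editor's note: for example, a vmcall issued from guest ring 3 reads a CS
 * selector with (cs_sel & 0x3U) == 3U, so this helper returns false and the
 * hypercall is treated as not coming from ring 0.
 */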
72
73 /**
74 * @brief offline vcpu from Service VM
75 *
76 * The function offlines a specific vCPU of the Service VM.
77 *
78 * @param vcpu Pointer to vCPU that initiates the hypercall
79 * @param param1 lapic id of the vcpu to be offlined
80 *
81 * @pre is_service_vm(vcpu->vm)
82 * @return 0 on success, non-zero on error.
83 */
84 int32_t hcall_service_vm_offline_cpu(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
85 uint64_t param1, __unused uint64_t param2)
86 {
87 struct acrn_vcpu *target_vcpu;
88 uint16_t i;
89 int32_t ret = 0;
90 uint32_t lapicid = (uint32_t)param1;
91
92 pr_info("Service VM offline cpu with lapicid %ld", lapicid);
93
94 foreach_vcpu(i, vcpu->vm, target_vcpu) {
95 if (vlapic_get_apicid(vcpu_vlapic(target_vcpu)) == lapicid) {
96 /* should not offline BSP */
97 if (target_vcpu->vcpu_id == BSP_CPU_ID) {
98 ret = -1;
99 break;
100 }
101 zombie_vcpu(target_vcpu, VCPU_ZOMBIE);
102 offline_vcpu(target_vcpu);
103 }
104 }
105
106 return ret;
107 }
108
109 /**
110 * @brief Get hypervisor api version
111 *
112 * The function only returns API version information when the calling VM is the Service VM.
113 *
114 * @param vcpu Pointer to vCPU that initiates the hypercall
115 * @param param1 guest physical memory address. The api version returned
116 * will be copied to this gpa
117 *
118 * @pre is_service_vm(vcpu->vm)
119 * @return 0 on success, non-zero on error.
120 */
121 int32_t hcall_get_api_version(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
122 uint64_t param1, __unused uint64_t param2)
123 {
124 struct hc_api_version version;
125
126 version.major_version = HV_API_MAJOR_VERSION;
127 version.minor_version = HV_API_MINOR_VERSION;
128
129 return copy_to_gpa(vcpu->vm, &version, param1, sizeof(version));
130 }
131
132 /*
133 * nearest_pow2(n) returns the exponent of the nearest power of 2 that is not less than n,
134 * i.e. ceil(log2(n)) for n >= 2. The index of the last (most significant) bit set of (n*2-1) matches this definition.
135 */
136 static uint32_t nearest_pow2(uint32_t n)
137 {
138 uint32_t p = n;
139
140 if (n >= 2U) {
141 p = fls32(2U*n - 1U);
142 }
143
144 return p;
145 }
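/*
 * Worked example (editor's illustration): nearest_pow2(4) == fls32(7U) == 2 and
 * nearest_pow2(5) == fls32(9U) == 3, i.e. the function returns the exponent
 * (shift count), not the power-of-2 value itself.
 */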
146
147 void get_cache_shift(uint32_t *l2_shift, uint32_t *l3_shift)
148 {
149 uint32_t subleaf;
150
151 *l2_shift = 0U;
152 *l3_shift = 0U;
153
154 for (subleaf = 0U;; subleaf++) {
155 uint32_t eax, ebx, ecx, edx;
156 uint32_t cache_type, cache_level, id, shift;
157
158 cpuid_subleaf(0x4U, subleaf, &eax, &ebx, &ecx, &edx);
159
160 cache_type = eax & 0x1fU;
161 cache_level = (eax >> 5U) & 0x7U;
162 id = (eax >> 14U) & 0xfffU;
163 shift = nearest_pow2(id + 1U);
164
165 /* No more caches */
166 if ((cache_type == 0U) || (cache_type >= 4U)) {
167 break;
168 }
169
170 if (cache_level == 2U) {
171 *l2_shift = shift;
172 } else if (cache_level == 3U) {
173 *l3_shift = shift;
174 } else {
175	/* this API only handles the L2 & L3 caches */
176 }
177 }
178 }
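/*
 * Worked example (editor's illustration of the CPUID leaf 4 encoding used above):
 * if the L2 subleaf reports EAX[25:14] == 15, i.e. up to 16 logical processors
 * share the L2 cache, then shift == nearest_pow2(16U) == 4 and *l2_shift is set to 4.
 */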
179
180 /**
181 * @brief create virtual machine
182 *
183 * Create a virtual machine based on the given parameters. Currently there is
184 * no limit on how many times this function can be called; MAX_VM_NUM
185 * support will be added later.
186 *
187 * @param vcpu Pointer to vCPU that initiates the hypercall
188 * @param target_vm Pointer to target VM data structure
189 * @param param1 guest physical memory address. This gpa points to
190 * struct acrn_vm_creation
191 *
192 * @pre is_service_vm(vcpu->vm)
193 * @pre get_vm_config(target_vm->vm_id) != NULL
194 * @return 0 on success, non-zero on error.
195 */
196 int32_t hcall_create_vm(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, __unused uint64_t param2)
197 {
198 struct acrn_vm *vm = vcpu->vm;
199 uint16_t vmid = target_vm->vm_id;
200 int32_t ret = -1;
201 struct acrn_vm *tgt_vm = NULL;
202 struct acrn_vm_creation cv;
203 struct acrn_vm_config *vm_config = get_vm_config(vmid);
204
205 if (copy_from_gpa(vm, &cv, param1, sizeof(cv)) == 0) {
206 if (is_poweroff_vm(get_vm_from_vmid(vmid))) {
207
208	/* Filter out the bits that should not be set by the DM and then assign the rest to guest_flags */
209 vm_config->guest_flags &= ~DM_OWNED_GUEST_FLAG_MASK;
210 vm_config->guest_flags |= (cv.vm_flag & DM_OWNED_GUEST_FLAG_MASK);
211
212 /* post-launched VM is allowed to choose pCPUs from vm_config->cpu_affinity only */
213 if ((cv.cpu_affinity & ~(vm_config->cpu_affinity)) == 0UL) {
214 /* By default launch VM with all the configured pCPUs */
215 uint64_t pcpu_bitmap = vm_config->cpu_affinity;
216
217 if (cv.cpu_affinity != 0UL) {
218 /* overwrite the statically configured CPU affinity */
219 pcpu_bitmap = cv.cpu_affinity;
220 }
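/*
 * Illustrative example (editor's note): with vm_config->cpu_affinity == 0x0CUL
 * and cv.cpu_affinity == 0x04UL, pcpu_bitmap becomes 0x04UL (pCPU2 only);
 * a request of 0x03UL would have been rejected by the affinity check above.
 */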
221
222 /*
223 * GUEST_FLAG_RT must be set if we have GUEST_FLAG_LAPIC_PASSTHROUGH
224 * set in guest_flags
225 */
226 if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0UL)
227 && ((vm_config->guest_flags & GUEST_FLAG_RT) == 0UL)) {
228 pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags);
229 } else {
230 if (create_vm(vmid, pcpu_bitmap, vm_config, &tgt_vm) == 0) {
231 /* return a relative vm_id from Service VM view */
232 cv.vmid = vmid_2_rel_vmid(vm->vm_id, vmid);
233 cv.vcpu_num = tgt_vm->hw.created_vcpus;
234 } else {
235 dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed");
236 cv.vmid = ACRN_INVALID_VMID;
237 }
238
239 ret = copy_to_gpa(vm, &cv, param1, sizeof(cv));
240 }
241 } else {
242 pr_err("Post-launched VM%u chooses invalid pCPUs(0x%llx).",
243 vmid, cv.cpu_affinity);
244 }
245 }
246
247 }
248
249 if (((ret != 0) || (cv.vmid == ACRN_INVALID_VMID)) && (!is_static_configured_vm(target_vm))) {
250 memset(vm_config->name, 0U, MAX_VM_NAME_LEN);
251 }
252
253 return ret;
254 }
255
256 /**
257 * @brief destroy virtual machine
258 *
259 * Destroy a virtual machine; it will pause the target VM and then shut it down.
260 * The function will return -1 if the target VM does not exist.
261 *
262 * @param target_vm Pointer to target VM data structure
263 *
264 * @return 0 on success, non-zero on error.
265 */
266 int32_t hcall_destroy_vm(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
267 __unused uint64_t param1, __unused uint64_t param2)
268 {
269 int32_t ret = -1;
270
271 if (is_paused_vm(target_vm)) {
272 /* TODO: check target_vm guest_flags */
273 ret = shutdown_vm(target_vm);
274 }
275
276 return ret;
277 }
278
279 /**
280 * @brief start virtual machine
281 *
282 * Start a virtual machine; it schedules the target VM's vCPU to run.
283 * The function will return -1 if the target VM does not exist or the
284 * IOReq buffer page for the VM is not ready.
285 *
286 * @param target_vm Pointer to target VM data structure
287 *
288 * @return 0 on success, non-zero on error.
289 */
290 int32_t hcall_start_vm(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
291 __unused uint64_t param1, __unused uint64_t param2)
292 {
293 int32_t ret = -1;
294
295 if ((is_created_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
296 /* TODO: check target_vm guest_flags */
297 start_vm(target_vm);
298 ret = 0;
299 }
300
301 return ret;
302 }
303
304 /**
305 * @brief pause virtual machine
306 *
307 * Pause a virtual machine. If the VM is already paused, the function
308 * will return 0 directly for success.
309 * The function will return -1 if the target VM does not exist.
310 *
311 * @param target_vm Pointer to target VM data structure
312 *
313 * @return 0 on success, non-zero on error.
314 */
315 int32_t hcall_pause_vm(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
316 __unused uint64_t param1, __unused uint64_t param2)
317 {
318 int32_t ret = -1;
319
320 if (!is_poweroff_vm(target_vm)) {
321 /* TODO: check target_vm guest_flags */
322 pause_vm(target_vm);
323 ret = 0;
324 }
325 return ret;
326 }
327
328 /**
329 * @brief reset virtual machine
330 *
331 * Reset a virtual machine; it makes the target VM rerun from its
332 * pre-defined entry point. Compared to starting a VM, this function resets
333 * each vCPU's state and does some initialization for the guest.
334 * The function will return -1 if the target VM does not exist.
335 *
336 * @param target_vm Pointer to target VM data structure
337 *
338 * @return 0 on success, non-zero on error.
339 */
340 int32_t hcall_reset_vm(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
341 __unused uint64_t param1, __unused uint64_t param2)
342 {
343 int32_t ret = -1;
344
345 if (is_paused_vm(target_vm)) {
346 /* TODO: check target_vm guest_flags */
347 ret = reset_vm(target_vm, COLD_RESET);
348 }
349 return ret;
350 }
351
352 /**
353 * @brief set vcpu regs
354 *
355 * Set the vcpu regs. It sets the vCPU's initial registers as provided by the DM.
356 * Currently this only applies to the BSP; APs always use fixed init regs.
357 * The function will return -1 if the target VM or BSP doesn't exist.
358 *
359 * @param vcpu Pointer to vCPU that initiates the hypercall
360 * @param target_vm Pointer to target VM data structure
361 * @param param2 guest physical address. This gpa points to
362 * struct acrn_vcpu_regs
363 *
364 * @pre is_service_vm(vcpu->vm)
365 * @return 0 on success, non-zero on error.
366 */
367 int32_t hcall_set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
368 __unused uint64_t param1, uint64_t param2)
369 {
370 struct acrn_vm *vm = vcpu->vm;
371 struct acrn_vcpu_regs vcpu_regs;
372 struct acrn_vcpu *target_vcpu;
373 int32_t ret = -1;
374
375 /* Only allow setup init ctx while target_vm is inactive */
376 if ((!is_poweroff_vm(target_vm)) && (param2 != 0U) && (target_vm->state != VM_RUNNING)) {
377 if (copy_from_gpa(vm, &vcpu_regs, param2, sizeof(vcpu_regs)) != 0) {
378 } else if (vcpu_regs.vcpu_id >= MAX_VCPUS_PER_VM) {
379 pr_err("%s: invalid vcpu_id for set_vcpu_regs\n", __func__);
380 } else {
381 target_vcpu = vcpu_from_vid(target_vm, vcpu_regs.vcpu_id);
382 if (target_vcpu->state != VCPU_OFFLINE) {
383 if (is_valid_cr0_cr4(vcpu_regs.vcpu_regs.cr0, vcpu_regs.vcpu_regs.cr4)) {
384 set_vcpu_regs(target_vcpu, &(vcpu_regs.vcpu_regs));
385 ret = 0;
386 }
387 }
388 }
389 }
390
391 return ret;
392 }
393
394 int32_t hcall_create_vcpu(__unused struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
395 __unused uint64_t param1, __unused uint64_t param2)
396 {
397 return 0;
398 }
399
400 /**
401 * @brief set or clear IRQ line
402 *
403 * Set or clear a virtual IRQ line for a VM. The line can come from the ISA
404 * bus or the IOAPIC; normally it triggers an edge-triggered IRQ.
405 * The function will return -1 if the target VM does not exist.
406 *
407 * @param vcpu Pointer to vCPU that initiates the hypercall
408 * @param target_vm Pointer to target VM data structure
409 * @param param2 info for irqline
410 *
411 * @pre is_service_vm(vcpu->vm)
412 * @return 0 on success, non-zero on error.
413 */
414 int32_t hcall_set_irqline(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
415 __unused uint64_t param1, uint64_t param2)
416 {
417 uint32_t irq_pic;
418 int32_t ret = -1;
419	struct acrn_irqline_ops *ops = (struct acrn_irqline_ops *)&param2;
420
421 if (is_severity_pass(target_vm->vm_id) && !is_poweroff_vm(target_vm)) {
422 if (ops->gsi < get_vm_gsicount(target_vm)) {
423 if (ops->gsi < vpic_pincount()) {
424 /*
425 * IRQ line for 8254 timer is connected to
426 * I/O APIC pin #2 but PIC pin #0, route GSI
427 * number #2 to PIC IRQ #0.
428 */
429 irq_pic = (ops->gsi == 2U) ? 0U : ops->gsi;
430 vpic_set_irqline(vm_pic(target_vm), irq_pic, ops->op);
431 }
432
433 /* handle IOAPIC irqline */
434 vioapic_set_irqline_lock(target_vm, ops->gsi, ops->op);
435 ret = 0;
436 }
437 }
438
439 return ret;
440 }
441
442 /**
443 * @brief inject MSI interrupt
444 *
445 * Inject an MSI interrupt into a VM.
446 * The function will return -1 if the target VM does not exist.
447 *
448 * @param vcpu Pointer to vCPU that initiates the hypercall
449 * @param target_vm Pointer to target VM data structure
450 * @param param2 guest physical address. This gpa points to struct acrn_msi_entry
451 *
452 * @pre is_service_vm(vcpu->vm)
453 * @return 0 on success, non-zero on error.
454 */
455 int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2)
456 {
457 struct acrn_vm *vm = vcpu->vm;
458 int32_t ret = -1;
459
460 if (is_severity_pass(target_vm->vm_id) && !is_poweroff_vm(target_vm)) {
461 struct acrn_msi_entry msi;
462
463 if (copy_from_gpa(vm, &msi, param2, sizeof(msi)) == 0) {
464 ret = vlapic_inject_msi(target_vm, msi.msi_addr, msi.msi_data);
465 }
466 }
467
468 return ret;
469 }
470
471 /**
472 * @brief set ioreq shared buffer
473 *
474 * Set the ioreq shared buffer for a VM.
475 * The function will return -1 if the target VM does not exist.
476 *
477 * @param vcpu Pointer to vCPU that initiates the hypercall
478 * @param target_vm Pointer to target VM data structure
479 * @param param2 guest physical address. This gpa points to buffer address
480 *
481 * @pre is_service_vm(vcpu->vm)
482 * @return 0 on success, non-zero on error.
483 */
484 int32_t hcall_set_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
485 __unused uint64_t param1, uint64_t param2)
486 {
487 struct acrn_vm *vm = vcpu->vm;
488 uint64_t hpa;
489 uint16_t i;
490 int32_t ret = -1;
491
492 if (is_created_vm(target_vm)) {
493 uint64_t iobuf;
494
495 if (copy_from_gpa(vm, &iobuf, param2, sizeof(iobuf)) == 0) {
496 dev_dbg(DBG_LEVEL_HYCALL, "[%d] SET BUFFER=0x%p",
497 target_vm->vm_id, iobuf);
498
499 hpa = gpa2hpa(vm, iobuf);
500 if (hpa == INVALID_HPA) {
501 pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
502 __func__, vm->vm_id, iobuf);
503 target_vm->sw.io_shared_page = NULL;
504 } else {
505 target_vm->sw.io_shared_page = hpa2hva(hpa);
506 for (i = 0U; i < ACRN_IO_REQUEST_MAX; i++) {
507 set_io_req_state(target_vm, i, ACRN_IOREQ_STATE_FREE);
508 }
509 ret = 0;
510 }
511 }
512 }
513
514 return ret;
515 }
516
517 /**
518 * @brief Set up a shared buffer for a VM.
519 *
520 * @param vcpu Pointer to vCPU that initiates the hypercall
521 * @param param2 guest physical address. This gpa points to
522 * struct acrn_sbuf_param
523 *
524 * @pre is_service_vm(vcpu->vm)
525 * @return 0 on success, non-zero on error.
526 */
527 int32_t hcall_setup_sbuf(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
528 __unused uint64_t param1, uint64_t param2)
529 {
530 struct acrn_vm *vm = vcpu->vm;
531 struct acrn_sbuf_param asp;
532 uint64_t *hva;
533 int ret = -1;
534
535 if (copy_from_gpa(vm, &asp, param2, sizeof(asp)) == 0) {
536 if (asp.gpa != 0U) {
537 hva = (uint64_t *)gpa2hva(vm, asp.gpa);
538 ret = sbuf_setup_common(target_vm, asp.cpu_id, asp.sbuf_id, hva);
539 }
540 }
541 return ret;
542 }
543
544 int32_t hcall_asyncio_assign(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
545 __unused uint64_t param1, uint64_t param2)
546 {
547 struct acrn_asyncio_info asyncio_info;
548 struct acrn_vm *vm = vcpu->vm;
549 int ret = -1;
550
551 if (copy_from_gpa(vm, &asyncio_info, param2, sizeof(asyncio_info)) == 0) {
552 add_asyncio(target_vm, &asyncio_info);
553 ret = 0;
554 }
555 return ret;
556 }
557
558 int32_t hcall_asyncio_deassign(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
559 __unused uint64_t param1, uint64_t param2)
560 {
561 struct acrn_asyncio_info asyncio_info;
562 struct acrn_vm *vm = vcpu->vm;
563 int ret = -1;
564
565 if (copy_from_gpa(vm, &asyncio_info, param2, sizeof(asyncio_info)) == 0) {
566 remove_asyncio(target_vm, &asyncio_info);
567 ret = 0;
568 }
569 return ret;
570 }
571
572 /**
573 * @brief notify request done
574 *
575 * Notify the requestor VCPU for the completion of an ioreq.
576 * The function will return -1 if the target VM does not exist.
577 *
578 * @param target_vm Pointer to target VM data structure
579 * @param param2 vcpu ID of the requestor
580 *
581 * @return 0 on success, non-zero on error.
582 */
583 int32_t hcall_notify_ioreq_finish(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
584 __unused uint64_t param1, uint64_t param2)
585 {
586 struct acrn_vcpu *target_vcpu;
587 int32_t ret = -1;
588 uint16_t vcpu_id = (uint16_t)param2;
589
590 /* make sure we have set req_buf */
591 if (is_severity_pass(target_vm->vm_id) &&
592 (!is_poweroff_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
593 dev_dbg(DBG_LEVEL_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
594 target_vm->vm_id, vcpu_id);
595
596 if (vcpu_id >= target_vm->hw.created_vcpus) {
597 pr_err("%s, failed to get VCPU %d context from VM %d\n",
598 __func__, vcpu_id, target_vm->vm_id);
599 } else {
600 target_vcpu = vcpu_from_vid(target_vm, vcpu_id);
601 if (!target_vcpu->vm->sw.is_polling_ioreq) {
602 signal_event(&target_vcpu->events[VCPU_EVENT_IOREQ]);
603 }
604 ret = 0;
605 }
606 }
607
608 return ret;
609 }
610
611 /**
612 *@pre is_service_vm(vm)
613 *@pre gpa2hpa(vm, region->service_vm_gpa) != INVALID_HPA
614 */
615 static void add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_vm,
616 		const struct vm_memory_region *region, uint64_t *pml4_page)
617 {
618 uint64_t prot = 0UL, base_paddr;
619 uint64_t hpa = gpa2hpa(vm, region->service_vm_gpa);
620
621 /* access right */
622 if ((region->prot & MEM_ACCESS_READ) != 0U) {
623 prot |= EPT_RD;
624 }
625 if ((region->prot & MEM_ACCESS_WRITE) != 0U) {
626 prot |= EPT_WR;
627 }
628 if ((region->prot & MEM_ACCESS_EXEC) != 0U) {
629 prot |= EPT_EXE;
630 }
631
632 /* memory type */
633 if ((region->prot & MEM_TYPE_WB) != 0U) {
634 prot |= EPT_WB;
635 } else if ((region->prot & MEM_TYPE_WT) != 0U) {
636 prot |= EPT_WT;
637 } else if ((region->prot & MEM_TYPE_WC) != 0U) {
638 prot |= EPT_WC;
639 } else if ((region->prot & MEM_TYPE_WP) != 0U) {
640 prot |= EPT_WP;
641 } else {
642 prot |= EPT_UNCACHED;
643 }
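/*
 * Illustrative example (editor's note): a region with
 * prot == (MEM_ACCESS_READ | MEM_ACCESS_WRITE | MEM_TYPE_WB) is mapped with
 * EPT attributes (EPT_RD | EPT_WR | EPT_WB), before the Software SRAM
 * adjustment below.
 */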
644
645 /* If Software SRAM is initialized, and HV received a request to map Software SRAM
646 * area to guest, we should add EPT_WB flag to make Software SRAM effective.
647	 * TODO: For simplicity, we can enforce WB for any region that overlaps with Software SRAM,
648	 * and leave it to the Service VM to make sure it won't violate that.
649 */
650 if (is_software_sram_enabled()) {
651 base_paddr = get_software_sram_base();
652 if ((hpa >= base_paddr) &&
653 ((hpa + region->size) <= (base_paddr + get_software_sram_size()))) {
654 prot |= EPT_WB;
655 }
656 }
657
658 /* create gpa to hpa EPT mapping */
659 ept_add_mr(target_vm, pml4_page, hpa, region->gpa, region->size, prot);
660 }
661
662 /**
663 *@pre is_service_vm(vm)
664 */
665 static int32_t set_vm_memory_region(struct acrn_vm *vm,
666 struct acrn_vm *target_vm, const struct vm_memory_region *region)
667 {
668 uint64_t *pml4_page;
669 int32_t ret = -EINVAL;
670
671 if ((region->size & (PAGE_SIZE - 1UL)) == 0UL) {
672 pml4_page = (uint64_t *)target_vm->arch_vm.nworld_eptp;
673 if (region->type == MR_ADD) {
674			/* check whether the GPA range is valid Service VM GPA */
675 if (ept_is_valid_mr(vm, region->service_vm_gpa, region->size)) {
676 /* FIXME: how to filter the alias mapping ? */
677 add_vm_memory_region(vm, target_vm, region, pml4_page);
678 ret = 0;
679 }
680 } else {
681 if (ept_is_valid_mr(target_vm, region->gpa, region->size)) {
682 ept_del_mr(target_vm, pml4_page, region->gpa, region->size);
683 ret = 0;
684 }
685 }
686 }
687
688 dev_dbg((ret == 0) ? DBG_LEVEL_HYCALL : LOG_ERROR,
689 "[vm%d] type=%d gpa=0x%x service_vm_gpa=0x%x sz=0x%x",
690 target_vm->vm_id, region->type, region->gpa,
691 region->service_vm_gpa, region->size);
692 return ret;
693 }
694
695 /**
696 * @brief setup ept memory mapping for multi regions
697 *
698 * @param vcpu Pointer to vCPU that initiates the hypercall
699 * @param target_vm Pointer to target VM data structure
700 * @param param1 guest physical address. This gpa points to
701 * struct set_regions
702 *
703 * @pre is_service_vm(vcpu->vm)
704 * @return 0 on success, non-zero on error.
705 */
706 int32_t hcall_set_vm_memory_regions(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
707 uint64_t param1, __unused uint64_t param2)
708 {
709 struct acrn_vm *vm = vcpu->vm;
710 struct set_regions regions;
711 struct vm_memory_region mr;
712 uint32_t idx;
713 int32_t ret = -1;
714
715	if (copy_from_gpa(vm, &regions, param1, sizeof(regions)) == 0) {
716
717 if (!is_poweroff_vm(target_vm) &&
718 (is_severity_pass(target_vm->vm_id) || (target_vm->state != VM_RUNNING))) {
719 idx = 0U;
720 while (idx < regions.mr_num) {
721 if (copy_from_gpa(vm, &mr, regions.regions_gpa + idx * sizeof(mr), sizeof(mr)) != 0) {
722 pr_err("%s: Copy mr entry fail from vm\n", __func__);
723 break;
724 }
725
726 ret = set_vm_memory_region(vm, target_vm, &mr);
727 if (ret < 0) {
728 break;
729 }
730 idx++;
731 }
732 } else {
733 pr_err("%p %s:target_vm is invalid or Targeting to service vm", target_vm, __func__);
734 }
735 }
736
737 return ret;
738 }
739
740 /**
741 *@pre is_service_vm(vm)
742 */
743 static int32_t write_protect_page(struct acrn_vm *vm, const struct wp_data *wp)
744 {
745 uint64_t hpa, base_paddr;
746 uint64_t prot_set;
747 uint64_t prot_clr;
748 int32_t ret = -EINVAL;
749
750 if (is_severity_pass(vm->vm_id)) {
751 if ((!mem_aligned_check(wp->gpa, PAGE_SIZE)) ||
752 (!ept_is_valid_mr(vm, wp->gpa, PAGE_SIZE))) {
753 pr_err("%s,vm[%hu] gpa 0x%lx,GPA is invalid or not page size aligned.",
754 __func__, vm->vm_id, wp->gpa);
755 } else {
756 hpa = gpa2hpa(vm, wp->gpa);
757 if (hpa == INVALID_HPA) {
758 pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
759 __func__, vm->vm_id, wp->gpa);
760 } else {
761 dev_dbg(DBG_LEVEL_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
762 vm->vm_id, wp->gpa, hpa);
763
764 base_paddr = hva2hpa((void *)(get_hv_image_base()));
765 if (((hpa <= base_paddr) && ((hpa + PAGE_SIZE) > base_paddr)) ||
766 ((hpa >= base_paddr) &&
767 (hpa < (base_paddr + get_hv_image_size())))) {
768 pr_err("%s: overlap the HV memory region.", __func__);
769 } else {
770 prot_set = (wp->set != 0U) ? 0UL : EPT_WR;
771 prot_clr = (wp->set != 0U) ? EPT_WR : 0UL;
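/*
 * Editor's note: wp->set != 0U clears EPT_WR on the page (write-protects it),
 * while wp->set == 0U sets EPT_WR again (removes the write protection).
 */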
772
773 ept_modify_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
774 wp->gpa, PAGE_SIZE, prot_set, prot_clr);
775 ret = 0;
776 }
777 }
778 }
779 }
780
781 return ret;
782 }
783
784 /**
785 * @brief change guest memory page write permission
786 *
787 * @param vcpu Pointer to vCPU that initiates the hypercall
788 * @param target_vm Pointer to target VM data structure
789 * @param param2 guest physical address. This gpa points to
790 * struct wp_data
791 *
792 * @pre is_service_vm(vcpu->vm)
793 * @return 0 on success, non-zero on error.
794 */
795 int32_t hcall_write_protect_page(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
796 __unused uint64_t param1, uint64_t param2)
797 {
798 struct acrn_vm *vm = vcpu->vm;
799 int32_t ret = -1;
800 uint64_t wp_gpa = param2;
801
802 if (!is_poweroff_vm(target_vm)) {
803 struct wp_data wp;
804
805 if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) == 0) {
806 ret = write_protect_page(target_vm, &wp);
807 }
808 } else {
809 pr_err("%p %s: target_vm is invalid", target_vm, __func__);
810 }
811
812 return ret;
813 }
814
815 /**
816 * @brief translate guest physical address to host physical address
817 *
818 * Translate guest physical address to host physical address for a VM.
819 * The function will return -1 if the target VM does not exist.
820 *
821 * @param vcpu Pointer to vCPU that initiates the hypercall
822 * @param target_vm Pointer to target VM data structure
823 * @param param2 guest physical address. This gpa points to struct vm_gpa2hpa
824 *
825 * @pre is_service_vm(vcpu->vm)
826 * @return 0 on success, non-zero on error.
827 */
828 int32_t hcall_gpa_to_hpa(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2)
829 {
830 struct acrn_vm *vm = vcpu->vm;
831 int32_t ret = -1;
832 struct vm_gpa2hpa v_gpa2hpa;
833
834 (void)memset((void *)&v_gpa2hpa, 0U, sizeof(v_gpa2hpa));
835 if (!is_poweroff_vm(target_vm) &&
836 (copy_from_gpa(vm, &v_gpa2hpa, param2, sizeof(v_gpa2hpa)) == 0)) {
837 v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
838 if (v_gpa2hpa.hpa == INVALID_HPA) {
839 pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
840 __func__, target_vm->vm_id, v_gpa2hpa.gpa);
841 } else {
842 ret = copy_to_gpa(vm, &v_gpa2hpa, param2, sizeof(v_gpa2hpa));
843 }
844 } else {
845 pr_err("target_vm is invalid or HCALL gpa2hpa: Unable copy param from vm\n");
846 }
847
848 return ret;
849 }
850
851 /**
852 * @brief Assign one PCI dev to a VM.
853 *
854 * @param vcpu Pointer to vCPU that initiates the hypercall
855 * @param target_vm Pointer to target VM data structure
856 * @param param2 guest physical address. This gpa points to data structure of
857 * acrn_pcidev including assign PCI device info
858 *
859 * @pre is_service_vm(vcpu->vm)
860 * @return 0 on success, non-zero on error.
861 */
862 int32_t hcall_assign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
863 __unused uint64_t param1, uint64_t param2)
864 {
865 struct acrn_vm *vm = vcpu->vm;
866 int32_t ret = -EINVAL;
867 struct acrn_pcidev pcidev;
868
869	/* We should only assign a device to a post-launched VM at creation time for safety, not at runtime or in other cases */
870 if (is_created_vm(target_vm)) {
871 if (copy_from_gpa(vm, &pcidev, param2, sizeof(pcidev)) == 0) {
872 ret = vpci_assign_pcidev(target_vm, &pcidev);
873 }
874 } else {
875 pr_err("%s, vm[%d] is not a postlaunched VM, or not in CREATED status to be assigned with a pcidev\n", __func__, vm->vm_id);
876 }
877
878 return ret;
879 }
880
881 /**
882 * @brief Deassign one PCI dev from a VM.
883 *
884 * @param vcpu Pointer to vCPU that initiates the hypercall
885 * @param target_vm Pointer to target VM data structure
886 * @param param2 guest physical address. This gpa points to data structure of
887 * acrn_pcidev including deassign PCI device info
888 *
889 * @pre is_service_vm(vcpu->vm)
890 * @return 0 on success, non-zero on error.
891 */
892 int32_t hcall_deassign_pcidev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
893 __unused uint64_t param1, uint64_t param2)
894 {
895 struct acrn_vm *vm = vcpu->vm;
896 int32_t ret = -EINVAL;
897 struct acrn_pcidev pcidev;
898
899	/* We should only de-assign a device from a post-launched VM at creation/shutdown/reset time */
900 if ((is_paused_vm(target_vm) || is_created_vm(target_vm))) {
901 if (copy_from_gpa(vm, &pcidev, param2, sizeof(pcidev)) == 0) {
902 ret = vpci_deassign_pcidev(target_vm, &pcidev);
903 }
904 } else {
905 pr_err("%s, vm[%d] is not a postlaunched VM, or not in PAUSED/CREATED status to be deassigned from a pcidev\n", __func__, vm->vm_id);
906 }
907
908 return ret;
909 }
910
911 /**
912 * @brief Assign one MMIO dev to a VM.
913 *
914 * @param vcpu Pointer to vCPU that initiates the hypercall
915 * @param target_vm Pointer to target VM data structure
916 * @param param2 guest physical address. This gpa points to data structure of
917 * acrn_mmiodev including assign MMIO device info
918 *
919 * @pre is_service_vm(vcpu->vm)
920 * @return 0 on success, non-zero on error.
921 */
922 int32_t hcall_assign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
923 __unused uint64_t param1, uint64_t param2)
924 {
925 struct acrn_vm *vm = vcpu->vm;
926 int32_t ret = -EINVAL;
927 struct acrn_mmiodev mmiodev;
928
929	/* We should only assign a device to a post-launched VM at creation time for safety, not at runtime or in other cases */
930 if (is_created_vm(target_vm)) {
931 if (copy_from_gpa(vm, &mmiodev, param2, sizeof(mmiodev)) == 0) {
932 ret = deassign_mmio_dev(vm, &mmiodev);
933 if (ret == 0) {
934 ret = assign_mmio_dev(target_vm, &mmiodev);
935 }
936 }
937 } else {
938 pr_err("vm[%d] %s failed!\n",target_vm->vm_id, __func__);
939 }
940
941 return ret;
942 }
943
944 /**
945 * @brief Deassign one MMIO dev from a VM.
946 *
947 * @param vcpu Pointer to vCPU that initiates the hypercall
948 * @param target_vm Pointer to target VM data structure
949 * @param param2 guest physical address. This gpa points to data structure of
950 * acrn_mmiodev including deassign MMIO device info
951 *
952 * @pre is_service_vm(vcpu->vm)
953 * @return 0 on success, non-zero on error.
954 */
955 int32_t hcall_deassign_mmiodev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
956 __unused uint64_t param1, uint64_t param2)
957 {
958 struct acrn_vm *vm = vcpu->vm;
959 int32_t ret = -EINVAL;
960 struct acrn_mmiodev mmiodev;
961
962	/* We should only de-assign a device from a post-launched VM at creation/shutdown/reset time */
963 if ((is_paused_vm(target_vm) || is_created_vm(target_vm))) {
964 if (copy_from_gpa(vm, &mmiodev, param2, sizeof(mmiodev)) == 0) {
965 ret = deassign_mmio_dev(target_vm, &mmiodev);
966 if (ret == 0) {
967 ret = assign_mmio_dev(vm, &mmiodev);
968 }
969 }
970 } else {
971 pr_err("vm[%d] %s failed!\n",target_vm->vm_id, __func__);
972 }
973
974 return ret;
975 }
976
977 /**
978 * @brief Set interrupt mapping info of ptdev.
979 *
980 * @param vcpu Pointer to vCPU that initiates the hypercall
981 * @param target_vm Pointer to target VM data structure
982 * @param param2 guest physical address. This gpa points to data structure of
983 * hc_ptdev_irq including intr remapping info
984 *
985 * @pre is_service_vm(vcpu->vm)
986 * @return 0 on success, non-zero on error.
987 */
988 int32_t hcall_set_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
989 __unused uint64_t param1, uint64_t param2)
990 {
991 struct acrn_vm *vm = vcpu->vm;
992 int32_t ret = -1;
993
994 if (is_created_vm(target_vm)) {
995 struct hc_ptdev_irq irq;
996
997 if (copy_from_gpa(vm, &irq, param2, sizeof(irq)) == 0) {
998 if (irq.type == IRQ_INTX) {
999 struct pci_vdev *vdev;
1000 union pci_bdf bdf = {.value = irq.virt_bdf};
1001 struct acrn_vpci *vpci = &target_vm->vpci;
1002
1003 spinlock_obtain(&vpci->lock);
1004 vdev = pci_find_vdev(vpci, bdf);
1005 spinlock_release(&vpci->lock);
1006 /*
1007 * TODO: Change the hc_ptdev_irq structure member names
1008 * virt_pin to virt_gsi
1009 * phys_pin to phys_gsi
1010 */
1011 if ((vdev != NULL) && (vdev->pdev->bdf.value == irq.phys_bdf)) {
1012 if ((((!irq.intx.pic_pin) && (irq.intx.virt_pin < get_vm_gsicount(target_vm)))
1013 || ((irq.intx.pic_pin) && (irq.intx.virt_pin < vpic_pincount())))
1014 && is_gsi_valid(irq.intx.phys_pin)) {
1015 ptirq_remove_intx_remapping(get_service_vm(), irq.intx.phys_pin, false, true);
1016 ret = ptirq_add_intx_remapping(target_vm, irq.intx.virt_pin,
1017 irq.intx.phys_pin, irq.intx.pic_pin);
1018 } else {
1019 pr_err("%s: Invalid phys pin or virt pin\n", __func__);
1020 }
1021 }
1022 } else {
1023 pr_err("%s: Invalid irq type: %u\n", __func__, irq.type);
1024 }
1025 }
1026 }
1027 return ret;
1028 }
1029
1030 /**
1031 * @brief Clear interrupt mapping info of ptdev.
1032 *
1033 * @param vcpu Pointer to vCPU that initiates the hypercall
1034 * @param target_vm Pointer to target VM data structure
1035 * @param param2 guest physical address. This gpa points to data structure of
1036 * hc_ptdev_irq including intr remapping info
1037 *
1038 * @pre is_service_vm(vcpu->vm)
1039 * @return 0 on success, non-zero on error.
1040 */
1041 int32_t hcall_reset_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
1042 __unused uint64_t param1, uint64_t param2)
1043 {
1044 struct acrn_vm *vm = vcpu->vm;
1045 int32_t ret = -1;
1046
1047 if (is_created_vm(target_vm) || is_paused_vm(target_vm)) {
1048 struct hc_ptdev_irq irq;
1049
1050 if (copy_from_gpa(vm, &irq, param2, sizeof(irq)) == 0) {
1051 if (irq.type == IRQ_INTX) {
1052 struct pci_vdev *vdev;
1053 union pci_bdf bdf = {.value = irq.virt_bdf};
1054 struct acrn_vpci *vpci = &target_vm->vpci;
1055
1056 spinlock_obtain(&vpci->lock);
1057 vdev = pci_find_vdev(vpci, bdf);
1058 spinlock_release(&vpci->lock);
1059 /*
1060 * TODO: Change the hc_ptdev_irq structure member names
1061 * virt_pin to virt_gsi
1062 * phys_pin to phys_gsi
1063 */
1064 if ((vdev != NULL) && (vdev->pdev->bdf.value == irq.phys_bdf)) {
1065 if (((!irq.intx.pic_pin) && (irq.intx.virt_pin < get_vm_gsicount(target_vm))) ||
1066 ((irq.intx.pic_pin) && (irq.intx.virt_pin < vpic_pincount()))) {
1067 ptirq_remove_intx_remapping(target_vm, irq.intx.virt_pin, irq.intx.pic_pin, false);
1068 ret = 0;
1069 } else {
1070 pr_err("%s: Invalid virt pin\n", __func__);
1071 }
1072 }
1073 } else {
1074 pr_err("%s: Invalid irq type: %u\n", __func__, irq.type);
1075 }
1076 }
1077 }
1078
1079 return ret;
1080 }
1081
1082 static bool is_pt_pstate(__unused const struct acrn_vm *vm)
1083 {
1084	/* Currently VMs' CPU frequency is managed by the hypervisor, so no pass-through for any VM. */
1085 return false;
1086 }
1087
1088 /**
1089 * @brief Get VCPU Power state.
1090 *
1091 * @param vcpu Pointer to vCPU that initiates the hypercall
1092 * @param target_vm Pointer to target VM data structure
1093 * @param param1 cmd specifying which VCPU power state data to get
1094 * @param param2 guest physical address to which the requested power state data is copied
1095 *
1096 * @pre is_service_vm(vcpu->vm)
1097 * @return 0 on success, non-zero on error.
1098 */
1099 int32_t hcall_get_cpu_pm_state(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2)
1100 {
1101 struct acrn_vm *vm = vcpu->vm;
1102 int32_t ret = -1;
1103 uint64_t cmd = param1;
1104
1105 if (is_created_vm(target_vm) || is_paused_vm(target_vm)) {
1106 switch (cmd & PMCMD_TYPE_MASK) {
1107 case ACRN_PMCMD_GET_PX_CNT: {
1108 uint8_t px_cnt;
1109			/* If the VM supports vHWP, the guest has continuous P-states and thus doesn't have a specific
1110 * px_cnt. The hypercall returns success and px_cnt = 0.
1111 * If the VM's p-state is hidden or hv doesn't have its p-state info, the hypercall returns -1.
1112 */
1113 if (is_vhwp_configured(target_vm)) {
1114 px_cnt = 0U;
1115 } else if (!is_pt_pstate(target_vm)) {
1116 break;
1117 } else if (target_vm->pm.px_cnt == 0U) {
1118 break;
1119 } else {
1120 px_cnt = target_vm->pm.px_cnt;
1121 }
1122 ret = copy_to_gpa(vm, &px_cnt, param2, sizeof(px_cnt));
1123 break;
1124 }
1125 case ACRN_PMCMD_GET_PX_DATA: {
1126 uint8_t pn;
1127 struct acrn_pstate_data *px_data;
1128
1129 if (!is_pt_pstate(target_vm)) {
1130 break;
1131 }
1132
1133			/* For now px data is stored per-VM.
1134			 * If it is stored per-CPU in the future,
1135 * we need to check PMCMD_VCPUID_MASK in cmd.
1136 */
1137 if (target_vm->pm.px_cnt == 0U) {
1138 break;
1139 }
1140
1141 pn = (uint8_t)((cmd & PMCMD_STATE_NUM_MASK) >> PMCMD_STATE_NUM_SHIFT);
1142 if (pn >= target_vm->pm.px_cnt) {
1143 break;
1144 }
1145
1146 px_data = target_vm->pm.px_data + pn;
1147 ret = copy_to_gpa(vm, px_data, param2, sizeof(struct acrn_pstate_data));
1148 break;
1149 }
1150 case ACRN_PMCMD_GET_CX_CNT: {
1151 ret = copy_to_gpa(vm, &(target_vm->pm.cx_cnt), param2, sizeof(target_vm->pm.cx_cnt));
1152 break;
1153 }
1154 case ACRN_PMCMD_GET_CX_DATA: {
1155 uint8_t cx_idx;
1156 struct acrn_cstate_data *cx_data;
1157
1158 if (target_vm->pm.cx_cnt == 0U) {
1159 break;
1160 }
1161
1162 cx_idx = (uint8_t)
1163 ((cmd & PMCMD_STATE_NUM_MASK) >> PMCMD_STATE_NUM_SHIFT);
1164 if ((cx_idx == 0U) || (cx_idx > target_vm->pm.cx_cnt)) {
1165 break;
1166 }
1167
1168 cx_data = target_vm->pm.cx_data + cx_idx;
1169 ret = copy_to_gpa(vm, cx_data, param2, sizeof(struct acrn_cstate_data));
1170 break;
1171 }
1172 default:
1173 /* invalid command */
1174 break;
1175 }
1176 }
1177
1178 return ret;
1179 }
1180
1181 /**
1182 * @brief Get a VM's interrupt count data.
1183 *
1184 * @param vcpu Pointer to vCPU that initiates the hypercall
1185 * @param target_vm Pointer to target VM data structure
1186 * @param param2 guest physical address. This gpa points to data structure of
1187 * acrn_intr_monitor
1188 *
1189 * @pre is_service_vm(vcpu->vm)
1190 * @return 0 on success, non-zero on error.
1191 */
1192 int32_t hcall_vm_intr_monitor(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
1193 __unused uint64_t param1, uint64_t param2)
1194 {
1195 struct acrn_vm *vm = vcpu->vm;
1196 int32_t status = -EINVAL;
1197 struct acrn_intr_monitor *intr_hdr;
1198 uint64_t hpa;
1199
1200 if (!is_poweroff_vm(target_vm)) {
1201 /* the param for this hypercall is page aligned */
1202 hpa = gpa2hpa(vm, param2);
1203 if (hpa != INVALID_HPA) {
1204 intr_hdr = (struct acrn_intr_monitor *)hpa2hva(hpa);
1205 stac();
1206 if (intr_hdr->buf_cnt <= (MAX_PTDEV_NUM * 2U)) {
1207 status = 0;
1208
1209 switch (intr_hdr->cmd) {
1210 case INTR_CMD_GET_DATA:
1211 intr_hdr->buf_cnt = ptirq_get_intr_data(target_vm,
1212 intr_hdr->buffer, intr_hdr->buf_cnt);
1213 break;
1214
1215 case INTR_CMD_DELAY_INT:
1216				/* buffer[0] is the delay time (in ms); 0 cancels the delay */
1217 target_vm->intr_inject_delay_delta =
1218 intr_hdr->buffer[0] * TICKS_PER_MS;
1219 break;
1220
1221 default:
1222				/* invalid cmd; should not happen */
1223 status = -EINVAL;
1224 break;
1225 }
1226 }
1227 clac();
1228 }
1229 }
1230
1231 return status;
1232 }
1233
1234 /**
1235 * @brief set upcall notifier vector
1236 *
1237 * This is the API that helps to switch the notifier vector. If this API is
1238 * not called, the hypervisor will use the default notifier vector (0xF3)
1239 * to notify the Service VM kernel.
1240 *
1241 * @param vcpu Pointer to vCPU that initiates the hypercall
1242 * @param param1 the expected notifier vector from guest
1243 *
1244 * @pre is_service_vm(vcpu->vm)
1245 * @return 0 on success, non-zero on error.
1246 */
1247 int32_t hcall_set_callback_vector(__unused struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
1248 uint64_t param1, __unused uint64_t param2)
1249 {
1250 int32_t ret;
1251
1252 if ((param1 > NR_MAX_VECTOR) || (param1 < VECTOR_DYNAMIC_START)) {
1253 pr_err("%s: Invalid passed vector\n", __func__);
1254 ret = -EINVAL;
1255 } else {
1256 set_hsm_notification_vector((uint32_t)param1);
1257 ret = 0;
1258 }
1259
1260 return ret;
1261 }
1262
1263 /*
1264 * @pre dev != NULL
1265 */
1266 static struct emul_dev_ops *find_emul_dev_ops(struct acrn_vdev *dev)
1267 {
1268 struct emul_dev_ops *op = NULL;
1269 uint32_t i;
1270
1271 for (i = 0U; i < ARRAY_SIZE(emul_dev_ops_tbl); i++) {
1272 if (emul_dev_ops_tbl[i].dev_id == dev->id.value) {
1273 op = &emul_dev_ops_tbl[i];
1274 break;
1275 }
1276 }
1277 return op;
1278 }
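/*
 * Illustrative example (editor's note): an acrn_vdev whose id.value equals
 * (MCS9900_VENDOR | (MCS9900_DEV << 16U)) matches the vmcs9900 entry in
 * emul_dev_ops_tbl, so hcall_add_vdev() would invoke create_vmcs9900_vdev().
 */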
1279
1280 /**
1281 * @brief Add an emulated device in hypervisor.
1282 *
1283 * @param vcpu Pointer to vCPU that initiates the hypercall
1284 * @param target_vm Pointer to target VM data structure
1285 * @param param2 guest physical address. This gpa points to data structure of
1286 * acrn_vdev including information about PCI or legacy devices
1287 *
1288 * @pre is_service_vm(vcpu->vm)
1289 * @return 0 on success, non-zero on error.
1290 */
1291 int32_t hcall_add_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2)
1292 {
1293 struct acrn_vm *vm = vcpu->vm;
1294 int32_t ret = -EINVAL;
1295 struct acrn_vdev dev;
1296 struct emul_dev_ops *op;
1297
1298	/* We should only create a device for a post-launched VM at creation time for safety, not at runtime or in other cases */
1299 if (is_created_vm(target_vm)) {
1300 if (copy_from_gpa(vm, &dev, param2, sizeof(dev)) == 0) {
1301 op = find_emul_dev_ops(&dev);
1302 if ((op != NULL) && (op->create != NULL)) {
1303 ret = op->create(target_vm, &dev);
1304 }
1305 }
1306 } else {
1307 pr_err("%s, vm[%d] is not a postlaunched VM, or not in CREATED status to create a vdev\n", __func__, target_vm->vm_id);
1308 }
1309 return ret;
1310 }
1311
1312 /**
1313 * @brief Remove an emulated device in hypervisor.
1314 *
1315 * @param vcpu Pointer to vCPU that initiates the hypercall
1316 * @param target_vm Pointer to target VM data structure
1317 * @param param2 guest physical address. This gpa points to data structure of
1318 * acrn_vdev including information about PCI or legacy devices
1319 *
1320 * @pre is_service_vm(vcpu->vm)
1321 * @return 0 on success, non-zero on error.
1322 */
1323 int32_t hcall_remove_vdev(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2)
1324 {
1325 struct acrn_vm *vm = vcpu->vm;
1326 int32_t ret = -EINVAL;
1327 struct acrn_vdev dev;
1328 struct pci_vdev *vdev;
1329 struct emul_dev_ops *op;
1330 union pci_bdf bdf;
1331
1332	/* We should only destroy a device of a post-launched VM at creation or pause time for safety, not at runtime or in other cases */
1333 if (is_created_vm(target_vm) || is_paused_vm(target_vm)) {
1334 if (copy_from_gpa(vm, &dev, param2, sizeof(dev)) == 0) {
1335 op = find_emul_dev_ops(&dev);
1336 if (op != NULL) {
1337 bdf.value = (uint16_t) dev.slot;
1338 vdev = pci_find_vdev(&target_vm->vpci, bdf);
1339 if (vdev != NULL) {
1340 vdev->pci_dev_config->vbdf.value = UNASSIGNED_VBDF;
1341 if (op->destroy != NULL) {
1342 ret = op->destroy(vdev);
1343 } else {
1344 ret = 0;
1345 }
1346 } else {
1347 pr_warn("%s, failed to destroy emulated device %x:%x.%x\n",
1348 __func__, bdf.bits.b, bdf.bits.d, bdf.bits.f);
1349 }
1350 }
1351 }
1352 } else {
1353 pr_err("%s, vm[%d] is not a postlaunched VM, or not in CREATED/PAUSED status to destroy a vdev\n", __func__, target_vm->vm_id);
1354 }
1355 return ret;
1356 }
1357