/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "vmmapi.h"
#include "mevent.h"
#include <errno.h>

#include "dm.h"
#include "pci_core.h"
#include "log.h"
#include "sw_load.h"
#include "acpi.h"

#define MAP_NOCORE 0
#define MAP_ALIGNED_SUPER 0

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)

#define SUPPORT_VHM_API_VERSION_MAJOR	1
#define SUPPORT_VHM_API_VERSION_MINOR	0

#define VM_STATE_STR_LEN                16
static const char vm_state_str[VM_SUSPEND_LAST][VM_STATE_STR_LEN] = {
	[VM_SUSPEND_NONE]		= "RUNNING",
	[VM_SUSPEND_SYSTEM_RESET]	= "SYSTEM_RESET",
	[VM_SUSPEND_FULL_RESET]		= "FULL_RESET",
	[VM_SUSPEND_POWEROFF]		= "POWEROFF",
	[VM_SUSPEND_SUSPEND]		= "SUSPEND",
	[VM_SUSPEND_HALT]		= "HALT",
	[VM_SUSPEND_TRIPLEFAULT]	= "TRIPLEFAULT"
};

const char *vm_state_to_str(enum vm_suspend_how idx)
{
	return (idx < VM_SUSPEND_LAST) ? vm_state_str[idx] : "UNKNOWN";
}

static int devfd = -1;
static uint64_t cpu_affinity_bitmap = 0UL;

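/*
 * Record one physical CPU in the DM-maintained affinity bitmap. A pcpu
 * that is already present is reported and ignored rather than treated
 * as a fatal error.
 */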
static void add_one_pcpu(int pcpu_id)
{
	if (cpu_affinity_bitmap & (1UL << pcpu_id)) {
		pr_err("%s: pcpu_id %d has already been assigned to this VM.\n", __func__, pcpu_id);
		return;
	}

	cpu_affinity_bitmap |= (1UL << pcpu_id);
}

/*
 * Parse a comma-separated list of local APIC IDs, e.g.:
 *   --cpu_affinity 1,2,3
 * Each lapic ID is translated to a physical CPU ID and recorded in the
 * affinity bitmap.
 */
int acrn_parse_cpu_affinity(char *opt)
{
	char *str, *cp, *cp_opt;
	int lapic_id;

	cp_opt = cp = strdup(opt);
	if (!cp) {
		pr_err("%s: strdup returns NULL\n", __func__);
		return -1;
	}

	/* whitespace within the option string is invalid */
	while (cp && isdigit(cp[0])) {
		str = strpbrk(cp, ",");

		/* no more entries delimited by ',' */
		if (!str) {
			if (!dm_strtoi(cp, NULL, 10, &lapic_id)) {
				add_one_pcpu(lapic_to_pcpu(lapic_id));
			}
			break;
		} else {
			if (*str == ',') {
				/* after this, 'cp' points to the character after ',' */
				str = strsep(&cp, ",");

				/* parse the entry before ',' */
				if (dm_strtoi(str, NULL, 10, &lapic_id)) {
					goto err;
				}
				add_one_pcpu(lapic_to_pcpu(lapic_id));
			}
		}
	}

	free(cp_opt);
	return 0;

err:
	free(cp_opt);
	return -1;
}
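
/*
 * Hypothetical usage sketch (not part of this file): a command-line
 * handler would feed the option string through acrn_parse_cpu_affinity()
 * and later read the result back when filling in the creation request,
 * as vm_create() does below:
 *
 *	if (acrn_parse_cpu_affinity(optarg) != 0)
 *		errx(EX_USAGE, "invalid --cpu_affinity value");
 *	...
 *	create_vm.cpu_affinity = vm_get_cpu_affinity_dm();
 */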

uint64_t vm_get_cpu_affinity_dm(void)
{
	return cpu_affinity_bitmap;
}

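/*
 * Create a VM. Opens the HSM device node (/dev/acrn_vhm or, failing
 * that, /dev/acrn_hsm), fills in an acrn_vm_creation request from the
 * global DM settings (trusty_enabled, lapic_pt, is_rtvm, CPU affinity)
 * and issues ACRN_IOCTL_CREATE_VM, retrying up to 10 times at 500 ms
 * intervals. Returns the new context, or NULL on error.
 */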
struct vmctx *
vm_create(const char *name, uint64_t req_buf, int *vcpu_num)
{
	struct vmctx *ctx;
	struct acrn_vm_creation create_vm;
	int error, retry = 10;
	struct stat tmp_st;

	memset(&create_vm, 0, sizeof(struct acrn_vm_creation));
	ctx = calloc(1, sizeof(struct vmctx) + strnlen(name, PATH_MAX) + 1);
	if ((ctx == NULL) || (devfd != -1))
		goto err;

	if (stat("/dev/acrn_vhm", &tmp_st) == 0) {
		devfd = open("/dev/acrn_vhm", O_RDWR|O_CLOEXEC);
	} else if (stat("/dev/acrn_hsm", &tmp_st) == 0) {
		devfd = open("/dev/acrn_hsm", O_RDWR|O_CLOEXEC);
	} else {
		devfd = -1;
	}
	if (devfd == -1) {
		pr_err("Could not open /dev/acrn_vhm or /dev/acrn_hsm\n");
		goto err;
	}

	ctx->gvt_enabled = false;
	ctx->fd = devfd;
	ctx->lowmem_limit = PCI_EMUL_MEMBASE32;
	ctx->highmem_gpa_base = HIGHRAM_START_ADDR;
	ctx->name = (char *)(ctx + 1);
	strncpy(ctx->name, name, strnlen(name, PATH_MAX) + 1);

	/* Set trusty enable flag */
	if (trusty_enabled)
		create_vm.vm_flag |= GUEST_FLAG_SECURE_WORLD_ENABLED;
	else
		create_vm.vm_flag &= (~GUEST_FLAG_SECURE_WORLD_ENABLED);

	if (lapic_pt) {
		create_vm.vm_flag |= GUEST_FLAG_LAPIC_PASSTHROUGH;
		create_vm.vm_flag |= GUEST_FLAG_RT;
		create_vm.vm_flag |= GUEST_FLAG_IO_COMPLETION_POLLING;
		create_vm.vm_flag |= GUEST_FLAG_PMU_PASSTHROUGH;
	} else {
		create_vm.vm_flag &= (~GUEST_FLAG_LAPIC_PASSTHROUGH);
		create_vm.vm_flag &= (~GUEST_FLAG_IO_COMPLETION_POLLING);
	}

	/* CPU affinity given on the command line overrides the HV's static configuration */
	create_vm.cpu_affinity = cpu_affinity_bitmap;
	strncpy((char *)create_vm.name, name, strnlen(name, MAX_VM_NAME_LEN));

	if (is_rtvm) {
		create_vm.vm_flag |= GUEST_FLAG_RT;
		create_vm.vm_flag |= GUEST_FLAG_IO_COMPLETION_POLLING;
	}

	create_vm.ioreq_buf = req_buf;
	while (retry > 0) {
		error = ioctl(ctx->fd, ACRN_IOCTL_CREATE_VM, &create_vm);
		if (error == 0)
			break;
		usleep(500000);
		retry--;
	}

	if (error) {
		pr_err("failed to create VM %s, %s.\n", ctx->name, errormsg(errno));
		goto err;
	}

	*vcpu_num = create_vm.vcpu_num;
	ctx->vmid = create_vm.vmid;

	return ctx;

err:
	if (ctx != NULL)
		free(ctx);

	return NULL;
}

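/*
 * Thin ioctl wrappers around the HSM I/O request client interface: the
 * DM creates an ioreq client for the VM, attaches to it to receive I/O
 * requests from the hypervisor, and destroys it on shutdown.
 */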
int
vm_create_ioreq_client(struct vmctx *ctx)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_CREATE_IOREQ_CLIENT, 0);
	if (error) {
		pr_err("ACRN_IOCTL_CREATE_IOREQ_CLIENT ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_destroy_ioreq_client(struct vmctx *ctx)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_DESTROY_IOREQ_CLIENT, ctx->ioreq_client);
	if (error) {
		pr_err("ACRN_IOCTL_DESTROY_IOREQ_CLIENT ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_attach_ioreq_client(struct vmctx *ctx)
{
	int error;

	error = ioctl(ctx->fd, ACRN_IOCTL_ATTACH_IOREQ_CLIENT, ctx->ioreq_client);

	if (error) {
		pr_err("ACRN_IOCTL_ATTACH_IOREQ_CLIENT ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

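/*
 * Notify the hypervisor that emulation of the current I/O request for
 * the given vcpu is complete.
 */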
int
vm_notify_request_done(struct vmctx *ctx, int vcpu)
{
	int error;
	struct acrn_ioreq_notify notify;

	bzero(&notify, sizeof(notify));
	notify.vmid = ctx->vmid;
	notify.vcpu = vcpu;

	error = ioctl(ctx->fd, ACRN_IOCTL_NOTIFY_REQUEST_FINISH, &notify);

	if (error) {
		pr_err("ACRN_IOCTL_NOTIFY_REQUEST_FINISH ioctl() returned an error: %s\n", errormsg(errno));
	}

	return error;
}

void
vm_destroy(struct vmctx *ctx)
{
	if (!ctx)
		return;
	if (ioctl(ctx->fd, ACRN_IOCTL_DESTROY_VM, NULL)) {
		pr_err("ACRN_IOCTL_DESTROY_VM ioctl() returned an error: %s\n", errormsg(errno));
	}
	close(ctx->fd);
	free(ctx);
	devfd = -1;
}

int
vm_setup_asyncio(struct vmctx *ctx, uint64_t base)
{
	int error;

	error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_ASYNCIO, base);

	if (error) {
		pr_err("ACRN_IOCTL_SETUP_ASYNCIO ioctl() returned an error: %s\n", errormsg(errno));
	}

	return error;
}

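/*
 * Parse a memory-size option such as "512m" or "4G". An optional
 * k/m/g suffix (case-insensitive) scales the value; 'b' or no suffix
 * means bytes. Sizes below 128 MB are rejected.
 */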
int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int shift;

	optval = strtoul(optarg, &endptr, 0);
	switch (tolower((unsigned char)*endptr)) {
	case 'g':
		shift = 30;
		break;
	case 'm':
		shift = 20;
		break;
	case 'k':
		shift = 10;
		break;
	case 'b':
	case '\0': /* No unit. */
		shift = 0;
		break;
	default:
		/* Unrecognized unit. */
		return -1;
	}

	optval = optval << shift;
	if (optval < 128 * MB)
		return -1;

	*ret_memsize = optval;

	return 0;
}


uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{
	return ctx->lowmem_limit;
}

int
vm_map_memseg_vma(struct vmctx *ctx, size_t len, vm_paddr_t gpa,
	uint64_t vma, int prot)
{
	struct acrn_vm_memmap memmap;
	int error;
	bzero(&memmap, sizeof(struct acrn_vm_memmap));
	memmap.type = ACRN_MEMMAP_RAM;
	memmap.vma_base = vma;
	memmap.len = len;
	memmap.user_vm_pa = gpa;
	memmap.attr = prot;
	error = ioctl(ctx->fd, ACRN_IOCTL_SET_MEMSEG, &memmap);
	if (error) {
		pr_err("ACRN_IOCTL_SET_MEMSEG ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize)
{
	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
	}

	ctx->biosmem = high_bios_size();
	ctx->fbmem = (16 * 1024 * 1024);

	return hugetlb_setup_memory(ctx);
}

void
vm_unsetup_memory(struct vmctx *ctx)
{
	/*
	 * For security, clear the VM's memory regions to avoid leaking
	 * secrets: after a User VM is destroyed its memory is reclaimed,
	 * and if that memory is later allocated to a new User VM without
	 * being cleared, the previous VM's sensitive data could leak to
	 * the new one.
	 *
	 * For an RTVM we can't clear the memory, as the RTVM may still be
	 * running. But the memory must still be returned to the Service VM
	 * here; otherwise the VM can't be restarted.
	 */

	if (!is_rtvm) {
		bzero((void *)ctx->baseaddr, ctx->lowmem);
		bzero((void *)(ctx->baseaddr + ctx->highmem_gpa_base), ctx->highmem);
	}

	hugetlb_unsetup_memory(ctx);
}

/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular, return NULL if [gaddr, gaddr+len) falls in the guest MMIO
 * region. The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
		    gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= ctx->highmem_gpa_base) {
			if (gaddr < ctx->highmem_gpa_base + ctx->highmem &&
			    len <= ctx->highmem &&
			    gaddr + len <= ctx->highmem_gpa_base + ctx->highmem)
				return (ctx->baseaddr + gaddr);
		}
	}

	pr_dbg("%s: requested GPA range is not in guest memory!\n", __func__);
	return NULL;
}
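
/*
 * Illustrative (hypothetical) caller: translate a guest-physical address
 * to a host virtual address before touching guest memory, treating a
 * NULL return as an out-of-range access:
 *
 *	uint8_t buf[64];
 *	void *hva = vm_map_gpa(ctx, gpa, sizeof(buf));
 *
 *	if (hva == NULL)
 *		return -1;
 *	memcpy(buf, hva, sizeof(buf));
 */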

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{
	return ctx->lowmem;
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{
	return ctx->highmem;
}

int
vm_run(struct vmctx *ctx)
{
	int error;

	error = ioctl(ctx->fd, ACRN_IOCTL_START_VM, &ctx->vmid);
	if (error) {
		pr_err("ACRN_IOCTL_START_VM ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

void
vm_pause(struct vmctx *ctx)
{
	if (ioctl(ctx->fd, ACRN_IOCTL_PAUSE_VM, &ctx->vmid)) {
		pr_err("ACRN_IOCTL_PAUSE_VM ioctl() returned an error: %s\n", errormsg(errno));
	}
}

void
vm_reset(struct vmctx *ctx)
{
	if (ioctl(ctx->fd, ACRN_IOCTL_RESET_VM, &ctx->vmid)) {
		pr_err("ACRN_IOCTL_RESET_VM ioctl() returned an error: %s\n", errormsg(errno));
	}
}

void
vm_clear_ioreq(struct vmctx *ctx)
{
	if (ioctl(ctx->fd, ACRN_IOCTL_CLEAR_VM_IOREQ, NULL)) {
		pr_err("ACRN_IOCTL_CLEAR_VM_IOREQ ioctl() returned an error: %s\n", errormsg(errno));
	}
}

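/*
 * The last lifecycle transition requested for the VM. Written by
 * vm_set_suspend_mode() and read back via vm_get_suspend_mode().
 */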
static enum vm_suspend_how suspend_mode = VM_SUSPEND_NONE;

void
vm_set_suspend_mode(enum vm_suspend_how how)
{
	pr_notice("VM state changed from [ %s ] to [ %s ]\n", vm_state_to_str(suspend_mode), vm_state_to_str(how));
	suspend_mode = how;
}

int
vm_get_suspend_mode(void)
{
	return suspend_mode;
}

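/*
 * Request a lifecycle transition. This does not stop the VM directly:
 * it records the requested state and wakes the mevent loop so that a
 * caller of vm_get_suspend_mode() can act on it.
 */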
int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	pr_info("%s: setting VM state to %s\n", __func__, vm_state_to_str(how));
	vm_set_suspend_mode(how);
	mevent_notify();

	return 0;
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct acrn_msi_entry msi;
	int error;
	bzero(&msi, sizeof(msi));
	msi.msi_addr = addr;
	msi.msi_data = msg;

	error = ioctl(ctx->fd, ACRN_IOCTL_INJECT_MSI, &msi);
	if (error) {
		pr_err("ACRN_IOCTL_INJECT_MSI ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

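/*
 * Note: the acrn_irqline_ops structure is handed to the ioctl by value,
 * packed into a single uint64_t, rather than by pointer.
 */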
int
vm_set_gsi_irq(struct vmctx *ctx, int gsi, uint32_t operation)
{
	struct acrn_irqline_ops op;
	uint64_t *req = (uint64_t *)&op;
	int error;
	op.op = operation;
	op.gsi = (uint32_t)gsi;

	error = ioctl(ctx->fd, ACRN_IOCTL_SET_IRQLINE, *req);
	if (error) {
		pr_err("ACRN_IOCTL_SET_IRQLINE ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

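/*
 * Device passthrough wrappers: assign or deassign a PCI or MMIO device
 * to/from the VM, map or unmap the MMIO ranges of a passthrough PCI
 * device into the guest physical address space, and create or destroy
 * hypervisor-emulated virtual devices.
 */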
int
vm_assign_pcidev(struct vmctx *ctx, struct acrn_pcidev *pcidev)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_ASSIGN_PCIDEV, pcidev);
	if (error) {
		pr_err("ACRN_IOCTL_ASSIGN_PCIDEV ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_deassign_pcidev(struct vmctx *ctx, struct acrn_pcidev *pcidev)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_DEASSIGN_PCIDEV, pcidev);
	if (error) {
		pr_err("ACRN_IOCTL_DEASSIGN_PCIDEV ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_assign_mmiodev(struct vmctx *ctx, struct acrn_mmiodev *mmiodev)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_ASSIGN_MMIODEV, mmiodev);
	if (error) {
		pr_err("ACRN_IOCTL_ASSIGN_MMIODEV ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_deassign_mmiodev(struct vmctx *ctx, struct acrn_mmiodev *mmiodev)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_DEASSIGN_MMIODEV, mmiodev);
	if (error) {
		pr_err("ACRN_IOCTL_DEASSIGN_MMIODEV ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_map_ptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct acrn_vm_memmap memmap;
	int error;
	bzero(&memmap, sizeof(struct acrn_vm_memmap));
	memmap.type = ACRN_MEMMAP_MMIO;
	memmap.len = len;
	memmap.user_vm_pa = gpa;
	memmap.service_vm_pa = hpa;
	memmap.attr = ACRN_MEM_ACCESS_RWX;
	error = ioctl(ctx->fd, ACRN_IOCTL_SET_MEMSEG, &memmap);
	if (error) {
		pr_err("ACRN_IOCTL_SET_MEMSEG ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_unmap_ptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct acrn_vm_memmap memmap;
	int error;
	bzero(&memmap, sizeof(struct acrn_vm_memmap));
	memmap.type = ACRN_MEMMAP_MMIO;
	memmap.len = len;
	memmap.user_vm_pa = gpa;
	memmap.service_vm_pa = hpa;
	memmap.attr = ACRN_MEM_ACCESS_RWX;

	error = ioctl(ctx->fd, ACRN_IOCTL_UNSET_MEMSEG, &memmap);
	if (error) {
		pr_err("ACRN_IOCTL_UNSET_MEMSEG ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_add_hv_vdev(struct vmctx *ctx, struct acrn_vdev *dev)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_CREATE_VDEV, dev);
	if (error) {
		pr_err("ACRN_IOCTL_CREATE_VDEV ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_remove_hv_vdev(struct vmctx *ctx, struct acrn_vdev *dev)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_DESTROY_VDEV, dev);
	if (error) {
		pr_err("ACRN_IOCTL_DESTROY_VDEV ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

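/*
 * Establish or tear down an INTx interrupt route between a virtual pin
 * (PIC or IOAPIC) and the physical pin of a passthrough device.
 */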
int
vm_set_ptdev_intx_info(struct vmctx *ctx, uint16_t virt_bdf, uint16_t phys_bdf,
		       int virt_pin, int phys_pin, bool pic_pin)
{
	struct acrn_ptdev_irq ptirq;
	int error;
	bzero(&ptirq, sizeof(ptirq));
	ptirq.type = ACRN_PTDEV_IRQ_INTX;
	ptirq.virt_bdf = virt_bdf;
	ptirq.phys_bdf = phys_bdf;
	ptirq.intx.virt_pin = virt_pin;
	ptirq.intx.phys_pin = phys_pin;
	ptirq.intx.is_pic_pin = pic_pin;

	error = ioctl(ctx->fd, ACRN_IOCTL_SET_PTDEV_INTR, &ptirq);
	if (error) {
		pr_err("ACRN_IOCTL_SET_PTDEV_INTR ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_reset_ptdev_intx_info(struct vmctx *ctx, uint16_t virt_bdf, uint16_t phys_bdf,
			int virt_pin, bool pic_pin)
{
	struct acrn_ptdev_irq ptirq;
	int error;
	bzero(&ptirq, sizeof(ptirq));
	ptirq.type = ACRN_PTDEV_IRQ_INTX;
	ptirq.intx.virt_pin = virt_pin;
	ptirq.intx.is_pic_pin = pic_pin;
	ptirq.virt_bdf = virt_bdf;
	ptirq.phys_bdf = phys_bdf;

	error = ioctl(ctx->fd, ACRN_IOCTL_RESET_PTDEV_INTR, &ptirq);
	if (error) {
		pr_err("ACRN_IOCTL_RESET_PTDEV_INTR ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_set_vcpu_regs(struct vmctx *ctx, struct acrn_vcpu_regs *vcpu_regs)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_SET_VCPU_REGS, vcpu_regs);
	if (error) {
		pr_err("ACRN_IOCTL_SET_VCPU_REGS ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_get_cpu_state(struct vmctx *ctx, void *state_buf)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_PM_GET_CPU_STATE, state_buf);
	if (error) {
		pr_err("ACRN_IOCTL_PM_GET_CPU_STATE ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_intr_monitor(struct vmctx *ctx, void *intr_buf)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_VM_INTR_MONITOR, intr_buf);
	if (error) {
		pr_err("ACRN_IOCTL_VM_INTR_MONITOR ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_ioeventfd(struct vmctx *ctx, struct acrn_ioeventfd *args)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_IOEVENTFD, args);
	if (error) {
		pr_err("ACRN_IOCTL_IOEVENTFD ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

int
vm_irqfd(struct vmctx *ctx, struct acrn_irqfd *args)
{
	int error;
	error = ioctl(ctx->fd, ACRN_IOCTL_IRQFD, args);
	if (error) {
		pr_err("ACRN_IOCTL_IRQFD ioctl() returned an error: %s\n", errormsg(errno));
	}
	return error;
}

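/*
 * Translate an errno value from an HSM ioctl into a message,
 * special-casing the values the driver uses for undefined and
 * obsolete operations.
 */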
751 
752 char*
errormsg(int error)753 errormsg(int error)
754 {
755 	switch (error){
756 	case ENOTTY:
757 		return "Undefined operation";
758 	case ENOSYS:
759 		return "Obsoleted operation";
760 	default:
761 		return strerror(error);
762 	}
763 }
764