/*
 * Copyright (C) 2019-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6 
7 #include <types.h>
8 #include <errno.h>
9 #include <asm/lib/atomic.h>
10 #include <io_req.h>
11 #include <asm/guest/vcpu.h>
12 #include <asm/guest/vm.h>
13 #include <asm/guest/instr_emul.h>
14 #include <asm/guest/vmexit.h>
15 #include <asm/vmx.h>
16 #include <asm/guest/ept.h>
17 #include <asm/pgtable.h>
18 #include <trace.h>
19 #include <logmsg.h>
20 
arch_fire_hsm_interrupt(void)21 void arch_fire_hsm_interrupt(void)
22 {
23 	/*
24 	 * use vLAPIC to inject vector to Service VM vcpu 0 if vlapic is enabled
25 	 * otherwise, send IPI hardcoded to BSP_CPU_ID
26 	 */
27 	struct acrn_vm *service_vm;
28 	struct acrn_vcpu *vcpu;
29 
30 	service_vm = get_service_vm();
31 	vcpu = vcpu_from_vid(service_vm, BSP_CPU_ID);
32 
33 	vlapic_set_intr(vcpu, get_hsm_notification_vector(), LAPIC_TRIG_EDGE);
34 }
35 
36 /**
37  * @brief General complete-work for port I/O emulation
38  *
39  * @pre io_req->io_type == ACRN_IOREQ_TYPE_PORTIO
40  *
41  * @remark This function must be called when \p io_req is completed, after
42  * either a previous call to emulate_io() returning 0 or the corresponding IO
43  * request having transferred to the COMPLETE state.
44  */
45 void
emulate_pio_complete(struct acrn_vcpu * vcpu,const struct io_request * io_req)46 emulate_pio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req)
47 {
48 	const struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
49 	uint64_t mask = 0xFFFFFFFFUL >> (32UL - (8UL * pio_req->size));
50 
51 	if (pio_req->direction == ACRN_IOREQ_DIR_READ) {
52 		uint64_t value = (uint64_t)pio_req->value;
53 		uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
54 
55 		rax = ((rax) & ~mask) | (value & mask);
56 		vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
57 	}
58 }
59 
60 
61 /**
62  * @brief The handler of VM exits on I/O instructions
63  *
64  * @param vcpu The virtual CPU which triggers the VM exit on I/O instruction
65  */
pio_instr_vmexit_handler(struct acrn_vcpu * vcpu)66 int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu)
67 {
68 	int32_t status;
69 	uint64_t exit_qual;
70 	uint32_t mask;
71 	int32_t cur_context_idx = vcpu->arch.cur_context;
72 	struct io_request *io_req = &vcpu->req;
73 	struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
74 
75 	exit_qual = vcpu->arch.exit_qualification;
76 
77 	io_req->io_type = ACRN_IOREQ_TYPE_PORTIO;
78 	pio_req->size = vm_exit_io_instruction_size(exit_qual) + 1UL;
79 	pio_req->address = vm_exit_io_instruction_port_number(exit_qual);
80 	if (vm_exit_io_instruction_access_direction(exit_qual) == 0UL) {
81 		mask = 0xFFFFFFFFU >> (32U - (8U * pio_req->size));
82 		pio_req->direction = ACRN_IOREQ_DIR_WRITE;
83 		pio_req->value = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RAX) & mask;
84 	} else {
85 		pio_req->direction = ACRN_IOREQ_DIR_READ;
86 	}
87 
88 	TRACE_4I(TRACE_VMEXIT_IO_INSTRUCTION,
89 		(uint32_t)pio_req->address,
90 		(uint32_t)pio_req->direction,
91 		(uint32_t)pio_req->size,
92 		(uint32_t)cur_context_idx);
93 
94 	status = emulate_io(vcpu, io_req);
95 
96 	return status;
97 }
98 
ept_violation_vmexit_handler(struct acrn_vcpu * vcpu)99 int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
100 {
101 	int32_t status = -EINVAL, ret;
102 	uint64_t exit_qual;
103 	uint64_t gpa;
104 	struct io_request *io_req = &vcpu->req;
105 	struct acrn_mmio_request *mmio_req = &io_req->reqs.mmio_request;
106 
107 	/* Handle page fault from guest */
108 	exit_qual = vcpu->arch.exit_qualification;
109 	/* Get the guest physical address */
110 	gpa = exec_vmread64(VMX_GUEST_PHYSICAL_ADDR_FULL);
111 
112 	TRACE_2L(TRACE_VMEXIT_EPT_VIOLATION, exit_qual, gpa);
113 
114 	/*caused by instruction fetch */
115 	if ((exit_qual & 0x4UL) != 0UL) {
116 		/* TODO: check wehther the gpa is not a MMIO address. */
117 		if (vcpu->arch.cur_context == NORMAL_WORLD) {
118 			ept_modify_mr(vcpu->vm, (uint64_t *)vcpu->vm->arch_vm.nworld_eptp,
119 				gpa & PAGE_MASK, PAGE_SIZE, EPT_EXE, 0UL);
120 		} else {
121 			ept_modify_mr(vcpu->vm, (uint64_t *)vcpu->vm->arch_vm.sworld_eptp,
122 				gpa & PAGE_MASK, PAGE_SIZE, EPT_EXE, 0UL);
123 		}
124 		vcpu_retain_rip(vcpu);
125 		status = 0;
126 	} else {
127 
128 		io_req->io_type = ACRN_IOREQ_TYPE_MMIO;
129 
130 		/* Specify if read or write operation */
131 		if ((exit_qual & 0x2UL) != 0UL) {
132 			/* Write operation */
133 			mmio_req->direction = ACRN_IOREQ_DIR_WRITE;
134 			mmio_req->value = 0UL;
135 
136 			/* XXX: write access while EPT perm RX -> WP */
137 			if ((exit_qual & 0x38UL) == 0x28UL) {
138 				io_req->io_type = ACRN_IOREQ_TYPE_WP;
139 			}
140 		} else {
141 			/* Read operation */
142 			mmio_req->direction = ACRN_IOREQ_DIR_READ;
143 
144 			/* TODO: Need to determine how sign extension is determined for
145 			 * reads
146 			 */
147 		}
148 
149 		/* Adjust IPA appropriately and OR page offset to get full IPA of abort
150 		 */
151 		mmio_req->address = gpa;
152 
153 		ret = decode_instruction(vcpu, true);
154 		if (ret > 0) {
155 			mmio_req->size = (uint64_t)ret;
156 			/*
157 			 * For MMIO write, ask DM to run MMIO emulation after
158 			 * instruction emulation. For MMIO read, ask DM to run MMIO
159 			 * emulation at first.
160 			 */
161 
162 			/* Determine value being written. */
163 			if (mmio_req->direction == ACRN_IOREQ_DIR_WRITE) {
164 				status = emulate_instruction(vcpu);
165 				if (status != 0) {
166 					ret = -EFAULT;
167 				}
168 			}
169 
170 			if (ret > 0) {
171 				status = emulate_io(vcpu, io_req);
172 			}
173 		} else {
174 			if (ret == -EFAULT) {
175 				pr_info("page fault happen during decode_instruction");
176 				status = 0;
177 			}
178 		}
179 		if (ret <= 0) {
180 			pr_acrnlog("Guest Linear Address: 0x%016lx", exec_vmread(VMX_GUEST_LINEAR_ADDR));
181 			pr_acrnlog("Guest Physical Address address: 0x%016lx", gpa);
182 		}
183 	}
184 
185 	return status;
186 }
187 
188 /**
189  * @brief Allow a VM to access a port I/O range
190  *
191  * This API enables direct access from the given \p vm to the port I/O space
192  * starting from \p port_address to \p port_address + \p nbytes - 1.
193  *
194  * @param vm The VM whose port I/O access permissions is to be changed
195  * @param port_address The start address of the port I/O range
196  * @param nbytes The size of the range, in bytes
197  */
allow_guest_pio_access(struct acrn_vm * vm,uint16_t port_address,uint32_t nbytes)198 void allow_guest_pio_access(struct acrn_vm *vm, uint16_t port_address,
199 		uint32_t nbytes)
200 {
201 	uint16_t address = port_address;
202 	uint32_t *b;
203 	uint32_t i;
204 
205 	b = (uint32_t *)vm->arch_vm.io_bitmap;
206 	for (i = 0U; i < nbytes; i++) {
207 		b[address >> 5U] &= ~(1U << (address & 0x1fU));
208 		address++;
209 	}
210 }
211 
/*
 * Revoke direct access from \p vm to the port I/O range starting at
 * \p port_address, \p nbytes bytes long, by setting the corresponding
 * bits in the VM's I/O bitmap.
 */
void deny_guest_pio_access(struct acrn_vm *vm, uint16_t port_address,
		uint32_t nbytes)
{
	uint32_t *bitmap = (uint32_t *)vm->arch_vm.io_bitmap;
	uint16_t port = port_address;
	uint32_t remaining = nbytes;

	while (remaining != 0U) {
		/* A set bit means an access to the port causes a VM exit. */
		bitmap[port >> 5U] |= (1U << (port & 0x1fU));
		port++;
		remaining--;
	}
}
225