/*
 * Copyright (C) 2019-2022 Intel Corporation.
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm/guest/vm.h>
#include <asm/irq.h>
#include <errno.h>
#include <logmsg.h>
#include <sbuf.h>

#define DBG_LEVEL_IOREQ	6U

static uint32_t acrn_hsm_notification_vector = HYPERVISOR_CALLBACK_HSM_VECTOR;

#define MMIO_DEFAULT_VALUE_SIZE_1	(0xFFUL)
#define MMIO_DEFAULT_VALUE_SIZE_2	(0xFFFFUL)
#define MMIO_DEFAULT_VALUE_SIZE_4	(0xFFFFFFFFUL)
#define MMIO_DEFAULT_VALUE_SIZE_8	(0xFFFFFFFFFFFFFFFFUL)

#if defined(HV_DEBUG)
__unused static void acrn_print_request(uint16_t vcpu_id, const struct acrn_io_request *req)
{
	switch (req->type) {
	case ACRN_IOREQ_TYPE_MMIO:
		dev_dbg(DBG_LEVEL_IOREQ, "[vcpu_id=%hu type=MMIO]", vcpu_id);
		dev_dbg(DBG_LEVEL_IOREQ,
			"gpa=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
			req->reqs.mmio_request.address,
			req->reqs.mmio_request.direction,
			req->reqs.mmio_request.size,
			req->reqs.mmio_request.value,
			req->processed);
		break;
	case ACRN_IOREQ_TYPE_PORTIO:
		dev_dbg(DBG_LEVEL_IOREQ, "[vcpu_id=%hu type=PORTIO]", vcpu_id);
		dev_dbg(DBG_LEVEL_IOREQ,
			"IO=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
			req->reqs.pio_request.address,
			req->reqs.pio_request.direction,
			req->reqs.pio_request.size,
			req->reqs.pio_request.value,
			req->processed);
		break;
	default:
		dev_dbg(DBG_LEVEL_IOREQ, "[vcpu_id=%hu type=%d] unsupported type",
			vcpu_id, req->type);
		break;
	}
}
#endif

/**
 * @brief Reset the status of all I/O requests of the VM
 *
 * @param vm The VM whose I/O requests are to be reset
 */
void reset_vm_ioreqs(struct acrn_vm *vm)
{
	uint16_t i;

	for (i = 0U; i < ACRN_IO_REQUEST_MAX; i++) {
		set_io_req_state(vm, i, ACRN_IOREQ_STATE_FREE);
	}
}

/**
 * @pre vm->asyncio_lock is held
 */
static bool asyncio_is_conflict(struct acrn_vm *vm,
		const struct acrn_asyncio_info *async_info)
{
	struct list_head *pos;
	struct asyncio_desc *p;
	struct acrn_asyncio_info *info;
	bool ret = false;

	/* If either side's match_data is 0, the data matching is skipped. */
	list_for_each(pos, &vm->aiodesc_queue) {
		p = container_of(pos, struct asyncio_desc, list);
		info = &(p->asyncio_info);
		if ((info->addr == async_info->addr) &&
			(info->type == async_info->type) &&
			((info->match_data == 0U) || (async_info->match_data == 0U) ||
			(info->data == async_info->data))) {
			ret = true;
			break;
		}
	}

	return ret;
}
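
/*
 * Illustrative note (added for clarity, values are hypothetical): with the rule
 * above, a registered entry
 *	{ .type = ACRN_ASYNCIO_PIO, .addr = 0x510, .match_data = 1, .data = 0x1 }
 * conflicts with a new registration
 *	{ .type = ACRN_ASYNCIO_PIO, .addr = 0x510, .match_data = 0 }
 * because match_data == 0 on the new entry disables data matching, whereas
 *	{ .type = ACRN_ASYNCIO_PIO, .addr = 0x510, .match_data = 1, .data = 0x2 }
 * does not conflict: same addr/type, but both sides match on data and the data
 * values differ.
 */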

int add_asyncio(struct acrn_vm *vm, const struct acrn_asyncio_info *async_info)
{
	uint32_t i;
	int ret = -1;
	bool b_conflict;
	struct asyncio_desc *desc;

	if (async_info->addr != 0UL) {
		spinlock_obtain(&vm->asyncio_lock);
		b_conflict = asyncio_is_conflict(vm, async_info);
		if (!b_conflict) {
			for (i = 0U; i < ACRN_ASYNCIO_MAX; i++) {
				desc = &(vm->aio_desc[i]);
				if ((desc->asyncio_info.addr == 0UL) && (desc->asyncio_info.fd == 0UL)) {
					(void)memcpy_s(&(desc->asyncio_info), sizeof(struct acrn_asyncio_info),
							async_info, sizeof(struct acrn_asyncio_info));
					INIT_LIST_HEAD(&(desc->list));
					list_add(&(desc->list), &vm->aiodesc_queue);
					ret = 0;
					break;
				}
			}
			spinlock_release(&vm->asyncio_lock);
			if (i == ACRN_ASYNCIO_MAX) {
				pr_fatal("too many asyncio requests, not supported!");
			}
		} else {
			spinlock_release(&vm->asyncio_lock);
			pr_err("%s, already registered!", __func__);
		}
	} else {
		pr_err("%s: base = 0 is not supported!", __func__);
	}
	return ret;
}

int remove_asyncio(struct acrn_vm *vm, const struct acrn_asyncio_info *async_info)
{
	uint32_t i;
	int ret = -1;
	struct asyncio_desc *desc;
	struct acrn_asyncio_info *info;

	if (async_info->addr != 0UL) {
		spinlock_obtain(&vm->asyncio_lock);
		for (i = 0U; i < ACRN_ASYNCIO_MAX; i++) {
			desc = &(vm->aio_desc[i]);
			info = &(desc->asyncio_info);
			if ((info->type == async_info->type)
					&& (info->addr == async_info->addr)
					&& (info->fd == async_info->fd)
					&& ((info->match_data == 0U) == (async_info->match_data == 0U))
					&& (info->data == async_info->data)) {
				list_del_init(&(desc->list));
				memset(desc, 0, sizeof(vm->aio_desc[0]));
				ret = 0;
				break;
			}
		}
		spinlock_release(&vm->asyncio_lock);
		if (i == ACRN_ASYNCIO_MAX) {
			pr_fatal("Failed to find asyncio req on addr: %lx!", async_info->addr);
		}
	} else {
		pr_err("%s: base = 0 is not supported!", __func__);
	}
	return ret;
}
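
/*
 * Usage sketch (illustrative only, not part of the original sources): a caller
 * servicing an ioeventfd-style hypercall might register and later remove an
 * async I/O descriptor as below. The literal values (port 0x510, fd 3) are
 * hypothetical; only the struct fields (type, addr, fd, data, match_data) and
 * the add/remove API come from this file.
 *
 *	struct acrn_asyncio_info info = {
 *		.type = ACRN_ASYNCIO_PIO,
 *		.addr = 0x510UL,
 *		.fd = 3UL,
 *		.data = 0x1UL,
 *		.match_data = 1U,
 *	};
 *
 *	if (add_asyncio(vm, &info) == 0) {
 *		(matching guest accesses to port 0x510 are now completed by
 *		 pushing the fd into the asyncio sbuf instead of a full ioreq
 *		 round trip, see get_asyncio_desc()/acrn_insert_asyncio())
 *	}
 *	(void)remove_asyncio(vm, &info);
 */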

static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)
{
	return (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE);
}

static struct asyncio_desc *get_asyncio_desc(struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
	uint64_t addr = 0UL;
	uint32_t type;
	uint64_t value;
	struct list_head *pos;
	struct asyncio_desc *iter_desc;
	struct acrn_asyncio_info *iter_info;
	struct acrn_vm *vm = vcpu->vm;
	struct asyncio_desc *ret = NULL;
	struct shared_buf *sbuf = (struct shared_buf *)vm->sw.asyncio_sbuf;

	if (sbuf != NULL) {
		switch (io_req->io_type) {
		case ACRN_IOREQ_TYPE_PORTIO:
			addr = io_req->reqs.pio_request.address;
			value = io_req->reqs.pio_request.value;
			type = ACRN_ASYNCIO_PIO;
			break;
		case ACRN_IOREQ_TYPE_MMIO:
			addr = io_req->reqs.mmio_request.address;
			value = io_req->reqs.mmio_request.value;
			type = ACRN_ASYNCIO_MMIO;
			break;
		default:
			break;
		}

		if (addr != 0UL) {
			spinlock_obtain(&vm->asyncio_lock);
			list_for_each(pos, &vm->aiodesc_queue) {
				iter_desc = container_of(pos, struct asyncio_desc, list);
				iter_info = &(iter_desc->asyncio_info);
				if ((iter_info->addr == addr) && (iter_info->type == type) &&
					((iter_info->match_data == 0U) || (iter_info->data == value))) {
					ret = iter_desc;
					break;
				}
			}
			spinlock_release(&vm->asyncio_lock);
		}
	}

	return ret;
}

static int acrn_insert_asyncio(struct acrn_vcpu *vcpu, const uint64_t asyncio_fd)
{
	struct acrn_vm *vm = vcpu->vm;
	struct shared_buf *sbuf = (struct shared_buf *)vm->sw.asyncio_sbuf;
	int ret = -ENODEV;

	if (sbuf != NULL) {
		spinlock_obtain(&vm->asyncio_lock);
		while (sbuf_put(sbuf, (uint8_t *)&asyncio_fd, sizeof(asyncio_fd)) == 0U) {
			/* sbuf is full, try again later. */
			spinlock_release(&vm->asyncio_lock);
			asm_pause();
			if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
				schedule();
			}
			spinlock_obtain(&vm->asyncio_lock);
		}

		spinlock_release(&vm->asyncio_lock);
		arch_fire_hsm_interrupt();
		ret = 0;
	}
	return ret;
}

/**
 * @brief Deliver \p io_req to the Service VM and suspend \p vcpu until its completion
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 * @param io_req The I/O request holding the details of the MMIO access
 *
 * @pre vcpu != NULL && io_req != NULL
 */
int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
	struct acrn_io_request_buffer *req_buf = NULL;
	struct acrn_io_request *acrn_io_req;
	bool is_polling = false;
	int32_t ret = 0;
	uint16_t cur;

	if ((vcpu->vm->sw.io_shared_page != NULL)
			&& (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_FREE)) {

		req_buf = (struct acrn_io_request_buffer *)(vcpu->vm->sw.io_shared_page);
		cur = vcpu->vcpu_id;

		stac();
		acrn_io_req = &req_buf->req_slot[cur];
		/* Insert the request into the HSM request buffer and inject an upcall */
		acrn_io_req->type = io_req->io_type;
		(void)memcpy_s(&acrn_io_req->reqs, sizeof(acrn_io_req->reqs),
			&io_req->reqs, sizeof(acrn_io_req->reqs));
		if (vcpu->vm->sw.is_polling_ioreq) {
			acrn_io_req->completion_polling = 1U;
			is_polling = true;
		}
		clac();

		/* Before updating the acrn_io_req state, ensure all writes filling acrn_io_req are done */
		cpu_write_memory_barrier();

		/*
		 * Must clear the signal before we mark the request as pending.
		 * Once we mark it pending, HSM may process the request and signal us
		 * before we perform the upcall, because HSM can work in polling mode
		 * without waiting for the upcall.
		 */
		set_io_req_state(vcpu->vm, vcpu->vcpu_id, ACRN_IOREQ_STATE_PENDING);

		/* signal HSM */
		arch_fire_hsm_interrupt();

		/* Poll for completion of the request in polling mode */
		if (is_polling) {
			while (true) {
				if (has_complete_ioreq(vcpu)) {
					/* the pending ioreq has completed */
					break;
				}
				asm_pause();
				if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
					schedule();
				}
			}
		} else {
			wait_event(&vcpu->events[VCPU_EVENT_IOREQ]);
		}
	} else {
		ret = -EINVAL;
	}

	return ret;
}
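
/*
 * Request lifecycle note (summary added for readability): the per-vCPU slot in
 * the shared I/O request page moves through
 *	ACRN_IOREQ_STATE_FREE -> ACRN_IOREQ_STATE_PENDING     (set above, by the HV)
 *	ACRN_IOREQ_STATE_PENDING -> ACRN_IOREQ_STATE_COMPLETE (set by HSM/DM in the Service VM)
 *	ACRN_IOREQ_STATE_COMPLETE -> ACRN_IOREQ_STATE_FREE    (set by complete_ioreq() below)
 * In polling mode the vCPU spins on has_complete_ioreq(); otherwise it sleeps
 * on VCPU_EVENT_IOREQ and is woken by the completion path outside this file.
 */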

uint32_t get_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id)
{
	uint32_t state;
	struct acrn_io_request_buffer *req_buf = NULL;
	struct acrn_io_request *acrn_io_req;

	req_buf = (struct acrn_io_request_buffer *)vm->sw.io_shared_page;
	if (req_buf == NULL) {
		state = 0xffffffffU;
	} else {
		stac();
		acrn_io_req = &req_buf->req_slot[vcpu_id];
		state = acrn_io_req->processed;
		clac();
	}

	return state;
}

void set_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id, uint32_t state)
{
	struct acrn_io_request_buffer *req_buf = NULL;
	struct acrn_io_request *acrn_io_req;

	req_buf = (struct acrn_io_request_buffer *)vm->sw.io_shared_page;
	if (req_buf != NULL) {
		stac();
		acrn_io_req = &req_buf->req_slot[vcpu_id];
		/*
		 * The HV only sets processed to ACRN_IOREQ_STATE_PENDING or ACRN_IOREQ_STATE_FREE.
		 * No sfence is needed here: even if the Service VM/DM sees the previous state,
		 * the only side effect is that it defers processing of the new I/O request;
		 * it does not lead to wrong processing.
		 */
		acrn_io_req->processed = state;
		clac();
	}
}

int init_asyncio(struct acrn_vm *vm, uint64_t *hva)
{
	struct shared_buf *sbuf = (struct shared_buf *)hva;
	int ret = -1;

	stac();
	if (sbuf != NULL) {
		if (sbuf->magic == SBUF_MAGIC) {
			vm->sw.asyncio_sbuf = sbuf;
			INIT_LIST_HEAD(&vm->aiodesc_queue);
			spinlock_init(&vm->asyncio_lock);
			ret = 0;
		}
	}
	clac();

	return ret;
}

void set_hsm_notification_vector(uint32_t vector)
{
	acrn_hsm_notification_vector = vector;
}

uint32_t get_hsm_notification_vector(void)
{
	return acrn_hsm_notification_vector;
}

/**
 * @brief General complete-work for MMIO emulation
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 * @param io_req The I/O request holding the details of the MMIO access
 *
 * @pre io_req->io_type == ACRN_IOREQ_TYPE_MMIO
 *
 * @remark This function must be called when \p io_req is completed, after
 * either a previous call to emulate_io() has returned 0 or the corresponding
 * HSM request has transferred to the COMPLETE state.
 */
static void emulate_mmio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
	const struct acrn_mmio_request *mmio_req = &io_req->reqs.mmio_request;

	if (mmio_req->direction == ACRN_IOREQ_DIR_READ) {
		/* Emulate the instruction and update the vcpu register set */
		(void)emulate_instruction(vcpu);
	}
}

static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
	struct acrn_io_request_buffer *req_buf = NULL;
	struct acrn_io_request *acrn_io_req;

	req_buf = (struct acrn_io_request_buffer *)(vcpu->vm->sw.io_shared_page);

	stac();
	acrn_io_req = &req_buf->req_slot[vcpu->vcpu_id];
	if (io_req != NULL) {
		switch (vcpu->req.io_type) {
		case ACRN_IOREQ_TYPE_PORTIO:
			io_req->reqs.pio_request.value = acrn_io_req->reqs.pio_request.value;
			break;
		case ACRN_IOREQ_TYPE_MMIO:
			io_req->reqs.mmio_request.value = acrn_io_req->reqs.mmio_request.value;
			break;
		default:
			/* No actions are required for other cases. */
			break;
		}
	}

	/*
	 * Only the HV checks whether processed is ACRN_IOREQ_STATE_FREE per vCPU before injecting an ioreq.
	 * Only the HV sets processed to ACRN_IOREQ_STATE_FREE when the ioreq is done.
	 */
	acrn_io_req->processed = ACRN_IOREQ_STATE_FREE;
	clac();
}

/**
 * @brief Complete-work of HSM requests for port I/O emulation
 *
 * @pre vcpu->req.io_type == ACRN_IOREQ_TYPE_PORTIO
 *
 * @remark This function must be called after the HSM request corresponding to
 * \p vcpu has been transferred to the COMPLETE state.
 */
static void dm_emulate_pio_complete(struct acrn_vcpu *vcpu)
{
	struct io_request *io_req = &vcpu->req;

	complete_ioreq(vcpu, io_req);

	emulate_pio_complete(vcpu, io_req);
}

/**
 * @brief Complete-work of HSM requests for MMIO emulation
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 *
 * @pre vcpu->req.io_type == ACRN_IOREQ_TYPE_MMIO
 *
 * @remark This function must be called after the HSM request corresponding to
 * \p vcpu has been transferred to the COMPLETE state.
 */
static void dm_emulate_mmio_complete(struct acrn_vcpu *vcpu)
{
	struct io_request *io_req = &vcpu->req;

	complete_ioreq(vcpu, io_req);

	emulate_mmio_complete(vcpu, io_req);
}

/**
 * @brief General complete-work for all kinds of HSM requests for I/O emulation
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 */
static void dm_emulate_io_complete(struct acrn_vcpu *vcpu)
{
	if (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE) {
		/*
		 * If the vcpu is in the Zombie state, it will be destroyed soon.
		 * Just mark the ioreq done and don't resume the vcpu.
		 */
		if (vcpu->state == VCPU_ZOMBIE) {
			complete_ioreq(vcpu, NULL);
		} else {
			switch (vcpu->req.io_type) {
			case ACRN_IOREQ_TYPE_MMIO:
				dm_emulate_mmio_complete(vcpu);
				break;
			case ACRN_IOREQ_TYPE_PORTIO:
			case ACRN_IOREQ_TYPE_PCICFG:
				/*
				 * ACRN_IOREQ_TYPE_PORTIO on 0xcf8 & 0xcfc may switch to
				 * ACRN_IOREQ_TYPE_PCICFG in some cases. Applying the post-work
				 * for ACRN_IOREQ_TYPE_PORTIO to ACRN_IOREQ_TYPE_PCICFG works because
				 * the formats of the first 28 bytes of ACRN_IOREQ_TYPE_PORTIO &
				 * ACRN_IOREQ_TYPE_PCICFG requests are exactly the same, and post-work
				 * is mainly interested in the read value.
				 */
				dm_emulate_pio_complete(vcpu);
				break;
			default:
				/*
				 * ACRN_IOREQ_TYPE_WP can only be triggered on writes, which do
				 * not need post-work. Just mark the ioreq done.
				 */
				complete_ioreq(vcpu, NULL);
				break;
			}
		}
	}
}

/**
 * @pre width < 8U
 * @pre vcpu != NULL
 * @pre vcpu->vm != NULL
 */
static bool pio_default_read(struct acrn_vcpu *vcpu,
		__unused uint16_t addr, size_t width)
{
	struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;

	pio_req->value = (uint32_t)((1UL << (width * 8U)) - 1UL);

	return true;
}
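
/*
 * Worked example (added for clarity): the expression above yields an all-ones
 * value of the access width, mimicking a read from a non-existent port:
 *	width == 1U  ->  (1UL << 8U)  - 1UL = 0xFFUL       -> value = 0xFFU
 *	width == 2U  ->  (1UL << 16U) - 1UL = 0xFFFFUL     -> value = 0xFFFFU
 *	width == 4U  ->  (1UL << 32U) - 1UL = 0xFFFFFFFFUL -> value = 0xFFFFFFFFU
 * The @pre width < 8U keeps the shift amount below 64 and thus well defined.
 */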

/**
 * @pre width < 8U
 * @pre vcpu != NULL
 * @pre vcpu->vm != NULL
 */
static bool pio_default_write(__unused struct acrn_vcpu *vcpu, __unused uint16_t addr,
		__unused size_t width, __unused uint32_t v)
{
	return true; /* ignore write */
}

/**
 * @pre (io_req->reqs.mmio_request.size == 1U) || (io_req->reqs.mmio_request.size == 2U) ||
 *	(io_req->reqs.mmio_request.size == 4U) || (io_req->reqs.mmio_request.size == 8U)
 */
static int32_t mmio_default_access_handler(struct io_request *io_req,
		__unused void *handler_private_data)
{
	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;

	if (mmio->direction == ACRN_IOREQ_DIR_READ) {
		switch (mmio->size) {
		case 1U:
			mmio->value = MMIO_DEFAULT_VALUE_SIZE_1;
			break;
		case 2U:
			mmio->value = MMIO_DEFAULT_VALUE_SIZE_2;
			break;
		case 4U:
			mmio->value = MMIO_DEFAULT_VALUE_SIZE_4;
			break;
		case 8U:
			mmio->value = MMIO_DEFAULT_VALUE_SIZE_8;
			break;
		default:
			/* This case is unreachable; this is guaranteed by the design. */
			break;
		}
	}

	return 0;
}

/**
 * Try handling the given request by any port I/O handler registered in the
 * hypervisor.
 *
 * @pre io_req->io_type == ACRN_IOREQ_TYPE_PORTIO
 *
 * @retval 0 Successfully emulated by registered handlers.
 * @retval -ENODEV No proper handler found.
 * @retval -EIO The request spans multiple devices and cannot be emulated.
 */
static int32_t
hv_emulate_pio(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
	int32_t status = -ENODEV;
	uint16_t port, size;
	uint32_t idx;
	struct acrn_vm *vm = vcpu->vm;
	struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
	struct vm_io_handler_desc *handler;
	io_read_fn_t io_read = NULL;
	io_write_fn_t io_write = NULL;

	if (is_service_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) {
		io_read = pio_default_read;
		io_write = pio_default_write;
	}

	port = (uint16_t)pio_req->address;
	size = (uint16_t)pio_req->size;

	for (idx = 0U; idx < EMUL_PIO_IDX_MAX; idx++) {
		handler = &(vm->emul_pio[idx]);

		if ((port < handler->port_start) || (port >= handler->port_end)) {
			continue;
		}

		if (handler->io_read != NULL) {
			io_read = handler->io_read;
		}
		if (handler->io_write != NULL) {
			io_write = handler->io_write;
		}
		break;
	}

	if ((pio_req->direction == ACRN_IOREQ_DIR_WRITE) && (io_write != NULL)) {
		if (io_write(vcpu, port, size, pio_req->value)) {
			status = 0;
		}
	} else if ((pio_req->direction == ACRN_IOREQ_DIR_READ) && (io_read != NULL)) {
		if (io_read(vcpu, port, size)) {
			status = 0;
		}
	} else {
		/* do nothing */
	}

	pr_dbg("IO %s on port %04x, data %08x",
		(pio_req->direction == ACRN_IOREQ_DIR_READ) ? "read" : "write", port, pio_req->value);

	return status;
}

/**
 * Use registered MMIO handlers on the given request if it falls in the range of
 * any of them.
 *
 * @pre io_req->io_type == ACRN_IOREQ_TYPE_MMIO
 *
 * @retval 0 Successfully emulated by registered handlers.
 * @retval -ENODEV No proper handler found.
 * @retval -EIO The request spans multiple devices and cannot be emulated.
 */
static int32_t
hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
	int32_t status = -ENODEV;
	bool hold_lock = true;
	uint16_t idx;
	uint64_t address, size, base, end;
	struct acrn_mmio_request *mmio_req = &io_req->reqs.mmio_request;
	struct mem_io_node *mmio_handler = NULL;
	hv_mem_io_handler_t read_write = NULL;
	void *handler_private_data = NULL;

	if (is_service_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) {
		read_write = mmio_default_access_handler;
	}

	address = mmio_req->address;
	size = mmio_req->size;

	spinlock_obtain(&vcpu->vm->emul_mmio_lock);
	for (idx = 0U; idx <= vcpu->vm->nr_emul_mmio_regions; idx++) {
		mmio_handler = &(vcpu->vm->emul_mmio[idx]);
		if (mmio_handler->read_write != NULL) {
			base = mmio_handler->range_start;
			end = mmio_handler->range_end;

			if (((address + size) <= base) || (address >= end)) {
				continue;
			} else {
				if ((address >= base) && ((address + size) <= end)) {
					hold_lock = mmio_handler->hold_lock;
					read_write = mmio_handler->read_write;
					handler_private_data = mmio_handler->handler_private_data;
				} else {
					pr_fatal("Err MMIO, address:0x%lx, size:%x", address, size);
					status = -EIO;
				}
				break;
			}
		}
	}

	if ((status == -ENODEV) && (read_write != NULL)) {
		/*
		 * If the matched mmio_handler does not require the lock to be held
		 * (it is never modified once registered), we don't need to hold the
		 * lock while handling the MMIO access.
		 */
		if (!hold_lock) {
			spinlock_release(&vcpu->vm->emul_mmio_lock);
		}
		status = read_write(io_req, handler_private_data);
		if (!hold_lock) {
			spinlock_obtain(&vcpu->vm->emul_mmio_lock);
		}
	}
	spinlock_release(&vcpu->vm->emul_mmio_lock);

	return status;
}

/**
 * @brief Emulate \p io_req for \p vcpu
 *
 * Handle an I/O request by either invoking a hypervisor-internal handler or
 * delivering it to HSM.
 *
 * @pre vcpu != NULL
 * @pre vcpu->vm != NULL
 * @pre vcpu->vm->vm_id < CONFIG_MAX_VM_NUM
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 * @param io_req The I/O request holding the details of the MMIO access
 *
 * @retval 0 Successfully emulated by registered handlers.
 * @retval ACRN_IOREQ_STATE_PENDING The I/O request is delivered to HSM.
 * @retval -EIO The request spans multiple devices and cannot be emulated.
 * @retval -EINVAL \p io_req has an invalid io_type.
 * @retval <0 on other errors during emulation.
 */
int32_t
emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
	int32_t status;
	struct acrn_vm_config *vm_config;
	struct asyncio_desc *aio_desc;

	vm_config = get_vm_config(vcpu->vm->vm_id);

	switch (io_req->io_type) {
	case ACRN_IOREQ_TYPE_PORTIO:
		status = hv_emulate_pio(vcpu, io_req);
		if (status == 0) {
			emulate_pio_complete(vcpu, io_req);
		}
		break;
	case ACRN_IOREQ_TYPE_MMIO:
	case ACRN_IOREQ_TYPE_WP:
		status = hv_emulate_mmio(vcpu, io_req);
		if (status == 0) {
			emulate_mmio_complete(vcpu, io_req);
		}
		break;
	default:
		/* Unknown I/O request io_type */
		status = -EINVAL;
		break;
	}

	if ((status == -ENODEV) && (vm_config->load_order == POST_LAUNCHED_VM)) {
		/*
		 * No handler on the HV side; hand the request over to HSM in the
		 * Service VM: either notify a registered asyncio fd, or insert the
		 * request into HSM's buffer and inject an upcall.
		 */
		aio_desc = get_asyncio_desc(vcpu, io_req);
		if (aio_desc != NULL) {
			status = acrn_insert_asyncio(vcpu, aio_desc->asyncio_info.fd);
		} else {
			status = acrn_insert_request(vcpu, io_req);
			if (status == 0) {
				dm_emulate_io_complete(vcpu);
			}
		}
		if (status != 0) {
			/*
			 * For both PIO & MMIO, the direction, address and
			 * size fields share the same layout.
			 */
			struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;

			pr_fatal("%s Err: access dir %d, io_type %d, addr = 0x%lx, size=%lu", __func__,
				pio_req->direction, io_req->io_type,
				pio_req->address, pio_req->size);
		}
	}

	return status;
}
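
/*
 * Usage sketch (illustrative only, not part of the original sources): a port
 * I/O VM-exit handler might fill vcpu->req and hand it to emulate_io() roughly
 * as below. The exit-qualification decoding is omitted and the literal values
 * (port 0x60, 1-byte read) are hypothetical; only io_type, the pio_request
 * fields and the return-value convention come from this file.
 *
 *	struct io_request *io_req = &vcpu->req;
 *
 *	io_req->io_type = ACRN_IOREQ_TYPE_PORTIO;
 *	io_req->reqs.pio_request.direction = ACRN_IOREQ_DIR_READ;
 *	io_req->reqs.pio_request.address = 0x60UL;
 *	io_req->reqs.pio_request.size = 1UL;
 *
 *	if (emulate_io(vcpu, io_req) == 0) {
 *		(emulation has finished: for a read, the result is in
 *		 io_req->reqs.pio_request.value and the corresponding
 *		 complete-work has already run)
 *	}
 */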

/**
 * @brief Register a port I/O handler
 *
 * @param vm The VM to which the port I/O handlers are registered
 * @param pio_idx The emulated port I/O index
 * @param range The emulated port I/O range
 * @param io_read_fn_ptr The handler for emulating reads from the given range
 * @param io_write_fn_ptr The handler for emulating writes to the given range
 *
 * @pre pio_idx < EMUL_PIO_IDX_MAX
 */
void register_pio_emulation_handler(struct acrn_vm *vm, uint32_t pio_idx,
		const struct vm_io_range *range, io_read_fn_t io_read_fn_ptr, io_write_fn_t io_write_fn_ptr)
{
	if (is_service_vm(vm)) {
		deny_guest_pio_access(vm, range->base, range->len);
	}
	vm->emul_pio[pio_idx].port_start = range->base;
	vm->emul_pio[pio_idx].port_end = range->base + range->len;
	vm->emul_pio[pio_idx].io_read = io_read_fn_ptr;
	vm->emul_pio[pio_idx].io_write = io_write_fn_ptr;
}
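
/*
 * Usage sketch (illustrative only, not part of the original sources): a
 * hypervisor-internal device model could register read/write callbacks for a
 * 4-byte port range like this. UART_PIO_IDX, port 0x3F8 and the vuart_*
 * handlers are hypothetical names; only the registration API and the
 * io_read_fn_t / io_write_fn_t signatures (see pio_default_read and
 * pio_default_write above) come from this file.
 *
 *	static bool vuart_pio_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
 *	{
 *		vcpu->req.reqs.pio_request.value = 0U;	(fill with the emulated register value)
 *		return true;
 *	}
 *
 *	static bool vuart_pio_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t width, uint32_t val)
 *	{
 *		return true;	(consume the written value)
 *	}
 *
 *	struct vm_io_range range = { .base = 0x3F8U, .len = 4U };
 *
 *	register_pio_emulation_handler(vm, UART_PIO_IDX, &range, vuart_pio_read, vuart_pio_write);
 */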

/**
 * @brief Find a matching MMIO node
 *
 * This API finds a matching MMIO node in \p vm.
 *
 * @param vm The VM to which the MMIO node belongs.
 *
 * @return The matching mmio_node if one exists, otherwise NULL.
 */
static inline struct mem_io_node *find_match_mmio_node(struct acrn_vm *vm,
		uint64_t start, uint64_t end)
{
	bool found = false;
	uint16_t idx;
	struct mem_io_node *mmio_node;

	for (idx = 0U; idx < CONFIG_MAX_EMULATED_MMIO_REGIONS; idx++) {
		mmio_node = &(vm->emul_mmio[idx]);
		if ((mmio_node->range_start == start) && (mmio_node->range_end == end)) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_info("%s, vm[%d] no matching mmio region [0x%lx, 0x%lx] is found",
			__func__, vm->vm_id, start, end);
		mmio_node = NULL;
	}

	return mmio_node;
}

/**
 * @brief Find a free MMIO node
 *
 * This API finds a free MMIO node in \p vm.
 *
 * @param vm The VM to which the MMIO node belongs.
 *
 * @return A free mmio_node if one exists, otherwise NULL.
 */
static inline struct mem_io_node *find_free_mmio_node(struct acrn_vm *vm)
{
	uint16_t idx;
	struct mem_io_node *mmio_node = find_match_mmio_node(vm, 0UL, 0UL);

	if (mmio_node != NULL) {
		idx = (uint16_t)(uint64_t)(mmio_node - &(vm->emul_mmio[0U]));
		if (vm->nr_emul_mmio_regions < idx) {
			vm->nr_emul_mmio_regions = idx;
		}
	}

	return mmio_node;
}

/**
 * @brief Register an MMIO handler
 *
 * This API registers an MMIO handler with \p vm
 *
 * @param vm The VM to which the MMIO handler is registered
 * @param read_write The handler for emulating accesses to the given range
 * @param start The base address of the range \p read_write can emulate
 * @param end The end of the range (exclusive) \p read_write can emulate
 * @param handler_private_data Handler-specific data which will be passed to \p read_write when called
 * @param hold_lock Whether \p read_write must be called with the emul_mmio_lock held
 */
void register_mmio_emulation_handler(struct acrn_vm *vm,
		hv_mem_io_handler_t read_write, uint64_t start,
		uint64_t end, void *handler_private_data, bool hold_lock)
{
	struct mem_io_node *mmio_node;

	/* Ensure a read/write handler exists and the range is valid */
	if ((read_write != NULL) && (end > start)) {
		spinlock_obtain(&vm->emul_mmio_lock);
		mmio_node = find_free_mmio_node(vm);
		if (mmio_node != NULL) {
			/* Fill in information for this node */
			mmio_node->hold_lock = hold_lock;
			mmio_node->read_write = read_write;
			mmio_node->handler_private_data = handler_private_data;
			mmio_node->range_start = start;
			mmio_node->range_end = end;
		}
		spinlock_release(&vm->emul_mmio_lock);
	}
}
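
/*
 * Usage sketch (illustrative only, not part of the original sources): a
 * hypervisor-internal device model could register an MMIO handler for a
 * hypothetical 4 KiB register page like this. vdev_mmio_access and the GPA
 * range are made-up names; the handler signature follows hv_mem_io_handler_t
 * as used by mmio_default_access_handler above.
 *
 *	static int32_t vdev_mmio_access(struct io_request *io_req, void *data)
 *	{
 *		struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
 *
 *		if (mmio->direction == ACRN_IOREQ_DIR_READ) {
 *			mmio->value = 0UL;	(return the emulated register content)
 *		}
 *		return 0;
 *	}
 *
 *	register_mmio_emulation_handler(vm, vdev_mmio_access,
 *		0xE0000000UL, 0xE0001000UL, NULL, false);
 *
 * Passing hold_lock == false lets hv_emulate_mmio() drop emul_mmio_lock while
 * the handler runs; pass true if the handler's node may change concurrently.
 */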

/**
 * @brief Unregister an MMIO handler
 *
 * This API unregisters an MMIO handler from \p vm
 *
 * @param vm The VM from which the MMIO handler is unregistered
 * @param start The base address of the range to unregister
 * @param end The end of the range (exclusive) to unregister
 */
void unregister_mmio_emulation_handler(struct acrn_vm *vm,
		uint64_t start, uint64_t end)
{
	struct mem_io_node *mmio_node;

	spinlock_obtain(&vm->emul_mmio_lock);
	mmio_node = find_match_mmio_node(vm, start, end);
	if (mmio_node != NULL) {
		(void)memset(mmio_node, 0U, sizeof(struct mem_io_node));
	}
	spinlock_release(&vm->emul_mmio_lock);
}

void deinit_emul_io(struct acrn_vm *vm)
{
	(void)memset(vm->emul_mmio, 0U, sizeof(vm->emul_mmio));
	(void)memset(vm->emul_pio, 0U, sizeof(vm->emul_pio));
}