Lines Matching refs:vcpu

162 static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)  in has_complete_ioreq()  argument
164 return (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE); in has_complete_ioreq()
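
The two lines above are the whole of has_complete_ioreq(): a vCPU that has handed an I/O request to the device model may resume only once its slot in the shared page reaches ACRN_IOREQ_STATE_COMPLETE. A minimal model of that slot state machine follows; the numeric values follow ACRN's public ioreq state definitions, but the struct and helper here are illustrative stubs, not the hypervisor's real types.

#include <stdbool.h>

/* Per-vCPU ioreq slot states, mirroring ACRN_IOREQ_STATE_*. */
enum ioreq_state {
	IOREQ_STATE_PENDING    = 0,	/* posted by the hypervisor, awaiting the DM */
	IOREQ_STATE_COMPLETE   = 1,	/* DM finished; result ready for the vCPU */
	IOREQ_STATE_PROCESSING = 2,	/* DM has claimed the request */
	IOREQ_STATE_FREE       = 3,	/* slot idle, safe to post a new request */
};

struct req_slot_state {
	enum ioreq_state state;
};

/* Analogue of has_complete_ioreq(): the vCPU may consume the result only
 * once the device model has marked its slot COMPLETE. */
static inline bool slot_has_complete_ioreq(const struct req_slot_state *s)
{
	return (s->state == IOREQ_STATE_COMPLETE);
}
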
167 static struct asyncio_desc *get_asyncio_desc(struct acrn_vcpu *vcpu, const struct io_request *io_req) in get_asyncio_desc() argument
175 struct acrn_vm *vm = vcpu->vm; in get_asyncio_desc()
215 static int acrn_insert_asyncio(struct acrn_vcpu *vcpu, const uint64_t asyncio_fd) in acrn_insert_asyncio() argument
217 struct acrn_vm *vm = vcpu->vm; in acrn_insert_asyncio()
228 if (need_reschedule(pcpuid_from_vcpu(vcpu))) { in acrn_insert_asyncio()
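
get_asyncio_desc() resolves the trapped address to an async I/O descriptor pre-registered on the VM, and acrn_insert_asyncio() then signals that descriptor's fd toward the device model, yielding the pCPU afterwards if need_reschedule() says so. The sketch below shows only the lookup step, under assumed types: struct aio_desc_model and its fields are illustrative stand-ins for ACRN's struct asyncio_desc, not its real layout.

#include <stdint.h>
#include <stddef.h>

struct aio_desc_model {
	uint64_t base;	/* start of the registered port/MMIO range */
	uint64_t len;	/* length of the range */
	uint64_t fd;	/* eventfd-like handle the DM waits on */
};

/* Return the matching descriptor, or NULL to fall back to the synchronous
 * ioreq path. */
static struct aio_desc_model *find_asyncio_desc(struct aio_desc_model *descs,
		size_t n, uint64_t addr)
{
	for (size_t i = 0; i < n; i++) {
		if ((addr >= descs[i].base) && (addr < descs[i].base + descs[i].len)) {
			return &descs[i];
		}
	}
	return NULL;
}
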
248 int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_req) in acrn_insert_request() argument
256 if ((vcpu->vm->sw.io_shared_page != NULL) in acrn_insert_request()
257 && (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_FREE)) { in acrn_insert_request()
259 req_buf = (struct acrn_io_request_buffer *)(vcpu->vm->sw.io_shared_page); in acrn_insert_request()
260 cur = vcpu->vcpu_id; in acrn_insert_request()
268 if (vcpu->vm->sw.is_polling_ioreq) { in acrn_insert_request()
282 set_io_req_state(vcpu->vm, vcpu->vcpu_id, ACRN_IOREQ_STATE_PENDING); in acrn_insert_request()
290 if (has_complete_ioreq(vcpu)) { in acrn_insert_request()
295 if (need_reschedule(pcpuid_from_vcpu(vcpu))) { in acrn_insert_request()
300 wait_event(&vcpu->events[VCPU_EVENT_IOREQ]); in acrn_insert_request()
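
acrn_insert_request() is the synchronous slow path. When the vCPU's slot in io_shared_page is FREE, the request is copied in, the slot is flipped to PENDING, and the vCPU then either spins (is_polling_ioreq) or sleeps on VCPU_EVENT_IOREQ until the device model marks it COMPLETE; the has_complete_ioreq() recheck and need_reschedule() yield visible above guard against the completion racing the sleep. A reduced model of that round trip follows, with stubs in place of ACRN's notification and scheduling API; memory-ordering details of the real shared page are ignored here.

#include <stdbool.h>
#include <stdint.h>

enum slot_state { SLOT_PENDING = 0, SLOT_COMPLETE = 1, SLOT_PROCESSING = 2, SLOT_FREE = 3 };

struct req_slot {
	volatile enum slot_state state;
	uint64_t payload;		/* stands in for struct acrn_io_request */
};

struct vm_model {
	struct req_slot slot[8];	/* one slot per vCPU; size illustrative */
	bool polling_mode;		/* is_polling_ioreq analogue */
};

extern void notify_dm(struct vm_model *vm);	/* upcall to the Service VM (stub) */
extern void wait_ioreq_event(uint16_t vcpu_id);	/* wait_event(&vcpu->events[...]) analogue */
extern void cpu_pause(void);			/* PAUSE-style relax (stub) */

static int insert_request_model(struct vm_model *vm, uint16_t vcpu_id, uint64_t payload)
{
	struct req_slot *slot = &vm->slot[vcpu_id];

	if (slot->state != SLOT_FREE) {
		return -1;			/* previous request still in flight */
	}
	slot->payload = payload;		/* publish the payload first */
	slot->state = SLOT_PENDING;		/* then make it visible to the DM */
	notify_dm(vm);

	if (vm->polling_mode) {
		while (slot->state != SLOT_COMPLETE) {
			cpu_pause();		/* spin until the DM finishes */
		}
	} else {
		wait_ioreq_event(vcpu_id);	/* sleep; DM completion wakes us */
	}
	return 0;
}
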
389 static void emulate_mmio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req) in emulate_mmio_complete() argument
395 (void)emulate_instruction(vcpu); in emulate_mmio_complete()
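
emulate_mmio_complete() does not simply store the value the device model returned: it re-runs emulate_instruction() so the value is committed to the destination operand of the trapped instruction. A sketch of that two-phase shape, with an illustrative context struct standing in for ACRN's decode state:

#include <stdint.h>

struct mmio_ctx {
	uint64_t gpa;		/* faulting guest-physical address */
	uint64_t value;		/* filled in by the device model */
	int value_valid;	/* 0 on the first pass, 1 on completion */
};

/* Stand-in for ACRN's emulate_instruction(), not its real signature. */
static int emulate_one(struct mmio_ctx *ctx)
{
	if (ctx->value_valid == 0) {
		return 1;	/* pass 1: value unknown, forward the read to the DM */
	}
	/* pass 2: commit ctx->value to the instruction's destination
	 * register (register writeback elided in this sketch). */
	return 0;
}
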
399 static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req) in complete_ioreq() argument
404 req_buf = (struct acrn_io_request_buffer *)(vcpu->vm->sw.io_shared_page); in complete_ioreq()
407 acrn_io_req = &req_buf->req_slot[vcpu->vcpu_id]; in complete_ioreq()
409 switch (vcpu->req.io_type) { in complete_ioreq()
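
complete_ioreq() is the mirror image of the insert path: it reads the device model's result out of this vCPU's req_slot in the shared page, switching on vcpu->req.io_type to pick the right field, and returns the slot to FREE; for MMIO reads the second emulate_instruction() pass shown above then consumes the value. A reduced copy-back model follows; the slot layout is a stand-in for struct acrn_io_request and only the read-value round trip is shown.

#include <stdint.h>
#include <stddef.h>

struct slot_model {
	uint32_t dir;		/* 0 = read, 1 = write (illustrative encoding) */
	uint64_t value;		/* DM deposits the read result here */
	volatile int state;	/* ACRN_IOREQ_STATE_*-like state word */
};

static void complete_ioreq_model(struct slot_model *slot, uint64_t *out_value)
{
	if ((out_value != NULL) && (slot->dir == 0U)) {
		*out_value = slot->value;	/* latch the DM's read result */
	}
	slot->state = 3;	/* back to FREE (ACRN_IOREQ_STATE_FREE == 3) */
}
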
440 static void dm_emulate_pio_complete(struct acrn_vcpu *vcpu) in dm_emulate_pio_complete() argument
442 struct io_request *io_req = &vcpu->req; in dm_emulate_pio_complete()
444 complete_ioreq(vcpu, io_req); in dm_emulate_pio_complete()
446 emulate_pio_complete(vcpu, io_req); in dm_emulate_pio_complete()
459 static void dm_emulate_mmio_complete(struct acrn_vcpu *vcpu) in dm_emulate_mmio_complete() argument
461 struct io_request *io_req = &vcpu->req; in dm_emulate_mmio_complete()
463 complete_ioreq(vcpu, io_req); in dm_emulate_mmio_complete()
465 emulate_mmio_complete(vcpu, io_req); in dm_emulate_mmio_complete()
473 static void dm_emulate_io_complete(struct acrn_vcpu *vcpu) in dm_emulate_io_complete() argument
475 if (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE) { in dm_emulate_io_complete()
480 if (vcpu->state == VCPU_ZOMBIE) { in dm_emulate_io_complete()
481 complete_ioreq(vcpu, NULL); in dm_emulate_io_complete()
483 switch (vcpu->req.io_type) { in dm_emulate_io_complete()
485 dm_emulate_mmio_complete(vcpu); in dm_emulate_io_complete()
498 dm_emulate_pio_complete(vcpu); in dm_emulate_io_complete()
506 complete_ioreq(vcpu, NULL); in dm_emulate_io_complete()
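
dm_emulate_io_complete() consumes a finished request: it bails out unless the slot is COMPLETE, merely frees the slot for a VCPU_ZOMBIE vCPU being torn down, and otherwise dispatches on io_type to the PIO or MMIO completion wrapper, with unrecognized types also just releasing the slot. The dispatch, sketched with stub predicates in place of ACRN's real API:

#include <stdbool.h>

enum req_type { REQ_PIO, REQ_MMIO, REQ_OTHER };

extern bool slot_complete(void);	/* ACRN_IOREQ_STATE_COMPLETE check */
extern bool vcpu_is_zombie(void);
extern void free_slot(void);		/* complete_ioreq(vcpu, NULL) analogue */
extern void finish_pio(void);		/* dm_emulate_pio_complete analogue */
extern void finish_mmio(void);		/* dm_emulate_mmio_complete analogue */

static void io_complete_model(enum req_type t)
{
	if (!slot_complete()) {
		return;			/* nothing to consume yet */
	}
	if (vcpu_is_zombie()) {
		free_slot();		/* tearing down: drop the result */
		return;
	}
	switch (t) {
	case REQ_MMIO:	finish_mmio();	break;
	case REQ_PIO:	finish_pio();	break;
	default:	free_slot();	break;	/* unexpected type: just release */
	}
}
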
519 static bool pio_default_read(struct acrn_vcpu *vcpu, in pio_default_read() argument
522 struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request; in pio_default_read()
534 static bool pio_default_write(__unused struct acrn_vcpu *vcpu, __unused uint16_t addr, in pio_default_write() argument
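
pio_default_read() and pio_default_write() are the fallbacks installed for ports no handler claims. The listing only shows pio_default_read() filling vcpu->req.reqs.pio_request; the sketch below assumes the conventional "no device" behavior, reads returning all ones (a floating-bus value, width-masked) and writes being dropped, which is an assumption rather than a quote of ACRN's bodies.

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct pio_req_model {
	uint32_t value;
};

static bool default_pio_read(struct pio_req_model *req, uint16_t addr, size_t sz)
{
	(void)addr;
	/* All ones, masked to the access width; the sz == 4 case avoids an
	 * undefined 32-bit shift. */
	req->value = (sz < 4U) ? ((1U << (sz * 8U)) - 1U) : ~0U;
	return true;	/* handled: no further emulation needed */
}

static bool default_pio_write(uint16_t addr, size_t sz, uint32_t v)
{
	(void)addr; (void)sz; (void)v;	/* writes to unclaimed ports are ignored */
	return true;
}
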
583 hv_emulate_pio(struct acrn_vcpu *vcpu, struct io_request *io_req) in hv_emulate_pio() argument
588 struct acrn_vm *vm = vcpu->vm; in hv_emulate_pio()
594 if (is_service_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) { in hv_emulate_pio()
619 if (io_write(vcpu, port, size, pio_req->value)) { in hv_emulate_pio()
623 if (io_read(vcpu, port, size)) { in hv_emulate_pio()
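
hv_emulate_pio() walks the VM's table of registered port ranges and, on a hit, routes through io_write()/io_read(); the is_service_vm()/is_prelaunched_vm() check decides whether a miss falls back to the defaults above or is forwarded to the device model. A sketch of the scan with an assumed table layout; ACRN's real per-VM handler arrays differ in detail.

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct pio_handler_model {
	uint16_t base, len;
	bool (*read)(uint16_t port, size_t size, uint32_t *val);
	bool (*write)(uint16_t port, size_t size, uint32_t val);
};

/* val supplies the value to write and receives the value read.
 * Returns 0 if a handler claimed the access, <0 to punt to the DM. */
static int scan_pio_handlers(struct pio_handler_model *tbl, size_t n,
		bool is_write, uint16_t port, size_t size, uint32_t *val)
{
	for (size_t i = 0; i < n; i++) {
		struct pio_handler_model *h = &tbl[i];
		/* the whole access must fit inside the registered range */
		if ((port >= h->base) && ((uint32_t)port + size <= (uint32_t)h->base + h->len)) {
			bool ok = is_write ? h->write(port, size, *val)
					   : h->read(port, size, val);
			return ok ? 0 : -1;
		}
	}
	return -1;	/* no match: default handler or device model */
}
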
647 hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req) in hv_emulate_mmio() argument
658 if (is_service_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) { in hv_emulate_mmio()
665 spinlock_obtain(&vcpu->vm->emul_mmio_lock); in hv_emulate_mmio()
666 for (idx = 0U; idx < vcpu->vm->nr_emul_mmio_regions; idx++) { in hv_emulate_mmio()
667 mmio_handler = &(vcpu->vm->emul_mmio[idx]); in hv_emulate_mmio()
693 spinlock_release(&vcpu->vm->emul_mmio_lock); in hv_emulate_mmio()
697 spinlock_obtain(&vcpu->vm->emul_mmio_lock); in hv_emulate_mmio()
700 spinlock_release(&vcpu->vm->emul_mmio_lock); in hv_emulate_mmio()
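
hv_emulate_mmio() does the same walk for MMIO regions, but under emul_mmio_lock so a concurrent register/unregister cannot tear the table, and the matching region must fully contain the access. Sketch with reduced types; the loop bound uses '<' against a count, consistent with the nr_emul_mmio_regions naming above.

#include <stdint.h>
#include <stddef.h>

struct mmio_region_model {
	uint64_t start, end;			 /* [start, end) guest-physical range */
	int (*handler)(void *ctx, uint64_t gpa); /* illustrative callback shape */
	void *ctx;
};

static int scan_mmio_regions(struct mmio_region_model *tbl, size_t nr,
		uint64_t gpa, uint64_t size)
{
	int ret = -1;				/* -1: unhandled, forward to DM */
	/* spinlock_obtain(&emul_mmio_lock) in the real code */
	for (size_t idx = 0; idx < nr; idx++) {
		struct mmio_region_model *r = &tbl[idx];
		if ((r->handler != NULL) && (gpa >= r->start) && (gpa + size <= r->end)) {
			ret = r->handler(r->ctx, gpa);
			break;
		}
	}
	/* spinlock_release(&emul_mmio_lock) */
	return ret;
}
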
725 emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req) in emulate_io() argument
731 vm_config = get_vm_config(vcpu->vm->vm_id); in emulate_io()
735 status = hv_emulate_pio(vcpu, io_req); in emulate_io()
737 emulate_pio_complete(vcpu, io_req); in emulate_io()
742 status = hv_emulate_mmio(vcpu, io_req); in emulate_io()
744 emulate_mmio_complete(vcpu, io_req); in emulate_io()
759 aio_desc = get_asyncio_desc(vcpu, io_req); in emulate_io()
761 status = acrn_insert_asyncio(vcpu, aio_desc->asyncio_info.fd); in emulate_io()
763 status = acrn_insert_request(vcpu, io_req); in emulate_io()
765 dm_emulate_io_complete(vcpu); in emulate_io()
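
emulate_io() ties the pieces together: it tries in-hypervisor PIO/MMIO emulation first, finishing locally via emulate_pio_complete()/emulate_mmio_complete() on success, and only on a miss routes the access out, preferring a registered asyncio descriptor over the heavier synchronous ioreq round trip through acrn_insert_request() and dm_emulate_io_complete(). The routing, sketched with stubs for the models above:

enum io_kind { KIND_PIO, KIND_MMIO };

extern int hv_try_emulate(enum io_kind kind);	/* 0 = handled, <0 = no handler */
extern int find_asyncio(void);			/* nonzero if an asyncio fd is registered */
extern int kick_asyncio(void);			/* signal the DM, no blocking */
extern int post_ioreq_and_wait(void);		/* shared-page round trip */
extern void finish_locally(enum io_kind kind);	/* emulate_*_complete analogue */

static int route_io(enum io_kind kind)
{
	int status = hv_try_emulate(kind);

	if (status == 0) {
		finish_locally(kind);		/* handled inside the hypervisor */
	} else if (find_asyncio()) {
		status = kick_asyncio();	/* fire-and-forget to the DM */
	} else {
		status = post_ioreq_and_wait();	/* synchronous DM round trip */
		if (status == 0) {
			finish_locally(kind);	/* dm_emulate_io_complete analogue */
		}
	}
	return status;
}
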