/hypervisor/arch/x86/guest/
vcpu.c
    43    &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_get_gpreg()
    51    &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_set_gpreg()
    59    &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_get_rip()
    77    &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_get_rsp()
    85    &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_set_rsp()
    94    &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_get_efer()
    119   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_get_rflags()
    563   vcpu->vm->vm_id, vcpu->vcpu_id,  in create_vcpu()
    723   vcpu->vm->vm_id, vcpu->vcpu_id);  in run_vcpu()
    760   vcpu->vm->vm_id, vcpu->vcpu_id);  in run_vcpu()
    [all …]
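
The accessor hits at lines 43-119 all resolve the same expression: the run context of whichever guest context (world) is currently active on the vCPU. Below is a minimal, self-contained sketch of that pattern; the structure layout and the `gprs` field name are simplified stand-ins for the real definitions in vcpu.h, not the exact ACRN types.

```c
#include <stdint.h>

#define NR_WORLD  2U             /* normal world + secure world (Trusty) */

struct run_context {
	uint64_t gprs[16];       /* simplified general-purpose register file */
	uint64_t rip;
	uint64_t rsp;
	uint64_t rflags;
};

struct guest_cpu_context {
	struct run_context run_ctx;
};

struct acrn_vcpu {
	struct {
		uint16_t cur_context;                        /* index of the active world */
		struct guest_cpu_context contexts[NR_WORLD];
	} arch;
};

/* Each accessor first resolves the active context, then touches its run_ctx. */
static uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
{
	const struct run_context *ctx =
		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

	return ctx->gprs[reg];
}

static void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
{
	struct run_context *ctx =
		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

	ctx->gprs[reg] = val;
}
```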
|
vmexit.c
    296   vcpu_inject_ud(vcpu);  in mwait_monitor_vmexit_handler()
    322   vcpu_retain_rip(vcpu);  in mtf_vmexit_handler()
    349   if ((vcpu->arch.pending_req == 0UL) && (!vlapic_has_pending_intr(vcpu))) {  in hlt_vmexit_handler()
    398   if (vcpu->arch.xsave_enabled && ((vcpu_get_cr4(vcpu) & CR4_OSXSAVE) != 0UL)) {  in xsetbv_vmexit_handler()
    432   vcpu_inject_ud(vcpu);  in xsetbv_vmexit_handler()
    448   if (is_rt_vm(vcpu->vm)) {  in wbinvd_vmexit_handler()
    454   if (other != vcpu) {  in wbinvd_vmexit_handler()
    462   if (other != vcpu) {  in wbinvd_vmexit_handler()
    479   vcpu_inject_gp(vcpu, 0);  in loadiwkey_vmexit_handler()
    511   vcpu_retain_rip(vcpu);  in init_signal_vmexit_handler()
    [all …]
|
vmsr.c
    382   vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].value = 0;  in prepare_auto_msr_area()
    384   vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].value = 0;  in prepare_auto_msr_area()
    404   vcpu->arch.msr_area.index_of_pqr_assoc = vcpu->arch.msr_area.count;  in prepare_auto_msr_area()
    408   vcpu->vm->vm_id, vcpu->vcpu_id, hv_clos, vcpu_clos);  in prepare_auto_msr_area()
    440   init_vcat_msrs(vcpu);  in init_emulated_msrs()
    565   init_vmx_msrs(vcpu);  in init_msr_emulation()
    873   __func__, vcpu->vm->vm_id, vcpu->vcpu_id, msr);  in rdmsr_vmexit_handler()
    1018  vcpu_set_efer(vcpu, vcpu_get_efer(vcpu) & ~MSR_IA32_EFER_NXE_BIT);  in set_guest_ia32_misc_enalbe()
    1275  vcpu->vm->arch_vm.iwkey_backup = vcpu->arch.IWKey;  in wrmsr_vmexit_handler()
    1295  vcpu->arch.IWKey = vcpu->vm->arch_vm.iwkey_backup;  in wrmsr_vmexit_handler()
    [all …]
|
virq.c
    133   kick_vcpu(vcpu);  in vcpu_make_request()
    147   vm = vcpu->vm;  in vcpu_do_pending_extint()
    152   if (vcpu == primary) {  in vcpu_do_pending_extint()
    254   vcpu_retain_rip(vcpu);  in vcpu_inject_exception()
    262   vcpu_set_rflags(vcpu, vcpu_get_rflags(vcpu) | HV_ARCH_VCPU_RFLAGS_RF);  in vcpu_inject_exception()
    309   (void)vlapic_set_local_intr(vcpu->vm, vcpu->vcpu_id, APIC_LVT_THERMAL);  in vcpu_inject_thermal_interrupt()
    324   vcpu_retain_rip(vcpu);  in interrupt_window_vmexit_handler()
    339   vcpu_retain_rip(vcpu);  in external_interrupt_vmexit_handler()
    370   init_vmcs(vcpu);  in acrn_handle_pending_request()
    448   if (!is_lapic_pt_enabled(vcpu) && !vcpu->arch.emulating_lock) {  in acrn_handle_pending_request()
    [all …]
|
lock_instr_emul.c
    87    …if ((is_ac_enabled() && !is_guest_ac_enabled(vcpu)) || (is_gp_enabled() && !is_guest_gp_enabled(vc…  in emulate_lock_instr()
    91    status = copy_from_gva(vcpu, inst, vcpu_get_rip(vcpu), 1U, &err_code, &fault_addr);  in emulate_lock_instr()
    110   vcpu_kick_lock_instr_emulation(vcpu);  in emulate_lock_instr()
    115   vcpu->arch.inst_len = 1U;  in emulate_lock_instr()
    116   if (vcpu->vm->hw.created_vcpus > 1U) {  in emulate_lock_instr()
    120   vcpu->arch.emulating_lock = true;  in emulate_lock_instr()
    132   if (is_current_opcode_xchg(vcpu)) {  in emulate_lock_instr()
    137   vcpu_kick_lock_instr_emulation(vcpu);  in emulate_lock_instr()
    143   vcpu->arch.emulating_lock = true;  in emulate_lock_instr()
    144   status = emulate_instruction(vcpu);  in emulate_lock_instr()
    [all …]
|
virtual_cr.c
    245   vcpu_inject_gp(vcpu, 0U);  in vmx_write_cr0()
    263   vcpu_set_efer(vcpu, vcpu_get_efer(vcpu) | MSR_IA32_EFER_LMA_BIT);  in vmx_write_cr0()
    266   if (is_pae(vcpu)) {  in vmx_write_cr0()
    284   vcpu_set_efer(vcpu, vcpu_get_efer(vcpu) & ~MSR_IA32_EFER_LMA_BIT);  in vmx_write_cr0()
    402   load_iwkey(vcpu);  in vmx_write_cr4()
    522   struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_get_cr0()
    534   vmx_write_cr0(vcpu, val);  in vcpu_set_cr0()
    539   return vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.cr2;  in vcpu_get_cr2()
    544   vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.cr2 = val;  in vcpu_set_cr2()
    550   struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;  in vcpu_get_cr4()
    [all …]
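
The vmx_write_cr0() hits at lines 263 and 284 set and clear IA32_EFER.LMA when CR0 is written. Architecturally, LMA reflects whether long mode is actually active, i.e. paging enabled while EFER.LME is set. The helper below is a hypothetical, stand-alone restatement of that rule, not the ACRN function itself; the bit positions are the standard Intel SDM values.

```c
#include <stdint.h>
#include <stdbool.h>

/* Architectural bit positions (Intel SDM). */
#define CR0_PG                 (1UL << 31U)
#define MSR_IA32_EFER_LME_BIT  (1UL << 8U)
#define MSR_IA32_EFER_LMA_BIT  (1UL << 10U)

/* EFER.LMA = EFER.LME && CR0.PG, mirroring the pattern at lines 263/284. */
static uint64_t update_efer_lma(uint64_t efer, uint64_t cr0)
{
	bool long_mode_active = ((cr0 & CR0_PG) != 0UL) &&
				((efer & MSR_IA32_EFER_LME_BIT) != 0UL);

	if (long_mode_active) {
		efer |= MSR_IA32_EFER_LMA_BIT;
	} else {
		efer &= ~MSR_IA32_EFER_LMA_BIT;
	}
	return efer;
}
```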
|
nested.c
    741   vcpu_inject_ud(vcpu);  in vmxon_vmexit_handler()
    745   vcpu_inject_ud(vcpu);  in vmxon_vmexit_handler()
    750   vcpu_inject_gp(vcpu, 0U);  in vmxon_vmexit_handler()
    765   reset_vvmcs(vcpu);  in vmxon_vmexit_handler()
    770   vcpu_inject_ud(vcpu);  in vmxon_vmexit_handler()
    788   vcpu_inject_ud(vcpu);  in check_vmx_permission()
    791   vcpu_inject_gp(vcpu, 0U);  in check_vmx_permission()
    810   reset_vvmcs(vcpu);  in vmxoff_vmexit_handler()
    874   clear_vvmcs(vcpu, vvmcs);  in get_or_replace_vvmcs_entry()
    1463  vcpu_retain_rip(vcpu);  in nested_vmexit_handler()
    [all …]
|
optee.c
    65    rdi = vcpu_get_gpreg(vcpu, CPU_REG_RDI);  in tee_switch_to_ree()
    66    rsi = vcpu_get_gpreg(vcpu, CPU_REG_RSI);  in tee_switch_to_ree()
    70    ree_vm = get_companion_vm(vcpu->vm);  in tee_switch_to_ree()
    107   sleep_thread(&vcpu->thread_obj);  in tee_switch_to_ree()
    130   tee_vm = get_companion_vm(vcpu->vm);  in ree_switch_to_tee()
    162   ree_vm = get_companion_vm(vcpu->vm);  in hcall_handle_tee_vcpu_boot_done()
    166   sleep_thread(&vcpu->thread_obj);  in hcall_handle_tee_vcpu_boot_done()
    176   if (is_tee_vm(vcpu->vm)) {  in hcall_switch_ee()
    177   ret = tee_switch_to_ree(vcpu);  in hcall_switch_ee()
    178   } else if (is_ree_vm(vcpu->vm)) {  in hcall_switch_ee()
    [all …]
|
trusty.c
    108   (void)vcpu_get_efer(vcpu);  in save_world_ctx()
    110   (void)vcpu_get_rsp(vcpu);  in save_world_ctx()
    111   (void)vcpu_get_rip(vcpu);  in save_world_ctx()
    112   (void)vcpu_get_cr0(vcpu);  in save_world_ctx()
    113   (void)vcpu_get_cr4(vcpu);  in save_world_ctx()
    159   vcpu->arch.contexts[vcpu->arch.cur_context].world_msrs[i] = vcpu->arch.guest_msrs[i];  in save_world_ctx()
    212   vcpu->arch.guest_msrs[i] = vcpu->arch.contexts[!vcpu->arch.cur_context].world_msrs[i];  in load_world_ctx()
    282   (uint8_t *)vcpu->vm->name, strnlen_s(vcpu->vm->name, MAX_VM_NAME_LEN))) {  in setup_trusty_info()
    336   vcpu->arch.contexts[NORMAL_WORLD].world_msrs[i] = vcpu->arch.guest_msrs[i];  in init_secure_world_env()
    337   vcpu->arch.contexts[SECURE_WORLD].world_msrs[i] = vcpu->arch.guest_msrs[i];  in init_secure_world_env()
    [all …]
|
vmx_io.c
    28    struct acrn_vcpu *vcpu;  in arch_fire_hsm_interrupt()  local
    56    vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);  in emulate_pio_complete()
    72    struct io_request *io_req = &vcpu->req;  in pio_instr_vmexit_handler()
    94    status = emulate_io(vcpu, io_req);  in pio_instr_vmexit_handler()
    104   struct io_request *io_req = &vcpu->req;  in ept_violation_vmexit_handler()
    118   ept_modify_mr(vcpu->vm, (uint64_t *)vcpu->vm->arch_vm.nworld_eptp,  in ept_violation_vmexit_handler()
    121   ept_modify_mr(vcpu->vm, (uint64_t *)vcpu->vm->arch_vm.sworld_eptp,  in ept_violation_vmexit_handler()
    124   vcpu_retain_rip(vcpu);  in ept_violation_vmexit_handler()
    153   ret = decode_instruction(vcpu, true);  in ept_violation_vmexit_handler()
    164   status = emulate_instruction(vcpu);  in ept_violation_vmexit_handler()
    [all …]
|
vm_reset.c
    18    void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)  in triple_fault_shutdown_vm()  argument
    20    struct acrn_vm *vm = vcpu->vm;  in triple_fault_shutdown_vm()
    24    struct io_request *io_req = &vcpu->req;  in triple_fault_shutdown_vm()
    35    (void)send_vm_event(vcpu->vm, &trp_event);  in triple_fault_shutdown_vm()
    38    (void)emulate_io(vcpu, io_req);  in triple_fault_shutdown_vm()
    76    struct acrn_vm *vm = vcpu->vm;  in handle_reset_reg_read()
    93    struct acrn_vm *vm = vcpu->vm;  in handle_common_reset_reg_write()
    143   if (is_service_vm(vcpu->vm) && (bytes == 1U)) {  in handle_kb_read()
    148   vcpu->req.reqs.pio_request.value = ~0U;  in handle_kb_read()
    168   struct acrn_vm *vm = vcpu->vm;  in handle_cf9_write()
    [all …]
|
instr_emul.c
    803   vie_mmio_write(vcpu, val);  in emulate_mov()
    811   vie_mmio_read(vcpu, &val);  in emulate_mov()
    821   vie_mmio_read(vcpu, &val);  in emulate_mov()
    832   vie_mmio_read(vcpu, &val);  in emulate_mov()
    845   vie_mmio_write(vcpu, val);  in emulate_mov()
    1054  vcpu_inject_gp(vcpu, 0U);  in get_gva_di_check()
    1151  vcpu_retain_rip(vcpu);  in emulate_movs()
    1206  vcpu_retain_rip(vcpu);  in emulate_stos()
    1716  vie->vcpu = vcpu;  in vie_init()
    2339  vcpu_inject_ss(vcpu);  in instr_check_gva()
    [all …]
|
vmcs.c
    26    struct guest_cpu_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context];  in init_guest_vmx()
    31    vcpu_set_cr4(vcpu, cr4);  in init_guest_vmx()
    32    vcpu_set_cr0(vcpu, cr0);  in init_guest_vmx()
    77    struct guest_cpu_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context];  in init_guest_state()
    266   struct acrn_vm *vm = vcpu->vm;  in init_exec_ctrl()
    454   init_msr_emulation(vcpu);  in init_exec_ctrl()
    580   init_exec_ctrl(vcpu);  in init_vmcs()
    581   init_guest_state(vcpu);  in init_vmcs()
    582   init_entry_ctrl(vcpu);  in init_vmcs()
    583   init_exit_ctrl(vcpu);  in init_vmcs()
    [all …]
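
Lines 580-583 show init_vmcs() invoking its four per-area setup routines in a fixed order. The outline below restates that order with stub bodies so it compiles on its own; the per-line comments summarizing what each stage programs are an interpretation, not verbatim ACRN documentation.

```c
struct acrn_vcpu;                /* opaque for this sketch */

/* Stub bodies standing in for the routines named at lines 580-583. */
static void init_exec_ctrl(struct acrn_vcpu *vcpu)   { (void)vcpu; }
static void init_guest_state(struct acrn_vcpu *vcpu) { (void)vcpu; }
static void init_entry_ctrl(struct acrn_vcpu *vcpu)  { (void)vcpu; }
static void init_exit_ctrl(struct acrn_vcpu *vcpu)   { (void)vcpu; }

/* Call order as listed; each stage programs one group of VMCS fields. */
static void init_vmcs_outline(struct acrn_vcpu *vcpu)
{
	init_exec_ctrl(vcpu);    /* VM-execution controls (also reaches init_msr_emulation, line 454) */
	init_guest_state(vcpu);  /* guest register and segment state from the current context */
	init_entry_ctrl(vcpu);   /* VM-entry controls */
	init_exit_ctrl(vcpu);    /* VM-exit controls */
}
```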
|
vcat.c
    208   struct acrn_vm *vm = vcpu->vm;  in read_vcbm()
    211   *rval = vcpu_get_guest_msr(vcpu, vmsr);  in read_vcbm()
    264   struct acrn_vm *vm = vcpu->vm;  in propagate_vcbm()
    266   get_cache_id(vcpu, &l2_id, &l3_id);  in propagate_vcbm()
    313   struct acrn_vm *vm = vcpu->vm;  in write_vcbm()
    355   propagate_vcbm(vcpu, vmsr, val);  in write_vcbm()
    381   if (is_vcat_configured(vcpu->vm)) {  in read_vclosid()
    398   if (is_vcat_configured(vcpu->vm)) {  in write_vclosid()
    421   vcpu->arch.msr_area.guest[vcpu->arch.msr_area.index_of_pqr_assoc].value = clos2pqr_msr(pclosid);  in write_vclosid()
    468   if (is_vcat_configured(vcpu->vm)) {  in init_vcat_msrs()
    [all …]
|
hyperv.c
    77    p->tsc_scale = vcpu->vm->arch_vm.hyperv.tsc_scale;  in hyperv_setup_tsc_page()
    78    p->tsc_offset = vcpu->vm->arch_vm.hyperv.tsc_offset;  in hyperv_setup_tsc_page()
    135   page_hva = gpa2hva(vcpu->vm, page_gpa);  in hyperv_setup_hypercall_page()
    139   if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {  in hyperv_setup_hypercall_page()
    156   vcpu->vm->arch_vm.hyperv.guest_os_id.val64 = wval;  in hyperv_wrmsr()
    167   hyperv_setup_hypercall_page(vcpu, wval);  in hyperv_wrmsr()
    170   hyperv_setup_tsc_page(vcpu, wval);  in hyperv_wrmsr()
    185   __func__, msr, wval, vcpu->vcpu_id, vcpu->vm->vm_id);  in hyperv_wrmsr()
    203   *rval = vcpu->vcpu_id;  in hyperv_rdmsr()
    206   *rval = hyperv_get_ReferenceTime(vcpu->vm);  in hyperv_rdmsr()
    [all …]
|
guest_memory.c
    34    enum vm_paging_mode get_vcpu_paging_mode(struct acrn_vcpu *vcpu)  in get_vcpu_paging_mode()  argument
    38    if (is_paging_enabled(vcpu)) {  in get_vcpu_paging_mode()
    39    if (is_pae(vcpu)) {  in get_vcpu_paging_mode()
    40    if (is_long_mode(vcpu)) {  in get_vcpu_paging_mode()
    80    base = gpa2hva(vcpu->vm, addr);  in local_gva2gpa_common()
    180   base = (uint64_t *)gpa2hva(vcpu->vm, addr);  in local_gva2gpa_pae()
    218   enum vm_paging_mode pm = get_vcpu_paging_mode(vcpu);  in gva2gpa()
    242   pw_info.wp = ((vcpu_get_cr0(vcpu) & CR0_WP) != 0UL);  in gva2gpa()
    253   ret = local_gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);  in gva2gpa()
    256   pw_info.pse = ((vcpu_get_cr4(vcpu) & CR4_PSE) != 0UL);  in gva2gpa()
    [all …]
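
The nested checks at lines 38-40 (paging enabled, then PAE, then long mode) select the guest paging mode that the gva2gpa() walkers use. Below is a hypothetical, self-contained version of that decision tree; the enum and helper names are illustrative rather than the exact definitions from the headers, and the bit positions are the standard architectural ones.

```c
#include <stdint.h>

/* Architectural control bits (Intel SDM). */
#define CR0_PG                 (1UL << 31U)
#define CR4_PAE                (1UL << 5U)
#define MSR_IA32_EFER_LMA_BIT  (1UL << 10U)

enum vm_paging_mode {
	PAGING_MODE_0_LEVEL = 0,  /* paging disabled */
	PAGING_MODE_2_LEVEL,      /* 32-bit paging */
	PAGING_MODE_3_LEVEL,      /* PAE paging */
	PAGING_MODE_4_LEVEL,      /* 4-level (long mode) paging */
};

/* Hypothetical stand-alone form of the decision tree at lines 38-40. */
static enum vm_paging_mode paging_mode_from_regs(uint64_t cr0, uint64_t cr4, uint64_t efer)
{
	enum vm_paging_mode pm = PAGING_MODE_0_LEVEL;

	if ((cr0 & CR0_PG) != 0UL) {
		if ((cr4 & CR4_PAE) != 0UL) {
			pm = ((efer & MSR_IA32_EFER_LMA_BIT) != 0UL) ?
				PAGING_MODE_4_LEVEL : PAGING_MODE_3_LEVEL;
		} else {
			pm = PAGING_MODE_2_LEVEL;
		}
	}
	return pm;
}
```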
|
/hypervisor/include/arch/x86/asm/guest/
vcpu.h
    143   #define foreach_vcpu(idx, vm, vcpu) \  argument
    147   if ((vcpu)->state != VCPU_OFFLINE)
    329   struct acrn_vcpu *vcpu;  member
    335   struct acrn_vcpu *vcpu;  member
    342   return (vcpu->vcpu_id == BSP_CPU_ID);  in is_vcpu_bsp()
    347   return vcpu->arch.cpu_mode;  in get_vcpu_mode()
    353   (vcpu)->arch.inst_len = 0U;  in vcpu_retain_rip()
    358   return &(vcpu->arch.vlapic);  in vcpu_vlapic()
    372   return &(vcpu->arch.pid);  in get_pi_desc()
    688   void kick_vcpu(struct acrn_vcpu *vcpu);
    [all …]
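
The fragments at lines 143 and 147 show that foreach_vcpu() ends with an offline-state filter, so the loop body only ever sees vCPUs that have actually been created. A usage sketch, assuming the iterator and kick_vcpu() declarations listed above; kick_all_vcpus() is a hypothetical caller, not an ACRN function.

```c
/* Iterate every online vCPU of a VM and kick it out of guest mode. */
static void kick_all_vcpus(struct acrn_vm *vm)
{
	struct acrn_vcpu *vcpu;
	uint16_t idx;

	foreach_vcpu(idx, vm, vcpu) {
		/* body runs only for vCPUs whose state is not VCPU_OFFLINE */
		kick_vcpu(vcpu);    /* declared at vcpu.h:688 */
	}
}
```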
|
virq.h
    44    void vcpu_inject_extint(struct acrn_vcpu *vcpu);
    53    void vcpu_inject_nmi(struct acrn_vcpu *vcpu);
    63    void vcpu_inject_gp(struct acrn_vcpu *vcpu, uint32_t err_code);
    83    void vcpu_inject_ud(struct acrn_vcpu *vcpu);
    92    void vcpu_inject_ss(struct acrn_vcpu *vcpu);
    103   void vcpu_inject_thermal_interrupt(struct acrn_vcpu *vcpu);
    104   void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid);
    109   int32_t exception_vmexit_handler(struct acrn_vcpu *vcpu);
    110   int32_t nmi_window_vmexit_handler(struct acrn_vcpu *vcpu);
    111   int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu);
    [all …]
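
These injection helpers pair with the vmexit.c hits earlier in this listing, where unsupported instructions get #UD and privilege or validity violations get #GP(0). A usage sketch against the declarations above; reject_unsupported_op() is a hypothetical wrapper, not ACRN code.

```c
/* Pick the architecturally appropriate fault for a rejected guest operation. */
static int32_t reject_unsupported_op(struct acrn_vcpu *vcpu, bool priv_violation)
{
	if (priv_violation) {
		vcpu_inject_gp(vcpu, 0U);   /* #GP with error code 0 */
	} else {
		vcpu_inject_ud(vcpu);       /* #UD carries no error code */
	}
	return 0;
}
```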
|
virtual_cr.h
    34    uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu);
    44    void vcpu_set_cr0(struct acrn_vcpu *vcpu, uint64_t val);
    55    uint64_t vcpu_get_cr2(const struct acrn_vcpu *vcpu);
    65    void vcpu_set_cr2(struct acrn_vcpu *vcpu, uint64_t val);
    76    uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu);
    86    void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val);
    93    int32_t cr_access_vmexit_handler(struct acrn_vcpu *vcpu);
|
vlapic.h
    93    bool (*has_pending_delivery_intr)(struct acrn_vcpu *vcpu);
    94    bool (*has_pending_intr)(struct acrn_vcpu *vcpu);
    113   bool vlapic_has_pending_delivery_intr(struct acrn_vcpu *vcpu);
    114   bool vlapic_has_pending_intr(struct acrn_vcpu *vcpu);
    120   uint32_t vlapic_get_next_pending_intr(struct acrn_vcpu *vcpu);
    192   void vlapic_create(struct acrn_vcpu *vcpu, uint16_t pcpu_id);
    196   void vlapic_free(struct acrn_vcpu *vcpu);
    202   int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu);
    203   int32_t apic_write_vmexit_handler(struct acrn_vcpu *vcpu);
    204   int32_t veoi_vmexit_handler(struct acrn_vcpu *vcpu);
    [all …]
|
/hypervisor/common/
hv_main.c
    24    if (!is_lapic_pt_enabled(vcpu)) {  in vcpu_thread()
    37    get_vm_lock(vcpu->vm);  in vcpu_thread()
    38    zombie_vcpu(vcpu, VCPU_ZOMBIE);  in vcpu_thread()
    39    put_vm_lock(vcpu->vm);  in vcpu_thread()
    45    profiling_vmenter_handler(vcpu);  in vcpu_thread()
    48    ret = run_vcpu(vcpu);  in vcpu_thread()
    51    get_vm_lock(vcpu->vm);  in vcpu_thread()
    53    put_vm_lock(vcpu->vm);  in vcpu_thread()
    57    TRACE_2L(TRACE_VM_EXIT, vcpu->arch.exit_reason, vcpu_get_rip(vcpu));  in vcpu_thread()
    65    ret = vmexit_handler(vcpu);  in vcpu_thread()
    [all …]
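
The vcpu_thread() hits trace the per-vCPU scheduling loop: inject pending requests, enter the guest with run_vcpu(), dispatch the exit with vmexit_handler(), and take the vCPU to a zombie state on a fatal error. The sketch below is an approximation of that control flow with stub bodies so it compiles standalone; locking, tracing, profiling, and LAPIC pass-through details are omitted.

```c
#include <stdint.h>

struct acrn_vcpu;                 /* opaque for this sketch */
enum vcpu_state { VCPU_ZOMBIE };

/* Stub bodies for the routines named in the hits above. */
static int32_t acrn_handle_pending_request(struct acrn_vcpu *v) { (void)v; return 0; }
static int32_t run_vcpu(struct acrn_vcpu *v)                    { (void)v; return 0; }
static int32_t vmexit_handler(struct acrn_vcpu *v)              { (void)v; return -1; }
static void zombie_vcpu(struct acrn_vcpu *v, enum vcpu_state s) { (void)v; (void)s; }

/* Approximate control flow: inject pending events, enter the guest, dispatch
 * the VM exit; a fatal error takes the vCPU offline (under the VM lock in the
 * real code, omitted here). */
static void vcpu_thread_sketch(struct acrn_vcpu *vcpu)
{
	int32_t ret = 0;

	do {
		if (acrn_handle_pending_request(vcpu) < 0) {
			break;
		}
		if (run_vcpu(vcpu) != 0) {
			break;
		}
		ret = vmexit_handler(vcpu);
	} while (ret >= 0);

	zombie_vcpu(vcpu, VCPU_ZOMBIE);
}
```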
|
trusty_hypercall.c
    30    int32_t next_world_id = !(vcpu->arch.cur_context);  in hcall_world_switch()
    34    && (vcpu->vm->sworld_control.flag.active != 0UL)) {  in hcall_world_switch()
    35    switch_world(vcpu, next_world_id);  in hcall_world_switch()
    59    if ((vcpu->vm->sworld_control.flag.supported != 0UL)  in hcall_initialize_trusty()
    60    && (vcpu->vm->sworld_control.flag.active == 0UL)  in hcall_initialize_trusty()
    61    && (vcpu->arch.cur_context == NORMAL_WORLD)) {  in hcall_initialize_trusty()
    65    if (initialize_trusty(vcpu, &boot_param)) {  in hcall_initialize_trusty()
    66    vcpu->vm->sworld_control.flag.active = 1UL;  in hcall_initialize_trusty()
    87    struct acrn_vm *vm = vcpu->vm;  in hcall_save_restore_sworld_ctx()
    92    save_sworld_context(vcpu);  in hcall_save_restore_sworld_ctx()
    [all …]
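
hcall_world_switch() computes the target world as the logical negation of the current context index (line 30), which only works because exactly two worlds exist. A tiny self-contained demonstration; the NORMAL_WORLD and SECURE_WORLD values of 0 and 1 are assumptions for illustration.

```c
#include <assert.h>
#include <stdint.h>

/* Assumed world indices; with exactly two worlds, logical negation toggles. */
#define NORMAL_WORLD  0
#define SECURE_WORLD  1

int main(void)
{
	int32_t cur_context = NORMAL_WORLD;
	int32_t next_world_id = !cur_context;       /* same expression as line 30 */

	assert(next_world_id == SECURE_WORLD);

	cur_context = SECURE_WORLD;
	next_world_id = !cur_context;
	assert(next_world_id == NORMAL_WORLD);
	return 0;
}
```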
|
/hypervisor/include/common/
hypercall.h
    73    int32_t hcall_create_vm(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_…
    105   int32_t hcall_reset_vm(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t…
    121   int32_t hcall_start_vm(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t…
    137   int32_t hcall_pause_vm(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t…
    232   int32_t hcall_set_vm_memory_regions(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
    375   int32_t hcall_reset_ptdev_intr_info(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
    462   int32_t hcall_save_restore_sworld_ctx(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
    475   int32_t hcall_handle_tee_vcpu_boot_done(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
    488   int32_t hcall_switch_ee(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
    538   int32_t hcall_asyncio_assign(__unused struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
    [all …]
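
Every hcall_*() entry point above shares one calling convention: the calling vCPU, the target VM, and two raw parameters, returning int32_t. A skeleton under that convention; the typedef name and example handler are hypothetical, not declarations from hypercall.h.

```c
#include <stdint.h>

struct acrn_vcpu;                 /* opaque here */
struct acrn_vm;

/* Common shape of the hypercall handlers listed above. */
typedef int32_t (*hcall_fn_t)(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
			      uint64_t param1, uint64_t param2);

static int32_t hcall_example(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
			     uint64_t param1, uint64_t param2)
{
	(void)vcpu; (void)target_vm; (void)param1; (void)param2;
	return 0;                 /* 0 on success, negative value on error */
}
```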
|
/hypervisor/debug/
dump.c
    79    vcpu->vm->vm_id, vcpu->vcpu_id, pcpu_id,  in dump_guest_reg()
    80    vcpu->arch.cur_context);  in dump_guest_reg()
    82    vcpu_get_rip(vcpu),  in dump_guest_reg()
    84    vcpu_get_rflags(vcpu));  in dump_guest_reg()
    86    vcpu_get_cr0(vcpu),  in dump_guest_reg()
    87    vcpu_get_cr2(vcpu),  in dump_guest_reg()
    118   if (copy_from_gva(vcpu, tmp, vcpu_get_gpreg(vcpu, CPU_REG_RSP),  in dump_guest_stack()
    126   vcpu->vcpu_id, vcpu_get_gpreg(vcpu, CPU_REG_RSP));  in dump_guest_stack()
    140   if (vcpu != NULL) {  in dump_guest_context()
    141   dump_guest_reg(vcpu);  in dump_guest_context()
    [all …]
|
/hypervisor/dm/
io_req.c
    164   return (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE);  in has_complete_ioreq()
    175   struct acrn_vm *vm = vcpu->vm;  in get_asyncio_desc()
    217   struct acrn_vm *vm = vcpu->vm;  in acrn_insert_asyncio()
    257   && (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_FREE)) {  in acrn_insert_request()
    260   cur = vcpu->vcpu_id;  in acrn_insert_request()
    282   set_io_req_state(vcpu->vm, vcpu->vcpu_id, ACRN_IOREQ_STATE_PENDING);  in acrn_insert_request()
    409   switch (vcpu->req.io_type) {  in complete_ioreq()
    444   complete_ioreq(vcpu, io_req);  in dm_emulate_pio_complete()
    475   if (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE) {  in dm_emulate_io_complete()
    594   if (is_service_vm(vcpu->vm) || is_prelaunched_vm(vcpu->vm)) {  in hv_emulate_pio()
    [all …]
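
The io_req.c hits revolve around the per-vCPU I/O request slot shared with the Service VM, which moves through a small state machine: a request can only be inserted into a FREE slot (line 257), is marked PENDING once posted (line 282), and is consumed once the device model marks it COMPLETE (lines 164, 475). The sketch below restates those checks; the enum values and names are inferred for illustration and may not match the exact ACRN definitions.

```c
#include <stdbool.h>

/* Illustrative states; the real constants live in the ACRN ioreq headers. */
enum ioreq_state {
	ACRN_IOREQ_STATE_FREE = 0,     /* slot available for a new request */
	ACRN_IOREQ_STATE_PENDING,      /* posted to the device model, awaiting service */
	ACRN_IOREQ_STATE_COMPLETE,     /* device model finished, result ready */
};

static bool can_insert_request(enum ioreq_state s)
{
	return (s == ACRN_IOREQ_STATE_FREE);       /* cf. acrn_insert_request(), line 257 */
}

static bool has_complete_ioreq_sketch(enum ioreq_state s)
{
	return (s == ACRN_IOREQ_STATE_COMPLETE);   /* cf. has_complete_ioreq(), line 164 */
}
```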
|