| /arch/arm64/kvm/ |
| inject_fault.c |
    138  vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));  in inject_abt64()
    139  vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));  in inject_abt64()
    155  vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));  in inject_undef64()
    226  if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))  in kvm_inject_sea()
    240  __kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);  in kvm_inject_size_fault()
    253  esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));  in kvm_inject_size_fault()
    255  vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));  in kvm_inject_size_fault()
    280  if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu))  in kvm_serror_target_is_el2()
    304  return !(vcpu_el2_tge_is_set(vcpu) || vcpu_el2_amo_is_set(vcpu));  in kvm_serror_undeliverable_at_el2()
    311  if (is_nested_ctxt(vcpu) && kvm_serror_target_is_el2(vcpu))  in kvm_inject_serror_esr()
    [all …]
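The inject_fault.c fragments show the common shape of arm64 exception injection: the faulting address and a syndrome value are written into the FAR/ESR registers of whichever exception level the injected abort targets, with exception_far_elx()/exception_esr_elx() selecting the EL1 or EL2 copy. Below is a minimal sketch of that pattern, reconstructed only from the fragments above; the sketch_ name is hypothetical and the ESR value is a placeholder, not the kernel's real encoding.

```c
/* Sketch only: mirrors the vcpu_write_sys_reg() calls listed above. */
static void sketch_inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, u64 addr)
{
	u64 esr = 0;	/* placeholder: the real code builds EC/IL/ISS here, using is_iabt */

	/* Faulting address goes to FAR_EL1 or FAR_EL2, depending on the target EL. */
	vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
	/* Syndrome goes to the matching ESR register. */
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}
```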
|
| handle_exit.c |
    40   trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),  in handle_hvc()
    49   kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));  in handle_hvc()
    108  return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));  in kvm_handle_fpasimd()
    136  return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));  in kvm_handle_wfx()
    151  val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));  in kvm_handle_wfx()
    162  kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));  in kvm_handle_wfx()
    191  if (!vcpu->guest_debug && forward_debug_exception(vcpu))  in kvm_handle_guest_debug()
    256  kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));  in kvm_handle_ptrauth()
    286  kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));  in kvm_handle_eret()
    298  kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));  in handle_svc()
    [all …]
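Several of the handle_exit.c fragments repeat one idiom: when a trap was really caused by something the guest's own (nested) hypervisor should handle, the exit handler reflects it back as a synchronous exception carrying the original ESR instead of emulating it in the host. A sketch of that idiom, using only calls visible above; the trigger condition is deliberately simplified and the name is hypothetical.

```c
/* Sketch: forward a trapped instruction to the guest hypervisor (nested case). */
static int sketch_forward_to_guest_hyp(struct kvm_vcpu *vcpu)
{
	/* Assumption: simplified condition; each real handler has its own test. */
	if (is_nested_ctxt(vcpu))
		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

	return 1;	/* handled here, resume the guest */
}
```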
|
| psci.c |
    49   kvm_vcpu_wfi(vcpu);  in kvm_psci_vcpu_suspend()
    64   struct kvm_vcpu *vcpu = NULL;  in kvm_psci_vcpu_on()  local
    78   if (!vcpu)  in kvm_psci_vcpu_on()
    114  kvm_vcpu_wake_up(vcpu);  in kvm_psci_vcpu_on()
    128  struct kvm *kvm = vcpu->kvm;  in kvm_psci_vcpu_affinity_info()
    185  memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));  in kvm_prepare_system_event()
    232  vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i)));  in kvm_psci_narrow_to_32bit()
    289  kvm_psci_system_off(vcpu);  in kvm_psci_0_2_call()
    325  struct kvm *kvm = vcpu->kvm;  in kvm_psci_1_x_call()
    334  arg = smccc_get_arg1(vcpu);  in kvm_psci_1_x_call()
    [all …]
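The kvm_psci_narrow_to_32bit() fragment (line 232 above) shows how arguments of SMC32 PSCI calls are sanitised: each argument register is truncated to its low 32 bits in place. A sketch under the assumption that the arguments live in x1..x3; the exact register range is not visible in the listing and the name is hypothetical.

```c
/* Sketch: drop the upper 32 bits of the PSCI argument registers for SMC32 calls. */
static void sketch_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 1; i < 4; i++)		/* assumption: x1..x3 carry the arguments */
		vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i)));
}
```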
|
| /arch/arm64/include/asm/ |
| kvm_emulate.h |
    104  if (!vcpu_has_run_once(vcpu))  in vcpu_reset_hcr()
    140  return vcpu->arch.vsesr_el2;  in vcpu_get_vsesr()
    145  vcpu->arch.vsesr_el2 = vsesr;  in vcpu_set_vsesr()
    231  if (!vcpu_has_nv(vcpu))  in is_hyp_ctxt()
    252  return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);  in vcpu_is_host_el0()
    257  return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);  in is_nested_ctxt()
    262  if (!is_nested_ctxt(vcpu))  in vserror_state_is_nested()
    326  if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))  in guest_hyp_wfx_traps_enabled()
    432  return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);  in kvm_vcpu_trap_is_exec_fault()
    525  if (vcpu_mode_priv(vcpu))  in kvm_vcpu_is_be()
    [all …]
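The kvm_emulate.h fragments quote the nested-virt context predicates directly. Restated as a sketch to show how they compose (the sketch_ names are hypothetical, the expressions are copied from the listing): a vCPU is in nested context when nested virt is enabled but it is not currently running the guest hypervisor's own context, and it is "host EL0" when it is in hyp context without being at virtual EL2.

```c
static inline bool sketch_is_nested_ctxt(struct kvm_vcpu *vcpu)
{
	/* Nested virt enabled, but not currently the guest hypervisor's context. */
	return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
}

static inline bool sketch_vcpu_is_host_el0(struct kvm_vcpu *vcpu)
{
	/* Hyp context, yet not at virtual EL2: the guest hypervisor's EL0. */
	return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
}
```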
|
| /arch/x86/kvm/vmx/ |
| main.c |
    70   if (is_td_vcpu(vcpu))  in vt_vcpu_create()
    79   tdx_vcpu_free(vcpu);  in vt_vcpu_free()
    83   vmx_vcpu_free(vcpu);  in vt_vcpu_free()
    131  tdx_vcpu_put(vcpu);  in vt_vcpu_put()
    135  vmx_vcpu_put(vcpu);  in vt_vcpu_put()
    140  if (is_td_vcpu(vcpu))  in vt_vcpu_pre_run()
    216  if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))  in vt_smi_allowed()
    224  if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))  in vt_enter_smm()
    232  if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))  in vt_leave_smm()
    240  if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))  in vt_enable_smi_window()
    [all …]
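The main.c fragments all follow one dispatch pattern: each vt_* callback tests is_td_vcpu() and routes to the tdx_* or vmx_* implementation, while the SMI-related callbacks treat a TD vCPU as a bug. A sketch of the free path, assembled from the lines above; the sketch_ name is hypothetical.

```c
/* Sketch: route the vCPU-free callback to TDX or plain VMX. */
static void sketch_vt_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu)) {
		tdx_vcpu_free(vcpu);
		return;
	}

	vmx_vcpu_free(vcpu);
}
```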
|
| x86_ops.h |
    22   int vmx_vcpu_create(struct kvm_vcpu *vcpu);
    23   int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
    25   void vmx_vcpu_free(struct kvm_vcpu *vcpu);
    28   void vmx_vcpu_put(struct kvm_vcpu *vcpu);
    42   int vmx_check_intercept(struct kvm_vcpu *vcpu,
    64   int vmx_get_cpl(struct kvm_vcpu *vcpu);
    91   void vmx_inject_nmi(struct kvm_vcpu *vcpu);
    122  void vmx_setup_mce(struct kvm_vcpu *vcpu);
    133  void tdx_vcpu_free(struct kvm_vcpu *vcpu);
    138  void tdx_vcpu_put(struct kvm_vcpu *vcpu);
    [all …]
|
| /arch/s390/kvm/ |
| priv.c |
    34    vcpu->stat.instruction_ri++;  in handle_ri()
    39    kvm_s390_retry_instr(vcpu);  in handle_ri()
    48    return handle_ri(vcpu);  in kvm_s390_handle_aa()
    66    vcpu->arch.gs_enabled = 1;  in handle_gs()
    78    return handle_gs(vcpu);  in kvm_s390_handle_e3()
    444   wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu->kvm));  in handle_ipte_interlock()
    464   return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);  in handle_test_block()
    495   inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);  in handle_tpi()
    574   vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;  in handle_tsch()
    1053  vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;  in handle_epsw()
    [all …]
|
| intercept.c |
    216  if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))  in handle_itdb()
    229  #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)  argument
    233  if (!guestdbg_enabled(vcpu) || !per_event(vcpu))  in should_handle_per_event()
    269  trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);  in handle_prog()
    366  rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],  in handle_mvpg_pei()
    375  rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],  in handle_mvpg_pei()
    390  vcpu->stat.exit_pei++;  in handle_partial_execution()
    468  trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,  in handle_operexc()
    474  if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)  in handle_operexc()
    596  if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs)  in should_handle_per_ifetch()
    [all …]
|
| diag.c |
    28   slots = kvm_vcpu_memslots(vcpu);  in do_discard_gfn_range()
    43   start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];  in diag_release_pages()
    44   end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;  in diag_release_pages()
    53   mmap_read_lock(vcpu->kvm->mm);  in diag_release_pages()
    100  rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));  in __diag_page_ref_service()
    169  kvm_vcpu_on_spin(vcpu, true);  in __diag_time_slice_end()
    192  tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];  in __diag_time_slice_end_directed()
    196  if (tid == vcpu->vcpu_id)  in __diag_time_slice_end_directed()
    214  VCPU_EVENT(vcpu, 5,  in __diag_time_slice_end_directed()
    228  vcpu->stat.diag_9c_ignored++;  in __diag_time_slice_end_directed()
    [all …]
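The __diag_time_slice_end_directed() fragments show how the "diag 9c" directed-yield hint is decoded: the target CPU id is read from the GPR encoded in the instruction's ipa field, and a yield aimed at the calling vCPU itself is only counted and ignored. A sketch reconstructed from those lines; everything after the self-check is omitted and the name is hypothetical.

```c
/* Sketch: decode a directed-yield (diag 9c) target and ignore self-yields. */
static int sketch_diag_9c(struct kvm_vcpu *vcpu)
{
	int tid;

	/* Register number comes from bits 4-7 of the instruction word (ipa). */
	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];

	if (tid == vcpu->vcpu_id) {
		vcpu->stat.diag_9c_ignored++;
		return 0;
	}

	/* The real code looks up the target vCPU and yields to it here. */
	return 0;
}
```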
|
| guestdbg.c |
    132  vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];  in kvm_s390_backup_guest_per_regs()
    133  vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];  in kvm_s390_backup_guest_per_regs()
    134  vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];  in kvm_s390_backup_guest_per_regs()
    135  vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];  in kvm_s390_backup_guest_per_regs()
    140  vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;  in kvm_s390_restore_guest_per_regs()
    141  vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;  in kvm_s390_restore_guest_per_regs()
    142  vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;  in kvm_s390_restore_guest_per_regs()
    143  vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;  in kvm_s390_restore_guest_per_regs()
    590  if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,  in kvm_s390_handle_per_event()
    614  (pssec(vcpu) || hssec(vcpu)))  in kvm_s390_handle_per_event()
    [all …]
|
| /arch/powerpc/kvm/ |
| booke_emulate.c |
    26  vcpu->arch.regs.nip = vcpu->arch.shared->srr0;  in kvmppc_emul_rfi()
    27  kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);  in kvmppc_emul_rfi()
    32  vcpu->arch.regs.nip = vcpu->arch.dsrr0;  in kvmppc_emul_rfdi()
    33  kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);  in kvmppc_emul_rfdi()
    38  vcpu->arch.regs.nip = vcpu->arch.csrr0;  in kvmppc_emul_rfci()
    39  kvmppc_set_msr(vcpu, vcpu->arch.csrr1);  in kvmppc_emul_rfci()
    53  kvmppc_emul_rfi(vcpu);  in kvmppc_booke_emulate_op()
    80  kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);  in kvmppc_booke_emulate_op()
    86  kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));  in kvmppc_booke_emulate_op()
    90  vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)  in kvmppc_booke_emulate_op()
    [all …]
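The booke_emulate.c fragments show that the three return-from-interrupt variants differ only in which save/restore register pair they consume: SRR0/SRR1 for rfi, DSRR0/DSRR1 for rfdi, and CSRR0/CSRR1 for rfci. A sketch of the rfi case, copied from the listed lines; the name is hypothetical.

```c
/* Sketch: rfi restores the PC from SRR0 and the MSR from SRR1. */
static void sketch_emul_rfi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
	kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}
```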
|
| book3s_emulate.c |
    92   vcpu->arch.ppr_tm = vcpu->arch.ppr;  in kvmppc_copyto_vcpu_tm()
    94   vcpu->arch.amr_tm = vcpu->arch.amr;  in kvmppc_copyto_vcpu_tm()
    96   vcpu->arch.tar_tm = vcpu->arch.tar;  in kvmppc_copyto_vcpu_tm()
    111  vcpu->arch.ppr = vcpu->arch.ppr_tm;  in kvmppc_copyfrom_vcpu_tm()
    113  vcpu->arch.amr = vcpu->arch.amr_tm;  in kvmppc_copyfrom_vcpu_tm()
    115  vcpu->arch.tar = vcpu->arch.tar_tm;  in kvmppc_copyfrom_vcpu_tm()
    337  vcpu->arch.mmu.mtsrin(vcpu,  in kvmppc_core_emulate_op_pr()
    342  vcpu->arch.mmu.mtsrin(vcpu,  in kvmppc_core_emulate_op_pr()
    388  vcpu->arch.mmu.slbmte(vcpu,  in kvmppc_core_emulate_op_pr()
    396  vcpu->arch.mmu.slbie(vcpu,  in kvmppc_core_emulate_op_pr()
    [all …]
|
| booke.c |
    513   set_guest_srr(vcpu, vcpu->arch.regs.nip,  in kvmppc_booke_irqprio_deliver()
    517   set_guest_csrr(vcpu, vcpu->arch.regs.nip,  in kvmppc_booke_irqprio_deliver()
    521   set_guest_dsrr(vcpu, vcpu->arch.regs.nip,  in kvmppc_booke_irqprio_deliver()
    525   set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,  in kvmppc_booke_irqprio_deliver()
    530   vcpu->arch.regs.nip = vcpu->arch.ivpr |  in kvmppc_booke_irqprio_deliver()
    805   vcpu->arch.pgdir = vcpu->kvm->mm->pgd;  in kvmppc_vcpu_run()
    1894  vcpu->arch.dec = vcpu->arch.decar;  in kvmppc_decrementer_func()
    2140  vcpu->arch.shared->pir = vcpu->vcpu_id;  in kvmppc_core_vcpu_create()
    2160  vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);  in kvmppc_core_vcpu_create()
    2167  vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);  in kvmppc_core_vcpu_free()
    [all …]
|
| emulate_loadstore.c |
    31   kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);  in kvmppc_check_fp_disabled()
    43   kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);  in kvmppc_check_vsx_disabled()
    95   vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);  in kvmppc_emulate_loadstore()
    114  kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);  in kvmppc_emulate_loadstore()
    134  kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);  in kvmppc_emulate_loadstore()
    233  kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);  in kvmppc_emulate_loadstore()
    247  vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,  in kvmppc_emulate_loadstore()
    257  kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);  in kvmppc_emulate_loadstore()
    271  vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,  in kvmppc_emulate_loadstore()
    316  vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,  in kvmppc_emulate_loadstore()
    [all …]
|
| book3s_pr.c |
    307   vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;  in kvmppc_copy_from_svcpu()
    308   vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;  in kvmppc_copy_from_svcpu()
    411   _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));  in kvmppc_restore_tm_pr()
    735   if (vcpu->arch.mmu.is_dcbz32(vcpu) &&  in kvmppc_handle_pagefault()
    780   else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&  in kvmppc_handle_pagefault()
    1184  } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&  in kvmppc_handle_exit_pr()
    1330  kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));  in kvmppc_handle_exit_pr()
    1494  vcpu->arch.mmu.slbmte(vcpu, 0, 0);  in kvm_arch_vcpu_ioctl_set_sregs_pr()
    1495  vcpu->arch.mmu.slbia(vcpu);  in kvm_arch_vcpu_ioctl_set_sregs_pr()
    1502  vcpu->arch.mmu.slbmte(vcpu, rs, rb);  in kvm_arch_vcpu_ioctl_set_sregs_pr()
    [all …]
|
| book3s_hv_tm.c |
    29   vcpu->arch.tfiar = tfiar;  in emulate_tx_failure()
    31   vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;  in emulate_tx_failure()
    56   vcpu->arch.regs.nip -= 4;  in kvmhv_p9_tm_emulation()
    79   vcpu->arch.cfar = vcpu->arch.regs.nip;  in kvmhv_p9_tm_emulation()
    80   vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;  in kvmhv_p9_tm_emulation()
    103  bescr = vcpu->arch.bescr;  in kvmhv_p9_tm_emulation()
    113  vcpu->arch.cfar = vcpu->arch.regs.nip;  in kvmhv_p9_tm_emulation()
    114  vcpu->arch.regs.nip = vcpu->arch.ebbrr;  in kvmhv_p9_tm_emulation()
    156  vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |  in kvmhv_p9_tm_emulation()
    203  vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |  in kvmhv_p9_tm_emulation()
    [all …]
|
| /arch/arm64/kvm/hyp/vhe/ |
| switch.c |
    53   if (!vcpu_has_nv(vcpu))  in __compute_hcr()
    107  ___activate_traps(vcpu, __compute_hcr(vcpu));  in __activate_traps()
    212  __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);  in kvm_vcpu_load_vhe()
    321  vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);  in kvm_hyp_handle_timer()
    351  if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))  in kvm_hyp_handle_eret()
    366  if (!(vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, &elr)))  in kvm_hyp_handle_eret()
    406  val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));  in kvm_hyp_handle_tlbi_el2()
    409  vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) ||  in kvm_hyp_handle_tlbi_el2()
    420  if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu) &&  in kvm_hyp_handle_tlbi_el2()
    440  vcpu_set_reg(vcpu, rt, __vcpu_sys_reg(vcpu, CPTR_EL2));  in kvm_hyp_handle_cpacr_el1()
    [all …]
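The kvm_hyp_handle_timer() fragment shows how a trapped counter/timer read is completed in the VHE fast path: the emulated value is written into the Rt register named by the trapped instruction. A sketch of just that step; how val is computed and how the PC is advanced are not visible in the listing, and the name is hypothetical.

```c
/* Sketch: complete a trapped sysreg read by writing the result into Rt. */
static bool sketch_complete_sysreg_read(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	return true;	/* assumption: the caller then skips the trapped instruction */
}
```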
|
| sysreg-sr.c |
    41   if (vcpu_el2_e2h_is_set(vcpu)) {  in __sysreg_save_vel2_state()
    81   if (ctxt_has_sctlr2(&vcpu->arch.ctxt))  in __sysreg_save_vel2_state()
    100  if (vcpu_el2_e2h_is_set(vcpu)) {  in __sysreg_restore_vel2_state()
    214  if (vcpu_has_nv(vcpu))  in __vcpu_load_switch_sysregs()
    223  __sysreg32_restore_state(vcpu);  in __vcpu_load_switch_sysregs()
    226  if (unlikely(is_hyp_ctxt(vcpu))) {  in __vcpu_load_switch_sysregs()
    227  __sysreg_restore_vel2_state(vcpu);  in __vcpu_load_switch_sysregs()
    229  if (vcpu_has_nv(vcpu)) {  in __vcpu_load_switch_sysregs()
    265  if (unlikely(is_hyp_ctxt(vcpu)))  in __vcpu_put_switch_sysregs()
    266  __sysreg_save_vel2_state(vcpu);  in __vcpu_put_switch_sysregs()
    [all …]
|
| /arch/riscv/kvm/ |
| vcpu.c |
    64   memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));  in kvm_riscv_vcpu_context_reset()
    113  memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));  in kvm_riscv_reset_vcpu()
    215  !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause);  in kvm_arch_vcpu_runnable()
    648  vcpu->cpu = -1;  in kvm_arch_vcpu_put()
    700  (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),  in kvm_riscv_check_vcpu_requests()
    704  if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {  in kvm_riscv_check_vcpu_requests()
    859  vcpu->arch.last_exit_cpu = vcpu->cpu;  in kvm_riscv_vcpu_enter_exit()
    881  ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);  in kvm_arch_vcpu_ioctl_run()
    885  ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);  in kvm_arch_vcpu_ioctl_run()
    889  ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);  in kvm_arch_vcpu_ioctl_run()
    [all …]
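The kvm_arch_vcpu_runnable() fragment is truncated, but the visible clauses say a RISC-V vCPU is not runnable while it is stopped or paused. A sketch of the visible part only; the elided first clause of the real check (most likely a pending-interrupt test) is not reproduced here, and the name is hypothetical.

```c
/* Sketch of the visible clauses only: not runnable when stopped or paused. */
int sketch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause;
}
```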
|
| /arch/x86/kvm/ |
| hyperv.h |
    66   return vcpu->arch.hyperv;  in to_hv_vcpu()
    80   return hv_vcpu->vcpu;  in hv_synic_to_vcpu()
    100  return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;  in kvm_hv_hypercall_enabled()
    113  return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap);  in kvm_hv_synic_has_vector()
    118  return to_hv_vcpu(vcpu) &&  in kvm_hv_synic_auto_eoi_set()
    139  return hv_vcpu->vcpu;  in hv_stimer_to_vcpu()
    211  if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))  in kvm_hv_vcpu_purge_flush_tlb()
    214  tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));  in kvm_hv_vcpu_purge_flush_tlb()
    235  code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :  in kvm_hv_is_tlb_flush_hcall()
    246  if (!to_hv_vcpu(vcpu))  in kvm_hv_verify_vp_assist()
    [all …]
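The kvm_hv_hypercall_enabled() fragment is self-contained: Hyper-V hypercalls are only treated as enabled once the vCPU has Hyper-V enabled and the guest has registered a guest OS id. Restated as a sketch with the expression copied verbatim from the listing; the sketch_ name is hypothetical.

```c
static inline bool sketch_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	/* Both the per-vCPU enable and a registered guest OS id are required. */
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}
```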
|
| kvm_cache_regs.h |
    118  kvm_x86_call(cache_reg)(vcpu, reg);  in kvm_register_read_raw()
    120  return vcpu->arch.regs[reg];  in kvm_register_read_raw()
    129  vcpu->arch.regs[reg] = val;  in kvm_register_write_raw()
    130  kvm_register_mark_dirty(vcpu, reg);  in kvm_register_write_raw()
    174  return vcpu->arch.cr0 & mask;  in kvm_read_cr0_bits()
    196  return vcpu->arch.cr4 & mask;  in kvm_read_cr4_bits()
    211  return vcpu->arch.cr3;  in kvm_read_cr3()
    221  return (kvm_rax_read(vcpu) & -1u)  in kvm_read_edx_eax()
    227  vcpu->arch.hflags |= HF_GUEST_MASK;  in enter_guest_mode()
    228  vcpu->stat.guest_mode = 1;  in enter_guest_mode()
    [all …]
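The kvm_cache_regs.h fragments outline x86's lazy register cache: a read asks the vendor module to populate the cached value when needed and then returns the cached copy, while a write updates the cache and marks the register dirty so it is flushed back before the next VM entry. A sketch under the assumption that an availability check guards the refresh (that check is elided in the listing); the sketch_ names are hypothetical.

```c
static inline unsigned long sketch_register_read(struct kvm_vcpu *vcpu,
						 enum kvm_reg reg)
{
	if (!kvm_register_is_available(vcpu, reg))	/* assumption: guard elided above */
		kvm_x86_call(cache_reg)(vcpu, reg);	/* vendor code fills the cache */

	return vcpu->arch.regs[reg];
}

static inline void sketch_register_write(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg, unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);	/* written back before the next entry */
}
```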
|
| /arch/mips/kvm/ |
| emulate.c |
    248   err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,  in update_pc()
    1003  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1013  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1022  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1031  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1061  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1091  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1139  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1186  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1290  vcpu->arch.io_pc = vcpu->arch.pc;  in kvm_mips_emulate_load()
    [all …]
|
| /arch/arm64/kvm/hyp/include/hyp/ |
| switch.h |
    102  if (vcpu_has_sve(vcpu))  in __activate_cptr_traps_vhe()
    106  if (!vcpu_has_nv(vcpu))  in __activate_cptr_traps_vhe()
    115  if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))  in __activate_cptr_traps_vhe()
    122  if (is_hyp_ctxt(vcpu))  in __activate_cptr_traps_vhe()
    534  return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);  in __populate_fault_info()
    540  arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);  in kvm_hyp_handle_mops()
    572  write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);  in __hyp_sve_restore_guest()
    602  zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));  in fpsimd_lazy_switch_to_guest()
    624  __vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);  in fpsimd_lazy_switch_to_host()
    823  if (is_hyp_ctxt(vcpu))  in kvm_handle_cntxct()
    [all …]
|
| /arch/loongarch/kvm/ |
| exit.c |
    46   vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];  in kvm_emu_cpucfg()
    130  vcpu->arch.pc -= 4;  in kvm_handle_csr()
    240  unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];  in kvm_complete_iocsr_read()
    477  ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,  in kvm_emu_mmio_read()
    494  inst.word, vcpu->arch.pc, vcpu->arch.badv);  in kvm_emu_mmio_read()
    504  unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];  in kvm_complete_mmio_read()
    659  inst.word, vcpu->arch.pc, vcpu->arch.badv);  in kvm_emu_mmio_write()
    756  kvm_own_fpu(vcpu);  in kvm_handle_fpu_disabled()
    897  vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);  in kvm_handle_hypercall()
    898  vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);  in kvm_handle_hypercall()
    [all …]
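The kvm_handle_hypercall() fragments show the start of a user-space hypercall exit on LoongArch: the guest's argument registers A0/A1 are copied into the kvm_run hypercall area. A sketch of just those two lines; the remaining arguments and the exit-reason setup are not visible in the listing, and the name is hypothetical.

```c
/* Sketch: copy the first hypercall arguments out of the guest GPRs. */
static void sketch_prepare_hypercall_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
	vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
}
```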
|
| vcpu.c |
    233   kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);  in kvm_late_check_requests()
    393   return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));  in kvm_arch_pmi_in_guest()
    436   vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],  in kvm_arch_vcpu_dump_regs()
    437   vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);  in kvm_arch_vcpu_dump_regs()
    517   if (vcpu == map->phys_map[val].vcpu) {  in kvm_set_cpuid()
    532   map->phys_map[val].vcpu = vcpu;  in kvm_set_cpuid()
    1340  kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);  in kvm_own_fpu()
    1360  kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);  in kvm_own_lsx()
    1395  kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);  in kvm_own_lasx()
    1510  vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);  in kvm_arch_vcpu_create()
    [all …]
|