Searched refs:guest_context (results 1-18 of 18, sorted by relevance)
/arch/riscv/kvm/
vcpu_exit.c
      74  old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);   in kvm_riscv_vcpu_unpriv_read()
     146  if (vcpu->arch.guest_context.sstatus & SR_SPP)   in kvm_riscv_vcpu_trap_redirect()
     166  vcpu->arch.guest_context.sepc = ncsr_read(CSR_VSTVEC);   in kvm_riscv_vcpu_trap_redirect()
     169  vcpu->arch.guest_context.sstatus |= SR_SPP;   in kvm_riscv_vcpu_trap_redirect()
     176  if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {   in vcpu_redirect()
     229  if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)   in kvm_riscv_vcpu_exit()
     235  if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)   in kvm_riscv_vcpu_exit()
     239  if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)   in kvm_riscv_vcpu_exit()
     254  vcpu->arch.guest_context.sepc,   in kvm_riscv_vcpu_exit()
     255  vcpu->arch.guest_context.sstatus,   in kvm_riscv_vcpu_exit()
     [all …]
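The cluster of hits in kvm_riscv_vcpu_trap_redirect() shows the basic redirect pattern: set SR_SPP in the saved sstatus so the guest resumes in S-mode, then point the saved sepc at the guest's trap vector (read from CSR_VSTVEC). Below is a minimal, compilable user-space mock of just that pattern, not the kernel source; mock_guest_context and redirect_trap are invented names, and the real function also updates several more virtual-supervisor CSRs.

    #include <stdio.h>

    #define SR_SPP (1UL << 8)            /* sstatus.SPP: trapped from S-mode */

    /* Stand-in for the saved guest state (struct kvm_cpu_context). */
    struct mock_guest_context {
        unsigned long sepc;              /* guest PC, reloaded on return to guest */
        unsigned long sstatus;           /* guest sstatus snapshot */
    };

    /* Mirrors lines 166/169 above: vector the guest into its own trap handler. */
    static void redirect_trap(struct mock_guest_context *ct, unsigned long vstvec)
    {
        ct->sstatus |= SR_SPP;           /* guest handler runs in S-mode */
        ct->sepc = vstvec;               /* resume at the guest's trap vector */
    }

    int main(void)
    {
        struct mock_guest_context ct = { .sepc = 0x80200000UL, .sstatus = 0 };
        redirect_trap(&ct, 0x80000004UL);
        printf("sepc=0x%lx sstatus=0x%lx\n", ct.sepc, ct.sstatus);
        return 0;
    }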
vcpu_insn.c
     162  utrap.sepc = vcpu->arch.guest_context.sepc;   in truly_illegal_insn()
     178  utrap.sepc = vcpu->arch.guest_context.sepc;   in truly_virtual_insn()
     264  SET_RD(insn, &vcpu->arch.guest_context,   in kvm_riscv_vcpu_csr_return()
     268  vcpu->arch.guest_context.sepc += INSN_LEN(insn);   in kvm_riscv_vcpu_csr_return()
     418  vcpu->arch.guest_context.sepc += INSN_LEN(insn);   in system_opcode_insn()
     447  ct = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_virtual_insn()
     641  data = GET_RS2(insn, &vcpu->arch.guest_context);   in kvm_riscv_vcpu_mmio_store()
     755  SET_RD(insn, &vcpu->arch.guest_context,   in kvm_riscv_vcpu_mmio_return()
     760  SET_RD(insn, &vcpu->arch.guest_context,   in kvm_riscv_vcpu_mmio_return()
     765  SET_RD(insn, &vcpu->arch.guest_context,   in kvm_riscv_vcpu_mmio_return()
     [all …]
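The vcpu_insn.c hits show the two things every emulation path does with guest_context: write the result into the destination GPR (SET_RD) and advance sepc by INSN_LEN(insn) so the guest does not re-execute the trapped instruction. A self-contained mock of both steps, assuming nothing beyond the standard RISC-V encoding (rd in bits [11:7], compressed instructions have low bits other than 0b11); set_rd and mock_context are invented names, not the kernel macros.

    #include <stdint.h>
    #include <stdio.h>

    /* RISC-V: 2-byte compressed unless the low two bits are 0b11 (4-byte). */
    #define INSN_LEN(insn) ((((insn) & 0x3) == 0x3) ? 4 : 2)

    /* Stand-in for the GPR file held in struct kvm_cpu_context. */
    struct mock_context {
        unsigned long sepc;
        unsigned long gpr[32];
    };

    /* Mock of the SET_RD() idea: store the emulated result in rd. */
    static void set_rd(uint32_t insn, struct mock_context *ct, unsigned long val)
    {
        unsigned int rd = (insn >> 7) & 0x1f;

        if (rd)                          /* x0 is hard-wired to zero */
            ct->gpr[rd] = val;
    }

    int main(void)
    {
        struct mock_context ct = { .sepc = 0x1000 };
        uint32_t insn = 0xC01025F3;      /* csrr a1, time */

        set_rd(insn, &ct, 123456);       /* emulated CSR value lands in a1 */
        ct.sepc += INSN_LEN(insn);       /* step past the emulated instruction */

        printf("a1=%lu sepc=0x%lx\n", ct.gpr[11], ct.sepc);
        return 0;
    }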
vcpu_vector.c
      22  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_vector_reset()
      78  vcpu->arch.guest_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);   in kvm_riscv_vcpu_alloc_vector_context()
      79  if (!vcpu->arch.guest_context.vector.datap)   in kvm_riscv_vcpu_alloc_vector_context()
      91  kfree(vcpu->arch.guest_context.vector.datap);   in kvm_riscv_vcpu_free_vector_context()
     101  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_vreg_addr()
     182  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_set_reg_vector()
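Lines 78-91 show why the vector state is handled differently from the GPRs: the V register file is variable-sized (riscv_v_vsize is probed at boot), so guest_context keeps only a datap pointer to a separately allocated, zeroed buffer. A compilable user-space mock of that allocate/free pattern; the names, the CSR field list, and the vsize value are illustrative assumptions.

    #include <stdlib.h>

    /* Stand-in for the vector part of struct kvm_cpu_context. */
    struct mock_vector_context {
        unsigned long vstart, vl, vtype, vcsr;  /* vector CSRs (abridged) */
        void *datap;                            /* V register file, sized at runtime */
    };

    static int alloc_vector_context(struct mock_vector_context *v, size_t vsize)
    {
        v->datap = calloc(1, vsize);     /* kzalloc() analogue: zeroed buffer */
        return v->datap ? 0 : -1;        /* the kernel returns -ENOMEM here */
    }

    static void free_vector_context(struct mock_vector_context *v)
    {
        free(v->datap);
        v->datap = NULL;
    }

    int main(void)
    {
        struct mock_vector_context v;

        if (alloc_vector_context(&v, 32 * 256 / 8))  /* e.g. VLEN=256 bits */
            return 1;
        free_vector_context(&v);
        return 0;
    }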
vcpu_sbi_replace.c
      20  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_time_handler()
      51  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_ipi_handler()
      95  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_rfence_handler()
     151  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_srst_handler()
     193  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_dbcn_handler()
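Every SBI handler above (and the ones in the vcpu_sbi_*.c files below) starts by grabbing cp = &vcpu->arch.guest_context because SBI calls pass everything in the guest's GPRs: per the SBI spec, a7 carries the extension ID, a6 the function ID, a0-a5 the arguments, and the result returns in a0 (error) and a1 (value); afterwards sepc is advanced by 4 to step past the guest's ecall (see vcpu_sbi.c line 201 below). A compilable mock dispatcher showing just that convention; handle_sbi_ecall and mock_context are invented names, while the SBI constants come from the spec.

    #include <stdio.h>

    #define SBI_EXT_TIME          0x54494D45UL  /* "TIME" extension ID */
    #define SBI_SUCCESS           0
    #define SBI_ERR_NOT_SUPPORTED (-2)

    /* Stand-in for the guest GPRs in struct kvm_cpu_context. */
    struct mock_context {
        unsigned long sepc;
        unsigned long a0, a1, a6, a7;
    };

    static void handle_sbi_ecall(struct mock_context *cp)
    {
        switch (cp->a7) {                /* a7 selects the SBI extension */
        case SBI_EXT_TIME:               /* a6 would select the function */
            printf("set_timer(%lu)\n", cp->a0);
            cp->a0 = SBI_SUCCESS;
            break;
        default:
            cp->a0 = SBI_ERR_NOT_SUPPORTED;
            break;
        }
        cp->sepc += 4;                   /* ecall is a 4-byte instruction */
    }

    int main(void)
    {
        struct mock_context cp = { .sepc = 0x2000, .a0 = 1000, .a7 = SBI_EXT_TIME };

        handle_sbi_ecall(&cp);
        printf("a0=%ld sepc=0x%lx\n", (long)cp.a0, cp.sepc);
        return 0;
    }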
vcpu_sbi_hsm.c
      18  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_hsm_vcpu_start()
      65  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_hsm_vcpu_get_status()
      84  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_hsm_handler()
vcpu_fp.c
      19  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_fp_reset()
      81  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_get_reg_fp()
     126  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_set_reg_fp()
vcpu_sbi.c
     121  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_sbi_forward()
     173  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_sbi_load_reset_state()
     189  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_sbi_return()
     201  vcpu->arch.guest_context.sepc += 4;   in kvm_riscv_vcpu_sbi_return()
     455  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_sbi_ecall()
vcpu.c
      59  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_context_reset()
     225  return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;   in kvm_arch_vcpu_in_kernel()
     231  return vcpu->arch.guest_context.sepc;   in kvm_arch_vcpu_get_ip()
     630  kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,   in kvm_arch_vcpu_load()
     633  kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,   in kvm_arch_vcpu_load()
     652  kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,   in kvm_arch_vcpu_put()
     657  kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,   in kvm_arch_vcpu_put()
     788  struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_enter_exit()
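Lines 630-657 show the lifetime of the FP and vector state: kvm_arch_vcpu_load() restores it from guest_context onto the hardware when the vCPU is scheduled in, and kvm_arch_vcpu_put() saves the live hardware state back when it is scheduled out. A toy, compilable model of that symmetric pairing; everything here (hw_fp, vcpu_load, vcpu_put, the struct shapes) is a stand-in, not kernel API.

    #include <stdio.h>

    struct mock_fp_state { unsigned long f[32]; unsigned long fcsr; };
    struct mock_guest_context { struct mock_fp_state fp; };

    static struct mock_fp_state hw_fp;   /* stands in for the real FP registers */
    static void hw_fp_restore(const struct mock_fp_state *s) { hw_fp = *s; }
    static void hw_fp_save(struct mock_fp_state *s)          { *s = hw_fp; }

    /* vCPU scheduled onto a physical CPU: hardware gets the guest's state. */
    static void vcpu_load(struct mock_guest_context *gc) { hw_fp_restore(&gc->fp); }

    /* vCPU scheduled off: capture whatever the guest modified. */
    static void vcpu_put(struct mock_guest_context *gc) { hw_fp_save(&gc->fp); }

    int main(void)
    {
        struct mock_guest_context gc = { .fp = { .fcsr = 0x1 } };

        vcpu_load(&gc);
        hw_fp.f[0] = 42;                 /* guest runs, dirties an FP register */
        vcpu_put(&gc);

        printf("saved f0=%lu\n", gc.fp.f[0]);
        return 0;
    }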
vcpu_sbi_system.c
      15  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_susp_handler()
vcpu_sbi_sta.c
      83  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_sta_steal_time_set_shmem()
     131  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_sta_handler()
trace.h
      25  __entry->pc = vcpu->arch.guest_context.sepc;
vcpu_sbi_base.c
      19  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_base_handler()
vcpu_sbi_pmu.c
      20  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_pmu_handler()
vcpu_sbi_v01.c
      24  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;   in kvm_sbi_ext_v01_handler()
vcpu_onereg.c
     418  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_get_reg_core()
     451  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in kvm_riscv_vcpu_set_reg_core()
     966  const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in num_fp_f_regs()
     995  const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in num_fp_d_regs()
    1138  const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;   in copy_vector_reg_indices()
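vcpu_onereg.c is where guest_context becomes visible to userspace: the core GPRs, sepc and mode are exported through KVM's ONE_REG interface. A sketch of reading the guest PC (backed by guest_context.sepc) from a userspace VMM, assuming a riscv64 host with kernel headers installed and an already-created vcpu_fd; error handling is trimmed.

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>               /* pulls in the riscv <asm/kvm.h> */

    /* Fetch the guest program counter via KVM_GET_ONE_REG. */
    static int read_guest_pc(int vcpu_fd, uint64_t *pc)
    {
        struct kvm_one_reg reg = {
            .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                  KVM_REG_RISCV_CORE |
                  KVM_REG_RISCV_CORE_REG(regs.pc),
            .addr = (uint64_t)(unsigned long)pc,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }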
aia_imsic.c
     873  vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN;   in kvm_riscv_vcpu_aia_imsic_update()
     875  vcpu->arch.guest_context.hstatus |=   in kvm_riscv_vcpu_aia_imsic_update()
/arch/riscv/kernel/
asm-offsets.c
     136  OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);   in asm_offsets()
     137  OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);   in asm_offsets()
     138  OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp);   in asm_offsets()
     139  OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp);   in asm_offsets()
     140  OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0);   in asm_offsets()
     141  OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1);   in asm_offsets()
     142  OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2);   in asm_offsets()
     143  OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0);   in asm_offsets()
     144  OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1);   in asm_offsets()
     145  OFFSET(KVM_ARCH_GUEST_A0, kvm_vcpu_arch, guest_context.a0);   in asm_offsets()
     [all …]
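asm-offsets.c exists because the low-level world-switch code is assembly and cannot use offsetof(): at build time the kernel compiles this file, harvests the constants out of the generated assembly, and each OFFSET() becomes a #define (KVM_ARCH_GUEST_RA and so on) that the .S files use to address guest_context fields. A self-contained demo of the underlying offsetof() trick; the real kernel emits the values via inline asm rather than printf, and mock_vcpu_arch is an invented stand-in.

    #include <stddef.h>
    #include <stdio.h>

    /* Miniature stand-ins for the kernel structs. */
    struct mock_cpu_context { unsigned long ra, sp, gp, tp; };
    struct mock_vcpu_arch {
        int other_state;                 /* fields that precede guest_context */
        struct mock_cpu_context guest_context;
    };

    /* The OFFSET() idea: emit each field offset as a named constant. */
    #define OFFSET(sym, str, mem) \
        printf("#define %-20s %zu\n", #sym, offsetof(struct str, mem))

    int main(void)
    {
        OFFSET(KVM_ARCH_GUEST_RA, mock_vcpu_arch, guest_context.ra);
        OFFSET(KVM_ARCH_GUEST_SP, mock_vcpu_arch, guest_context.sp);
        OFFSET(KVM_ARCH_GUEST_GP, mock_vcpu_arch, guest_context.gp);
        OFFSET(KVM_ARCH_GUEST_TP, mock_vcpu_arch, guest_context.tp);
        return 0;
    }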
/arch/riscv/include/asm/
kvm_host.h
     208  struct kvm_cpu_context guest_context;   (member declaration)
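For reference, the shape of the struct behind all of the hits above, reconstructed from how its fields are used; this is an abridged, best-effort sketch, and the authoritative definition lives next to the member declaration at kvm_host.h line 208. It compiles only on a riscv64 host with recent kernel headers (the fp/vector types come from <asm/ptrace.h>).

    #include <asm/ptrace.h>  /* union __riscv_fp_state, struct __riscv_v_ext_state */

    /* Approximate layout of struct kvm_cpu_context (abridged sketch). */
    struct kvm_cpu_context_sketch {
        unsigned long zero;                 /* x0 slot keeps GPR indexing uniform */
        unsigned long ra, sp, gp, tp;       /* x1..x4 */
        unsigned long t0, t1, t2;           /* x5..x7 */
        unsigned long s0, s1;               /* x8..x9 */
        unsigned long a0, a1, a2, a3,
                      a4, a5, a6, a7;       /* x10..x17: SBI arguments live here */
        /* ... s2..s11 and t3..t6 (x18..x31) elided ... */
        unsigned long sepc;                 /* guest PC at trap time */
        unsigned long sstatus;              /* guest sstatus snapshot */
        unsigned long hstatus;              /* per-vCPU hstatus (SPV, VGEIN, ...) */
        union __riscv_fp_state fp;          /* F/D register file */
        struct __riscv_v_ext_state vector;  /* vector CSRs plus the datap buffer */
    };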