Lines matching refs:vcpu (arch/loongarch/kvm/exit.c). Each entry shows the source line number, the matched text, and the enclosing function.

24 static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)  in kvm_emu_cpucfg()  argument
34 ++vcpu->stat.cpucfg_exits; in kvm_emu_cpucfg()
35 index = vcpu->arch.gprs[rj]; in kvm_emu_cpucfg()
46 vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; in kvm_emu_cpucfg()
50 vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE; in kvm_emu_cpucfg()
53 ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; in kvm_emu_cpucfg()
54 vcpu->arch.gprs[rd] = ret; in kvm_emu_cpucfg()
57 vcpu->arch.gprs[rd] = 0; in kvm_emu_cpucfg()
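The group above is the CPUCFG emulation path: the guest's CPUCFG index arrives in GPR rj and the result is written back to rd, with a KVM-private index window for the hypervisor signature and the paravirt feature bits. A minimal sketch of how the matched fragments fit together; the opcode guard, the CPUCFG_KVM_* constants and the EMULATE_* return values are assumptions taken from their usual kernel meanings, not verified against this exact tree:

static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
        int rd, rj;
        unsigned int index, ret;

        if (inst.reg2_format.opcode != cpucfg_op)
                return EMULATE_FAIL;

        rd = inst.reg2_format.rd;
        rj = inst.reg2_format.rj;
        ++vcpu->stat.cpucfg_exits;
        index = vcpu->arch.gprs[rj];

        switch (index) {
        case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
                /* Architectural words come from the per-vCPU shadow array */
                vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
                break;
        case CPUCFG_KVM_SIG:
                /* Hypervisor signature, so guests can detect KVM */
                vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
                break;
        case CPUCFG_KVM_FEATURE:
                /* Advertise only the paravirt features this VM enabled */
                ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
                vcpu->arch.gprs[rd] = ret;
                break;
        default:
                /* Undefined CPUCFG indices read as zero */
                vcpu->arch.gprs[rd] = 0;
                break;
        }

        return EMULATE_DONE;
}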
65 static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) in kvm_emu_read_csr() argument
68 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_emu_read_csr()
77 pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc); in kvm_emu_read_csr()
82 static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val) in kvm_emu_write_csr() argument
85 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_emu_write_csr()
91 pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc); in kvm_emu_write_csr()
96 static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, in kvm_emu_xchg_csr() argument
100 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_emu_xchg_csr()
108 pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc); in kvm_emu_xchg_csr()
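The three CSR helpers share one shape: consult the software CSR image hanging off vcpu->arch.csr, and warn once when the guest touches a CSR id KVM does not emulate. A sketch of the write and exchange variants; get_gcsr_flag(), SW_GCSR, kvm_read_sw_gcsr() and kvm_write_sw_gcsr() are assumed helpers, matching their use elsewhere in arch/loongarch/kvm:

static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
        unsigned long old = 0;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (get_gcsr_flag(csrid) & SW_GCSR) {
                /* csrwr returns the previous value in rd */
                old = kvm_read_sw_gcsr(csr, csrid);
                kvm_write_sw_gcsr(csr, csrid, val);
        } else
                pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

        return old;
}

static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
                                      unsigned long csr_mask, unsigned long val)
{
        unsigned long old = 0;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (get_gcsr_flag(csrid) & SW_GCSR) {
                /* Only the bits selected by csr_mask are replaced */
                old = kvm_read_sw_gcsr(csr, csrid);
                val = (old & ~csr_mask) | (val & csr_mask);
                kvm_write_sw_gcsr(csr, csrid, val);
                old = old & csr_mask;
        } else
                pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

        return old;
}

kvm_emu_read_csr() is the degenerate case: return kvm_read_sw_gcsr() for SW_GCSR ids, 0 (with the same pr_warn_once) otherwise.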
113 static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) in kvm_handle_csr() argument
128 if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) { in kvm_handle_csr()
129 if (kvm_guest_has_pmu(&vcpu->arch)) { in kvm_handle_csr()
130 vcpu->arch.pc -= 4; in kvm_handle_csr()
131 kvm_make_request(KVM_REQ_PMU, vcpu); in kvm_handle_csr()
139 val = kvm_emu_read_csr(vcpu, csrid); in kvm_handle_csr()
140 vcpu->arch.gprs[rd] = val; in kvm_handle_csr()
143 val = vcpu->arch.gprs[rd]; in kvm_handle_csr()
144 val = kvm_emu_write_csr(vcpu, csrid, val); in kvm_handle_csr()
145 vcpu->arch.gprs[rd] = val; in kvm_handle_csr()
148 val = vcpu->arch.gprs[rd]; in kvm_handle_csr()
149 csr_mask = vcpu->arch.gprs[rj]; in kvm_handle_csr()
150 val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val); in kvm_handle_csr()
151 vcpu->arch.gprs[rd] = val; in kvm_handle_csr()
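kvm_handle_csr() decodes the reg2csr instruction format and routes to the helpers above; rj doubles as the sub-opcode (0 = csrrd, 1 = csrwr, anything else = csrxchg with rj naming the mask register). PMU CSRs are special-cased: the PC is rewound by one instruction and KVM_REQ_PMU raised so the access replays once the PMU context is loaded. A sketch; the reg2csr_format field names and EMULATE_DONE are assumptions:

static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
        unsigned int rd, rj, csrid;
        unsigned long csr_mask, val = 0;

        rd = inst.reg2csr_format.rd;
        rj = inst.reg2csr_format.rj;
        csrid = inst.reg2csr_format.csr;

        if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
                if (kvm_guest_has_pmu(&vcpu->arch)) {
                        /* Replay the access after the PMU is owned */
                        vcpu->arch.pc -= 4;
                        kvm_make_request(KVM_REQ_PMU, vcpu);
                        return EMULATE_DONE;
                }
        }

        if (rj == 0) {                  /* csrrd */
                val = kvm_emu_read_csr(vcpu, csrid);
                vcpu->arch.gprs[rd] = val;
        } else if (rj == 1) {           /* csrwr */
                val = vcpu->arch.gprs[rd];
                val = kvm_emu_write_csr(vcpu, csrid, val);
                vcpu->arch.gprs[rd] = val;      /* old value back in rd */
        } else {                        /* csrxchg, rj holds the mask */
                val = vcpu->arch.gprs[rd];
                csr_mask = vcpu->arch.gprs[rj];
                val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
                vcpu->arch.gprs[rd] = val;
        }

        return EMULATE_DONE;
}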
157 int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) in kvm_emu_iocsr() argument
169 addr = vcpu->arch.gprs[rj]; in kvm_emu_iocsr()
172 val = &vcpu->arch.gprs[rd]; in kvm_emu_iocsr()
209 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_emu_iocsr()
210 ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val); in kvm_emu_iocsr()
211 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_emu_iocsr()
221 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_emu_iocsr()
222 ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val); in kvm_emu_iocsr()
223 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_emu_iocsr()
229 vcpu->arch.io_gpr = rd; in kvm_emu_iocsr()
237 int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_complete_iocsr_read() argument
240 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; in kvm_complete_iocsr_read()
257 run->iocsr_io.len, vcpu->arch.badv); in kvm_complete_iocsr_read()
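IOCSR emulation is two-phase. kvm_emu_iocsr() first offers the access to in-kernel devices on KVM_IOCSR_BUS under SRCU; if no device claims the address it records the destination GPR in io_gpr (for reads) and bounces to userspace, and kvm_complete_iocsr_read() later copies whatever userspace placed in run->iocsr_io.data into that GPR. A condensed sketch; EMULATE_DO_IOCSR and the sign-extending s8/s16/s32/s64 completion are assumptions consistent with the matched error path:

        /* kvm_emu_iocsr(): try in-kernel devices before exiting to userspace */
        idx = srcu_read_lock(&vcpu->kvm->srcu);
        ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        ret = (ret == 0) ? EMULATE_DONE : EMULATE_DO_IOCSR;

        /* the read side is symmetric, but must remember the target GPR */
        vcpu->arch.io_gpr = rd;

int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        enum emulation_result er = EMULATE_DONE;
        unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

        switch (run->iocsr_io.len) {
        case 1:
                *gpr = *(s8 *)run->iocsr_io.data;
                break;
        case 2:
                *gpr = *(s16 *)run->iocsr_io.data;
                break;
        case 4:
                *gpr = *(s32 *)run->iocsr_io.data;
                break;
        case 8:
                *gpr = *(s64 *)run->iocsr_io.data;
                break;
        default:
                kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
                                run->iocsr_io.len, vcpu->arch.badv);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}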
265 int kvm_emu_idle(struct kvm_vcpu *vcpu) in kvm_emu_idle() argument
267 ++vcpu->stat.idle_exits; in kvm_emu_idle()
268 trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE); in kvm_emu_idle()
270 if (!kvm_arch_vcpu_runnable(vcpu)) in kvm_emu_idle()
271 kvm_vcpu_halt(vcpu); in kvm_emu_idle()
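The idle (CPU halt) exit is nearly complete in the matches; only the return value needs filling in. The point worth noting is the runnable check: a vCPU with pending work must not block. EMULATE_DONE as the return value is an assumption:

int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.idle_exits;
        trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

        /* Block only if nothing is pending; otherwise return immediately */
        if (!kvm_arch_vcpu_runnable(vcpu))
                kvm_vcpu_halt(vcpu);

        return EMULATE_DONE;
}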
276 static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) in kvm_trap_handle_gspr() argument
281 struct kvm_run *run = vcpu->run; in kvm_trap_handle_gspr()
284 inst.word = vcpu->arch.badi; in kvm_trap_handle_gspr()
285 curr_pc = vcpu->arch.pc; in kvm_trap_handle_gspr()
286 update_pc(&vcpu->arch); in kvm_trap_handle_gspr()
288 trace_kvm_exit_gspr(vcpu, inst.word); in kvm_trap_handle_gspr()
292 trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG); in kvm_trap_handle_gspr()
293 er = kvm_emu_cpucfg(vcpu, inst); in kvm_trap_handle_gspr()
296 trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR); in kvm_trap_handle_gspr()
297 er = kvm_handle_csr(vcpu, inst); in kvm_trap_handle_gspr()
303 trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE); in kvm_trap_handle_gspr()
308 er = kvm_emu_iocsr(inst, run, vcpu); in kvm_trap_handle_gspr()
311 er = kvm_emu_idle(vcpu); in kvm_trap_handle_gspr()
333 kvm_arch_vcpu_dump_regs(vcpu); in kvm_trap_handle_gspr()
334 vcpu->arch.pc = curr_pc; in kvm_trap_handle_gspr()
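kvm_trap_handle_gspr() uses a speculate-then-rollback pattern: it saves the current PC, advances past the trapping instruction with update_pc(), and restores curr_pc only if emulation fails, so the guest re-faults on the same instruction. A skeleton with the dispatch condensed to two cases; the opcode names and exact case structure are assumptions:

static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        unsigned long curr_pc;
        larch_inst inst;

        /* Fetch the trapping instruction and advance the PC up front */
        inst.word = vcpu->arch.badi;
        curr_pc = vcpu->arch.pc;
        update_pc(&vcpu->arch);

        trace_kvm_exit_gspr(vcpu, inst.word);

        switch (inst.reg2_format.opcode) {
        case cpucfg_op:
                er = kvm_emu_cpucfg(vcpu, inst);
                break;
        case idle_op:
                er = kvm_emu_idle(vcpu);
                break;
        default:
                /* CSR, IOCSR and cache instructions are routed the same way */
                er = EMULATE_FAIL;
                break;
        }

        if (er == EMULATE_FAIL) {
                /* Roll back so the faulting instruction stays visible */
                kvm_arch_vcpu_dump_regs(vcpu);
                vcpu->arch.pc = curr_pc;
        }

        return er;
}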
346 static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode) in kvm_handle_gspr() argument
351 er = kvm_trap_handle_gspr(vcpu); in kvm_handle_gspr()
356 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_handle_gspr()
359 vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR; in kvm_handle_gspr()
362 kvm_queue_exception(vcpu, EXCCODE_INE, 0); in kvm_handle_gspr()
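The caller maps the emulation result onto a KVM exit: in-kernel success resumes the guest, deferred MMIO/IOCSR work exits to userspace with the matching exit_reason, and anything else injects an INE (instruction non-existent) exception back into the guest. Reconstructed around the matches; RESUME_GUEST/RESUME_HOST and the EMULATE_DO_* values are assumptions:

static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
        int ret = RESUME_GUEST;
        enum emulation_result er;

        er = kvm_trap_handle_gspr(vcpu);

        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else if (er == EMULATE_DO_MMIO) {
                /* Finish the access in userspace */
                vcpu->run->exit_reason = KVM_EXIT_MMIO;
                ret = RESUME_HOST;
        } else if (er == EMULATE_DO_IOCSR) {
                vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
                ret = RESUME_HOST;
        } else {
                /* Unemulatable: let the guest see an INE exception */
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);
                ret = RESUME_GUEST;
        }

        return ret;
}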
369 int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) in kvm_emu_mmio_read() argument
373 struct kvm_run *run = vcpu->run; in kvm_emu_mmio_read()
375 run->mmio.phys_addr = vcpu->arch.badv; in kvm_emu_mmio_read()
376 vcpu->mmio_needed = 2; /* signed */ in kvm_emu_mmio_read()
405 vcpu->mmio_needed = 1; /* unsigned */ in kvm_emu_mmio_read()
412 vcpu->mmio_needed = 1; /* unsigned */ in kvm_emu_mmio_read()
419 vcpu->mmio_needed = 1; /* unsigned */ in kvm_emu_mmio_read()
440 vcpu->mmio_needed = 1; /* unsigned */ in kvm_emu_mmio_read()
447 vcpu->mmio_needed = 1; /* unsigned */ in kvm_emu_mmio_read()
454 vcpu->mmio_needed = 1; /* unsigned */ in kvm_emu_mmio_read()
476 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_emu_mmio_read()
477 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, in kvm_emu_mmio_read()
478 run->mmio.len, &vcpu->arch.gprs[rd]); in kvm_emu_mmio_read()
479 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_emu_mmio_read()
481 update_pc(&vcpu->arch); in kvm_emu_mmio_read()
482 vcpu->mmio_needed = 0; in kvm_emu_mmio_read()
487 vcpu->arch.io_gpr = rd; in kvm_emu_mmio_read()
489 vcpu->mmio_is_write = 0; in kvm_emu_mmio_read()
494 inst.word, vcpu->arch.pc, vcpu->arch.badv); in kvm_emu_mmio_read()
495 kvm_arch_vcpu_dump_regs(vcpu); in kvm_emu_mmio_read()
496 vcpu->mmio_needed = 0; in kvm_emu_mmio_read()
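The MMIO load path sets mmio_needed to 2 for sign-extending loads and 1 for zero-extending ones before trying the in-kernel bus; that flag is consumed by kvm_complete_mmio_read() below. A sketch of the tail after operand decode; run->mmio.is_write is an assumption:

        /* First try an in-kernel device (e.g. an emulated interrupt
         * controller), so the exit need not reach userspace at all */
        idx = srcu_read_lock(&vcpu->kvm->srcu);
        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
                              run->mmio.len, &vcpu->arch.gprs[rd]);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        if (!ret) {
                /* Handled in kernel: retire the load immediately */
                update_pc(&vcpu->arch);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        /* Defer to userspace; remember where the data must land */
        vcpu->arch.io_gpr = rd;
        run->mmio.is_write = 0;
        vcpu->mmio_is_write = 0;
        return EMULATE_DO_MMIO;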
501 int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_complete_mmio_read() argument
504 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; in kvm_complete_mmio_read()
507 update_pc(&vcpu->arch); in kvm_complete_mmio_read()
510 if (vcpu->mmio_needed == 2) in kvm_complete_mmio_read()
516 if (vcpu->mmio_needed == 2) in kvm_complete_mmio_read()
522 if (vcpu->mmio_needed == 2) in kvm_complete_mmio_read()
532 run->mmio.len, vcpu->arch.badv); in kvm_complete_mmio_read()
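Completion runs after userspace fills run->mmio.data; the PC advances here, not at fault time, and mmio_needed == 2 selects the sign-extending reinterpretation of the buffer. A sketch matching the error print on the last match line; only the 8-byte case has no signed variant since the GPR is already 64 bits wide:

int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        enum emulation_result er = EMULATE_DONE;
        unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

        /* The deferred load retires here */
        update_pc(&vcpu->arch);

        switch (run->mmio.len) {
        case 1:
                if (vcpu->mmio_needed == 2)     /* ld.b: sign-extend */
                        *gpr = *(s8 *)run->mmio.data;
                else                            /* ld.bu: zero-extend */
                        *gpr = *(u8 *)run->mmio.data;
                break;
        case 2:
                if (vcpu->mmio_needed == 2)
                        *gpr = *(s16 *)run->mmio.data;
                else
                        *gpr = *(u16 *)run->mmio.data;
                break;
        case 4:
                if (vcpu->mmio_needed == 2)
                        *gpr = *(s32 *)run->mmio.data;
                else
                        *gpr = *(u32 *)run->mmio.data;
                break;
        case 8:
                *gpr = *(u64 *)run->mmio.data;
                break;
        default:
                kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
                                run->mmio.len, vcpu->arch.badv);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}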
543 int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) in kvm_emu_mmio_write() argument
548 struct kvm_run *run = vcpu->run; in kvm_emu_mmio_write()
555 curr_pc = vcpu->arch.pc; in kvm_emu_mmio_write()
556 update_pc(&vcpu->arch); in kvm_emu_mmio_write()
559 run->mmio.phys_addr = vcpu->arch.badv; in kvm_emu_mmio_write()
569 *(unsigned int *)data = vcpu->arch.gprs[rd]; in kvm_emu_mmio_write()
573 *(unsigned long *)data = vcpu->arch.gprs[rd]; in kvm_emu_mmio_write()
583 rd_val = vcpu->arch.gprs[rd]; in kvm_emu_mmio_write()
614 *(unsigned char *)data = vcpu->arch.gprs[rd]; in kvm_emu_mmio_write()
618 *(unsigned short *)data = vcpu->arch.gprs[rd]; in kvm_emu_mmio_write()
622 *(unsigned int *)data = vcpu->arch.gprs[rd]; in kvm_emu_mmio_write()
626 *(unsigned long *)data = vcpu->arch.gprs[rd]; in kvm_emu_mmio_write()
645 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_emu_mmio_write()
646 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data); in kvm_emu_mmio_write()
647 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_emu_mmio_write()
652 vcpu->mmio_needed = 1; in kvm_emu_mmio_write()
653 vcpu->mmio_is_write = 1; in kvm_emu_mmio_write()
657 vcpu->arch.pc = curr_pc; in kvm_emu_mmio_write()
659 inst.word, vcpu->arch.pc, vcpu->arch.badv); in kvm_emu_mmio_write()
660 kvm_arch_vcpu_dump_regs(vcpu); in kvm_emu_mmio_write()
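The store path differs from the load path in one respect: it retires the instruction (update_pc) before touching the bus, saving curr_pc so an undecodable store can rewind and fail cleanly; there is nothing to complete later, so mmio_needed is just a flag for the userspace round trip. A sketch of the tail; the fail-label placement is an assumption:

        /* PC is advanced up front; an undecodable store rewinds it below */
        curr_pc = vcpu->arch.pc;
        update_pc(&vcpu->arch);

        /* ... decode marshals vcpu->arch.gprs[rd] into data by size ... */

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        if (!ret)
                return EMULATE_DONE;    /* an in-kernel device took the store */

        /* Otherwise hand the write to userspace as-is */
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;
        return EMULATE_DO_MMIO;

fail:
        /* Restore the PC and report the bad instruction */
        vcpu->arch.pc = curr_pc;
        kvm_arch_vcpu_dump_regs(vcpu);
        return EMULATE_FAIL;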
666 static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode) in kvm_handle_rdwr_fault() argument
671 struct kvm_run *run = vcpu->run; in kvm_handle_rdwr_fault()
672 unsigned long badv = vcpu->arch.badv; in kvm_handle_rdwr_fault()
675 if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) { in kvm_handle_rdwr_fault()
676 kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); in kvm_handle_rdwr_fault()
680 ret = kvm_handle_mm_fault(vcpu, badv, write, ecode); in kvm_handle_rdwr_fault()
683 inst.word = vcpu->arch.badi; in kvm_handle_rdwr_fault()
685 er = kvm_emu_mmio_write(vcpu, inst); in kvm_handle_rdwr_fault()
688 if (kvm_is_ifetch_fault(&vcpu->arch)) { in kvm_handle_rdwr_fault()
689 kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF); in kvm_handle_rdwr_fault()
693 er = kvm_emu_mmio_read(vcpu, inst); in kvm_handle_rdwr_fault()
703 kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); in kvm_handle_rdwr_fault()
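kvm_handle_rdwr_fault() is the funnel for guest read/write faults: anything above the guest physical address space is a straight address error, an ordinary page fault is fixed up by kvm_handle_mm_fault(), and only the remainder is treated as MMIO and sent through the emulators above. Reconstructed around the matched fragments; the RESUME_*/EMULATE_* plumbing is an assumption:

static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode)
{
        int ret;
        larch_inst inst;
        enum emulation_result er = EMULATE_DONE;
        struct kvm_run *run = vcpu->run;
        unsigned long badv = vcpu->arch.badv;

        /* Beyond guest physical memory: plain bad address */
        if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
                kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
                return RESUME_GUEST;
        }

        ret = kvm_handle_mm_fault(vcpu, badv, write, ecode);
        if (ret) {
                /* Not an ordinary page fault: emulate as MMIO */
                inst.word = vcpu->arch.badi;
                if (write) {
                        er = kvm_emu_mmio_write(vcpu, inst);
                } else {
                        /* A fetch fault here means the PC itself is bad */
                        if (kvm_is_ifetch_fault(&vcpu->arch)) {
                                kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
                                return RESUME_GUEST;
                        }
                        er = kvm_emu_mmio_read(vcpu, inst);
                }
        }

        if (er == EMULATE_DONE)
                return RESUME_GUEST;
        if (er == EMULATE_DO_MMIO) {
                run->exit_reason = KVM_EXIT_MMIO;
                return RESUME_HOST;
        }
        kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
        return RESUME_GUEST;
}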
710 static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode) in kvm_handle_read_fault() argument
712 return kvm_handle_rdwr_fault(vcpu, false, ecode); in kvm_handle_read_fault()
715 static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode) in kvm_handle_write_fault() argument
717 return kvm_handle_rdwr_fault(vcpu, true, ecode); in kvm_handle_write_fault()
720 int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_complete_user_service() argument
722 update_pc(&vcpu->arch); in kvm_complete_user_service()
723 kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret); in kvm_complete_user_service()
736 static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode) in kvm_handle_fpu_disabled() argument
738 struct kvm_run *run = vcpu->run; in kvm_handle_fpu_disabled()
740 if (!kvm_guest_has_fpu(&vcpu->arch)) { in kvm_handle_fpu_disabled()
741 kvm_queue_exception(vcpu, EXCCODE_INE, 0); in kvm_handle_fpu_disabled()
750 if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) { in kvm_handle_fpu_disabled()
756 kvm_own_fpu(vcpu); in kvm_handle_fpu_disabled()
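The FPU-disabled exit either injects INE (the guest was configured without an FPU), flags an internal error when the exit is spurious (the FPU is already owned, so re-entering would loop), or lazily loads the FPU context with kvm_own_fpu(). The KVM_EXIT_INTERNAL_ERROR exit reason on the WARN path is an assumption:

static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
        struct kvm_run *run = vcpu->run;

        if (!kvm_guest_has_fpu(&vcpu->arch)) {
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);
                return RESUME_GUEST;
        }

        /*
         * Spurious exit: the FPU is already loaded, so resuming would
         * fault forever. Surface it instead of looping.
         */
        if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        kvm_own_fpu(vcpu);

        return RESUME_GUEST;
}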
761 static long kvm_save_notify(struct kvm_vcpu *vcpu) in kvm_save_notify() argument
765 id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1); in kvm_save_notify()
766 data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2); in kvm_save_notify()
772 vcpu->arch.st.guest_addr = data; in kvm_save_notify()
776 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_save_notify()
777 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_save_notify()
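kvm_save_notify() is the steal-time registration hypercall: A1 selects the feature, A2 carries the guest physical address of the steal-time area; the address is latched and a KVM_REQ_STEAL_UPDATE queued so the next entry publishes fresh data. The validity-bit handling and the KVM_STEAL_PHYS_* constant names below are assumptions:

static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
        unsigned long id, data;

        id   = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
        data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
        switch (id) {
        case BIT(KVM_FEATURE_STEAL_TIME):
                if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
                        return KVM_HCALL_INVALID_PARAMETER;

                /* Latch where the guest wants its steal-time area */
                vcpu->arch.st.guest_addr = data;
                if (!(data & KVM_STEAL_PHYS_VALID))
                        return 0;

                vcpu->arch.st.last_steal = current->sched_info.run_delay;
                kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
                return 0;
        default:
                return KVM_HCALL_INVALID_CODE;
        }
}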
794 static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) in kvm_handle_lsx_disabled() argument
796 if (kvm_own_lsx(vcpu)) in kvm_handle_lsx_disabled()
797 kvm_queue_exception(vcpu, EXCCODE_INE, 0); in kvm_handle_lsx_disabled()
810 static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) in kvm_handle_lasx_disabled() argument
812 if (kvm_own_lasx(vcpu)) in kvm_handle_lasx_disabled()
813 kvm_queue_exception(vcpu, EXCCODE_INE, 0); in kvm_handle_lasx_disabled()
818 static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode) in kvm_handle_lbt_disabled() argument
820 if (kvm_own_lbt(vcpu)) in kvm_handle_lbt_disabled()
821 kvm_queue_exception(vcpu, EXCCODE_INE, 0); in kvm_handle_lbt_disabled()
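The three vector/binary-translation "unit disabled" handlers are the same function modulo the ownership helper: try to hand the unit to the guest; if this vCPU was not configured with it, kvm_own_*() fails and an INE exception is injected instead. One instance stands for all three:

static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
        /* Lazily give the guest the LSX unit, or fault if it has none */
        if (kvm_own_lsx(vcpu))
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);

        return RESUME_GUEST;
}

/* kvm_handle_lasx_disabled() and kvm_handle_lbt_disabled() are identical,
 * with kvm_own_lasx() and kvm_own_lbt() respectively. */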
826 static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu) in kvm_send_pv_ipi() argument
831 kvm_read_reg(vcpu, LOONGARCH_GPR_A1), in kvm_send_pv_ipi()
832 kvm_read_reg(vcpu, LOONGARCH_GPR_A2) in kvm_send_pv_ipi()
835 min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3); in kvm_send_pv_ipi()
837 dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min); in kvm_send_pv_ipi()
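The PV IPI hypercall packs a 128-bit destination bitmap into A1/A2, with A3 as the base cpuid; each set bit resolves to a vCPU that gets a software interrupt and a kick. A sketch; kvm_queue_irq() and INT_SWI0 are assumptions for the injection step:

static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
        int i, min;
        unsigned int cpu;
        struct kvm_vcpu *dest;
        u64 ipi_bitmap[2] = {
                kvm_read_reg(vcpu, LOONGARCH_GPR_A1),
                kvm_read_reg(vcpu, LOONGARCH_GPR_A2)
        };

        min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
        for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
                /* Bit n of word i targets the vCPU with cpuid min + n */
                for_each_set_bit(cpu, (unsigned long *)&ipi_bitmap[i], BITS_PER_LONG) {
                        dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
                        if (!dest)
                                continue;

                        /* Emulate the IPI as a software interrupt + kick */
                        kvm_queue_irq(dest, INT_SWI0);
                        kvm_vcpu_kick(dest);
                }
        }
}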
850 static void kvm_handle_service(struct kvm_vcpu *vcpu) in kvm_handle_service() argument
853 unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0); in kvm_handle_service()
857 if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) { in kvm_handle_service()
858 kvm_send_pv_ipi(vcpu); in kvm_handle_service()
863 if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_handle_service()
864 ret = kvm_save_notify(vcpu); in kvm_handle_service()
870 kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); in kvm_handle_service()
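kvm_handle_service() dispatches on A0 and writes the status back into A0, refusing any function whose paravirt feature the VM did not negotiate. A sketch; KVM_HCALL_FUNC_IPI, KVM_HCALL_FUNC_NOTIFY and KVM_HCALL_SUCCESS are assumed constant names:

static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
        long ret = KVM_HCALL_INVALID_CODE;
        unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

        switch (func) {
        case KVM_HCALL_FUNC_IPI:
                if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
                        kvm_send_pv_ipi(vcpu);
                        ret = KVM_HCALL_SUCCESS;
                }
                break;
        case KVM_HCALL_FUNC_NOTIFY:
                if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
                        ret = kvm_save_notify(vcpu);
                break;
        default:
                break;
        }

        /* Status (or KVM_HCALL_INVALID_CODE) goes back to the guest in A0 */
        kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}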
873 static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode) in kvm_handle_hypercall() argument
879 inst.word = vcpu->arch.badi; in kvm_handle_hypercall()
885 vcpu->stat.hypercall_exits++; in kvm_handle_hypercall()
886 kvm_handle_service(vcpu); in kvm_handle_hypercall()
889 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) { in kvm_handle_hypercall()
890 kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE); in kvm_handle_hypercall()
894 vcpu->stat.hypercall_exits++; in kvm_handle_hypercall()
895 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in kvm_handle_hypercall()
896 vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE; in kvm_handle_hypercall()
897 vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0); in kvm_handle_hypercall()
898 vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1); in kvm_handle_hypercall()
899 vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2); in kvm_handle_hypercall()
900 vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3); in kvm_handle_hypercall()
901 vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4); in kvm_handle_hypercall()
902 vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5); in kvm_handle_hypercall()
903 vcpu->run->hypercall.flags = 0; in kvm_handle_hypercall()
907 vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE; in kvm_handle_hypercall()
912 if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) { in kvm_handle_hypercall()
913 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in kvm_handle_hypercall()
920 kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE); in kvm_handle_hypercall()
925 update_pc(&vcpu->arch); in kvm_handle_hypercall()
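The hypercall handler switches on the immediate of the trapping instruction: in-kernel services are handled directly, user-service calls package A0..A5 into run->hypercall for a KVM_EXIT_HYPERCALL round trip (completed by kvm_complete_user_service() above), and the software-breakpoint code only becomes a debug exit when the debugger asked for it. The final update_pc() retires the hypercall when resuming the guest; deferred paths advance the PC on completion instead. Skeleton; the code constants and the reg0i15_format field are assumptions:

static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
{
        int ret = RESUME_GUEST;
        larch_inst inst;
        unsigned int code;

        inst.word = vcpu->arch.badi;
        code = inst.reg0i15_format.immediate;

        switch (code) {
        case KVM_HCALL_SERVICE:
                vcpu->stat.hypercall_exits++;
                kvm_handle_service(vcpu);
                break;
        case KVM_HCALL_USER_SERVICE:
                if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
                        kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
                        break;
                }
                /* Package A0..A5 into run->hypercall, exit to userspace */
                vcpu->stat.hypercall_exits++;
                vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
                vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
                ret = RESUME_HOST;
                break;
        case KVM_HCALL_SWDBG:
                /* Only a debug exit if the debugger enabled SW breakpoints */
                if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
                        vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                        ret = RESUME_HOST;
                        break;
                }
                fallthrough;
        default:
                kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
                break;
        }

        /* Retire the hypercall now if we are going straight back in */
        if (ret == RESUME_GUEST)
                update_pc(&vcpu->arch);

        return ret;
}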
933 static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode) in kvm_fault_ni() argument
939 inst = vcpu->arch.badi; in kvm_fault_ni()
940 badv = vcpu->arch.badv; in kvm_fault_ni()
942 ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat()); in kvm_fault_ni()
943 kvm_arch_vcpu_dump_regs(vcpu); in kvm_fault_ni()
944 kvm_queue_exception(vcpu, EXCCODE_INE, 0); in kvm_fault_ni()
963 int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) in kvm_handle_fault() argument
965 return kvm_fault_tables[fault](vcpu, fault); in kvm_handle_fault()
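The exit handlers are reached through a table indexed by exception code, with kvm_fault_ni() as the default for everything KVM does not implement (it dumps registers and injects INE, as the preceding matches show). A sketch of the table; the exact set of wired-up EXCCODE_* entries is an assumption:

static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
        [0 ... EXCCODE_INT_START - 1]   = kvm_fault_ni,
        [EXCCODE_TLBL]                  = kvm_handle_read_fault,
        [EXCCODE_TLBS]                  = kvm_handle_write_fault,
        [EXCCODE_TLBM]                  = kvm_handle_write_fault,
        [EXCCODE_FPDIS]                 = kvm_handle_fpu_disabled,
        [EXCCODE_LSXDIS]                = kvm_handle_lsx_disabled,
        [EXCCODE_LASXDIS]               = kvm_handle_lasx_disabled,
        [EXCCODE_BTDIS]                 = kvm_handle_lbt_disabled,
        [EXCCODE_GSPR]                  = kvm_handle_gspr,
        [EXCCODE_HVC]                   = kvm_handle_hypercall,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
        return kvm_fault_tables[fault](vcpu, fault);
}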