Lines matching refs: vcpu

Each entry below is a cross-reference hit: the source line number, the matching line of code, and the enclosing function, with "argument" or "local" marking how the vcpu symbol is used at its definition.

110 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)  in kvm_riscv_reset_vcpu()  argument
112 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_reset_vcpu()
113 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; in kvm_riscv_reset_vcpu()
114 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_reset_vcpu()
115 struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context; in kvm_riscv_reset_vcpu()
124 loaded = (vcpu->cpu != -1); in kvm_riscv_reset_vcpu()
126 kvm_arch_vcpu_put(vcpu); in kvm_riscv_reset_vcpu()
128 vcpu->arch.last_exit_cpu = -1; in kvm_riscv_reset_vcpu()
134 kvm_riscv_vcpu_fp_reset(vcpu); in kvm_riscv_reset_vcpu()
136 kvm_riscv_vcpu_timer_reset(vcpu); in kvm_riscv_reset_vcpu()
138 WRITE_ONCE(vcpu->arch.irqs_pending, 0); in kvm_riscv_reset_vcpu()
139 WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0); in kvm_riscv_reset_vcpu()
141 kvm_riscv_vcpu_pmu_reset(vcpu); in kvm_riscv_reset_vcpu()
143 vcpu->arch.hfence_head = 0; in kvm_riscv_reset_vcpu()
144 vcpu->arch.hfence_tail = 0; in kvm_riscv_reset_vcpu()
145 memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); in kvm_riscv_reset_vcpu()
149 kvm_arch_vcpu_load(vcpu, smp_processor_id()); in kvm_riscv_reset_vcpu()
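The matches above show a load-guarded reset pattern: if the vCPU is currently loaded on a physical CPU (vcpu->cpu != -1), it is put first, its state is restored from the guest_reset_* copies, and it is then re-loaded on the current CPU. A minimal userspace sketch of that pattern; all types and helpers here are illustrative stand-ins, not kernel APIs:

/* Hedged sketch of the load-guarded reset in kvm_riscv_reset_vcpu(). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ctx { unsigned long sepc; };

struct toy_vcpu {
	int cpu;                 /* -1 when not loaded on any pCPU */
	struct ctx guest, reset; /* live state and its pristine copy */
};

static void toy_put(struct toy_vcpu *v)         { v->cpu = -1; }
static void toy_load(struct toy_vcpu *v, int c) { v->cpu = c; }

static void toy_reset(struct toy_vcpu *v, int this_cpu)
{
	bool loaded = (v->cpu != -1);

	if (loaded)                 /* detach before touching live state */
		toy_put(v);
	memcpy(&v->guest, &v->reset, sizeof(v->guest));
	if (loaded)                 /* reattach with the fresh state */
		toy_load(v, this_cpu);
}

int main(void)
{
	struct toy_vcpu v = { .cpu = 0, .reset = { .sepc = 0x1000 } };

	toy_reset(&v, 0);
	printf("sepc after reset: %#lx\n", v.guest.sepc);
	return 0;
}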
158 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
161 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; in kvm_arch_vcpu_create()
165 vcpu->arch.ran_atleast_once = false; in kvm_arch_vcpu_create()
166 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
167 bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX); in kvm_arch_vcpu_create()
174 set_bit(host_isa, vcpu->arch.isa); in kvm_arch_vcpu_create()
178 vcpu->arch.mvendorid = sbi_get_mvendorid(); in kvm_arch_vcpu_create()
179 vcpu->arch.marchid = sbi_get_marchid(); in kvm_arch_vcpu_create()
180 vcpu->arch.mimpid = sbi_get_mimpid(); in kvm_arch_vcpu_create()
183 spin_lock_init(&vcpu->arch.hfence_lock); in kvm_arch_vcpu_create()
186 cntx = &vcpu->arch.guest_reset_context; in kvm_arch_vcpu_create()
197 kvm_riscv_vcpu_timer_init(vcpu); in kvm_arch_vcpu_create()
200 kvm_riscv_vcpu_pmu_init(vcpu); in kvm_arch_vcpu_create()
203 kvm_riscv_reset_vcpu(vcpu); in kvm_arch_vcpu_create()
208 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
215 if (vcpu->vcpu_idx != 0) in kvm_arch_vcpu_postcreate()
216 kvm_riscv_vcpu_power_off(vcpu); in kvm_arch_vcpu_postcreate()
219 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
222 kvm_riscv_vcpu_timer_deinit(vcpu); in kvm_arch_vcpu_destroy()
224 kvm_riscv_vcpu_pmu_deinit(vcpu); in kvm_arch_vcpu_destroy()
227 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
230 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
232 return kvm_riscv_vcpu_timer_pending(vcpu); in kvm_cpu_has_pending_timer()
235 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_blocking() argument
239 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_unblocking() argument
243 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
245 return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) && in kvm_arch_vcpu_runnable()
246 !vcpu->arch.power_off && !vcpu->arch.pause); in kvm_arch_vcpu_runnable()
249 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
251 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
254 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
256 return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false; in kvm_arch_vcpu_in_kernel()
259 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
264 static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_get_reg_config() argument
279 reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK; in kvm_riscv_vcpu_get_reg_config()
282 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) in kvm_riscv_vcpu_get_reg_config()
287 reg_val = vcpu->arch.mvendorid; in kvm_riscv_vcpu_get_reg_config()
290 reg_val = vcpu->arch.marchid; in kvm_riscv_vcpu_get_reg_config()
293 reg_val = vcpu->arch.mimpid; in kvm_riscv_vcpu_get_reg_config()
305 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_set_reg_config() argument
330 if (!vcpu->arch.ran_atleast_once) { in kvm_riscv_vcpu_set_reg_config()
347 reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) | in kvm_riscv_vcpu_set_reg_config()
349 vcpu->arch.isa[0] = reg_val; in kvm_riscv_vcpu_set_reg_config()
350 kvm_riscv_vcpu_fp_reset(vcpu); in kvm_riscv_vcpu_set_reg_config()
358 if (!vcpu->arch.ran_atleast_once) in kvm_riscv_vcpu_set_reg_config()
359 vcpu->arch.mvendorid = reg_val; in kvm_riscv_vcpu_set_reg_config()
364 if (!vcpu->arch.ran_atleast_once) in kvm_riscv_vcpu_set_reg_config()
365 vcpu->arch.marchid = reg_val; in kvm_riscv_vcpu_set_reg_config()
370 if (!vcpu->arch.ran_atleast_once) in kvm_riscv_vcpu_set_reg_config()
371 vcpu->arch.mimpid = reg_val; in kvm_riscv_vcpu_set_reg_config()
382 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_get_reg_core() argument
385 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_get_reg_core()
415 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_set_reg_core() argument
418 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_set_reg_core()
450 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_get_reg_csr() argument
453 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_get_reg_csr()
467 kvm_riscv_vcpu_flush_interrupts(vcpu); in kvm_riscv_vcpu_get_reg_csr()
478 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_set_reg_csr() argument
481 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_set_reg_csr()
505 WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0); in kvm_riscv_vcpu_set_reg_csr()
510 static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_get_reg_isa_ext() argument
529 if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext)) in kvm_riscv_vcpu_get_reg_isa_ext()
538 static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_set_reg_isa_ext() argument
563 if (!vcpu->arch.ran_atleast_once) { in kvm_riscv_vcpu_set_reg_isa_ext()
570 set_bit(host_isa_ext, vcpu->arch.isa); in kvm_riscv_vcpu_set_reg_isa_ext()
573 clear_bit(host_isa_ext, vcpu->arch.isa); in kvm_riscv_vcpu_set_reg_isa_ext()
576 kvm_riscv_vcpu_fp_reset(vcpu); in kvm_riscv_vcpu_set_reg_isa_ext()
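kvm_riscv_vcpu_set_reg_isa_ext() applies the same first-run gate and then toggles the extension's bit in the vcpu->arch.isa bitmap with set_bit()/clear_bit(), followed by an FP reset. A self-contained sketch with simple stand-ins for the kernel bitmap helpers; the extension index is hypothetical:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_EXT_MAX   64
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static void toy_set_bit(unsigned int n, unsigned long *map)
{ map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG); }

static void toy_clear_bit(unsigned int n, unsigned long *map)
{ map[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG)); }

static bool toy_test_bit(unsigned int n, const unsigned long *map)
{ return map[n / BITS_PER_LONG] & (1UL << (n % BITS_PER_LONG)); }

int main(void)
{
	unsigned long isa[TOY_EXT_MAX / BITS_PER_LONG] = { 0 };
	enum { TOY_EXT_F = 5 };          /* hypothetical extension index */

	toy_set_bit(TOY_EXT_F, isa);     /* userspace enables it...      */
	printf("F enabled: %d\n", toy_test_bit(TOY_EXT_F, isa));
	toy_clear_bit(TOY_EXT_F, isa);   /* ...or disables it again      */
	printf("F enabled: %d\n", toy_test_bit(TOY_EXT_F, isa));
	return 0;
}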
584 static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_set_reg() argument
589 return kvm_riscv_vcpu_set_reg_config(vcpu, reg); in kvm_riscv_vcpu_set_reg()
591 return kvm_riscv_vcpu_set_reg_core(vcpu, reg); in kvm_riscv_vcpu_set_reg()
593 return kvm_riscv_vcpu_set_reg_csr(vcpu, reg); in kvm_riscv_vcpu_set_reg()
595 return kvm_riscv_vcpu_set_reg_timer(vcpu, reg); in kvm_riscv_vcpu_set_reg()
597 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, in kvm_riscv_vcpu_set_reg()
600 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, in kvm_riscv_vcpu_set_reg()
603 return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg); in kvm_riscv_vcpu_set_reg()
611 static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_get_reg() argument
616 return kvm_riscv_vcpu_get_reg_config(vcpu, reg); in kvm_riscv_vcpu_get_reg()
618 return kvm_riscv_vcpu_get_reg_core(vcpu, reg); in kvm_riscv_vcpu_get_reg()
620 return kvm_riscv_vcpu_get_reg_csr(vcpu, reg); in kvm_riscv_vcpu_get_reg()
622 return kvm_riscv_vcpu_get_reg_timer(vcpu, reg); in kvm_riscv_vcpu_get_reg()
624 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, in kvm_riscv_vcpu_get_reg()
627 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, in kvm_riscv_vcpu_get_reg()
630 return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg); in kvm_riscv_vcpu_get_reg()
641 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
651 return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT); in kvm_arch_vcpu_async_ioctl()
653 return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); in kvm_arch_vcpu_async_ioctl()
662 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
676 r = kvm_riscv_vcpu_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
678 r = kvm_riscv_vcpu_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
688 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
694 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
700 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
705 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
710 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
716 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
721 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
726 void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_flush_interrupts() argument
728 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_flush_interrupts()
731 if (READ_ONCE(vcpu->arch.irqs_pending_mask)) { in kvm_riscv_vcpu_flush_interrupts()
732 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0); in kvm_riscv_vcpu_flush_interrupts()
733 val = READ_ONCE(vcpu->arch.irqs_pending) & mask; in kvm_riscv_vcpu_flush_interrupts()
740 void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_sync_interrupts() argument
743 struct kvm_vcpu_arch *v = &vcpu->arch; in kvm_riscv_vcpu_sync_interrupts()
744 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_sync_interrupts()
764 kvm_riscv_vcpu_timer_sync(vcpu); in kvm_riscv_vcpu_sync_interrupts()
767 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq) in kvm_riscv_vcpu_set_interrupt() argument
774 set_bit(irq, &vcpu->arch.irqs_pending); in kvm_riscv_vcpu_set_interrupt()
776 set_bit(irq, &vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_set_interrupt()
778 kvm_vcpu_kick(vcpu); in kvm_riscv_vcpu_set_interrupt()
783 int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq) in kvm_riscv_vcpu_unset_interrupt() argument
790 clear_bit(irq, &vcpu->arch.irqs_pending); in kvm_riscv_vcpu_unset_interrupt()
792 set_bit(irq, &vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_unset_interrupt()
797 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask) in kvm_riscv_vcpu_has_interrupts() argument
799 unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK) in kvm_riscv_vcpu_has_interrupts()
802 return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false; in kvm_riscv_vcpu_has_interrupts()
805 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_power_off() argument
807 vcpu->arch.power_off = true; in kvm_riscv_vcpu_power_off()
808 kvm_make_request(KVM_REQ_SLEEP, vcpu); in kvm_riscv_vcpu_power_off()
809 kvm_vcpu_kick(vcpu); in kvm_riscv_vcpu_power_off()
812 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_power_on() argument
814 vcpu->arch.power_off = false; in kvm_riscv_vcpu_power_on()
815 kvm_vcpu_wake_up(vcpu); in kvm_riscv_vcpu_power_on()
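power_off sets the flag, queues KVM_REQ_SLEEP, and kicks the vCPU; power_on clears the flag and wakes it. A loose userspace analogy using pthreads in place of KVM's request bits and rcuwait; this mirrors the structure only, not the kernel mechanism:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool power_off;

static void toy_power_off(void)
{
	pthread_mutex_lock(&lock);
	power_off = true;           /* ~ KVM_REQ_SLEEP + kick */
	pthread_mutex_unlock(&lock);
}

static void toy_power_on(void)
{
	pthread_mutex_lock(&lock);
	power_off = false;
	pthread_cond_signal(&wake); /* ~ kvm_vcpu_wake_up() */
	pthread_mutex_unlock(&lock);
}

static void toy_vcpu_wait_until_runnable(void)
{
	pthread_mutex_lock(&lock);
	while (power_off)           /* ~ rcuwait_wait_event() */
		pthread_cond_wait(&wake, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	toy_power_off();
	toy_power_on();
	toy_vcpu_wait_until_runnable(); /* returns immediately now */
	return 0;
}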
818 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
821 if (vcpu->arch.power_off) in kvm_arch_vcpu_ioctl_get_mpstate()
829 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
836 vcpu->arch.power_off = false; in kvm_arch_vcpu_ioctl_set_mpstate()
839 kvm_riscv_vcpu_power_off(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
848 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
874 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
876 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_load()
888 kvm_riscv_vcpu_update_config(vcpu->arch.isa); in kvm_arch_vcpu_load()
890 kvm_riscv_gstage_update_hgatp(vcpu); in kvm_arch_vcpu_load()
892 kvm_riscv_vcpu_timer_restore(vcpu); in kvm_arch_vcpu_load()
894 kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
895 kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
896 vcpu->arch.isa); in kvm_arch_vcpu_load()
898 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
901 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
903 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_put()
905 vcpu->cpu = -1; in kvm_arch_vcpu_put()
907 kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
908 vcpu->arch.isa); in kvm_arch_vcpu_put()
909 kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
911 kvm_riscv_vcpu_timer_save(vcpu); in kvm_arch_vcpu_put()
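The load/put matches are symmetric: load saves the host FP state and installs the guest's, then records the pCPU in vcpu->cpu; put marks the vCPU unloaded and reverses the swap. An illustrative sketch where memcpy stands in for the FP save/restore helpers:

#include <string.h>

struct fpstate { unsigned long f[32]; };
static struct fpstate hw, host_ctx, guest_ctx;   /* illustrative */

static void toy_vcpu_load(int *vcpu_cpu, int cpu)
{
	memcpy(&host_ctx, &hw, sizeof(hw));  /* host FP -> save area  */
	memcpy(&hw, &guest_ctx, sizeof(hw)); /* guest FP -> registers */
	*vcpu_cpu = cpu;
}

static void toy_vcpu_put(int *vcpu_cpu)
{
	*vcpu_cpu = -1;
	memcpy(&guest_ctx, &hw, sizeof(hw)); /* guest FP -> save area */
	memcpy(&hw, &host_ctx, sizeof(hw));  /* host FP -> registers  */
}

int main(void)
{
	int cpu_field = -1;

	toy_vcpu_load(&cpu_field, 0);
	toy_vcpu_put(&cpu_field);
	return cpu_field == -1 ? 0 : 1;
}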
924 static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) in kvm_riscv_check_vcpu_requests() argument
926 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); in kvm_riscv_check_vcpu_requests()
928 if (kvm_request_pending(vcpu)) { in kvm_riscv_check_vcpu_requests()
929 if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) { in kvm_riscv_check_vcpu_requests()
930 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_riscv_check_vcpu_requests()
932 (!vcpu->arch.power_off) && (!vcpu->arch.pause), in kvm_riscv_check_vcpu_requests()
934 kvm_vcpu_srcu_read_lock(vcpu); in kvm_riscv_check_vcpu_requests()
936 if (vcpu->arch.power_off || vcpu->arch.pause) { in kvm_riscv_check_vcpu_requests()
941 kvm_make_request(KVM_REQ_SLEEP, vcpu); in kvm_riscv_check_vcpu_requests()
945 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) in kvm_riscv_check_vcpu_requests()
946 kvm_riscv_reset_vcpu(vcpu); in kvm_riscv_check_vcpu_requests()
948 if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu)) in kvm_riscv_check_vcpu_requests()
949 kvm_riscv_gstage_update_hgatp(vcpu); in kvm_riscv_check_vcpu_requests()
951 if (kvm_check_request(KVM_REQ_FENCE_I, vcpu)) in kvm_riscv_check_vcpu_requests()
952 kvm_riscv_fence_i_process(vcpu); in kvm_riscv_check_vcpu_requests()
958 if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu)) in kvm_riscv_check_vcpu_requests()
959 kvm_riscv_hfence_gvma_vmid_all_process(vcpu); in kvm_riscv_check_vcpu_requests()
961 if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu)) in kvm_riscv_check_vcpu_requests()
962 kvm_riscv_hfence_vvma_all_process(vcpu); in kvm_riscv_check_vcpu_requests()
964 if (kvm_check_request(KVM_REQ_HFENCE, vcpu)) in kvm_riscv_check_vcpu_requests()
965 kvm_riscv_hfence_process(vcpu); in kvm_riscv_check_vcpu_requests()
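kvm_riscv_check_vcpu_requests() is a test-and-clear dispatch: each kvm_check_request() consumes one request bit per run-loop iteration, and KVM_REQ_SLEEP is re-queued if the vCPU wakes up still powered off or paused. A compact sketch of the dispatch shape; the bit values and handler bodies are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { TOY_REQ_SLEEP, TOY_REQ_RESET, TOY_REQ_FENCE_I };

static _Atomic unsigned long requests;

/* Atomically test-and-clear one request bit, like kvm_check_request(). */
static bool toy_check_request(int req)
{
	unsigned long bit = 1UL << req;

	return atomic_fetch_and(&requests, ~bit) & bit;
}

static void toy_handle_requests(void)
{
	if (!atomic_load(&requests))
		return;
	if (toy_check_request(TOY_REQ_SLEEP))
		puts("sleep: block until runnable again");
	if (toy_check_request(TOY_REQ_RESET))
		puts("reset: reload guest state from reset copies");
	if (toy_check_request(TOY_REQ_FENCE_I))
		puts("fence.i: local icache synchronization");
}

int main(void)
{
	atomic_fetch_or(&requests, 1UL << TOY_REQ_RESET);
	toy_handle_requests();
	return 0;
}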
969 static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu) in kvm_riscv_update_hvip() argument
971 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_update_hvip()
983 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_enter_exit() argument
986 __kvm_riscv_switch_to(&vcpu->arch); in kvm_riscv_vcpu_enter_exit()
987 vcpu->arch.last_exit_cpu = vcpu->cpu; in kvm_riscv_vcpu_enter_exit()
991 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
995 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
998 vcpu->arch.ran_atleast_once = true; in kvm_arch_vcpu_ioctl_run()
1000 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
1005 ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
1009 ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
1013 ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
1020 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
1025 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
1029 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1031 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1037 ret = xfer_to_guest_mode_handle_work(vcpu); in kvm_arch_vcpu_ioctl_run()
1042 kvm_riscv_gstage_vmid_update(vcpu); in kvm_arch_vcpu_ioctl_run()
1044 kvm_riscv_check_vcpu_requests(vcpu); in kvm_arch_vcpu_ioctl_run()
1054 vcpu->mode = IN_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1056 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
1063 kvm_riscv_vcpu_flush_interrupts(vcpu); in kvm_arch_vcpu_ioctl_run()
1066 kvm_riscv_update_hvip(vcpu); in kvm_arch_vcpu_ioctl_run()
1069 kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || in kvm_arch_vcpu_ioctl_run()
1070 kvm_request_pending(vcpu) || in kvm_arch_vcpu_ioctl_run()
1072 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1074 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
1084 kvm_riscv_local_tlb_sanitize(vcpu); in kvm_arch_vcpu_ioctl_run()
1088 kvm_riscv_vcpu_enter_exit(vcpu); in kvm_arch_vcpu_ioctl_run()
1090 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1091 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
1098 trap.sepc = vcpu->arch.guest_context.sepc; in kvm_arch_vcpu_ioctl_run()
1105 kvm_riscv_vcpu_sync_interrupts(vcpu); in kvm_arch_vcpu_ioctl_run()
1128 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
1130 ret = kvm_riscv_vcpu_exit(vcpu, run, &trap); in kvm_arch_vcpu_ioctl_run()
1133 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1135 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
1137 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
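The run-loop matches encode a careful ordering: vcpu->mode is set to IN_GUEST_MODE before the final re-check of pending work (VMID version change, pending requests, signals), so a concurrent kvm_vcpu_kick() either observes IN_GUEST_MODE and IPIs the pCPU, or its request is caught by the re-check and guest entry is aborted. A sketch of that publish-then-recheck shape using C11 atomics; names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

enum mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

static _Atomic enum mode vcpu_mode;
static _Atomic unsigned long requests;

static bool toy_try_enter_guest(void)
{
	atomic_store_explicit(&vcpu_mode, IN_GUEST_MODE,
			      memory_order_seq_cst);      /* publish first */

	if (atomic_load(&requests)) {                     /* then re-check */
		atomic_store(&vcpu_mode, OUTSIDE_GUEST_MODE);
		return false;                             /* retry the loop */
	}
	/* __kvm_riscv_switch_to() would run here with IRQs disabled. */
	atomic_store(&vcpu_mode, OUTSIDE_GUEST_MODE);
	return true;
}

int main(void)
{
	atomic_fetch_or(&requests, 1UL);
	return toy_try_enter_guest() ? 1 : 0; /* pending work aborts entry */
}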