Lines Matching refs: vcpu

55 static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,  in kvm_riscv_vcpu_context_reset()  argument
58 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_context_reset()
59 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_context_reset()
64 memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr)); in kvm_riscv_vcpu_context_reset()
70 kvm_riscv_vcpu_sbi_load_reset_state(vcpu); in kvm_riscv_vcpu_context_reset()
80 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset) in kvm_riscv_reset_vcpu() argument
90 loaded = (vcpu->cpu != -1); in kvm_riscv_reset_vcpu()
92 kvm_arch_vcpu_put(vcpu); in kvm_riscv_reset_vcpu()
94 vcpu->arch.last_exit_cpu = -1; in kvm_riscv_reset_vcpu()
96 kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset); in kvm_riscv_reset_vcpu()
98 kvm_riscv_vcpu_fp_reset(vcpu); in kvm_riscv_reset_vcpu()
100 kvm_riscv_vcpu_vector_reset(vcpu); in kvm_riscv_reset_vcpu()
102 kvm_riscv_vcpu_timer_reset(vcpu); in kvm_riscv_reset_vcpu()
104 kvm_riscv_vcpu_aia_reset(vcpu); in kvm_riscv_reset_vcpu()
106 bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
107 bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
109 kvm_riscv_vcpu_pmu_reset(vcpu); in kvm_riscv_reset_vcpu()
111 vcpu->arch.hfence_head = 0; in kvm_riscv_reset_vcpu()
112 vcpu->arch.hfence_tail = 0; in kvm_riscv_reset_vcpu()
113 memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); in kvm_riscv_reset_vcpu()
115 kvm_riscv_vcpu_sbi_reset(vcpu); in kvm_riscv_reset_vcpu()
119 kvm_arch_vcpu_load(vcpu, smp_processor_id()); in kvm_riscv_reset_vcpu()
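
The kvm_riscv_reset_vcpu() lines above follow a put/reset/re-load shape: if the vCPU's state is currently loaded on a hardware CPU it is put first, each per-vCPU subsystem (context, FP, vector, timer, AIA, pending IRQs, hfence queue, SBI) is reset, and the state is loaded back afterwards. Below is a minimal standalone sketch of that ordering only; the toy_* names are illustrative and none of this is the kernel's API.

/* Sketch of the "put if loaded, reset, then re-load" shape of
 * kvm_riscv_reset_vcpu(); the stubs model the ordering, not the real
 * register state handling. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	int cpu;                 /* -1 when the vCPU's state is not in hardware */
	int last_exit_cpu;
	unsigned long pending_irqs;
};

static void toy_put(struct toy_vcpu *v)            { v->cpu = -1; }
static void toy_load(struct toy_vcpu *v, int cpu)  { v->cpu = cpu; }

static void toy_reset(struct toy_vcpu *v, int this_cpu)
{
	bool loaded = (v->cpu != -1);

	if (loaded)
		toy_put(v);            /* flush live hardware state first */

	v->last_exit_cpu = -1;         /* force TLB sanitisation on next run */
	v->pending_irqs = 0;           /* stands in for the per-subsystem resets */

	if (loaded)
		toy_load(v, this_cpu); /* bring the fresh state back into hardware */
}

int main(void)
{
	struct toy_vcpu v = { .cpu = 0, .pending_irqs = 0xff };

	toy_reset(&v, 0);
	printf("cpu=%d last_exit=%d pending=%#lx\n",
	       v.cpu, v.last_exit_cpu, v.pending_irqs);
	return 0;
}
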
128 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
132 spin_lock_init(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_create()
135 vcpu->arch.ran_atleast_once = false; in kvm_arch_vcpu_create()
136 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
137 bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX); in kvm_arch_vcpu_create()
140 kvm_riscv_vcpu_setup_isa(vcpu); in kvm_arch_vcpu_create()
143 vcpu->arch.mvendorid = sbi_get_mvendorid(); in kvm_arch_vcpu_create()
144 vcpu->arch.marchid = sbi_get_marchid(); in kvm_arch_vcpu_create()
145 vcpu->arch.mimpid = sbi_get_mimpid(); in kvm_arch_vcpu_create()
148 spin_lock_init(&vcpu->arch.hfence_lock); in kvm_arch_vcpu_create()
150 spin_lock_init(&vcpu->arch.reset_state.lock); in kvm_arch_vcpu_create()
152 rc = kvm_riscv_vcpu_alloc_vector_context(vcpu); in kvm_arch_vcpu_create()
157 kvm_riscv_vcpu_timer_init(vcpu); in kvm_arch_vcpu_create()
160 kvm_riscv_vcpu_pmu_init(vcpu); in kvm_arch_vcpu_create()
163 kvm_riscv_vcpu_aia_init(vcpu); in kvm_arch_vcpu_create()
169 kvm_riscv_vcpu_sbi_init(vcpu); in kvm_arch_vcpu_create()
172 kvm_riscv_reset_vcpu(vcpu, false); in kvm_arch_vcpu_create()
177 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
184 if (vcpu->vcpu_idx != 0) in kvm_arch_vcpu_postcreate()
185 kvm_riscv_vcpu_power_off(vcpu); in kvm_arch_vcpu_postcreate()
188 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
190 kvm_riscv_vcpu_sbi_deinit(vcpu); in kvm_arch_vcpu_destroy()
193 kvm_riscv_vcpu_aia_deinit(vcpu); in kvm_arch_vcpu_destroy()
196 kvm_riscv_vcpu_timer_deinit(vcpu); in kvm_arch_vcpu_destroy()
198 kvm_riscv_vcpu_pmu_deinit(vcpu); in kvm_arch_vcpu_destroy()
201 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
204 kvm_riscv_vcpu_free_vector_context(vcpu); in kvm_arch_vcpu_destroy()
207 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
209 return kvm_riscv_vcpu_timer_pending(vcpu); in kvm_cpu_has_pending_timer()
212 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
214 return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) && in kvm_arch_vcpu_runnable()
215 !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause); in kvm_arch_vcpu_runnable()
218 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
220 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
223 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
225 return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false; in kvm_arch_vcpu_in_kernel()
229 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_get_ip() argument
231 return vcpu->arch.guest_context.sepc; in kvm_arch_vcpu_get_ip()
235 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
243 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
253 return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT); in kvm_arch_vcpu_async_ioctl()
255 return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); in kvm_arch_vcpu_async_ioctl()
264 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
278 r = kvm_riscv_vcpu_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
280 r = kvm_riscv_vcpu_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
292 reg_list.n = kvm_riscv_vcpu_num_regs(vcpu); in kvm_arch_vcpu_ioctl()
298 r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
308 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
314 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
320 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
325 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
330 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
336 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
341 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
346 void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_flush_interrupts() argument
348 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_flush_interrupts()
351 if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) { in kvm_riscv_vcpu_flush_interrupts()
352 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0); in kvm_riscv_vcpu_flush_interrupts()
353 val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask; in kvm_riscv_vcpu_flush_interrupts()
360 kvm_riscv_vcpu_aia_flush_interrupts(vcpu); in kvm_riscv_vcpu_flush_interrupts()
363 void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_sync_interrupts() argument
366 struct kvm_vcpu_arch *v = &vcpu->arch; in kvm_riscv_vcpu_sync_interrupts()
367 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_sync_interrupts()
394 kvm_riscv_vcpu_aia_sync_interrupts(vcpu); in kvm_riscv_vcpu_sync_interrupts()
397 kvm_riscv_vcpu_timer_sync(vcpu); in kvm_riscv_vcpu_sync_interrupts()
400 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq) in kvm_riscv_vcpu_set_interrupt() argument
414 set_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_set_interrupt()
416 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_set_interrupt()
418 kvm_vcpu_kick(vcpu); in kvm_riscv_vcpu_set_interrupt()
423 int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq) in kvm_riscv_vcpu_unset_interrupt() argument
437 clear_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_unset_interrupt()
439 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_unset_interrupt()
444 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask) in kvm_riscv_vcpu_has_interrupts() argument
448 ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK) in kvm_riscv_vcpu_has_interrupts()
450 ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK & in kvm_riscv_vcpu_has_interrupts()
452 if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie) in kvm_riscv_vcpu_has_interrupts()
456 return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask); in kvm_riscv_vcpu_has_interrupts()
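
The set/unset/flush lines above implement a two-bitmap handshake: producers update irqs_pending and always mark the changed bit in irqs_pending_mask, and the flush path atomically takes the mask (xchg_acquire) and folds only those bits into the hardware-visible hvip image. Here is a minimal C11 model of that handshake, with plain atomics standing in for the kernel's bitmap helpers and CSR state; the function names are illustrative, not the kernel API.

/* Illustrative model of the irqs_pending/irqs_pending_mask handshake. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t irqs_pending;      /* desired pending state, one bit per IRQ */
static _Atomic uint64_t irqs_pending_mask; /* which bits changed since the last flush */

static void set_interrupt(unsigned int irq)
{
	atomic_fetch_or(&irqs_pending, 1ULL << irq);
	atomic_fetch_or(&irqs_pending_mask, 1ULL << irq);
	/* the real code then kicks the vCPU so it notices the change */
}

static void unset_interrupt(unsigned int irq)
{
	atomic_fetch_and(&irqs_pending, ~(1ULL << irq));
	atomic_fetch_or(&irqs_pending_mask, 1ULL << irq);
}

/* flush: consume the changed bits and fold them into a hardware view (hvip). */
static void flush_interrupts(uint64_t *hvip)
{
	uint64_t mask = atomic_exchange(&irqs_pending_mask, 0);
	uint64_t val = atomic_load(&irqs_pending) & mask;

	*hvip &= ~mask;   /* clear everything that changed ...            */
	*hvip |= val;     /* ... then re-set the bits still pending       */
}

int main(void)
{
	uint64_t hvip = 0;

	set_interrupt(2);
	flush_interrupts(&hvip);
	printf("hvip after flush: %#llx\n", (unsigned long long)hvip);
	return 0;
}
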
459 void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu) in __kvm_riscv_vcpu_power_off() argument
461 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); in __kvm_riscv_vcpu_power_off()
462 kvm_make_request(KVM_REQ_SLEEP, vcpu); in __kvm_riscv_vcpu_power_off()
463 kvm_vcpu_kick(vcpu); in __kvm_riscv_vcpu_power_off()
466 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_power_off() argument
468 spin_lock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_off()
469 __kvm_riscv_vcpu_power_off(vcpu); in kvm_riscv_vcpu_power_off()
470 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_off()
473 void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu) in __kvm_riscv_vcpu_power_on() argument
475 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); in __kvm_riscv_vcpu_power_on()
476 kvm_vcpu_wake_up(vcpu); in __kvm_riscv_vcpu_power_on()
479 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_power_on() argument
481 spin_lock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_on()
482 __kvm_riscv_vcpu_power_on(vcpu); in kvm_riscv_vcpu_power_on()
483 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_on()
486 bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_stopped() argument
488 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; in kvm_riscv_vcpu_stopped()
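
The power-off/power-on/stopped lines above use a simple discipline: state transitions are serialized by mp_state_lock and written with WRITE_ONCE, while readers such as kvm_riscv_vcpu_stopped() only need a READ_ONCE. Below is a small userspace model of that split, assuming a pthread mutex and a C11 atomic can play the roles of the spinlock and the ONCE accessors; the request/kick/wake side effects are only noted in comments.

/* Toy model of the mp_state handling: writers serialize on a lock,
 * readers only need an atomic load. Not the KVM API. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum mp_state { MP_RUNNABLE, MP_STOPPED };

static pthread_mutex_t mp_state_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int mp_state = MP_RUNNABLE;

static void vcpu_power_off(void)
{
	pthread_mutex_lock(&mp_state_lock);
	atomic_store(&mp_state, MP_STOPPED);
	/* the real code also makes a KVM_REQ_SLEEP request and kicks the vCPU */
	pthread_mutex_unlock(&mp_state_lock);
}

static void vcpu_power_on(void)
{
	pthread_mutex_lock(&mp_state_lock);
	atomic_store(&mp_state, MP_RUNNABLE);
	/* the real code wakes the vCPU up here */
	pthread_mutex_unlock(&mp_state_lock);
}

static bool vcpu_stopped(void)
{
	return atomic_load(&mp_state) == MP_STOPPED; /* lock-free reader */
}

int main(void)
{
	vcpu_power_off();
	printf("stopped: %d\n", vcpu_stopped());
	vcpu_power_on();
	printf("stopped: %d\n", vcpu_stopped());
	return 0;
}
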
491 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
494 *mp_state = READ_ONCE(vcpu->arch.mp_state); in kvm_arch_vcpu_ioctl_get_mpstate()
499 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
504 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
508 WRITE_ONCE(vcpu->arch.mp_state, *mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
511 __kvm_riscv_vcpu_power_off(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
514 if (vcpu->kvm->arch.mp_state_reset) in kvm_arch_vcpu_ioctl_set_mpstate()
515 kvm_riscv_reset_vcpu(vcpu, false); in kvm_arch_vcpu_ioctl_set_mpstate()
523 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
528 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
532 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
533 vcpu->arch.cfg.hedeleg &= ~BIT(EXC_BREAKPOINT); in kvm_arch_vcpu_ioctl_set_guest_debug()
535 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
536 vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT); in kvm_arch_vcpu_ioctl_set_guest_debug()
542 static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_setup_config() argument
544 const unsigned long *isa = vcpu->arch.isa; in kvm_riscv_vcpu_setup_config()
545 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_setup_config()
574 if (vcpu->guest_debug) in kvm_riscv_vcpu_setup_config()
578 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
581 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_load()
582 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_arch_vcpu_load()
625 kvm_riscv_mmu_update_hgatp(vcpu); in kvm_arch_vcpu_load()
627 kvm_riscv_vcpu_timer_restore(vcpu); in kvm_arch_vcpu_load()
629 kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
630 kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
631 vcpu->arch.isa); in kvm_arch_vcpu_load()
632 kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
633 kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
634 vcpu->arch.isa); in kvm_arch_vcpu_load()
636 kvm_riscv_vcpu_aia_load(vcpu, cpu); in kvm_arch_vcpu_load()
638 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_arch_vcpu_load()
640 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
643 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
646 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_put()
648 vcpu->cpu = -1; in kvm_arch_vcpu_put()
650 kvm_riscv_vcpu_aia_put(vcpu); in kvm_arch_vcpu_put()
652 kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
653 vcpu->arch.isa); in kvm_arch_vcpu_put()
654 kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
656 kvm_riscv_vcpu_timer_save(vcpu); in kvm_arch_vcpu_put()
657 kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
658 vcpu->arch.isa); in kvm_arch_vcpu_put()
659 kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
692 static int kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) in kvm_riscv_check_vcpu_requests() argument
694 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); in kvm_riscv_check_vcpu_requests()
696 if (kvm_request_pending(vcpu)) { in kvm_riscv_check_vcpu_requests()
697 if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) { in kvm_riscv_check_vcpu_requests()
698 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_riscv_check_vcpu_requests()
700 (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), in kvm_riscv_check_vcpu_requests()
702 kvm_vcpu_srcu_read_lock(vcpu); in kvm_riscv_check_vcpu_requests()
704 if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) { in kvm_riscv_check_vcpu_requests()
709 kvm_make_request(KVM_REQ_SLEEP, vcpu); in kvm_riscv_check_vcpu_requests()
713 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) in kvm_riscv_check_vcpu_requests()
714 kvm_riscv_reset_vcpu(vcpu, true); in kvm_riscv_check_vcpu_requests()
716 if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu)) in kvm_riscv_check_vcpu_requests()
717 kvm_riscv_mmu_update_hgatp(vcpu); in kvm_riscv_check_vcpu_requests()
719 if (kvm_check_request(KVM_REQ_FENCE_I, vcpu)) in kvm_riscv_check_vcpu_requests()
720 kvm_riscv_fence_i_process(vcpu); in kvm_riscv_check_vcpu_requests()
722 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in kvm_riscv_check_vcpu_requests()
723 kvm_riscv_tlb_flush_process(vcpu); in kvm_riscv_check_vcpu_requests()
725 if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu)) in kvm_riscv_check_vcpu_requests()
726 kvm_riscv_hfence_vvma_all_process(vcpu); in kvm_riscv_check_vcpu_requests()
728 if (kvm_check_request(KVM_REQ_HFENCE, vcpu)) in kvm_riscv_check_vcpu_requests()
729 kvm_riscv_hfence_process(vcpu); in kvm_riscv_check_vcpu_requests()
731 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in kvm_riscv_check_vcpu_requests()
732 kvm_riscv_vcpu_record_steal_time(vcpu); in kvm_riscv_check_vcpu_requests()
734 if (kvm_dirty_ring_check_request(vcpu)) in kvm_riscv_check_vcpu_requests()
741 static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu) in kvm_riscv_update_hvip() argument
743 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_update_hvip()
746 kvm_riscv_vcpu_aia_update_hvip(vcpu); in kvm_riscv_update_hvip()
749 static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_swap_in_guest_state() argument
751 struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr; in kvm_riscv_vcpu_swap_in_guest_state()
752 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_swap_in_guest_state()
753 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_swap_in_guest_state()
755 vcpu->arch.host_scounteren = csr_swap(CSR_SCOUNTEREN, csr->scounteren); in kvm_riscv_vcpu_swap_in_guest_state()
756 vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg); in kvm_riscv_vcpu_swap_in_guest_state()
759 vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0, in kvm_riscv_vcpu_swap_in_guest_state()
763 static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu) in kvm_riscv_vcpu_swap_in_host_state() argument
765 struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr; in kvm_riscv_vcpu_swap_in_host_state()
766 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_swap_in_host_state()
767 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_swap_in_host_state()
769 csr->scounteren = csr_swap(CSR_SCOUNTEREN, vcpu->arch.host_scounteren); in kvm_riscv_vcpu_swap_in_host_state()
770 csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg); in kvm_riscv_vcpu_swap_in_host_state()
774 vcpu->arch.host_sstateen0); in kvm_riscv_vcpu_swap_in_host_state()
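
kvm_riscv_vcpu_swap_in_guest_state() and its host counterpart above rely on the symmetry of csr_swap(): one exchange installs the guest value and hands back the previous host value, and the mirror exchange on exit restores the host while capturing whatever the guest left in the CSR. A standalone sketch of that pattern follows, with an ordinary variable standing in for the SCOUNTEREN CSR; the values and helper are illustrative, not the kernel's csr_swap() macro.

/* Sketch of the csr_swap() save/restore symmetry. */
#include <stdio.h>

static unsigned long csr_scounteren;   /* stand-in for the hardware CSR */

static unsigned long csr_swap(unsigned long *csr, unsigned long newval)
{
	unsigned long old = *csr;
	*csr = newval;
	return old;
}

int main(void)
{
	unsigned long host_scounteren, guest_scounteren = 0x3;

	csr_scounteren = 0x7;                  /* host's current setting */

	/* guest entry: install guest value, remember host value */
	host_scounteren = csr_swap(&csr_scounteren, guest_scounteren);

	/* ... guest runs, may modify the CSR ... */

	/* guest exit: restore host value, keep whatever the guest had */
	guest_scounteren = csr_swap(&csr_scounteren, host_scounteren);

	printf("host=%#lx guest=%#lx\n", csr_scounteren, guest_scounteren);
	return 0;
}
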
784 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu, in kvm_riscv_vcpu_enter_exit() argument
788 struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_enter_exit()
789 struct kvm_cpu_context *hcntx = &vcpu->arch.host_context; in kvm_riscv_vcpu_enter_exit()
799 kvm_riscv_vcpu_swap_in_guest_state(vcpu); in kvm_riscv_vcpu_enter_exit()
828 __kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL, in kvm_riscv_vcpu_enter_exit()
847 __kvm_riscv_switch_to(&vcpu->arch); in kvm_riscv_vcpu_enter_exit()
859 vcpu->arch.last_exit_cpu = vcpu->cpu; in kvm_riscv_vcpu_enter_exit()
861 kvm_riscv_vcpu_swap_in_host_state(vcpu); in kvm_riscv_vcpu_enter_exit()
864 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
868 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
870 if (!vcpu->arch.ran_atleast_once) in kvm_arch_vcpu_ioctl_run()
871 kvm_riscv_vcpu_setup_config(vcpu); in kvm_arch_vcpu_ioctl_run()
874 vcpu->arch.ran_atleast_once = true; in kvm_arch_vcpu_ioctl_run()
876 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
881 ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
885 ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
889 ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
896 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
900 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
901 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
905 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
907 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
913 ret = xfer_to_guest_mode_handle_work(vcpu); in kvm_arch_vcpu_ioctl_run()
918 kvm_riscv_gstage_vmid_update(vcpu); in kvm_arch_vcpu_ioctl_run()
920 ret = kvm_riscv_check_vcpu_requests(vcpu); in kvm_arch_vcpu_ioctl_run()
927 ret = kvm_riscv_vcpu_aia_update(vcpu); in kvm_arch_vcpu_ioctl_run()
941 vcpu->mode = IN_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
943 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
950 kvm_riscv_vcpu_flush_interrupts(vcpu); in kvm_arch_vcpu_ioctl_run()
953 kvm_riscv_update_hvip(vcpu); in kvm_arch_vcpu_ioctl_run()
955 if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || in kvm_arch_vcpu_ioctl_run()
956 kvm_request_pending(vcpu) || in kvm_arch_vcpu_ioctl_run()
958 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
961 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
971 kvm_riscv_gstage_vmid_sanitize(vcpu); in kvm_arch_vcpu_ioctl_run()
973 trace_kvm_entry(vcpu); in kvm_arch_vcpu_ioctl_run()
977 kvm_riscv_vcpu_enter_exit(vcpu, &trap); in kvm_arch_vcpu_ioctl_run()
979 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
980 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
983 kvm_riscv_vcpu_sync_interrupts(vcpu); in kvm_arch_vcpu_ioctl_run()
1006 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
1008 ret = kvm_riscv_vcpu_exit(vcpu, run, &trap); in kvm_arch_vcpu_ioctl_run()
1011 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1013 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
1015 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
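
The tail of kvm_arch_vcpu_ioctl_run() above sets vcpu->mode to IN_GUEST_MODE and only then re-checks for pending requests and VMID changes with interrupts disabled, pairing with kvm_arch_vcpu_should_kick() so a late request either aborts the entry or triggers an IPI. Below is a reduced C11 model of just that ordering, assuming a single boolean can stand in for the request bitmap and with no real world switch.

/* Sketch of the "mark IN_GUEST_MODE, then re-check requests" ordering. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

static _Atomic int mode = OUTSIDE_GUEST_MODE;
static _Atomic bool request_pending;   /* e.g. KVM_REQ_SLEEP from another thread */

static bool try_enter_guest(void)
{
	atomic_store(&mode, IN_GUEST_MODE);

	/* last-chance check: a remote requester sets request_pending and then
	 * looks at mode, so either it sees IN_GUEST_MODE and sends a kick, or
	 * we see the request here and back out. */
	if (atomic_load(&request_pending)) {
		atomic_store(&mode, OUTSIDE_GUEST_MODE);
		return false;
	}

	/* ... world switch would happen here ... */
	atomic_store(&mode, OUTSIDE_GUEST_MODE);
	return true;
}

int main(void)
{
	printf("entered: %d\n", try_enter_guest());
	atomic_store(&request_pending, true);
	printf("entered: %d\n", try_enter_guest());
	return 0;
}
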