Lines Matching refs:vcpu
164 void vcpu_load(struct kvm_vcpu *vcpu) in vcpu_load() argument
168 __this_cpu_write(kvm_running_vcpu, vcpu); in vcpu_load()
169 preempt_notifier_register(&vcpu->preempt_notifier); in vcpu_load()
170 kvm_arch_vcpu_load(vcpu, cpu); in vcpu_load()
175 void vcpu_put(struct kvm_vcpu *vcpu) in vcpu_put() argument
178 kvm_arch_vcpu_put(vcpu); in vcpu_put()
179 preempt_notifier_unregister(&vcpu->preempt_notifier); in vcpu_put()
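vcpu_load() and vcpu_put() above bracket any host-side work that touches loaded vCPU state: load registers the preempt notifier and calls kvm_arch_vcpu_load(), put undoes both. A minimal sketch of the usual pairing, assuming a made-up helper my_arch_sync_regs() purely for illustration:

	/* Sketch only: my_arch_sync_regs() is hypothetical, not a KVM API. */
	static int example_touch_vcpu_state(struct kvm_vcpu *vcpu)
	{
		int r;

		vcpu_load(vcpu);		/* pin state to this pCPU, register preempt notifier */
		r = my_arch_sync_regs(vcpu);	/* arch state may be touched while loaded */
		vcpu_put(vcpu);			/* kvm_arch_vcpu_put() + notifier unregister */
		return r;
	}
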
186 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) in kvm_request_needs_ipi() argument
188 int mode = kvm_vcpu_exiting_guest_mode(vcpu); in kvm_request_needs_ipi()
216 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req, in kvm_make_vcpu_request() argument
222 __kvm_make_request(req, vcpu); in kvm_make_vcpu_request()
224 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) in kvm_make_vcpu_request()
237 if (kvm_request_needs_ipi(vcpu, req)) { in kvm_make_vcpu_request()
238 cpu = READ_ONCE(vcpu->cpu); in kvm_make_vcpu_request()
247 struct kvm_vcpu *vcpu; in kvm_make_vcpus_request_mask() local
258 vcpu = kvm_get_vcpu(kvm, i); in kvm_make_vcpus_request_mask()
259 if (!vcpu) in kvm_make_vcpus_request_mask()
261 kvm_make_vcpu_request(vcpu, req, cpus, me); in kvm_make_vcpus_request_mask()
272 struct kvm_vcpu *vcpu; in kvm_make_all_cpus_request() local
283 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_make_all_cpus_request()
284 kvm_make_vcpu_request(vcpu, req, cpus, me); in kvm_make_all_cpus_request()
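kvm_make_all_cpus_request() sets a request bit on every vCPU and IPIs the ones still in guest mode; each vCPU later consumes the bit in its run loop. A hedged sketch of both sides, using the generic KVM_REQ_TLB_FLUSH request as the example (my_arch_flush_tlb() is a placeholder):

	/* Requester side, e.g. after a VM-wide change: */
	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);

	/* Consumer side, typically early in the arch vcpu_run loop (sketch): */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		my_arch_flush_tlb(vcpu);	/* placeholder for the arch-specific flush */
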
441 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
443 mutex_init(&vcpu->mutex); in kvm_vcpu_init()
444 vcpu->cpu = -1; in kvm_vcpu_init()
445 vcpu->kvm = kvm; in kvm_vcpu_init()
446 vcpu->vcpu_id = id; in kvm_vcpu_init()
447 vcpu->pid = NULL; in kvm_vcpu_init()
448 rwlock_init(&vcpu->pid_lock); in kvm_vcpu_init()
450 rcuwait_init(&vcpu->wait); in kvm_vcpu_init()
452 kvm_async_pf_vcpu_init(vcpu); in kvm_vcpu_init()
454 kvm_vcpu_set_in_spin_loop(vcpu, false); in kvm_vcpu_init()
455 kvm_vcpu_set_dy_eligible(vcpu, false); in kvm_vcpu_init()
456 vcpu->preempted = false; in kvm_vcpu_init()
457 vcpu->ready = false; in kvm_vcpu_init()
458 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); in kvm_vcpu_init()
459 vcpu->last_used_slot = NULL; in kvm_vcpu_init()
462 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", in kvm_vcpu_init()
466 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_vcpu_destroy() argument
468 kvm_arch_vcpu_destroy(vcpu); in kvm_vcpu_destroy()
469 kvm_dirty_ring_free(&vcpu->dirty_ring); in kvm_vcpu_destroy()
476 put_pid(vcpu->pid); in kvm_vcpu_destroy()
478 free_page((unsigned long)vcpu->run); in kvm_vcpu_destroy()
479 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_vcpu_destroy()
485 struct kvm_vcpu *vcpu; in kvm_destroy_vcpus() local
487 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_destroy_vcpus()
488 kvm_vcpu_destroy(vcpu); in kvm_destroy_vcpus()
1373 struct kvm_vcpu *vcpu; in kvm_trylock_all_vcpus() local
1378 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_trylock_all_vcpus()
1379 if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock)) in kvm_trylock_all_vcpus()
1384 kvm_for_each_vcpu(j, vcpu, kvm) { in kvm_trylock_all_vcpus()
1387 mutex_unlock(&vcpu->mutex); in kvm_trylock_all_vcpus()
1395 struct kvm_vcpu *vcpu; in kvm_lock_all_vcpus() local
1401 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_lock_all_vcpus()
1402 r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock); in kvm_lock_all_vcpus()
1409 kvm_for_each_vcpu(j, vcpu, kvm) { in kvm_lock_all_vcpus()
1412 mutex_unlock(&vcpu->mutex); in kvm_lock_all_vcpus()
1420 struct kvm_vcpu *vcpu; in kvm_unlock_all_vcpus() local
1425 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_unlock_all_vcpus()
1426 mutex_unlock(&vcpu->mutex); in kvm_unlock_all_vcpus()
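kvm_trylock_all_vcpus()/kvm_lock_all_vcpus() take every vcpu->mutex nested under kvm->lock (note the *_nest_lock variants), so the caller is expected to already hold kvm->lock and to release everything with kvm_unlock_all_vcpus(). A sketch of the intended calling pattern, with the VM-wide update itself left as a placeholder:

	mutex_lock(&kvm->lock);
	if (!kvm_lock_all_vcpus(kvm)) {		/* 0 on success, error if interrupted */
		/* ... update VM-wide state that must not race with vCPU ioctls ... */
		kvm_unlock_all_vcpus(kvm);
	}
	mutex_unlock(&kvm->lock);
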
2632 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() argument
2634 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); in kvm_vcpu_gfn_to_memslot()
2642 if (unlikely(gen != vcpu->last_used_slot_gen)) { in kvm_vcpu_gfn_to_memslot()
2643 vcpu->last_used_slot = NULL; in kvm_vcpu_gfn_to_memslot()
2644 vcpu->last_used_slot_gen = gen; in kvm_vcpu_gfn_to_memslot()
2647 slot = try_get_memslot(vcpu->last_used_slot, gfn); in kvm_vcpu_gfn_to_memslot()
2658 vcpu->last_used_slot = slot; in kvm_vcpu_gfn_to_memslot()
2673 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_is_visible_gfn() argument
2675 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_is_visible_gfn()
2681 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_host_page_size() argument
2688 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); in kvm_host_page_size()
2744 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_hva() argument
2746 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); in kvm_vcpu_gfn_to_hva()
2776 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) in kvm_vcpu_gfn_to_hva_prot() argument
2778 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_gfn_to_hva_prot()
3107 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, in __kvm_vcpu_map() argument
3111 .slot = gfn_to_memslot(vcpu->kvm, gfn), in __kvm_vcpu_map()
3141 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map) in kvm_vcpu_unmap() argument
3154 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); in kvm_vcpu_unmap()
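__kvm_vcpu_map()/kvm_vcpu_unmap() provide a temporary kernel mapping of a guest page through struct kvm_host_map, and unmap marks the page dirty when appropriate. A hedged sketch using the kvm_vcpu_map() inline wrapper; its exact arguments are assumed here, since they are not shown in the listing:

	struct kvm_host_map map;

	if (!kvm_vcpu_map(vcpu, gpa, &map)) {	/* assumed wrapper around __kvm_vcpu_map() */
		u8 *p = map.hva;		/* host virtual address of the guest page */
		p[offset] ^= 0x1;		/* hypothetical in-place update */
		kvm_vcpu_unmap(vcpu, &map);	/* tears down the mapping, dirties the page */
	}
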
3205 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, in kvm_vcpu_read_guest_page() argument
3208 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_read_guest_page()
3234 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) in kvm_vcpu_read_guest() argument
3242 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); in kvm_vcpu_read_guest()
3274 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, in kvm_vcpu_read_guest_atomic() argument
3278 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_read_guest_atomic()
3315 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_vcpu_write_guest_page() argument
3318 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_write_guest_page()
3320 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); in kvm_vcpu_write_guest_page()
3345 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, in kvm_vcpu_write_guest() argument
3354 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); in kvm_vcpu_write_guest()
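kvm_vcpu_read_guest()/kvm_vcpu_write_guest() copy between a guest-physical range and a kernel buffer, splitting the access at page boundaries internally; both return 0 on success. A short hedged example:

	u64 val;

	if (kvm_vcpu_read_guest(vcpu, gpa, &val, sizeof(val)))
		return -EFAULT;			/* gpa not backed by a memslot, or copy failed */

	val |= 0x1;				/* hypothetical modification */

	if (kvm_vcpu_write_guest(vcpu, gpa, &val, sizeof(val)))
		return -EFAULT;
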
3514 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); in mark_page_dirty_in_slot() local
3517 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm)) in mark_page_dirty_in_slot()
3520 WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm)); in mark_page_dirty_in_slot()
3527 if (kvm->dirty_ring_size && vcpu) in mark_page_dirty_in_slot()
3528 kvm_dirty_ring_push(vcpu, slot, rel_gfn); in mark_page_dirty_in_slot()
3544 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_mark_page_dirty() argument
3548 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_mark_page_dirty()
3549 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); in kvm_vcpu_mark_page_dirty()
3553 void kvm_sigset_activate(struct kvm_vcpu *vcpu) in kvm_sigset_activate() argument
3555 if (!vcpu->sigset_active) in kvm_sigset_activate()
3564 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked); in kvm_sigset_activate()
3567 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) in kvm_sigset_deactivate() argument
3569 if (!vcpu->sigset_active) in kvm_sigset_deactivate()
3576 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) in grow_halt_poll_ns() argument
3580 old = val = vcpu->halt_poll_ns; in grow_halt_poll_ns()
3590 vcpu->halt_poll_ns = val; in grow_halt_poll_ns()
3592 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); in grow_halt_poll_ns()
3595 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) in shrink_halt_poll_ns() argument
3599 old = val = vcpu->halt_poll_ns; in shrink_halt_poll_ns()
3610 vcpu->halt_poll_ns = val; in shrink_halt_poll_ns()
3611 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); in shrink_halt_poll_ns()
3614 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) in kvm_vcpu_check_block() argument
3617 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_check_block()
3619 if (kvm_arch_vcpu_runnable(vcpu)) in kvm_vcpu_check_block()
3621 if (kvm_cpu_has_pending_timer(vcpu)) in kvm_vcpu_check_block()
3625 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) in kvm_vcpu_check_block()
3630 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_vcpu_check_block()
3639 bool kvm_vcpu_block(struct kvm_vcpu *vcpu) in kvm_vcpu_block() argument
3641 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); in kvm_vcpu_block()
3644 vcpu->stat.generic.blocking = 1; in kvm_vcpu_block()
3647 kvm_arch_vcpu_blocking(vcpu); in kvm_vcpu_block()
3654 if (kvm_vcpu_check_block(vcpu) < 0) in kvm_vcpu_block()
3663 kvm_arch_vcpu_unblocking(vcpu); in kvm_vcpu_block()
3666 vcpu->stat.generic.blocking = 0; in kvm_vcpu_block()
3671 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, in update_halt_poll_stats() argument
3674 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; in update_halt_poll_stats()
3677 ++vcpu->stat.generic.halt_attempted_poll; in update_halt_poll_stats()
3680 ++vcpu->stat.generic.halt_successful_poll; in update_halt_poll_stats()
3682 if (!vcpu_valid_wakeup(vcpu)) in update_halt_poll_stats()
3683 ++vcpu->stat.generic.halt_poll_invalid; in update_halt_poll_stats()
3693 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu) in kvm_vcpu_max_halt_poll_ns() argument
3695 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_max_halt_poll_ns()
3717 void kvm_vcpu_halt(struct kvm_vcpu *vcpu) in kvm_vcpu_halt() argument
3719 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); in kvm_vcpu_halt()
3720 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); in kvm_vcpu_halt()
3726 if (vcpu->halt_poll_ns > max_halt_poll_ns) in kvm_vcpu_halt()
3727 vcpu->halt_poll_ns = max_halt_poll_ns; in kvm_vcpu_halt()
3729 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; in kvm_vcpu_halt()
3733 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); in kvm_vcpu_halt()
3736 if (kvm_vcpu_check_block(vcpu) < 0) in kvm_vcpu_halt()
3743 waited = kvm_vcpu_block(vcpu); in kvm_vcpu_halt()
3747 vcpu->stat.generic.halt_wait_ns += in kvm_vcpu_halt()
3749 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, in kvm_vcpu_halt()
3762 update_halt_poll_stats(vcpu, start, poll_end, !waited); in kvm_vcpu_halt()
3766 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); in kvm_vcpu_halt()
3768 if (!vcpu_valid_wakeup(vcpu)) { in kvm_vcpu_halt()
3769 shrink_halt_poll_ns(vcpu); in kvm_vcpu_halt()
3771 if (halt_ns <= vcpu->halt_poll_ns) in kvm_vcpu_halt()
3774 else if (vcpu->halt_poll_ns && in kvm_vcpu_halt()
3776 shrink_halt_poll_ns(vcpu); in kvm_vcpu_halt()
3778 else if (vcpu->halt_poll_ns < max_halt_poll_ns && in kvm_vcpu_halt()
3780 grow_halt_poll_ns(vcpu); in kvm_vcpu_halt()
3782 vcpu->halt_poll_ns = 0; in kvm_vcpu_halt()
3786 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); in kvm_vcpu_halt()
3790 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) in kvm_vcpu_wake_up() argument
3792 if (__kvm_vcpu_wake_up(vcpu)) { in kvm_vcpu_wake_up()
3793 WRITE_ONCE(vcpu->ready, true); in kvm_vcpu_wake_up()
3794 ++vcpu->stat.generic.halt_wakeup; in kvm_vcpu_wake_up()
3806 void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait) in __kvm_vcpu_kick() argument
3810 if (kvm_vcpu_wake_up(vcpu)) in __kvm_vcpu_kick()
3820 if (vcpu == __this_cpu_read(kvm_running_vcpu)) { in __kvm_vcpu_kick()
3821 if (vcpu->mode == IN_GUEST_MODE) in __kvm_vcpu_kick()
3822 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); in __kvm_vcpu_kick()
3833 if (kvm_arch_vcpu_should_kick(vcpu)) { in __kvm_vcpu_kick()
3834 cpu = READ_ONCE(vcpu->cpu); in __kvm_vcpu_kick()
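kvm_vcpu_wake_up() only wakes a vCPU sleeping in kvm_vcpu_block(), while __kvm_vcpu_kick() additionally forces an exit via IPI when the target is running in guest mode (the bool asks for a waiting IPI). A hedged sketch of posting an event and making the vCPU notice it; the choice of request is illustrative only:

	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);	/* or any arch-defined request */
	__kvm_vcpu_kick(vcpu, false);			/* wake if blocked, IPI if in guest mode */
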
3899 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) in kvm_vcpu_eligible_for_directed_yield() argument
3904 eligible = !vcpu->spin_loop.in_spin_loop || in kvm_vcpu_eligible_for_directed_yield()
3905 vcpu->spin_loop.dy_eligible; in kvm_vcpu_eligible_for_directed_yield()
3907 if (vcpu->spin_loop.in_spin_loop) in kvm_vcpu_eligible_for_directed_yield()
3908 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); in kvm_vcpu_eligible_for_directed_yield()
3921 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
3923 return kvm_arch_vcpu_runnable(vcpu); in kvm_arch_dy_runnable()
3926 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) in vcpu_dy_runnable() argument
3928 if (kvm_arch_dy_runnable(vcpu)) in vcpu_dy_runnable()
3932 if (!list_empty_careful(&vcpu->async_pf.done)) in vcpu_dy_runnable()
3946 bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_preempted_in_kernel() argument
3948 return kvm_arch_vcpu_in_kernel(vcpu); in kvm_arch_vcpu_preempted_in_kernel()
3951 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) in kvm_arch_dy_has_pending_interrupt() argument
3960 struct kvm_vcpu *vcpu; in kvm_vcpu_on_spin() local
3997 vcpu = xa_load(&kvm->vcpu_array, idx); in kvm_vcpu_on_spin()
3998 if (!READ_ONCE(vcpu->ready)) in kvm_vcpu_on_spin()
4000 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) in kvm_vcpu_on_spin()
4009 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && in kvm_vcpu_on_spin()
4010 !kvm_arch_dy_has_pending_interrupt(vcpu) && in kvm_vcpu_on_spin()
4011 !kvm_arch_vcpu_preempted_in_kernel(vcpu)) in kvm_vcpu_on_spin()
4014 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) in kvm_vcpu_on_spin()
4017 yielded = kvm_vcpu_yield_to(vcpu); in kvm_vcpu_on_spin()
4045 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; in kvm_vcpu_fault() local
4049 page = virt_to_page(vcpu->run); in kvm_vcpu_fault()
4052 page = virt_to_page(vcpu->arch.pio_data); in kvm_vcpu_fault()
4056 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
4058 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) in kvm_vcpu_fault()
4060 &vcpu->dirty_ring, in kvm_vcpu_fault()
4063 return kvm_arch_vcpu_fault(vcpu, vmf); in kvm_vcpu_fault()
4075 struct kvm_vcpu *vcpu = file->private_data; in kvm_vcpu_mmap() local
4078 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || in kvm_vcpu_mmap()
4079 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && in kvm_vcpu_mmap()
4089 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_release() local
4091 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
4106 static int create_vcpu_fd(struct kvm_vcpu *vcpu) in create_vcpu_fd() argument
4110 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); in create_vcpu_fd()
4111 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); in create_vcpu_fd()
4117 struct kvm_vcpu *vcpu = data; in vcpu_get_pid() local
4119 read_lock(&vcpu->pid_lock); in vcpu_get_pid()
4120 *val = pid_nr(vcpu->pid); in vcpu_get_pid()
4121 read_unlock(&vcpu->pid_lock); in vcpu_get_pid()
4127 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) in kvm_create_vcpu_debugfs() argument
4135 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); in kvm_create_vcpu_debugfs()
4137 vcpu->kvm->debugfs_dentry); in kvm_create_vcpu_debugfs()
4138 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu, in kvm_create_vcpu_debugfs()
4141 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_create_vcpu_debugfs()
4151 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_create_vcpu() local
4181 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); in kvm_vm_ioctl_create_vcpu()
4182 if (!vcpu) { in kvm_vm_ioctl_create_vcpu()
4193 vcpu->run = page_address(page); in kvm_vm_ioctl_create_vcpu()
4195 kvm_vcpu_init(vcpu, kvm, id); in kvm_vm_ioctl_create_vcpu()
4197 r = kvm_arch_vcpu_create(vcpu); in kvm_vm_ioctl_create_vcpu()
4202 r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring, in kvm_vm_ioctl_create_vcpu()
4215 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
4216 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); in kvm_vm_ioctl_create_vcpu()
4229 mutex_lock(&vcpu->mutex); in kvm_vm_ioctl_create_vcpu()
4231 r = create_vcpu_fd(vcpu); in kvm_vm_ioctl_create_vcpu()
4241 mutex_unlock(&vcpu->mutex); in kvm_vm_ioctl_create_vcpu()
4244 kvm_arch_vcpu_postcreate(vcpu); in kvm_vm_ioctl_create_vcpu()
4245 kvm_create_vcpu_debugfs(vcpu); in kvm_vm_ioctl_create_vcpu()
4249 mutex_unlock(&vcpu->mutex); in kvm_vm_ioctl_create_vcpu()
4251 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); in kvm_vm_ioctl_create_vcpu()
4254 kvm_dirty_ring_free(&vcpu->dirty_ring); in kvm_vm_ioctl_create_vcpu()
4256 kvm_arch_vcpu_destroy(vcpu); in kvm_vm_ioctl_create_vcpu()
4258 free_page((unsigned long)vcpu->run); in kvm_vm_ioctl_create_vcpu()
4260 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_vm_ioctl_create_vcpu()
4268 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) in kvm_vcpu_ioctl_set_sigmask() argument
4272 vcpu->sigset_active = 1; in kvm_vcpu_ioctl_set_sigmask()
4273 vcpu->sigset = *sigset; in kvm_vcpu_ioctl_set_sigmask()
4275 vcpu->sigset_active = 0; in kvm_vcpu_ioctl_set_sigmask()
4282 struct kvm_vcpu *vcpu = file->private_data; in kvm_vcpu_stats_read() local
4284 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, in kvm_vcpu_stats_read()
4285 &kvm_vcpu_stats_desc[0], &vcpu->stat, in kvm_vcpu_stats_read()
4286 sizeof(vcpu->stat), user_buffer, size, offset); in kvm_vcpu_stats_read()
4291 struct kvm_vcpu *vcpu = file->private_data; in kvm_vcpu_stats_release() local
4293 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_stats_release()
4304 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_get_stats_fd() argument
4310 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); in kvm_vcpu_ioctl_get_stats_fd()
4316 file = anon_inode_getfile_fmode(name, &kvm_vcpu_stats_fops, vcpu, in kvm_vcpu_ioctl_get_stats_fd()
4323 kvm_get_kvm(vcpu->kvm); in kvm_vcpu_ioctl_get_stats_fd()
4330 static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu, in kvm_vcpu_pre_fault_memory() argument
4345 vcpu_load(vcpu); in kvm_vcpu_pre_fault_memory()
4346 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_pre_fault_memory()
4355 r = kvm_arch_vcpu_pre_fault_memory(vcpu, range); in kvm_vcpu_pre_fault_memory()
4367 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_vcpu_pre_fault_memory()
4368 vcpu_put(vcpu); in kvm_vcpu_pre_fault_memory()
4375 static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu) in kvm_wait_for_vcpu_online() argument
4377 struct kvm *kvm = vcpu->kvm; in kvm_wait_for_vcpu_online()
4383 if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus))) in kvm_wait_for_vcpu_online()
4391 if (mutex_lock_killable(&vcpu->mutex)) in kvm_wait_for_vcpu_online()
4394 mutex_unlock(&vcpu->mutex); in kvm_wait_for_vcpu_online()
4396 if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx))) in kvm_wait_for_vcpu_online()
4405 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_ioctl() local
4411 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) in kvm_vcpu_ioctl()
4422 r = kvm_wait_for_vcpu_online(vcpu); in kvm_vcpu_ioctl()
4434 if (mutex_lock_killable(&vcpu->mutex)) in kvm_vcpu_ioctl()
4449 oldpid = vcpu->pid; in kvm_vcpu_ioctl()
4454 r = kvm_arch_vcpu_run_pid_change(vcpu); in kvm_vcpu_ioctl()
4459 write_lock(&vcpu->pid_lock); in kvm_vcpu_ioctl()
4460 vcpu->pid = newpid; in kvm_vcpu_ioctl()
4461 write_unlock(&vcpu->pid_lock); in kvm_vcpu_ioctl()
4465 vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe); in kvm_vcpu_ioctl()
4466 r = kvm_arch_vcpu_ioctl_run(vcpu); in kvm_vcpu_ioctl()
4467 vcpu->wants_to_run = false; in kvm_vcpu_ioctl()
4469 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); in kvm_vcpu_ioctl()
4479 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); in kvm_vcpu_ioctl()
4498 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); in kvm_vcpu_ioctl()
4507 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); in kvm_vcpu_ioctl()
4523 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); in kvm_vcpu_ioctl()
4529 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); in kvm_vcpu_ioctl()
4544 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); in kvm_vcpu_ioctl()
4553 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); in kvm_vcpu_ioctl()
4568 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); in kvm_vcpu_ioctl()
4591 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); in kvm_vcpu_ioctl()
4599 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); in kvm_vcpu_ioctl()
4615 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); in kvm_vcpu_ioctl()
4619 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); in kvm_vcpu_ioctl()
4629 r = kvm_vcpu_pre_fault_memory(vcpu, &range); in kvm_vcpu_ioctl()
4640 mutex_unlock(&vcpu->mutex); in kvm_vcpu_ioctl()
4650 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_compat_ioctl() local
4654 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) in kvm_vcpu_compat_ioctl()
4675 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); in kvm_vcpu_compat_ioctl()
4677 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); in kvm_vcpu_compat_ioctl()
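The vCPU file descriptor created above is what userspace drives: it mmap()s the shared kvm_run structure and then issues KVM_RUN on the fd. A hedged userspace-side sketch (error handling omitted) of how these pieces fit together:

	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);		/* returns on each userspace exit */
		switch (run->exit_reason) {		/* e.g. KVM_EXIT_IO, KVM_EXIT_MMIO */
		/* ... handle the exit, then loop back into KVM_RUN ... */
		}
	}
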
4969 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_reset_dirty_pages() local
4977 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vm_ioctl_reset_dirty_pages()
4978 r = kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring, &cleared); in kvm_vm_ioctl_reset_dirty_pages()
5826 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, in __kvm_io_bus_write() argument
5837 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_write()
5846 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_write() argument
5858 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
5861 r = __kvm_io_bus_write(vcpu, bus, &range, val); in kvm_io_bus_write()
5866 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, in kvm_io_bus_write_cookie() argument
5877 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
5884 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, in kvm_io_bus_write_cookie()
5892 return __kvm_io_bus_write(vcpu, bus, &range, val); in kvm_io_bus_write_cookie()
5895 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, in __kvm_io_bus_read() argument
5906 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_read()
5915 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_read() argument
5927 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
5930 r = __kvm_io_bus_read(vcpu, bus, &range, val); in kvm_io_bus_read()
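kvm_io_bus_write()/kvm_io_bus_read() dispatch a guest access to whichever in-kernel device has registered the address range on the given bus, returning 0 if a device handled it and a negative error otherwise, in which case callers normally fall back to a userspace exit. A hedged sketch of the MMIO write path; the return values are placeholders:

	/* Try the in-kernel MMIO bus first; otherwise punt to userspace. */
	if (kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, len, data))
		return RET_EXIT_TO_USERSPACE;	/* placeholder for the arch fallback */
	return RET_HANDLED;			/* placeholder */
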
6104 struct kvm_vcpu *vcpu; in kvm_get_stat_per_vcpu() local
6108 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_stat_per_vcpu()
6109 *val += *(u64 *)((void *)(&vcpu->stat) + offset); in kvm_get_stat_per_vcpu()
6117 struct kvm_vcpu *vcpu; in kvm_clear_stat_per_vcpu() local
6119 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_clear_stat_per_vcpu()
6120 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; in kvm_clear_stat_per_vcpu()
6343 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); in kvm_sched_in() local
6345 WRITE_ONCE(vcpu->preempted, false); in kvm_sched_in()
6346 WRITE_ONCE(vcpu->ready, false); in kvm_sched_in()
6348 __this_cpu_write(kvm_running_vcpu, vcpu); in kvm_sched_in()
6349 kvm_arch_vcpu_load(vcpu, cpu); in kvm_sched_in()
6351 WRITE_ONCE(vcpu->scheduled_out, false); in kvm_sched_in()
6357 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); in kvm_sched_out() local
6359 WRITE_ONCE(vcpu->scheduled_out, true); in kvm_sched_out()
6361 if (task_is_runnable(current) && vcpu->wants_to_run) { in kvm_sched_out()
6362 WRITE_ONCE(vcpu->preempted, true); in kvm_sched_out()
6363 WRITE_ONCE(vcpu->ready, true); in kvm_sched_out()
6365 kvm_arch_vcpu_put(vcpu); in kvm_sched_out()
6380 struct kvm_vcpu *vcpu; in kvm_get_running_vcpu() local
6383 vcpu = __this_cpu_read(kvm_running_vcpu); in kvm_get_running_vcpu()
6386 return vcpu; in kvm_get_running_vcpu()
6401 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); in kvm_guest_state() local
6404 if (!kvm_arch_pmi_in_guest(vcpu)) in kvm_guest_state()
6408 if (!kvm_arch_vcpu_in_kernel(vcpu)) in kvm_guest_state()
6416 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); in kvm_guest_get_ip() local
6419 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu))) in kvm_guest_get_ip()
6422 return kvm_arch_vcpu_get_ip(vcpu); in kvm_guest_get_ip()