Lines Matching refs:kvm
(each row: source line number, source text, enclosing function; a trailing "argument" or "local" flags lines where kvm is a function parameter or a local variable)

151 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
157 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) in kvm_arch_guest_memory_reclaimed() argument
244 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, in kvm_make_vcpus_request_mask() argument
258 vcpu = kvm_get_vcpu(kvm, i); in kvm_make_vcpus_request_mask()
270 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) in kvm_make_all_cpus_request() argument
283 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_make_all_cpus_request()
293 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
295 ++kvm->stat.generic.remote_tlb_flush_requests; in kvm_flush_remote_tlbs()
308 if (!kvm_arch_flush_remote_tlbs(kvm) in kvm_flush_remote_tlbs()
309 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) in kvm_flush_remote_tlbs()
310 ++kvm->stat.generic.remote_tlb_flush; in kvm_flush_remote_tlbs()
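    The fragments at lines 293-310 reassemble into a short function; the sketch below puts them back together for readability. It is a reconstruction from the listed lines only (lines 296-307, a comment block in the source, are elided; braces are assumed):

        void kvm_flush_remote_tlbs(struct kvm *kvm)
        {
                ++kvm->stat.generic.remote_tlb_flush_requests;

                /*
                 * Count a remote TLB flush when either the arch hook succeeds
                 * (returns 0) or the fallback KVM_REQ_TLB_FLUSH broadcast
                 * reached at least one vCPU.
                 */
                if (!kvm_arch_flush_remote_tlbs(kvm) ||
                    kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                        ++kvm->stat.generic.remote_tlb_flush;
        }
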
314 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_flush_remote_tlbs_range() argument
316 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages)) in kvm_flush_remote_tlbs_range()
324 kvm_flush_remote_tlbs(kvm); in kvm_flush_remote_tlbs_range()
327 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, in kvm_flush_remote_tlbs_memslot() argument
337 lockdep_assert_held(&kvm->slots_lock); in kvm_flush_remote_tlbs_memslot()
338 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages); in kvm_flush_remote_tlbs_memslot()
341 static void kvm_flush_shadow_all(struct kvm *kvm) in kvm_flush_shadow_all() argument
343 kvm_arch_flush_shadow_all(kvm); in kvm_flush_shadow_all()
344 kvm_arch_guest_memory_reclaimed(kvm); in kvm_flush_shadow_all()
441 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
445 vcpu->kvm = kvm; in kvm_vcpu_init()
482 void kvm_destroy_vcpus(struct kvm *kvm) in kvm_destroy_vcpus() argument
487 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_destroy_vcpus()
489 xa_erase(&kvm->vcpu_array, i); in kvm_destroy_vcpus()
497 WARN_ON_ONCE(xa_load(&kvm->vcpu_array, i) || kvm_get_vcpu(kvm, i)); in kvm_destroy_vcpus()
500 atomic_set(&kvm->online_vcpus, 0); in kvm_destroy_vcpus()
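    Lines 482-500 sketch vCPU teardown. Reassembled below; note that line 488 is not in the listing (it does not reference kvm), so the kvm_vcpu_destroy() call and the local declarations are assumptions:

        void kvm_destroy_vcpus(struct kvm *kvm)
        {
                unsigned long i;
                struct kvm_vcpu *vcpu;

                kvm_for_each_vcpu(i, vcpu, kvm) {
                        kvm_vcpu_destroy(vcpu);        /* assumed: not in the listing */
                        xa_erase(&kvm->vcpu_array, i);

                        /* The vCPU must no longer be reachable by index or by id. */
                        WARN_ON_ONCE(xa_load(&kvm->vcpu_array, i) || kvm_get_vcpu(kvm, i));
                }

                atomic_set(&kvm->online_vcpus, 0);
        }
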
505 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) in mmu_notifier_to_kvm()
507 return container_of(mn, struct kvm, mmu_notifier); in mmu_notifier_to_kvm()
510 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
512 typedef void (*on_lock_fn_t)(struct kvm *kvm);
561 static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm, in kvm_handle_hva_range() argument
585 idx = srcu_read_lock(&kvm->srcu); in kvm_handle_hva_range()
587 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { in kvm_handle_hva_range()
590 slots = __kvm_memslots(kvm, i); in kvm_handle_hva_range()
626 KVM_MMU_LOCK(kvm); in kvm_handle_hva_range()
628 range->on_lock(kvm); in kvm_handle_hva_range()
634 r.ret |= range->handler(kvm, &gfn_range); in kvm_handle_hva_range()
639 kvm_flush_remote_tlbs(kvm); in kvm_handle_hva_range()
643 KVM_MMU_UNLOCK(kvm); in kvm_handle_hva_range()
645 srcu_read_unlock(&kvm->srcu, idx); in kvm_handle_hva_range()
656 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_age_hva_range() local
667 return kvm_handle_hva_range(kvm, &range).ret; in kvm_age_hva_range()
678 void kvm_mmu_invalidate_begin(struct kvm *kvm) in kvm_mmu_invalidate_begin() argument
680 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_mmu_invalidate_begin()
686 kvm->mmu_invalidate_in_progress++; in kvm_mmu_invalidate_begin()
688 if (likely(kvm->mmu_invalidate_in_progress == 1)) { in kvm_mmu_invalidate_begin()
689 kvm->mmu_invalidate_range_start = INVALID_GPA; in kvm_mmu_invalidate_begin()
690 kvm->mmu_invalidate_range_end = INVALID_GPA; in kvm_mmu_invalidate_begin()
694 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end) in kvm_mmu_invalidate_range_add() argument
696 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_mmu_invalidate_range_add()
698 WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress); in kvm_mmu_invalidate_range_add()
700 if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) { in kvm_mmu_invalidate_range_add()
701 kvm->mmu_invalidate_range_start = start; in kvm_mmu_invalidate_range_add()
702 kvm->mmu_invalidate_range_end = end; in kvm_mmu_invalidate_range_add()
713 kvm->mmu_invalidate_range_start = in kvm_mmu_invalidate_range_add()
714 min(kvm->mmu_invalidate_range_start, start); in kvm_mmu_invalidate_range_add()
715 kvm->mmu_invalidate_range_end = in kvm_mmu_invalidate_range_add()
716 max(kvm->mmu_invalidate_range_end, end); in kvm_mmu_invalidate_range_add()
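    Lines 678-716 are the begin/add half of the mmu-invalidation window. Reassembled below; only the min/max lines of the merge appear in the listing (lines 703-712 are elided), so the else arm and its placement are an assumption:

        void kvm_mmu_invalidate_begin(struct kvm *kvm)
        {
                lockdep_assert_held_write(&kvm->mmu_lock);

                kvm->mmu_invalidate_in_progress++;

                /* First invalidation in this window: reset the tracked range. */
                if (likely(kvm->mmu_invalidate_in_progress == 1)) {
                        kvm->mmu_invalidate_range_start = INVALID_GPA;
                        kvm->mmu_invalidate_range_end = INVALID_GPA;
                }
        }

        void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
        {
                lockdep_assert_held_write(&kvm->mmu_lock);

                WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);

                if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
                        kvm->mmu_invalidate_range_start = start;
                        kvm->mmu_invalidate_range_end = end;
                } else {
                        /* Assumed else arm: widen the range to cover the new span. */
                        kvm->mmu_invalidate_range_start =
                                min(kvm->mmu_invalidate_range_start, start);
                        kvm->mmu_invalidate_range_end =
                                max(kvm->mmu_invalidate_range_end, end);
                }
        }
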
720 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_mmu_unmap_gfn_range() argument
722 kvm_mmu_invalidate_range_add(kvm, range->start, range->end); in kvm_mmu_unmap_gfn_range()
723 return kvm_unmap_gfn_range(kvm, range); in kvm_mmu_unmap_gfn_range()
729 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_start() local
749 spin_lock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_start()
750 kvm->mn_active_invalidate_count++; in kvm_mmu_notifier_invalidate_range_start()
751 spin_unlock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_start()
763 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end); in kvm_mmu_notifier_invalidate_range_start()
770 if (kvm_handle_hva_range(kvm, &hva_range).found_memslot) in kvm_mmu_notifier_invalidate_range_start()
771 kvm_arch_guest_memory_reclaimed(kvm); in kvm_mmu_notifier_invalidate_range_start()
776 void kvm_mmu_invalidate_end(struct kvm *kvm) in kvm_mmu_invalidate_end() argument
778 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_mmu_invalidate_end()
785 kvm->mmu_invalidate_seq++; in kvm_mmu_invalidate_end()
792 kvm->mmu_invalidate_in_progress--; in kvm_mmu_invalidate_end()
793 KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm); in kvm_mmu_invalidate_end()
799 WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA); in kvm_mmu_invalidate_end()
805 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_end() local
816 kvm_handle_hva_range(kvm, &hva_range); in kvm_mmu_notifier_invalidate_range_end()
819 spin_lock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_end()
820 if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count)) in kvm_mmu_notifier_invalidate_range_end()
821 --kvm->mn_active_invalidate_count; in kvm_mmu_notifier_invalidate_range_end()
822 wake = !kvm->mn_active_invalidate_count; in kvm_mmu_notifier_invalidate_range_end()
823 spin_unlock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_end()
830 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); in kvm_mmu_notifier_invalidate_range_end()
880 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_release() local
883 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_release()
884 kvm_flush_shadow_all(kvm); in kvm_mmu_notifier_release()
885 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_release()
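    Lines 880-885 show the SRCU read-side bracket used whenever kvm->srcu-protected state is walked: take the lock, do the work, release with the returned index. A minimal reconstruction follows; the function header does not reference kvm and so is not in the listing, hence the mn/mm parameters and the idx local are assumptions based on the mmu_notifier release signature:

        static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                             struct mm_struct *mm)
        {
                struct kvm *kvm = mmu_notifier_to_kvm(mn);
                int idx;

                /* SRCU read side protects the shadow-MMU teardown below. */
                idx = srcu_read_lock(&kvm->srcu);
                kvm_flush_shadow_all(kvm);
                srcu_read_unlock(&kvm->srcu, idx);
        }
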
897 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
899 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; in kvm_init_mmu_notifier()
900 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); in kvm_init_mmu_notifier()
905 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
917 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); in kvm_pm_notifier_call() local
919 return kvm_arch_pm_notifier(kvm, state); in kvm_pm_notifier_call()
922 static void kvm_init_pm_notifier(struct kvm *kvm) in kvm_init_pm_notifier() argument
924 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; in kvm_init_pm_notifier()
926 kvm->pm_notifier.priority = INT_MAX; in kvm_init_pm_notifier()
927 register_pm_notifier(&kvm->pm_notifier); in kvm_init_pm_notifier()
930 static void kvm_destroy_pm_notifier(struct kvm *kvm) in kvm_destroy_pm_notifier() argument
932 unregister_pm_notifier(&kvm->pm_notifier); in kvm_destroy_pm_notifier()
935 static void kvm_init_pm_notifier(struct kvm *kvm) in kvm_init_pm_notifier() argument
939 static void kvm_destroy_pm_notifier(struct kvm *kvm) in kvm_destroy_pm_notifier() argument
954 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_free_memslot() argument
961 kvm_arch_free_memslot(kvm, slot); in kvm_free_memslot()
966 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) in kvm_free_memslots() argument
982 kvm_free_memslot(kvm, memslot); in kvm_free_memslots()
998 static void kvm_destroy_vm_debugfs(struct kvm *kvm) in kvm_destroy_vm_debugfs() argument
1004 if (IS_ERR(kvm->debugfs_dentry)) in kvm_destroy_vm_debugfs()
1007 debugfs_remove_recursive(kvm->debugfs_dentry); in kvm_destroy_vm_debugfs()
1009 if (kvm->debugfs_stat_data) { in kvm_destroy_vm_debugfs()
1011 kfree(kvm->debugfs_stat_data[i]); in kvm_destroy_vm_debugfs()
1012 kfree(kvm->debugfs_stat_data); in kvm_destroy_vm_debugfs()
1016 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname) in kvm_create_vm_debugfs() argument
1044 kvm->debugfs_dentry = dent; in kvm_create_vm_debugfs()
1045 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, in kvm_create_vm_debugfs()
1046 sizeof(*kvm->debugfs_stat_data), in kvm_create_vm_debugfs()
1048 if (!kvm->debugfs_stat_data) in kvm_create_vm_debugfs()
1057 stat_data->kvm = kvm; in kvm_create_vm_debugfs()
1060 kvm->debugfs_stat_data[i] = stat_data; in kvm_create_vm_debugfs()
1062 kvm->debugfs_dentry, stat_data, in kvm_create_vm_debugfs()
1072 stat_data->kvm = kvm; in kvm_create_vm_debugfs()
1075 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; in kvm_create_vm_debugfs()
1077 kvm->debugfs_dentry, stat_data, in kvm_create_vm_debugfs()
1081 kvm_arch_create_vm_debugfs(kvm); in kvm_create_vm_debugfs()
1084 kvm_destroy_vm_debugfs(kvm); in kvm_create_vm_debugfs()
1092 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) in kvm_arch_pre_destroy_vm() argument
1102 void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) in kvm_arch_create_vm_debugfs() argument
1106 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname) in kvm_create_vm()
1108 struct kvm *kvm = kvm_arch_alloc_vm(); in kvm_create_vm() local
1112 if (!kvm) in kvm_create_vm()
1115 KVM_MMU_LOCK_INIT(kvm); in kvm_create_vm()
1117 kvm->mm = current->mm; in kvm_create_vm()
1118 kvm_eventfd_init(kvm); in kvm_create_vm()
1119 mutex_init(&kvm->lock); in kvm_create_vm()
1120 mutex_init(&kvm->irq_lock); in kvm_create_vm()
1121 mutex_init(&kvm->slots_lock); in kvm_create_vm()
1122 mutex_init(&kvm->slots_arch_lock); in kvm_create_vm()
1123 spin_lock_init(&kvm->mn_invalidate_lock); in kvm_create_vm()
1124 rcuwait_init(&kvm->mn_memslots_update_rcuwait); in kvm_create_vm()
1125 xa_init(&kvm->vcpu_array); in kvm_create_vm()
1127 xa_init(&kvm->mem_attr_array); in kvm_create_vm()
1130 INIT_LIST_HEAD(&kvm->gpc_list); in kvm_create_vm()
1131 spin_lock_init(&kvm->gpc_lock); in kvm_create_vm()
1133 INIT_LIST_HEAD(&kvm->devices); in kvm_create_vm()
1134 kvm->max_vcpus = KVM_MAX_VCPUS; in kvm_create_vm()
1142 kvm->debugfs_dentry = ERR_PTR(-ENOENT); in kvm_create_vm()
1144 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d", in kvm_create_vm()
1148 if (init_srcu_struct(&kvm->srcu)) in kvm_create_vm()
1150 if (init_srcu_struct(&kvm->irq_srcu)) in kvm_create_vm()
1153 r = kvm_init_irq_routing(kvm); in kvm_create_vm()
1157 refcount_set(&kvm->users_count, 1); in kvm_create_vm()
1159 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { in kvm_create_vm()
1161 slots = &kvm->__memslots[i][j]; in kvm_create_vm()
1173 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); in kvm_create_vm()
1178 rcu_assign_pointer(kvm->buses[i], in kvm_create_vm()
1180 if (!kvm->buses[i]) in kvm_create_vm()
1184 r = kvm_arch_init_vm(kvm, type); in kvm_create_vm()
1193 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); in kvm_create_vm()
1196 r = kvm_init_mmu_notifier(kvm); in kvm_create_vm()
1200 r = kvm_coalesced_mmio_init(kvm); in kvm_create_vm()
1204 r = kvm_create_vm_debugfs(kvm, fdname); in kvm_create_vm()
1209 list_add(&kvm->vm_list, &vm_list); in kvm_create_vm()
1213 kvm_init_pm_notifier(kvm); in kvm_create_vm()
1215 return kvm; in kvm_create_vm()
1218 kvm_coalesced_mmio_free(kvm); in kvm_create_vm()
1221 if (kvm->mmu_notifier.ops) in kvm_create_vm()
1222 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); in kvm_create_vm()
1227 kvm_arch_destroy_vm(kvm); in kvm_create_vm()
1229 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); in kvm_create_vm()
1231 kfree(kvm_get_bus(kvm, i)); in kvm_create_vm()
1232 kvm_free_irq_routing(kvm); in kvm_create_vm()
1234 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_create_vm()
1236 cleanup_srcu_struct(&kvm->srcu); in kvm_create_vm()
1238 kvm_arch_free_vm(kvm); in kvm_create_vm()
1243 static void kvm_destroy_devices(struct kvm *kvm) in kvm_destroy_devices() argument
1258 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { in kvm_destroy_devices()
1264 static void kvm_destroy_vm(struct kvm *kvm) in kvm_destroy_vm() argument
1267 struct mm_struct *mm = kvm->mm; in kvm_destroy_vm()
1269 kvm_destroy_pm_notifier(kvm); in kvm_destroy_vm()
1270 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); in kvm_destroy_vm()
1271 kvm_destroy_vm_debugfs(kvm); in kvm_destroy_vm()
1273 list_del(&kvm->vm_list); in kvm_destroy_vm()
1275 kvm_arch_pre_destroy_vm(kvm); in kvm_destroy_vm()
1277 kvm_free_irq_routing(kvm); in kvm_destroy_vm()
1279 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); in kvm_destroy_vm()
1283 kvm->buses[i] = NULL; in kvm_destroy_vm()
1285 kvm_coalesced_mmio_free(kvm); in kvm_destroy_vm()
1287 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); in kvm_destroy_vm()
1300 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); in kvm_destroy_vm()
1301 if (kvm->mn_active_invalidate_count) in kvm_destroy_vm()
1302 kvm->mn_active_invalidate_count = 0; in kvm_destroy_vm()
1304 WARN_ON(kvm->mmu_invalidate_in_progress); in kvm_destroy_vm()
1306 kvm_flush_shadow_all(kvm); in kvm_destroy_vm()
1308 kvm_arch_destroy_vm(kvm); in kvm_destroy_vm()
1309 kvm_destroy_devices(kvm); in kvm_destroy_vm()
1310 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { in kvm_destroy_vm()
1311 kvm_free_memslots(kvm, &kvm->__memslots[i][0]); in kvm_destroy_vm()
1312 kvm_free_memslots(kvm, &kvm->__memslots[i][1]); in kvm_destroy_vm()
1314 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_destroy_vm()
1315 cleanup_srcu_struct(&kvm->srcu); in kvm_destroy_vm()
1317 xa_destroy(&kvm->mem_attr_array); in kvm_destroy_vm()
1319 kvm_arch_free_vm(kvm); in kvm_destroy_vm()
1325 void kvm_get_kvm(struct kvm *kvm) in kvm_get_kvm() argument
1327 refcount_inc(&kvm->users_count); in kvm_get_kvm()
1335 bool kvm_get_kvm_safe(struct kvm *kvm) in kvm_get_kvm_safe() argument
1337 return refcount_inc_not_zero(&kvm->users_count); in kvm_get_kvm_safe()
1341 void kvm_put_kvm(struct kvm *kvm) in kvm_put_kvm() argument
1343 if (refcount_dec_and_test(&kvm->users_count)) in kvm_put_kvm()
1344 kvm_destroy_vm(kvm); in kvm_put_kvm()
1355 void kvm_put_kvm_no_destroy(struct kvm *kvm) in kvm_put_kvm_no_destroy() argument
1357 WARN_ON(refcount_dec_and_test(&kvm->users_count)); in kvm_put_kvm_no_destroy()
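    Lines 1325-1357 form the VM reference-counting helpers. Pieced together from the listed lines (the bodies are one-liners, so little beyond braces is assumed):

        void kvm_get_kvm(struct kvm *kvm)
        {
                refcount_inc(&kvm->users_count);
        }

        /* Returns false if the count already hit zero, i.e. the VM is dying. */
        bool kvm_get_kvm_safe(struct kvm *kvm)
        {
                return refcount_inc_not_zero(&kvm->users_count);
        }

        void kvm_put_kvm(struct kvm *kvm)
        {
                if (refcount_dec_and_test(&kvm->users_count))
                        kvm_destroy_vm(kvm);
        }

        /* Drop a reference that must not be the last one; warn if it was. */
        void kvm_put_kvm_no_destroy(struct kvm *kvm)
        {
                WARN_ON(refcount_dec_and_test(&kvm->users_count));
        }

    The vCPU and device creation paths in this listing use exactly this pattern: kvm_get_kvm() before installing an fd (lines 4230, 4835) and kvm_put_kvm_no_destroy() on their error paths (lines 4250, 4838).
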
1363 struct kvm *kvm = filp->private_data; in kvm_vm_release() local
1365 kvm_irqfd_release(kvm); in kvm_vm_release()
1367 kvm_put_kvm(kvm); in kvm_vm_release()
1371 int kvm_trylock_all_vcpus(struct kvm *kvm) in kvm_trylock_all_vcpus() argument
1376 lockdep_assert_held(&kvm->lock); in kvm_trylock_all_vcpus()
1378 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_trylock_all_vcpus()
1379 if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock)) in kvm_trylock_all_vcpus()
1384 kvm_for_each_vcpu(j, vcpu, kvm) { in kvm_trylock_all_vcpus()
1393 int kvm_lock_all_vcpus(struct kvm *kvm) in kvm_lock_all_vcpus() argument
1399 lockdep_assert_held(&kvm->lock); in kvm_lock_all_vcpus()
1401 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_lock_all_vcpus()
1402 r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock); in kvm_lock_all_vcpus()
1409 kvm_for_each_vcpu(j, vcpu, kvm) { in kvm_lock_all_vcpus()
1418 void kvm_unlock_all_vcpus(struct kvm *kvm) in kvm_unlock_all_vcpus() argument
1423 lockdep_assert_held(&kvm->lock); in kvm_unlock_all_vcpus()
1425 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_unlock_all_vcpus()
1445 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id) in kvm_get_inactive_memslots() argument
1447 struct kvm_memslots *active = __kvm_memslots(kvm, as_id); in kvm_get_inactive_memslots()
1450 return &kvm->__memslots[as_id][node_idx_inactive]; in kvm_get_inactive_memslots()
1525 static void kvm_replace_memslot(struct kvm *kvm, in kvm_replace_memslot() argument
1530 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); in kvm_replace_memslot()
1586 static int check_memory_region_flags(struct kvm *kvm, in check_memory_region_flags() argument
1591 if (kvm_arch_has_private_mem(kvm)) in check_memory_region_flags()
1603 if (kvm_arch_has_readonly_mem(kvm) && in check_memory_region_flags()
1613 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id) in kvm_swap_active_memslots() argument
1615 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); in kvm_swap_active_memslots()
1618 u64 gen = __kvm_memslots(kvm, as_id)->generation; in kvm_swap_active_memslots()
1628 spin_lock(&kvm->mn_invalidate_lock); in kvm_swap_active_memslots()
1629 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); in kvm_swap_active_memslots()
1630 while (kvm->mn_active_invalidate_count) { in kvm_swap_active_memslots()
1632 spin_unlock(&kvm->mn_invalidate_lock); in kvm_swap_active_memslots()
1634 spin_lock(&kvm->mn_invalidate_lock); in kvm_swap_active_memslots()
1636 finish_rcuwait(&kvm->mn_memslots_update_rcuwait); in kvm_swap_active_memslots()
1637 rcu_assign_pointer(kvm->memslots[as_id], slots); in kvm_swap_active_memslots()
1638 spin_unlock(&kvm->mn_invalidate_lock); in kvm_swap_active_memslots()
1645 mutex_unlock(&kvm->slots_arch_lock); in kvm_swap_active_memslots()
1647 synchronize_srcu_expedited(&kvm->srcu); in kvm_swap_active_memslots()
1664 gen += kvm_arch_nr_memslot_as_ids(kvm); in kvm_swap_active_memslots()
1666 kvm_arch_memslots_updated(kvm, gen); in kvm_swap_active_memslots()
1671 static int kvm_prepare_memory_region(struct kvm *kvm, in kvm_prepare_memory_region() argument
1690 else if (kvm_use_dirty_bitmap(kvm)) { in kvm_prepare_memory_region()
1695 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) in kvm_prepare_memory_region()
1700 r = kvm_arch_prepare_memory_region(kvm, old, new, change); in kvm_prepare_memory_region()
1709 static void kvm_commit_memory_region(struct kvm *kvm, in kvm_commit_memory_region() argument
1721 kvm->nr_memslot_pages -= old->npages; in kvm_commit_memory_region()
1723 kvm->nr_memslot_pages += new->npages; in kvm_commit_memory_region()
1727 atomic_set(&kvm->nr_memslots_dirty_logging, in kvm_commit_memory_region()
1728 atomic_read(&kvm->nr_memslots_dirty_logging) + change); in kvm_commit_memory_region()
1731 kvm_arch_commit_memory_region(kvm, old, new, change); in kvm_commit_memory_region()
1739 kvm_free_memslot(kvm, old); in kvm_commit_memory_region()
1771 static void kvm_activate_memslot(struct kvm *kvm, in kvm_activate_memslot() argument
1777 kvm_swap_active_memslots(kvm, as_id); in kvm_activate_memslot()
1780 kvm_replace_memslot(kvm, old, new); in kvm_activate_memslot()
1796 static void kvm_invalidate_memslot(struct kvm *kvm, in kvm_invalidate_memslot() argument
1807 kvm_replace_memslot(kvm, old, invalid_slot); in kvm_invalidate_memslot()
1814 kvm_swap_active_memslots(kvm, old->as_id); in kvm_invalidate_memslot()
1822 kvm_arch_flush_shadow_memslot(kvm, old); in kvm_invalidate_memslot()
1823 kvm_arch_guest_memory_reclaimed(kvm); in kvm_invalidate_memslot()
1826 mutex_lock(&kvm->slots_arch_lock); in kvm_invalidate_memslot()
1838 static void kvm_create_memslot(struct kvm *kvm, in kvm_create_memslot() argument
1842 kvm_replace_memslot(kvm, NULL, new); in kvm_create_memslot()
1843 kvm_activate_memslot(kvm, NULL, new); in kvm_create_memslot()
1846 static void kvm_delete_memslot(struct kvm *kvm, in kvm_delete_memslot() argument
1854 kvm_replace_memslot(kvm, old, NULL); in kvm_delete_memslot()
1855 kvm_activate_memslot(kvm, invalid_slot, NULL); in kvm_delete_memslot()
1858 static void kvm_move_memslot(struct kvm *kvm, in kvm_move_memslot() argument
1867 kvm_replace_memslot(kvm, old, new); in kvm_move_memslot()
1868 kvm_activate_memslot(kvm, invalid_slot, new); in kvm_move_memslot()
1871 static void kvm_update_flags_memslot(struct kvm *kvm, in kvm_update_flags_memslot() argument
1880 kvm_replace_memslot(kvm, old, new); in kvm_update_flags_memslot()
1881 kvm_activate_memslot(kvm, old, new); in kvm_update_flags_memslot()
1884 static int kvm_set_memslot(struct kvm *kvm, in kvm_set_memslot() argument
1906 mutex_lock(&kvm->slots_arch_lock); in kvm_set_memslot()
1924 mutex_unlock(&kvm->slots_arch_lock); in kvm_set_memslot()
1927 kvm_invalidate_memslot(kvm, old, invalid_slot); in kvm_set_memslot()
1930 r = kvm_prepare_memory_region(kvm, old, new, change); in kvm_set_memslot()
1939 kvm_activate_memslot(kvm, invalid_slot, old); in kvm_set_memslot()
1942 mutex_unlock(&kvm->slots_arch_lock); in kvm_set_memslot()
1955 kvm_create_memslot(kvm, new); in kvm_set_memslot()
1957 kvm_delete_memslot(kvm, old, invalid_slot); in kvm_set_memslot()
1959 kvm_move_memslot(kvm, old, new, invalid_slot); in kvm_set_memslot()
1961 kvm_update_flags_memslot(kvm, old, new); in kvm_set_memslot()
1974 kvm_commit_memory_region(kvm, old, new, change); in kvm_set_memslot()
1992 static int kvm_set_memory_region(struct kvm *kvm, in kvm_set_memory_region() argument
2003 lockdep_assert_held(&kvm->slots_lock); in kvm_set_memory_region()
2005 r = check_memory_region_flags(kvm, mem); in kvm_set_memory_region()
2028 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM) in kvm_set_memory_region()
2043 slots = __kvm_memslots(kvm, as_id); in kvm_set_memory_region()
2055 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) in kvm_set_memory_region()
2058 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); in kvm_set_memory_region()
2071 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) in kvm_set_memory_region()
2106 r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset); in kvm_set_memory_region()
2111 r = kvm_set_memslot(kvm, old, new, change); in kvm_set_memory_region()
2125 int kvm_set_internal_memslot(struct kvm *kvm, in kvm_set_internal_memslot() argument
2134 return kvm_set_memory_region(kvm, mem); in kvm_set_internal_memslot()
2138 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, in kvm_vm_ioctl_set_memory_region() argument
2144 guard(mutex)(&kvm->slots_lock); in kvm_vm_ioctl_set_memory_region()
2145 return kvm_set_memory_region(kvm, mem); in kvm_vm_ioctl_set_memory_region()
2156 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, in kvm_get_dirty_log() argument
2165 if (!kvm_use_dirty_bitmap(kvm)) in kvm_get_dirty_log()
2173 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS) in kvm_get_dirty_log()
2176 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log()
2181 kvm_arch_sync_dirty_log(kvm, *memslot); in kvm_get_dirty_log()
2219 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) in kvm_get_dirty_log_protect() argument
2230 if (!kvm_use_dirty_bitmap(kvm)) in kvm_get_dirty_log_protect()
2235 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS) in kvm_get_dirty_log_protect()
2238 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log_protect()
2245 kvm_arch_sync_dirty_log(kvm, memslot); in kvm_get_dirty_log_protect()
2249 if (kvm->manual_dirty_log_protect) { in kvm_get_dirty_log_protect()
2263 KVM_MMU_LOCK(kvm); in kvm_get_dirty_log_protect()
2276 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_get_dirty_log_protect()
2279 KVM_MMU_UNLOCK(kvm); in kvm_get_dirty_log_protect()
2283 kvm_flush_remote_tlbs_memslot(kvm, memslot); in kvm_get_dirty_log_protect()
2310 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
2315 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
2317 r = kvm_get_dirty_log_protect(kvm, log); in kvm_vm_ioctl_get_dirty_log()
2319 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
2329 static int kvm_clear_dirty_log_protect(struct kvm *kvm, in kvm_clear_dirty_log_protect() argument
2342 if (!kvm_use_dirty_bitmap(kvm)) in kvm_clear_dirty_log_protect()
2347 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS) in kvm_clear_dirty_log_protect()
2353 slots = __kvm_memslots(kvm, as_id); in kvm_clear_dirty_log_protect()
2367 kvm_arch_sync_dirty_log(kvm, memslot); in kvm_clear_dirty_log_protect()
2374 KVM_MMU_LOCK(kvm); in kvm_clear_dirty_log_protect()
2393 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_clear_dirty_log_protect()
2397 KVM_MMU_UNLOCK(kvm); in kvm_clear_dirty_log_protect()
2400 kvm_flush_remote_tlbs_memslot(kvm, memslot); in kvm_clear_dirty_log_protect()
2405 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_clear_dirty_log() argument
2410 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_clear_dirty_log()
2412 r = kvm_clear_dirty_log_protect(kvm, log); in kvm_vm_ioctl_clear_dirty_log()
2414 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_clear_dirty_log()
2420 static u64 kvm_supported_mem_attributes(struct kvm *kvm) in kvm_supported_mem_attributes() argument
2422 if (!kvm || kvm_arch_has_private_mem(kvm)) in kvm_supported_mem_attributes()
2432 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, in kvm_range_has_memory_attributes() argument
2435 XA_STATE(xas, &kvm->mem_attr_array, start); in kvm_range_has_memory_attributes()
2439 mask &= kvm_supported_mem_attributes(kvm); in kvm_range_has_memory_attributes()
2444 return (kvm_get_memory_attributes(kvm, start) & mask) == attrs; in kvm_range_has_memory_attributes()
2463 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm, in kvm_handle_gfn_range() argument
2485 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { in kvm_handle_gfn_range()
2486 slots = __kvm_memslots(kvm, i); in kvm_handle_gfn_range()
2499 KVM_MMU_LOCK(kvm); in kvm_handle_gfn_range()
2501 range->on_lock(kvm); in kvm_handle_gfn_range()
2504 ret |= range->handler(kvm, &gfn_range); in kvm_handle_gfn_range()
2509 kvm_flush_remote_tlbs(kvm); in kvm_handle_gfn_range()
2512 KVM_MMU_UNLOCK(kvm); in kvm_handle_gfn_range()
2515 static bool kvm_pre_set_memory_attributes(struct kvm *kvm, in kvm_pre_set_memory_attributes() argument
2529 kvm_mmu_invalidate_range_add(kvm, range->start, range->end); in kvm_pre_set_memory_attributes()
2531 return kvm_arch_pre_set_memory_attributes(kvm, range); in kvm_pre_set_memory_attributes()
2535 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, in kvm_vm_set_mem_attributes() argument
2563 mutex_lock(&kvm->slots_lock); in kvm_vm_set_mem_attributes()
2566 if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes)) in kvm_vm_set_mem_attributes()
2574 r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT); in kvm_vm_set_mem_attributes()
2581 kvm_handle_gfn_range(kvm, &pre_set_range); in kvm_vm_set_mem_attributes()
2584 r = xa_err(xa_store(&kvm->mem_attr_array, i, entry, in kvm_vm_set_mem_attributes()
2586 KVM_BUG_ON(r, kvm); in kvm_vm_set_mem_attributes()
2590 kvm_handle_gfn_range(kvm, &post_set_range); in kvm_vm_set_mem_attributes()
2593 mutex_unlock(&kvm->slots_lock); in kvm_vm_set_mem_attributes()
2597 static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm, in kvm_vm_ioctl_set_mem_attributes() argument
2605 if (attrs->attributes & ~kvm_supported_mem_attributes(kvm)) in kvm_vm_ioctl_set_mem_attributes()
2622 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes); in kvm_vm_ioctl_set_mem_attributes()
2626 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
2628 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
2665 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
2667 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
2738 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
2740 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
2769 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
2771 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
3092 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write) in __gfn_to_page() argument
3096 .slot = gfn_to_memslot(kvm, gfn), in __gfn_to_page()
3111 .slot = gfn_to_memslot(vcpu->kvm, gfn), in __kvm_vcpu_map()
3196 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
3199 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_page()
3214 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) in kvm_read_guest() argument
3222 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
3286 static int __kvm_write_guest_page(struct kvm *kvm, in __kvm_write_guest_page() argument
3302 mark_page_dirty_in_slot(kvm, memslot, gfn); in __kvm_write_guest_page()
3306 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, in kvm_write_guest_page() argument
3309 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_write_guest_page()
3311 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); in kvm_write_guest_page()
3320 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); in kvm_vcpu_write_guest_page()
3324 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, in kvm_write_guest() argument
3333 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
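    Lines 3324 and 3333 show only the signature of kvm_write_guest() and its per-page call. The chunking loop around that call is sketched below; the loop bookkeeping and the next_segment()/offset_in_page() helpers are assumptions based on the usual per-page split pattern, not lines from the listing. kvm_read_guest() (lines 3214-3222) has the same shape around kvm_read_guest_page().

        int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                            unsigned long len)
        {
                gfn_t gfn = gpa >> PAGE_SHIFT;
                int offset = offset_in_page(gpa);
                int seg;
                int ret;

                /*
                 * Split the write at page boundaries and hand each piece to the
                 * per-page helper, which also marks the page dirty (line 3302).
                 */
                while ((seg = next_segment(len, offset)) != 0) {
                        ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                        if (ret < 0)
                                return ret;
                        offset = 0;
                        len -= seg;
                        data += seg;
                        ++gfn;
                }
                return 0;
        }
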
3407 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_gfn_to_hva_cache_init() argument
3410 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_gfn_to_hva_cache_init()
3415 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_offset_cached() argument
3419 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_write_guest_offset_cached()
3435 return kvm_write_guest(kvm, gpa, data, len); in kvm_write_guest_offset_cached()
3440 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); in kvm_write_guest_offset_cached()
3446 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_cached() argument
3449 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_write_guest_cached()
3453 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_offset_cached() argument
3457 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_read_guest_offset_cached()
3473 return kvm_read_guest(kvm, gpa, data, len); in kvm_read_guest_offset_cached()
3483 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_cached() argument
3486 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_read_guest_cached()
3490 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) in kvm_clear_guest() argument
3499 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg); in kvm_clear_guest()
3510 void mark_page_dirty_in_slot(struct kvm *kvm, in mark_page_dirty_in_slot() argument
3517 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm)) in mark_page_dirty_in_slot()
3520 WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm)); in mark_page_dirty_in_slot()
3527 if (kvm->dirty_ring_size && vcpu) in mark_page_dirty_in_slot()
3535 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
3539 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
3540 mark_page_dirty_in_slot(kvm, memslot, gfn); in mark_page_dirty()
3549 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); in kvm_vcpu_mark_page_dirty()
3617 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_check_block()
3630 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_vcpu_check_block()
3695 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_max_halt_poll_ns() local
3697 if (kvm->override_halt_poll_ns) { in kvm_vcpu_max_halt_poll_ns()
3705 return READ_ONCE(kvm->max_halt_poll_ns); in kvm_vcpu_max_halt_poll_ns()
3959 struct kvm *kvm = me->kvm; in kvm_vcpu_on_spin() local
3963 nr_vcpus = atomic_read(&kvm->online_vcpus); in kvm_vcpu_on_spin()
3991 start = READ_ONCE(kvm->last_boosted_vcpu) + 1; in kvm_vcpu_on_spin()
3997 vcpu = xa_load(&kvm->vcpu_array, idx); in kvm_vcpu_on_spin()
4019 WRITE_ONCE(kvm->last_boosted_vcpu, i); in kvm_vcpu_on_spin()
4032 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) in kvm_page_in_dirty_ring() argument
4037 kvm->dirty_ring_size / PAGE_SIZE); in kvm_page_in_dirty_ring()
4056 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
4058 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) in kvm_vcpu_fault()
4078 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || in kvm_vcpu_mmap()
4079 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && in kvm_vcpu_mmap()
4091 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
4137 vcpu->kvm->debugfs_dentry); in kvm_create_vcpu_debugfs()
4148 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id) in kvm_vm_ioctl_create_vcpu() argument
4166 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4167 if (kvm->created_vcpus >= kvm->max_vcpus) { in kvm_vm_ioctl_create_vcpu()
4168 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4172 r = kvm_arch_vcpu_precreate(kvm, id); in kvm_vm_ioctl_create_vcpu()
4174 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4178 kvm->created_vcpus++; in kvm_vm_ioctl_create_vcpu()
4179 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4195 kvm_vcpu_init(vcpu, kvm, id); in kvm_vm_ioctl_create_vcpu()
4201 if (kvm->dirty_ring_size) { in kvm_vm_ioctl_create_vcpu()
4202 r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring, in kvm_vm_ioctl_create_vcpu()
4203 id, kvm->dirty_ring_size); in kvm_vm_ioctl_create_vcpu()
4208 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4210 if (kvm_get_vcpu_by_id(kvm, id)) { in kvm_vm_ioctl_create_vcpu()
4215 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
4216 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); in kvm_vm_ioctl_create_vcpu()
4230 kvm_get_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
4240 atomic_inc(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
4243 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4250 kvm_put_kvm_no_destroy(kvm); in kvm_vm_ioctl_create_vcpu()
4251 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); in kvm_vm_ioctl_create_vcpu()
4253 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4262 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4263 kvm->created_vcpus--; in kvm_vm_ioctl_create_vcpu()
4264 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
4293 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_stats_release()
4323 kvm_get_kvm(vcpu->kvm); in kvm_vcpu_ioctl_get_stats_fd()
4346 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_pre_fault_memory()
4367 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_vcpu_pre_fault_memory()
4377 struct kvm *kvm = vcpu->kvm; in kvm_wait_for_vcpu_online() local
4383 if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus))) in kvm_wait_for_vcpu_online()
4396 if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx))) in kvm_wait_for_vcpu_online()
4411 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) in kvm_vcpu_ioctl()
4654 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) in kvm_vcpu_compat_ioctl()
4720 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) in kvm_device_ioctl()
4741 struct kvm *kvm = dev->kvm; in kvm_device_release() local
4744 mutex_lock(&kvm->lock); in kvm_device_release()
4748 mutex_unlock(&kvm->lock); in kvm_device_release()
4751 kvm_put_kvm(kvm); in kvm_device_release()
4795 static int kvm_ioctl_create_device(struct kvm *kvm, in kvm_ioctl_create_device() argument
4820 dev->kvm = kvm; in kvm_ioctl_create_device()
4822 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
4825 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4829 list_add_rcu(&dev->vm_node, &kvm->devices); in kvm_ioctl_create_device()
4830 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4835 kvm_get_kvm(kvm); in kvm_ioctl_create_device()
4838 kvm_put_kvm_no_destroy(kvm); in kvm_ioctl_create_device()
4839 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
4844 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4854 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) in kvm_vm_ioctl_check_extension_generic() argument
4889 if (kvm) in kvm_vm_ioctl_check_extension_generic()
4890 return kvm_arch_nr_memslot_as_ids(kvm); in kvm_vm_ioctl_check_extension_generic()
4916 return kvm_supported_mem_attributes(kvm); in kvm_vm_ioctl_check_extension_generic()
4920 return !kvm || kvm_arch_has_private_mem(kvm); in kvm_vm_ioctl_check_extension_generic()
4925 return kvm_vm_ioctl_check_extension(kvm, arg); in kvm_vm_ioctl_check_extension_generic()
4928 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) in kvm_vm_ioctl_enable_dirty_log_ring() argument
4940 if (size < kvm_dirty_ring_get_rsvd_entries(kvm) * in kvm_vm_ioctl_enable_dirty_log_ring()
4949 if (kvm->dirty_ring_size) in kvm_vm_ioctl_enable_dirty_log_ring()
4952 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_dirty_log_ring()
4954 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_dirty_log_ring()
4958 kvm->dirty_ring_size = size; in kvm_vm_ioctl_enable_dirty_log_ring()
4962 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_dirty_log_ring()
4966 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) in kvm_vm_ioctl_reset_dirty_pages() argument
4972 if (!kvm->dirty_ring_size) in kvm_vm_ioctl_reset_dirty_pages()
4975 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_reset_dirty_pages()
4977 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vm_ioctl_reset_dirty_pages()
4978 r = kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring, &cleared); in kvm_vm_ioctl_reset_dirty_pages()
4983 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_reset_dirty_pages()
4986 kvm_flush_remote_tlbs(kvm); in kvm_vm_ioctl_reset_dirty_pages()
4991 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, in kvm_vm_ioctl_enable_cap() argument
4997 bool kvm_are_all_memslots_empty(struct kvm *kvm) in kvm_are_all_memslots_empty() argument
5001 lockdep_assert_held(&kvm->slots_lock); in kvm_are_all_memslots_empty()
5003 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { in kvm_are_all_memslots_empty()
5004 if (!kvm_memslots_empty(__kvm_memslots(kvm, i))) in kvm_are_all_memslots_empty()
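    Lines 4997-5004 show the per-address-space iteration that recurs throughout this listing (see also lines 587, 1159, 2485, 1310). Reconstructed below, with the loop variable and the return statements assumed:

        bool kvm_are_all_memslots_empty(struct kvm *kvm)
        {
                int i;

                lockdep_assert_held(&kvm->slots_lock);

                /* Check the active memslot set of every address-space ID. */
                for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                        if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
                                return false;
                }

                return true;
        }
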
5012 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, in kvm_vm_ioctl_enable_cap_generic() argument
5025 kvm->manual_dirty_log_protect = cap->args[0]; in kvm_vm_ioctl_enable_cap_generic()
5033 kvm->max_halt_poll_ns = cap->args[0]; in kvm_vm_ioctl_enable_cap_generic()
5042 kvm->override_halt_poll_ns = true; in kvm_vm_ioctl_enable_cap_generic()
5048 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap)) in kvm_vm_ioctl_enable_cap_generic()
5051 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap_generic()
5056 !kvm->dirty_ring_size || cap->flags) in kvm_vm_ioctl_enable_cap_generic()
5059 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap_generic()
5066 if (kvm_are_all_memslots_empty(kvm)) { in kvm_vm_ioctl_enable_cap_generic()
5067 kvm->dirty_ring_with_bitmap = true; in kvm_vm_ioctl_enable_cap_generic()
5071 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap_generic()
5076 return kvm_vm_ioctl_enable_cap(kvm, cap); in kvm_vm_ioctl_enable_cap_generic()
5083 struct kvm *kvm = file->private_data; in kvm_vm_stats_read() local
5085 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, in kvm_vm_stats_read()
5086 &kvm_vm_stats_desc[0], &kvm->stat, in kvm_vm_stats_read()
5087 sizeof(kvm->stat), user_buffer, size, offset); in kvm_vm_stats_read()
5092 struct kvm *kvm = file->private_data; in kvm_vm_stats_release() local
5094 kvm_put_kvm(kvm); in kvm_vm_stats_release()
5105 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) in kvm_vm_ioctl_get_stats_fd() argument
5115 &kvm_vm_stats_fops, kvm, O_RDONLY, FMODE_PREAD); in kvm_vm_ioctl_get_stats_fd()
5121 kvm_get_kvm(kvm); in kvm_vm_ioctl_get_stats_fd()
5138 struct kvm *kvm = filp->private_data; in kvm_vm_ioctl() local
5142 if (kvm->mm != current->mm || kvm->vm_dead) in kvm_vm_ioctl()
5146 r = kvm_vm_ioctl_create_vcpu(kvm, arg); in kvm_vm_ioctl()
5154 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); in kvm_vm_ioctl()
5189 r = kvm_vm_ioctl_set_memory_region(kvm, &mem); in kvm_vm_ioctl()
5198 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_ioctl()
5208 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); in kvm_vm_ioctl()
5219 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
5228 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
5238 r = kvm_irqfd(kvm, &data); in kvm_vm_ioctl()
5247 r = kvm_ioeventfd(kvm, &data); in kvm_vm_ioctl()
5257 r = kvm_send_userspace_msi(kvm, &msi); in kvm_vm_ioctl()
5270 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, in kvm_vm_ioctl()
5295 if (!kvm_arch_can_set_irq_routing(kvm)) in kvm_vm_ioctl()
5310 r = kvm_set_irq_routing(kvm, entries, routing.nr, in kvm_vm_ioctl()
5324 r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs); in kvm_vm_ioctl()
5335 r = kvm_ioctl_create_device(kvm, &cd); in kvm_vm_ioctl()
5347 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); in kvm_vm_ioctl()
5350 r = kvm_vm_ioctl_reset_dirty_pages(kvm); in kvm_vm_ioctl()
5353 r = kvm_vm_ioctl_get_stats_fd(kvm); in kvm_vm_ioctl()
5363 r = kvm_gmem_create(kvm, &guest_memfd); in kvm_vm_ioctl()
5403 struct kvm *kvm = filp->private_data; in kvm_vm_compat_ioctl() local
5406 if (kvm->mm != current->mm || kvm->vm_dead) in kvm_vm_compat_ioctl()
5428 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
5444 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
5471 struct kvm *kvm; in kvm_dev_ioctl_create_vm() local
5480 kvm = kvm_create_vm(type, fdname); in kvm_dev_ioctl_create_vm()
5481 if (IS_ERR(kvm)) { in kvm_dev_ioctl_create_vm()
5482 r = PTR_ERR(kvm); in kvm_dev_ioctl_create_vm()
5486 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); in kvm_dev_ioctl_create_vm()
5498 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); in kvm_dev_ioctl_create_vm()
5504 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
5858 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
5877 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
5927 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
5935 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
5942 lockdep_assert_held(&kvm->slots_lock); in kvm_io_bus_register_dev()
5944 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_register_dev()
5972 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_register_dev()
5973 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_register_dev()
5979 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_unregister_dev() argument
5985 lockdep_assert_held(&kvm->slots_lock); in kvm_io_bus_unregister_dev()
5987 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_unregister_dev()
6009 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_unregister_dev()
6010 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_unregister_dev()
6027 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_get_dev() argument
6034 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_io_bus_get_dev()
6036 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); in kvm_io_bus_get_dev()
6047 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_io_bus_get_dev()
6065 if (!kvm_get_kvm_safe(stat_data->kvm)) in kvm_debugfs_open()
6072 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_open()
6082 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_release()
6087 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) in kvm_get_stat_per_vm() argument
6089 *val = *(u64 *)((void *)(&kvm->stat) + offset); in kvm_get_stat_per_vm()
6094 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) in kvm_clear_stat_per_vm() argument
6096 *(u64 *)((void *)(&kvm->stat) + offset) = 0; in kvm_clear_stat_per_vm()
6101 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) in kvm_get_stat_per_vcpu() argument
6108 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_stat_per_vcpu()
6114 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) in kvm_clear_stat_per_vcpu() argument
6119 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_clear_stat_per_vcpu()
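    Lines 6087-6119 are the debugfs stat accessors. The per-VM pair is fully visible in the listing; for the per-vCPU getter only the kvm_for_each_vcpu() line appears, so the loop body below is inferred from the same offset arithmetic and is an assumption. kvm_clear_stat_per_vcpu() (line 6114) zeroes the same field in the same loop.

        static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
        {
                /* offset is the byte offset of the counter inside kvm->stat. */
                *val = *(u64 *)((void *)(&kvm->stat) + offset);

                return 0;
        }

        static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
        {
                *(u64 *)((void *)(&kvm->stat) + offset) = 0;

                return 0;
        }

        static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
        {
                unsigned long i;
                struct kvm_vcpu *vcpu;

                *val = 0;

                /* Sum the counter at the same offset across all vCPUs (assumed body). */
                kvm_for_each_vcpu(i, vcpu, kvm)
                        *val += *(u64 *)((void *)(&vcpu->stat) + offset);

                return 0;
        }
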
6132 r = kvm_get_stat_per_vm(stat_data->kvm, in kvm_stat_data_get()
6136 r = kvm_get_stat_per_vcpu(stat_data->kvm, in kvm_stat_data_get()
6154 r = kvm_clear_stat_per_vm(stat_data->kvm, in kvm_stat_data_clear()
6158 r = kvm_clear_stat_per_vcpu(stat_data->kvm, in kvm_stat_data_clear()
6184 struct kvm *kvm; in vm_stat_get() local
6189 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_get()
6190 kvm_get_stat_per_vm(kvm, offset, &tmp_val); in vm_stat_get()
6200 struct kvm *kvm; in vm_stat_clear() local
6206 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_clear()
6207 kvm_clear_stat_per_vm(kvm, offset); in vm_stat_clear()
6220 struct kvm *kvm; in vcpu_stat_get() local
6225 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_get()
6226 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); in vcpu_stat_get()
6236 struct kvm *kvm; in vcpu_stat_clear() local
6242 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_clear()
6243 kvm_clear_stat_per_vcpu(kvm, offset); in vcpu_stat_clear()
6254 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) in kvm_uevent_notify_change() argument
6259 if (!kvm_dev.this_device || !kvm) in kvm_uevent_notify_change()
6282 kvm->userspace_pid = task_pid_nr(current); in kvm_uevent_notify_change()
6286 add_uevent_var(env, "PID=%d", kvm->userspace_pid); in kvm_uevent_notify_change()
6288 if (!IS_ERR(kvm->debugfs_dentry)) { in kvm_uevent_notify_change()
6292 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); in kvm_uevent_notify_change()