Lines matching refs:kvm in the TDP MMU implementation (arch/x86/kvm/mmu/tdp_mmu.c).
Each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" marks the entry where kvm is declared within that function.
15 int kvm_mmu_init_tdp_mmu(struct kvm *kvm) in kvm_mmu_init_tdp_mmu() argument
23 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); in kvm_mmu_init_tdp_mmu()
24 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); in kvm_mmu_init_tdp_mmu()
25 kvm->arch.tdp_mmu_zap_wq = wq; in kvm_mmu_init_tdp_mmu()
30 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, in kvm_lockdep_assert_mmu_lock_held() argument
34 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
36 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
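
The kvm_lockdep_assert_mmu_lock_held() entries above (source lines 30-36) capture the TDP MMU's locking rule: a walk flagged as shared must hold kvm->mmu_lock for read, any other walk must hold it for write. A minimal userspace sketch of that check, with two plain fields standing in for what lockdep tracks in the kernel (the toy_* names are invented for the sketch):

#include <assert.h>
#include <stdbool.h>

/*
 * Illustrative only: the read/write state that lockdep verifies in the
 * kernel is modelled here as two fields the lock/unlock paths (not shown)
 * would have to keep up to date.
 */
struct toy_mmu_lock {
        int read_holders;
        bool write_held;
};

/*
 * Same shape as kvm_lockdep_assert_mmu_lock_held(): a shared walk must run
 * under the read lock, an exclusive walk under the write lock.
 */
static bool toy_assert_mmu_lock_held(struct toy_mmu_lock *lock, bool shared)
{
        if (shared)
                assert(lock->read_holders > 0);
        else
                assert(lock->write_held);
        return true;
}

int main(void)
{
        struct toy_mmu_lock lock = { .read_holders = 1, .write_held = false };

        toy_assert_mmu_lock_held(&lock, true);  /* shared walk under the read lock */
        return 0;
}
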
41 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) in kvm_mmu_uninit_tdp_mmu() argument
44 destroy_workqueue(kvm->arch.tdp_mmu_zap_wq); in kvm_mmu_uninit_tdp_mmu()
46 WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages)); in kvm_mmu_uninit_tdp_mmu()
47 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); in kvm_mmu_uninit_tdp_mmu()
79 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
86 struct kvm *kvm = root->tdp_mmu_async_data; in tdp_mmu_zap_root_work() local
88 read_lock(&kvm->mmu_lock); in tdp_mmu_zap_root_work()
98 tdp_mmu_zap_root(kvm, root, true); in tdp_mmu_zap_root_work()
107 kvm_tdp_mmu_put_root(kvm, root, true); in tdp_mmu_zap_root_work()
109 read_unlock(&kvm->mmu_lock); in tdp_mmu_zap_root_work()
112 static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root) in tdp_mmu_schedule_zap_root() argument
114 root->tdp_mmu_async_data = kvm; in tdp_mmu_schedule_zap_root()
116 queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work); in tdp_mmu_schedule_zap_root()
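
tdp_mmu_zap_root_work() and tdp_mmu_schedule_zap_root() above (source lines 86-116) defer the expensive teardown of an invalidated root to kvm->arch.tdp_mmu_zap_wq: the root records the kvm it belongs to, and the queued work zaps it under the read lock and then drops the reference that kept it alive. A toy, single-threaded model of that deferral, with a hand-rolled one-list "workqueue" in place of the kernel's (toy_queue_work(), toy_flush_workqueue() and struct toy_root are inventions of the sketch):

#include <stddef.h>

struct toy_work {
        void (*fn)(struct toy_work *work);
        struct toy_work *next;
};

/* A trivial "workqueue": queue remembers items, flush runs them. */
static struct toy_work *toy_pending;

static void toy_queue_work(struct toy_work *work)
{
        work->next = toy_pending;
        toy_pending = work;
}

static void toy_flush_workqueue(void)
{
        while (toy_pending) {
                struct toy_work *work = toy_pending;

                toy_pending = work->next;
                work->fn(work);  /* e.g. zap the root, then drop its reference */
        }
}

struct toy_root {
        struct toy_work zap_work;  /* embedded, like tdp_mmu_async_work */
        int zapped;
};

static void toy_zap_root_work(struct toy_work *work)
{
        /* container_of() by hand: the work item is the first member. */
        struct toy_root *root = (struct toy_root *)work;

        root->zapped = 1;
}

int main(void)
{
        struct toy_root root = { .zap_work = { .fn = toy_zap_root_work } };

        toy_queue_work(&root.zap_work);  /* like tdp_mmu_schedule_zap_root() */
        toy_flush_workqueue();           /* like flushing tdp_mmu_zap_wq */
        return root.zapped ? 0 : 1;
}
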
129 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, in kvm_tdp_mmu_put_root() argument
132 kvm_lockdep_assert_mmu_lock_held(kvm, shared); in kvm_tdp_mmu_put_root()
173 tdp_mmu_schedule_zap_root(kvm, root); in kvm_tdp_mmu_put_root()
177 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_put_root()
179 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_put_root()
193 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, in tdp_mmu_next_root() argument
202 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
206 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
214 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
221 kvm_tdp_mmu_put_root(kvm, prev_root, shared); in tdp_mmu_next_root()
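
tdp_mmu_next_root() above (source lines 193-221) advances along kvm->arch.tdp_mmu_roots while always holding a reference: it pins the next root before putting the previous one, so the list position it iterates from can never be freed underneath it. A simplified sketch of that ordering using plain atomics and a singly linked list, where the kernel additionally relies on RCU for the list traversal itself (struct toy_root and its helpers are hypothetical):

#include <stdatomic.h>
#include <stddef.h>

struct toy_root {
        atomic_int refcount;
        struct toy_root *next;  /* next entry on the roots list */
};

static struct toy_root *toy_get_root(struct toy_root *root)
{
        if (root)
                atomic_fetch_add(&root->refcount, 1);
        return root;
}

static void toy_put_root(struct toy_root *root)
{
        if (!root)
                return;
        if (atomic_fetch_sub(&root->refcount, 1) == 1) {
                /* Last reference: the real code would queue the root for zapping here. */
        }
}

/*
 * Same ordering as tdp_mmu_next_root(): pin the next root first, only then
 * release the previous one, so the caller always holds at least one
 * reference and its position in the list stays valid.
 */
static struct toy_root *toy_next_root(struct toy_root *prev)
{
        struct toy_root *next = toy_get_root(prev ? prev->next : NULL);

        if (prev)
                toy_put_root(prev);
        return next;
}

int main(void)
{
        struct toy_root b = { .refcount = 0, .next = NULL };
        struct toy_root a = { .refcount = 0, .next = &b };
        struct toy_root *cur = toy_get_root(&a);

        while (cur)
                cur = toy_next_root(cur);  /* walks a -> b -> NULL */
        return 0;
}
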
305 struct kvm *kvm = vcpu->kvm; in kvm_tdp_mmu_get_vcpu_root_hpa() local
308 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_get_vcpu_root_hpa()
314 for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) { in kvm_tdp_mmu_get_vcpu_root_hpa()
325 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_get_vcpu_root_hpa()
326 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots); in kvm_tdp_mmu_get_vcpu_root_hpa()
327 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_get_vcpu_root_hpa()
333 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
348 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte_dirty_log() argument
361 slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn); in handle_changed_spte_dirty_log()
362 mark_page_dirty_in_slot(kvm, slot, gfn); in handle_changed_spte_dirty_log()
366 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_account_mmu_page() argument
369 atomic64_inc(&kvm->arch.tdp_mmu_pages); in tdp_account_mmu_page()
372 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_unaccount_mmu_page() argument
375 atomic64_dec(&kvm->arch.tdp_mmu_pages); in tdp_unaccount_mmu_page()
387 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp, in tdp_mmu_unlink_sp() argument
390 tdp_unaccount_mmu_page(kvm, sp); in tdp_mmu_unlink_sp()
396 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in tdp_mmu_unlink_sp()
398 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_unlink_sp()
401 untrack_possible_nx_huge_page(kvm, sp); in tdp_mmu_unlink_sp()
404 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in tdp_mmu_unlink_sp()
424 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) in handle_removed_pt() argument
433 tdp_mmu_unlink_sp(kvm, sp, shared); in handle_removed_pt()
500 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, in handle_removed_pt()
522 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in __handle_changed_spte() argument
593 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1); in __handle_changed_spte()
607 handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared); in __handle_changed_spte()
610 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte() argument
614 __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, in handle_changed_spte()
617 handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, in handle_changed_spte()
638 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm, in tdp_mmu_set_spte_atomic() argument
652 lockdep_assert_held_read(&kvm->mmu_lock); in tdp_mmu_set_spte_atomic()
661 __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, in tdp_mmu_set_spte_atomic()
668 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, in tdp_mmu_zap_spte_atomic() argument
679 ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE); in tdp_mmu_zap_spte_atomic()
683 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level); in tdp_mmu_zap_spte_atomic()
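
tdp_mmu_set_spte_atomic() and tdp_mmu_zap_spte_atomic() above (source lines 638-683) can run with mmu_lock held only for read, so they install the new SPTE with a compare-and-exchange against the value the iterator last observed and bail out if another walker got there first. A userspace sketch of that pattern (spte_t, struct toy_iter and the exact error convention are simplifications, not KVM's types):

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

typedef _Atomic uint64_t spte_t;

struct toy_iter {
        spte_t *sptep;      /* the PTE slot being visited */
        uint64_t old_spte;  /* value observed when the iterator read it */
};

/*
 * Shape of tdp_mmu_set_spte_atomic(): only install new_spte if the slot
 * still holds the value we last read; otherwise report -EBUSY and let the
 * caller re-read the entry (or give up) instead of clobbering a concurrent
 * update made by another vCPU or zap thread.
 */
static int toy_set_spte_atomic(struct toy_iter *iter, uint64_t new_spte)
{
        uint64_t expected = iter->old_spte;

        if (!atomic_compare_exchange_strong(iter->sptep, &expected, new_spte))
                return -EBUSY;        /* lost the race */

        iter->old_spte = new_spte;    /* won: remember what we wrote */
        return 0;
}

int main(void)
{
        spte_t slot = 0;
        struct toy_iter iter = { .sptep = &slot, .old_spte = 0 };

        return toy_set_spte_atomic(&iter, 0x1234) ? 1 : 0;
}
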
721 static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, in __tdp_mmu_set_spte() argument
725 lockdep_assert_held_write(&kvm->mmu_lock); in __tdp_mmu_set_spte()
738 __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); in __tdp_mmu_set_spte()
743 handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, in __tdp_mmu_set_spte()
748 static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, in _tdp_mmu_set_spte() argument
754 iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, in _tdp_mmu_set_spte()
760 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_set_spte() argument
763 _tdp_mmu_set_spte(kvm, iter, new_spte, true, true); in tdp_mmu_set_spte()
766 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm, in tdp_mmu_set_spte_no_acc_track() argument
770 _tdp_mmu_set_spte(kvm, iter, new_spte, false, true); in tdp_mmu_set_spte_no_acc_track()
773 static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, in tdp_mmu_set_spte_no_dirty_log() argument
777 _tdp_mmu_set_spte(kvm, iter, new_spte, true, false); in tdp_mmu_set_spte_no_dirty_log()
807 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, in tdp_mmu_iter_cond_resched() argument
817 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { in tdp_mmu_iter_cond_resched()
819 kvm_flush_remote_tlbs(kvm); in tdp_mmu_iter_cond_resched()
824 cond_resched_rwlock_read(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
826 cond_resched_rwlock_write(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
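
tdp_mmu_iter_cond_resched() above (source lines 807-826) keeps long walks preemptible: when the scheduler wants the CPU or another thread is waiting on mmu_lock, it flushes remote TLBs if the caller has unflushed changes, drops and retakes the lock in the same mode it was taken, and tells the caller to restart from the current position. A hedged userspace outline of the same decision, with stub predicates in place of need_resched()/rwlock_needbreak() (everything prefixed toy_ is made up for the sketch):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_rwlock_t toy_mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-ins for need_resched() / rwlock_needbreak(); always yield here. */
static bool toy_need_resched(void)   { return true; }
static bool toy_lock_contended(void) { return false; }

static void toy_flush_remote_tlbs(void) { /* no-op in the sketch */ }

/*
 * Returns true if the walk yielded, in which case the caller must restart
 * its iterator at the current position rather than trusting cached state.
 */
static bool toy_iter_cond_resched(bool flush, bool shared)
{
        if (!toy_need_resched() && !toy_lock_contended())
                return false;

        if (flush)
                toy_flush_remote_tlbs();  /* don't yield with stale TLBs outstanding */

        pthread_rwlock_unlock(&toy_mmu_lock);
        sched_yield();                    /* stand-in for cond_resched() */
        if (shared)
                pthread_rwlock_rdlock(&toy_mmu_lock);
        else
                pthread_rwlock_wrlock(&toy_mmu_lock);
        return true;
}

int main(void)
{
        pthread_rwlock_rdlock(&toy_mmu_lock);
        toy_iter_cond_resched(false, true);
        pthread_rwlock_unlock(&toy_mmu_lock);
        return 0;
}
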
849 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in __tdp_mmu_zap_root() argument
859 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in __tdp_mmu_zap_root()
869 tdp_mmu_set_spte(kvm, &iter, 0); in __tdp_mmu_zap_root()
870 else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) in __tdp_mmu_zap_root()
875 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_root() argument
891 kvm_lockdep_assert_mmu_lock_held(kvm, shared); in tdp_mmu_zap_root()
905 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G); in tdp_mmu_zap_root()
906 __tdp_mmu_zap_root(kvm, root, shared, root->role.level); in tdp_mmu_zap_root()
911 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_tdp_mmu_zap_sp() argument
926 __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0, in kvm_tdp_mmu_zap_sp()
939 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_leafs() argument
946 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_zap_leafs()
952 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) { in tdp_mmu_zap_leafs()
961 tdp_mmu_set_spte(kvm, &iter, 0); in tdp_mmu_zap_leafs()
979 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end, in kvm_tdp_mmu_zap_leafs() argument
984 for_each_tdp_mmu_root_yield_safe(kvm, root, as_id) in kvm_tdp_mmu_zap_leafs()
985 flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush); in kvm_tdp_mmu_zap_leafs()
990 void kvm_tdp_mmu_zap_all(struct kvm *kvm) in kvm_tdp_mmu_zap_all() argument
1008 for_each_tdp_mmu_root_yield_safe(kvm, root, i) in kvm_tdp_mmu_zap_all()
1009 tdp_mmu_zap_root(kvm, root, false); in kvm_tdp_mmu_zap_all()
1017 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) in kvm_tdp_mmu_zap_invalidated_roots() argument
1019 flush_workqueue(kvm->arch.tdp_mmu_zap_wq); in kvm_tdp_mmu_zap_invalidated_roots()
1039 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) in kvm_tdp_mmu_invalidate_all_roots() argument
1043 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_invalidate_all_roots()
1044 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { in kvm_tdp_mmu_invalidate_all_roots()
1048 tdp_mmu_schedule_zap_root(kvm, root); in kvm_tdp_mmu_invalidate_all_roots()
1078 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) in tdp_mmu_map_handle_target_level()
1082 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level); in tdp_mmu_map_handle_target_level()
1120 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_link_sp() argument
1127 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); in tdp_mmu_link_sp()
1131 tdp_mmu_set_spte(kvm, iter, spte); in tdp_mmu_link_sp()
1134 tdp_account_mmu_page(kvm, sp); in tdp_mmu_link_sp()
1139 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1149 struct kvm *kvm = vcpu->kvm; in kvm_tdp_mmu_map() local
1191 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1193 r = tdp_mmu_link_sp(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1206 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_map()
1208 track_possible_nx_huge_page(kvm, sp); in kvm_tdp_mmu_map()
1209 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_map()
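
The kvm_tdp_mmu_map() entries above (source lines 1149-1209) show the fault path walking from the root toward the target level, splitting huge pages or linking freshly allocated page tables with tdp_mmu_split_huge_page()/tdp_mmu_link_sp() until the leaf SPTE can be installed. A deliberately single-threaded sketch of that walk over a software-defined four-level table (the paging geometry and toy_* helpers are illustrative; none of the locking, atomic installation or NX huge page tracking is modelled):

#include <stdint.h>
#include <stdlib.h>

#define ENTRIES_PER_TABLE 512
#define LEVEL_SHIFT(l)    (12 + 9 * ((l) - 1))  /* x86-style 4K/2M/1G/512G */

struct toy_pt {
        uint64_t entry[ENTRIES_PER_TABLE];  /* either a leaf or a child pointer */
};

static int toy_index(uint64_t gpa, int level)
{
        return (gpa >> LEVEL_SHIFT(level)) & (ENTRIES_PER_TABLE - 1);
}

/*
 * Walk from the root (level 4) down to target_level, allocating missing
 * intermediate tables along the way, then install the leaf. The real
 * kvm_tdp_mmu_map() additionally handles huge-page splitting, atomic SPTE
 * installation under the read lock, and NX huge page tracking.
 */
static int toy_map(struct toy_pt *root, uint64_t gpa, uint64_t leaf,
                   int target_level)
{
        struct toy_pt *pt = root;
        int level;

        for (level = 4; level > target_level; level--) {
                uint64_t *slot = &pt->entry[toy_index(gpa, level)];

                if (!*slot) {
                        struct toy_pt *child = calloc(1, sizeof(*child));

                        if (!child)
                                return -1;
                        *slot = (uint64_t)(uintptr_t)child;  /* like tdp_mmu_link_sp() */
                }
                pt = (struct toy_pt *)(uintptr_t)*slot;
        }

        pt->entry[toy_index(gpa, target_level)] = leaf;  /* install the leaf SPTE */
        return 0;
}

int main(void)
{
        struct toy_pt *root = calloc(1, sizeof(*root));

        return root ? toy_map(root, 0x123456789000ull, 0xabc000ull | 0x7, 1) : 1;
}
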
1228 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, in kvm_tdp_mmu_unmap_gfn_range() argument
1231 return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start, in kvm_tdp_mmu_unmap_gfn_range()
1235 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1238 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, in kvm_tdp_mmu_handle_gfn() argument
1250 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { in kvm_tdp_mmu_handle_gfn()
1254 ret |= handler(kvm, &iter, range); in kvm_tdp_mmu_handle_gfn()
1266 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, in age_gfn_range() argument
1290 tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte); in age_gfn_range()
1295 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_age_gfn_range() argument
1297 return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range); in kvm_tdp_mmu_age_gfn_range()
1300 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, in test_age_gfn() argument
1306 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_test_age_gfn() argument
1308 return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); in kvm_tdp_mmu_test_age_gfn()
1311 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, in set_spte_gfn() argument
1329 tdp_mmu_set_spte(kvm, iter, 0); in set_spte_gfn()
1335 tdp_mmu_set_spte(kvm, iter, new_spte); in set_spte_gfn()
1347 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_set_spte_gfn() argument
1354 return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); in kvm_tdp_mmu_set_spte_gfn()
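
age_gfn_range(), test_age_gfn() and set_spte_gfn() above (source lines 1238-1354) all funnel through kvm_tdp_mmu_handle_gfn(), which walks each root for the slot's address space and applies a tdp_handler_t callback to every SPTE in the range, OR-ing the results. A small sketch of that callback dispatch, with a flat array standing in for the paging structures (the toy_* types and the ACCESSED_BIT position are assumptions of the sketch):

#include <stdbool.h>
#include <stdint.h>

struct toy_iter  { uint64_t gfn; uint64_t *sptep; };
struct toy_range { uint64_t start, end; };

/* Mirrors tdp_handler_t: returns true if the handler changed anything. */
typedef bool (*toy_handler_t)(struct toy_iter *iter, struct toy_range *range);

#define ACCESSED_BIT (1ull << 5)  /* bit position arbitrary for the sketch */

static bool toy_age_gfn(struct toy_iter *iter, struct toy_range *range)
{
        (void)range;
        if (!(*iter->sptep & ACCESSED_BIT))
                return false;
        *iter->sptep &= ~ACCESSED_BIT;  /* clear the A bit, like age_gfn_range() */
        return true;
}

static bool toy_test_age_gfn(struct toy_iter *iter, struct toy_range *range)
{
        (void)range;
        return *iter->sptep & ACCESSED_BIT;  /* report only, change nothing */
}

/* Shape of kvm_tdp_mmu_handle_gfn(): apply the handler to each SPTE in range. */
static bool toy_handle_gfn(uint64_t *sptes, struct toy_range *range,
                           toy_handler_t handler)
{
        bool ret = false;

        for (uint64_t gfn = range->start; gfn < range->end; gfn++) {
                struct toy_iter iter = { .gfn = gfn, .sptep = &sptes[gfn] };

                ret |= handler(&iter, range);
        }
        return ret;
}

int main(void)
{
        uint64_t sptes[4] = { ACCESSED_BIT, 0, ACCESSED_BIT, 0 };
        struct toy_range range = { .start = 0, .end = 4 };

        bool aged  = toy_handle_gfn(sptes, &range, toy_age_gfn);
        bool young = toy_handle_gfn(sptes, &range, toy_test_age_gfn);

        return (aged && !young) ? 0 : 1;
}
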
1362 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range() argument
1375 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in wrprot_gfn_range()
1385 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) in wrprot_gfn_range()
1400 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, in kvm_tdp_mmu_wrprot_slot() argument
1406 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_wrprot_slot()
1408 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_wrprot_slot()
1409 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
1434 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, in tdp_mmu_alloc_sp_for_split() argument
1456 read_unlock(&kvm->mmu_lock); in tdp_mmu_alloc_sp_for_split()
1458 write_unlock(&kvm->mmu_lock); in tdp_mmu_alloc_sp_for_split()
1464 read_lock(&kvm->mmu_lock); in tdp_mmu_alloc_sp_for_split()
1466 write_lock(&kvm->mmu_lock); in tdp_mmu_alloc_sp_for_split()
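
tdp_mmu_alloc_sp_for_split() above (source lines 1434-1466) first tries a no-sleep allocation; if that fails it drops mmu_lock in whichever mode it was taken, allocates with a sleeping GFP, retakes the lock and signals the caller to restart the iteration, since the paging structures may have changed in the meantime. A hedged outline of that drop-allocate-retake pattern (toy_alloc_atomic()/toy_alloc_sleep() stand in for the two allocation modes):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_rwlock_t toy_mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *toy_alloc_atomic(size_t size)
{
        (void)size;
        return NULL;  /* pretend the no-sleep allocation failed */
}

static void *toy_alloc_sleep(size_t size)
{
        return malloc(size);  /* may "sleep": only safe with the lock dropped */
}

/*
 * On return, *retry tells the caller whether the lock was dropped in the
 * middle, in which case the iterator must be restarted because the page
 * tables may have changed while we slept in the allocator.
 */
static void *toy_alloc_sp_for_split(bool shared, bool *retry)
{
        void *sp = toy_alloc_atomic(128);

        *retry = false;
        if (sp)
                return sp;

        pthread_rwlock_unlock(&toy_mmu_lock);
        sp = toy_alloc_sleep(128);
        if (shared)
                pthread_rwlock_rdlock(&toy_mmu_lock);
        else
                pthread_rwlock_wrlock(&toy_mmu_lock);
        *retry = true;
        return sp;
}

int main(void)
{
        bool retry;

        pthread_rwlock_rdlock(&toy_mmu_lock);
        free(toy_alloc_sp_for_split(true, &retry));
        pthread_rwlock_unlock(&toy_mmu_lock);
        return retry ? 0 : 1;
}
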
1474 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_split_huge_page() argument
1486 sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i); in tdp_mmu_split_huge_page()
1496 ret = tdp_mmu_link_sp(kvm, iter, sp, shared); in tdp_mmu_split_huge_page()
1505 kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE); in tdp_mmu_split_huge_page()
1512 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, in tdp_mmu_split_huge_pages_root() argument
1536 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in tdp_mmu_split_huge_pages_root()
1543 sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared); in tdp_mmu_split_huge_pages_root()
1558 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) in tdp_mmu_split_huge_pages_root()
1581 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, in kvm_tdp_mmu_try_split_huge_pages() argument
1589 kvm_lockdep_assert_mmu_lock_held(kvm, shared); in kvm_tdp_mmu_try_split_huge_pages()
1591 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) { in kvm_tdp_mmu_try_split_huge_pages()
1592 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared); in kvm_tdp_mmu_try_split_huge_pages()
1594 kvm_tdp_mmu_put_root(kvm, root, shared); in kvm_tdp_mmu_try_split_huge_pages()
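
tdp_mmu_split_huge_page() and its callers above (source lines 1474-1594) replace one huge SPTE with a child table whose SPTE_ENT_PER_PAGE entries map the same physical range at the next-lower level, then link the new table in and update the page statistics. A stripped-down sketch of building those child entries, with make_huge_page_split_spte() reduced to stepping the physical address (the masks and permission bits are placeholders):

#include <stdint.h>

#define SPTE_ENT_PER_PAGE 512
#define PAGE_SHIFT        12
#define LEVEL_SHIFT(l)    (PAGE_SHIFT + 9 * ((l) - 1))
#define PFN_MASK          0x000ffffffffff000ull

/*
 * Build the child entries for splitting huge_spte (mapping at 'level') into
 * SPTE_ENT_PER_PAGE entries at level - 1. Each child keeps the original
 * permission bits and maps a (level - 1)-sized slice of the huge page's
 * physical range.
 */
static void toy_split_huge_spte(uint64_t huge_spte, int level,
                                uint64_t child[SPTE_ENT_PER_PAGE])
{
        uint64_t perms = huge_spte & ~PFN_MASK;
        uint64_t base  = huge_spte & PFN_MASK;
        uint64_t step  = 1ull << LEVEL_SHIFT(level - 1);

        for (int i = 0; i < SPTE_ENT_PER_PAGE; i++)
                child[i] = (base + (uint64_t)i * step) | perms;
}

int main(void)
{
        static uint64_t child[SPTE_ENT_PER_PAGE];

        /* Split a 2MiB mapping (level 2) at 0x200000 with toy permission bits. */
        toy_split_huge_spte(0x200000ull | 0x7, 2, child);
        return child[1] == (0x201000ull | 0x7) ? 0 : 1;
}
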
1607 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range() argument
1618 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in clear_dirty_gfn_range()
1636 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) in clear_dirty_gfn_range()
1653 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_slot() argument
1659 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_clear_dirty_slot()
1661 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_clear_dirty_slot()
1662 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
1675 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked() argument
1706 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); in clear_dirty_pt_masked()
1719 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_pt_masked() argument
1726 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_clear_dirty_pt_masked()
1727 for_each_tdp_mmu_root(kvm, root, slot->as_id) in kvm_tdp_mmu_clear_dirty_pt_masked()
1728 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); in kvm_tdp_mmu_clear_dirty_pt_masked()
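
clear_dirty_pt_masked() and kvm_tdp_mmu_clear_dirty_pt_masked() above (source lines 1675-1728) take a 64-bit mask covering gfns [gfn, gfn + 63] and, for each set bit, either clear the dirty bit or write-protect the SPTE, depending on whether the slot logs dirty pages via the D bit or via write faults. A compact sketch of that mask walk (the bit positions and the flat sptes[] array are placeholders for the sketch):

#include <stdbool.h>
#include <stdint.h>

#define DIRTY_BIT    (1ull << 9)  /* bit positions arbitrary for the sketch */
#define WRITABLE_BIT (1ull << 1)

/*
 * For every gfn whose bit is set in 'mask', either drop the dirty bit or
 * drop write access, mirroring the wrprot switch in clear_dirty_pt_masked().
 * sptes[] is indexed by the gfn's offset from the 64-aligned base gfn.
 */
static void toy_clear_dirty_masked(uint64_t sptes[64], uint64_t mask,
                                   bool wrprot)
{
        while (mask) {
                int offset = __builtin_ctzll(mask);  /* lowest set bit */

                if (wrprot)
                        sptes[offset] &= ~WRITABLE_BIT;
                else
                        sptes[offset] &= ~DIRTY_BIT;

                mask &= mask - 1;                    /* clear that bit */
        }
}

int main(void)
{
        uint64_t sptes[64] = { [0] = DIRTY_BIT | WRITABLE_BIT,
                               [3] = DIRTY_BIT | WRITABLE_BIT };

        toy_clear_dirty_masked(sptes, (1ull << 0) | (1ull << 3), false);
        return (sptes[0] == WRITABLE_BIT && sptes[3] == WRITABLE_BIT) ? 0 : 1;
}
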
1731 static void zap_collapsible_spte_range(struct kvm *kvm, in zap_collapsible_spte_range() argument
1744 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in zap_collapsible_spte_range()
1769 max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, in zap_collapsible_spte_range()
1775 if (tdp_mmu_zap_spte_atomic(kvm, &iter)) in zap_collapsible_spte_range()
1786 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, in kvm_tdp_mmu_zap_collapsible_sptes() argument
1791 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_zap_collapsible_sptes()
1793 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_zap_collapsible_sptes()
1794 zap_collapsible_spte_range(kvm, root, slot); in kvm_tdp_mmu_zap_collapsible_sptes()
1802 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn() argument
1824 tdp_mmu_set_spte(kvm, &iter, new_spte); in write_protect_gfn()
1838 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, in kvm_tdp_mmu_write_protect_gfn() argument
1845 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_write_protect_gfn()
1846 for_each_tdp_mmu_root(kvm, root, slot->as_id) in kvm_tdp_mmu_write_protect_gfn()
1847 spte_set |= write_protect_gfn(kvm, root, gfn, min_level); in kvm_tdp_mmu_write_protect_gfn()