Lines Matching defs:kvm: definitions and local uses of struct kvm *kvm in the x86 KVM TDP MMU (arch/x86/kvm/mmu/tdp_mmu.c). An illustrative locking sketch follows the listing.
15 int kvm_mmu_init_tdp_mmu(struct kvm *kvm) in kvm_mmu_init_tdp_mmu()
30 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, in kvm_lockdep_assert_mmu_lock_held()
41 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) in kvm_mmu_uninit_tdp_mmu()
86 struct kvm *kvm = root->tdp_mmu_async_data; in tdp_mmu_zap_root_work() local
112 static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root) in tdp_mmu_schedule_zap_root()
129 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, in kvm_tdp_mmu_put_root()
193 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, in tdp_mmu_next_root()
305 struct kvm *kvm = vcpu->kvm; in kvm_tdp_mmu_get_vcpu_root_hpa() local
348 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte_dirty_log()
366 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_account_mmu_page()
372 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_unaccount_mmu_page()
387 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp, in tdp_mmu_unlink_sp()
424 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) in handle_removed_pt()
522 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in __handle_changed_spte()
610 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte()
638 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm, in tdp_mmu_set_spte_atomic()
668 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, in tdp_mmu_zap_spte_atomic()
721 static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, in __tdp_mmu_set_spte()
748 static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, in _tdp_mmu_set_spte()
760 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_set_spte()
766 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm, in tdp_mmu_set_spte_no_acc_track()
773 static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, in tdp_mmu_set_spte_no_dirty_log()
807 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, in tdp_mmu_iter_cond_resched()
849 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in __tdp_mmu_zap_root()
875 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_root()
911 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_tdp_mmu_zap_sp()
939 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_leafs()
979 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end, in kvm_tdp_mmu_zap_leafs()
990 void kvm_tdp_mmu_zap_all(struct kvm *kvm) in kvm_tdp_mmu_zap_all()
1017 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) in kvm_tdp_mmu_zap_invalidated_roots()
1039 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) in kvm_tdp_mmu_invalidate_all_roots()
1120 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_link_sp()
1149 struct kvm *kvm = vcpu->kvm; in kvm_tdp_mmu_map() local
1228 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, in kvm_tdp_mmu_unmap_gfn_range()
1238 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, in kvm_tdp_mmu_handle_gfn()
1266 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, in age_gfn_range()
1295 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_age_gfn_range()
1300 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, in test_age_gfn()
1306 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_test_age_gfn()
1311 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, in set_spte_gfn()
1347 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_set_spte_gfn()
1362 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range()
1400 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, in kvm_tdp_mmu_wrprot_slot()
1434 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, in tdp_mmu_alloc_sp_for_split()
1474 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_split_huge_page()
1512 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, in tdp_mmu_split_huge_pages_root()
1581 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, in kvm_tdp_mmu_try_split_huge_pages()
1607 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range()
1653 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_slot()
1675 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked()
1719 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_pt_masked()
1731 static void zap_collapsible_spte_range(struct kvm *kvm, in zap_collapsible_spte_range()
1786 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, in kvm_tdp_mmu_zap_collapsible_sptes()
1802 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn()
1838 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, in kvm_tdp_mmu_write_protect_gfn()
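
The listing pairs an atomic SPTE-update helper (tdp_mmu_set_spte_atomic, usable while mmu_lock is held for read) with a non-atomic one (tdp_mmu_set_spte, which requires mmu_lock held for write). The userspace sketch below is only an illustration of that general discipline, using a pthread rwlock and C11 atomics in place of kvm->mmu_lock and real shadow PTEs; struct fake_mmu, set_spte_atomic(), and set_spte() are hypothetical names, not the kernel implementations.

/*
 * Illustrative userspace analog only -- not kernel code. It mimics the two
 * update disciplines suggested by the listing: a compare-exchange path that
 * is safe under a shared (read) lock, and a plain store path that relies on
 * holding the exclusive (write) lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_mmu {
	pthread_rwlock_t mmu_lock;	/* stands in for kvm->mmu_lock */
	_Atomic uint64_t spte;		/* one fake shadow PTE */
};

/*
 * Shared-lock path: only succeed if the entry still holds the value the
 * caller observed, so concurrent updaters cannot clobber each other.
 * Returns true on success, false if the caller must re-read and retry.
 */
static bool set_spte_atomic(struct fake_mmu *mmu, uint64_t old_spte,
			    uint64_t new_spte)
{
	bool ok;

	pthread_rwlock_rdlock(&mmu->mmu_lock);
	ok = atomic_compare_exchange_strong(&mmu->spte, &old_spte, new_spte);
	pthread_rwlock_unlock(&mmu->mmu_lock);
	return ok;
}

/* Exclusive-lock path: no other updater can race, so a plain store suffices. */
static void set_spte(struct fake_mmu *mmu, uint64_t new_spte)
{
	pthread_rwlock_wrlock(&mmu->mmu_lock);
	atomic_store(&mmu->spte, new_spte);
	pthread_rwlock_unlock(&mmu->mmu_lock);
}

int main(void)
{
	struct fake_mmu mmu = { .spte = 0 };

	pthread_rwlock_init(&mmu.mmu_lock, NULL);
	printf("atomic update from 0: %s\n",
	       set_spte_atomic(&mmu, 0, 0x1000) ? "ok" : "retry");
	printf("atomic update from stale value: %s\n",
	       set_spte_atomic(&mmu, 0, 0x2000) ? "ok" : "retry");
	set_spte(&mmu, 0x3000);
	printf("final spte: 0x%llx\n",
	       (unsigned long long)atomic_load(&mmu.spte));
	pthread_rwlock_destroy(&mmu.mmu_lock);
	return 0;
}

Build with something like "cc -std=c11 -pthread sketch.c"; the second atomic update reports "retry" because the observed value is stale, which is the same reason callers of the real atomic helper loop and re-read the SPTE.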