Lines matching refs:gfn_t
273 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
279 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep()
293 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn()
709 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn()
747 gfn_t gfn, unsigned int access) in kvm_mmu_page_set_translation()
768 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index); in kvm_mmu_page_set_access()
777 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot()
787 gfn_t gfn, int count) in update_gfn_disallow_lpage_count()
799 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_disallow_lpage()
804 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_allow_lpage()
813 gfn_t gfn; in account_shadowed()
862 gfn_t gfn; in unaccount_shadowed()
892 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, in gfn_to_memslot_dirty_bitmap()
1067 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level, in gfn_to_rmap()
1089 gfn_t gfn; in rmap_remove()
1302 gfn_t gfn_offset, unsigned long mask) in kvm_mmu_write_protect_pt_masked()
1335 gfn_t gfn_offset, unsigned long mask) in kvm_mmu_clear_dirty_pt_masked()
1368 gfn_t gfn_offset, unsigned long mask) in kvm_arch_mmu_enable_log_dirty_pt_masked()
1380 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
1381 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
1444 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_zap_rmap()
1451 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_set_pte_rmap()
1493 gfn_t start_gfn;
1494 gfn_t end_gfn;
1499 gfn_t gfn;
1519 int end_level, gfn_t start_gfn, gfn_t end_gfn) in slot_rmap_walk_init()
1560 struct kvm_memory_slot *slot, gfn_t gfn,
1605 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_age_rmap()
1619 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_test_age_rmap()
1636 u64 *spte, gfn_t gfn, unsigned int access) in __rmap_add()
1658 u64 *spte, gfn_t gfn, unsigned int access) in rmap_add()
1742 static unsigned kvm_page_table_hashfn(gfn_t gfn) in kvm_page_table_hashfn()
2083 gfn_t gfn, in kvm_mmu_find_shadow_page()
2168 gfn_t gfn, in kvm_mmu_alloc_shadow_page()
2205 gfn_t gfn, in __kvm_mmu_get_shadow_page()
2225 gfn_t gfn, in kvm_mmu_get_shadow_page()
2284 u64 *sptep, gfn_t gfn, in kvm_mmu_get_child_sp()
2687 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page()
2739 gfn_t gfn, bool can_unsync, bool prefetch) in mmu_try_to_unsync_pages()
2840 u64 *sptep, unsigned int pte_access, gfn_t gfn, in mmu_set_spte()
2925 gfn_t gfn; in direct_pte_prefetch_many()
3022 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, in host_pfn_mapping_level()
3087 const struct kvm_memory_slot *slot, gfn_t gfn, in kvm_mmu_max_mapping_level()
3168 gfn_t base_gfn = fault->gfn; in direct_map()
3207 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn) in kvm_send_hwpoison_signal()
3588 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) in mmu_check_root()
3600 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, in mmu_alloc_root()
3730 gfn_t root_gfn, root_pgd; in mmu_alloc_shadow_roots()
4114 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault()
4177 gfn_t gfn) in kvm_arch_setup_async_pf()
4452 gfn_t base = gfn_round_for_level(fault->gfn, in kvm_tdp_page_fault()
4612 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte()
5601 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mmu_pte_write()
5824 gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield, in slot_handle_level_range()
6137 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) in kvm_rmap_zap_gfn_range()
6143 gfn_t start, end; in kvm_rmap_zap_gfn_range()
6172 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) in kvm_zap_gfn_range()
6285 gfn_t gfn; in shadow_mmu_get_sp_for_split()
6316 gfn_t gfn; in shadow_mmu_split_huge_page()
6358 gfn_t gfn; in shadow_mmu_try_split_huge_page()
6439 gfn_t start, gfn_t end, in kvm_shadow_mmu_try_split_huge_pages()
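For readers skimming the dump above: gfn_t is KVM's typedef for a guest frame number, i.e. a guest physical address shifted right by PAGE_SHIFT, and nearly every hit listed is either converting between gpa_t and gfn_t or rounding a GFN to a hugepage boundary (e.g. the kvm_tdp_page_fault() hit at line 4452). Below is a minimal, self-contained userspace sketch of that arithmetic, mirroring the typedefs in include/linux/kvm_types.h and the gfn_to_gpa()/gpa_to_gfn() helpers in include/linux/kvm_host.h as I understand them. PAGE_SHIFT is hardcoded to 12 here, and HPAGE_GFN_SHIFT/PAGES_PER_HPAGE are simplified local stand-ins for the kernel's KVM_HPAGE_GFN_SHIFT/KVM_PAGES_PER_HPAGE macros, so treat this as an illustration rather than kernel code.

```c
/*
 * Sketch of the gfn_t arithmetic exercised throughout the listing above.
 * Constants are re-derived locally (x86-64, 4KiB base pages), not pulled
 * from kernel headers.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;   /* guest frame number: guest physical addr >> PAGE_SHIFT */
typedef uint64_t gpa_t;   /* guest physical address */

#define PAGE_SHIFT 12

/* Conversions in the style of include/linux/kvm_host.h. */
static inline gpa_t gfn_to_gpa(gfn_t gfn) { return gfn << PAGE_SHIFT; }
static inline gfn_t gpa_to_gfn(gpa_t gpa) { return gpa >> PAGE_SHIFT; }

/*
 * Hugepage rounding in the style of gfn_round_for_level(): each paging
 * level covers 512x more GFNs than the last (level 1 = 4KiB, level 2 =
 * 2MiB, level 3 = 1GiB), so rounding masks off (level - 1) * 9 low bits.
 */
#define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)
#define PAGES_PER_HPAGE(level)  (1ULL << HPAGE_GFN_SHIFT(level))

static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	/* -PAGES_PER_HPAGE(level) is the all-ones mask above the level's span. */
	return gfn & -PAGES_PER_HPAGE(level);
}

int main(void)
{
	gpa_t gpa = 0x12345678;          /* arbitrary guest physical address */
	gfn_t gfn = gpa_to_gfn(gpa);     /* 0x12345 */

	printf("gpa 0x%llx -> gfn 0x%llx -> gpa 0x%llx\n",
	       (unsigned long long)gpa, (unsigned long long)gfn,
	       (unsigned long long)gfn_to_gpa(gfn));
	printf("gfn 0x%llx rounded to a 2MiB boundary: 0x%llx\n",
	       (unsigned long long)gfn,
	       (unsigned long long)gfn_round_for_level(gfn, 2));
	return 0;
}
```

The frame-number representation is why so many of these call sites index by gfn_t rather than raw addresses: memslots, rmaps, lpage accounting, and dirty bitmaps are all per-frame structures, so code like the kvm_arch_mmu_enable_log_dirty_pt_masked() hits at lines 1380-1381 can compute a GFN range directly as slot->base_gfn plus bit offsets (__ffs/__fls) into the dirty mask.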