Lines Matching refs:gfn
279 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep() local
281 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep()
284 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
287 u64 spte = make_mmio_spte(vcpu, gfn, access); in mark_mmio_spte()
289 trace_mark_mmio_spte(sptep, gfn, spte); in mark_mmio_spte()
712 return sp->gfn; in kvm_mmu_page_get_gfn()
717 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
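The two results above (source lines 712 and 717) show how kvm_mmu_page_get_gfn() derives the gfn translated by a given SPTE index from the shadow page's base gfn and level. A minimal standalone sketch of that arithmetic, assuming x86's 9-bit page-table index width (SPTE_LEVEL_BITS == 9) and a simplified stand-in struct rather than the kernel's kvm_mmu_page:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
#define SPTE_LEVEL_BITS 9   /* 512 entries per x86-64 page-table level */

/* Hypothetical stand-in for struct kvm_mmu_page. */
struct sp_stub {
        gfn_t gfn;   /* base gfn covered by this shadow page */
        int level;   /* role.level: 1 = 4K leaf table, 2 = 2M, ... */
};

/* gfn mapped by the SPTE at 'index': each entry at level N covers
 * 2^((N-1)*9) small pages, so the index is scaled by that stride. */
static gfn_t sp_index_to_gfn(const struct sp_stub *sp, int index)
{
        return sp->gfn + ((gfn_t)index << ((sp->level - 1) * SPTE_LEVEL_BITS));
}

int main(void)
{
        struct sp_stub sp = { .gfn = 0x100000, .level = 2 };
        /* Entry 3 of a level-2 table starts 3 * 512 pages into the range. */
        printf("0x%llx\n", (unsigned long long)sp_index_to_gfn(&sp, 3));
        return 0;
}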
747 gfn_t gfn, unsigned int access) in kvm_mmu_page_set_translation() argument
750 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access; in kvm_mmu_page_set_translation()
757 sp->gfn, kvm_mmu_page_get_access(sp, index), access); in kvm_mmu_page_set_translation()
759 WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index), in kvm_mmu_page_set_translation()
762 sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn); in kvm_mmu_page_set_translation()
768 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index); in kvm_mmu_page_set_access() local
770 kvm_mmu_page_set_translation(sp, index, gfn, access); in kvm_mmu_page_set_access()
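Source lines 747-770 pack a gfn and its access bits into a single shadowed_translation entry per index: the gfn occupies the upper bits (shifted by PAGE_SHIFT) and the access mask the low bits, which is what the WARN_ONCE checks read back. A hedged pack/unpack sketch, assuming PAGE_SHIFT == 12 and that the access mask fits below it (the helper names here are illustrative, not the kernel's):

#include <stdint.h>
#include <assert.h>

#define PAGE_SHIFT 12
#define ACC_MASK   ((1u << PAGE_SHIFT) - 1)  /* access bits live below the gfn */

static uint64_t pack_translation(uint64_t gfn, unsigned int access)
{
        assert((access & ~ACC_MASK) == 0);
        return (gfn << PAGE_SHIFT) | access;
}

static uint64_t unpack_gfn(uint64_t entry)        { return entry >> PAGE_SHIFT; }
static unsigned int unpack_access(uint64_t entry) { return entry & ACC_MASK; }

int main(void)
{
        uint64_t e = pack_translation(0xabcd, 0x7);
        assert(unpack_gfn(e) == 0xabcd && unpack_access(e) == 0x7);
        return 0;
}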
777 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot() argument
782 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
787 gfn_t gfn, int count) in update_gfn_disallow_lpage_count() argument
793 linfo = lpage_info_slot(gfn, slot, i); in update_gfn_disallow_lpage_count()
799 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_disallow_lpage() argument
801 update_gfn_disallow_lpage_count(slot, gfn, 1); in kvm_mmu_gfn_disallow_lpage()
804 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_allow_lpage() argument
806 update_gfn_disallow_lpage_count(slot, gfn, -1); in kvm_mmu_gfn_allow_lpage()
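Source lines 777-806 look up per-slot large-page metadata for a gfn and bump its disallow_lpage count up or down. The lookup index is the gfn's offset from the slot's base gfn, scaled to the requested level; a sketch of that indexing, assuming x86's KVM_HPAGE_GFN_SHIFT(level) == (level - 1) * 9:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
#define HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)  /* 4K = 0, 2M = 9, 1G = 18 */

/* Index into a slot's per-level metadata array (lpage_info / rmap). */
static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
        /* gfn 0x1234 in a slot starting at 0x1000: 4K index 0x234, 2M index 1. */
        printf("%llu %llu\n",
               (unsigned long long)gfn_to_index(0x1234, 0x1000, 1),
               (unsigned long long)gfn_to_index(0x1234, 0x1000, 2));
        return 0;
}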
813 gfn_t gfn; in account_shadowed() local
816 gfn = sp->gfn; in account_shadowed()
818 slot = __gfn_to_memslot(slots, gfn); in account_shadowed()
822 return kvm_slot_page_track_add_page(kvm, slot, gfn, in account_shadowed()
825 kvm_mmu_gfn_disallow_lpage(slot, gfn); in account_shadowed()
827 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in account_shadowed()
828 kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K); in account_shadowed()
862 gfn_t gfn; in unaccount_shadowed() local
865 gfn = sp->gfn; in unaccount_shadowed()
867 slot = __gfn_to_memslot(slots, gfn); in unaccount_shadowed()
869 return kvm_slot_page_track_remove_page(kvm, slot, gfn, in unaccount_shadowed()
872 kvm_mmu_gfn_allow_lpage(slot, gfn); in unaccount_shadowed()
892 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, in gfn_to_memslot_dirty_bitmap() argument
897 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in gfn_to_memslot_dirty_bitmap()
1067 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level, in gfn_to_rmap() argument
1072 idx = gfn_to_index(gfn, slot->base_gfn, level); in gfn_to_rmap()
1089 gfn_t gfn; in rmap_remove() local
1093 gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte)); in rmap_remove()
1102 slot = __gfn_to_memslot(slots, gfn); in rmap_remove()
1103 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); in rmap_remove()
1408 struct kvm_memory_slot *slot, u64 gfn, in kvm_mmu_slot_gfn_write_protect() argument
1417 rmap_head = gfn_to_rmap(gfn, i, slot); in kvm_mmu_slot_gfn_write_protect()
1424 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level); in kvm_mmu_slot_gfn_write_protect()
1429 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn) in kvm_vcpu_write_protect_gfn() argument
1433 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_write_protect_gfn()
1434 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K); in kvm_vcpu_write_protect_gfn()
1444 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_zap_rmap() argument
1451 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_set_pte_rmap() argument
1466 sptep, *sptep, gfn, level); in kvm_set_pte_rmap()
1483 kvm_flush_remote_tlbs_gfn(kvm, gfn, level); in kvm_set_pte_rmap()
1499 gfn_t gfn; member
1511 iterator->gfn = iterator->start_gfn; in rmap_walk_init_level()
1512 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot); in rmap_walk_init_level()
1538 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level)); in slot_rmap_walk_next()
1560 struct kvm_memory_slot *slot, gfn_t gfn,
1572 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, in kvm_handle_gfn_range()
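Source lines 1499-1572 show the slot rmap walker: it tracks a current gfn, resolves the rmap head for that gfn at the current level, calls a handler, and then advances by one level-sized stride. A sketch of the stride logic only, under the same (level - 1) * 9 shift assumption as above:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
#define HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

/* Visit each level-sized chunk in [start, end): the walker advances the
 * current gfn by one huge page's worth of small pages per iteration. */
static void walk_gfn_range(gfn_t start, gfn_t end, int level)
{
        for (gfn_t gfn = start; gfn < end;
             gfn += (gfn_t)1 << HPAGE_GFN_SHIFT(level))
                printf("handle rmap chunk at gfn 0x%llx (level %d)\n",
                       (unsigned long long)gfn, level);
}

int main(void)
{
        walk_gfn_range(0x1000, 0x1000 + 3 * 512, 2);  /* three 2M-sized steps */
        return 0;
}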
1605 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_age_rmap() argument
1619 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_test_age_rmap() argument
1636 u64 *spte, gfn_t gfn, unsigned int access) in __rmap_add() argument
1643 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access); in __rmap_add()
1646 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); in __rmap_add()
1653 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in __rmap_add()
1658 u64 *spte, gfn_t gfn, unsigned int access) in rmap_add() argument
1662 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access); in rmap_add()
1742 static unsigned kvm_page_table_hashfn(gfn_t gfn) in kvm_page_table_hashfn() argument
1744 return hash_64(gfn, KVM_MMU_HASH_SHIFT); in kvm_page_table_hashfn()
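Source line 1744 selects a bucket in the shadow-page hash table by hashing the gfn with hash_64(). A sketch of that bucket selection using the generic multiplicative hash_64 (multiply by the 2^64 golden-ratio constant, keep the top bits); the bucket-count shift of 12 is an assumption here:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull  /* as in include/linux/hash.h */
#define MMU_HASH_SHIFT  12                      /* assumed bucket-count shift */

/* Generic hash_64: multiply by the golden ratio, keep the top 'bits' bits. */
static unsigned int hash_64_generic(uint64_t val, unsigned int bits)
{
        return (unsigned int)((val * GOLDEN_RATIO_64) >> (64 - bits));
}

static unsigned int page_table_hashfn(uint64_t gfn)
{
        return hash_64_generic(gfn, MMU_HASH_SHIFT);  /* bucket index */
}

int main(void)
{
        printf("gfn 0x1000 -> bucket %u\n", page_table_hashfn(0x1000));
        return 0;
}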
1915 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
2037 protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn); in mmu_sync_children()
2083 gfn_t gfn, in kvm_mmu_find_shadow_page() argument
2093 if (sp->gfn != gfn) { in kvm_mmu_find_shadow_page()
2168 gfn_t gfn, in kvm_mmu_alloc_shadow_page() argument
2192 sp->gfn = gfn; in kvm_mmu_alloc_shadow_page()
2205 gfn_t gfn, in __kvm_mmu_get_shadow_page() argument
2212 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]; in __kvm_mmu_get_shadow_page()
2214 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role); in __kvm_mmu_get_shadow_page()
2217 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role); in __kvm_mmu_get_shadow_page()
2225 gfn_t gfn, in kvm_mmu_get_shadow_page() argument
2234 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role); in kvm_mmu_get_shadow_page()
2284 u64 *sptep, gfn_t gfn, in kvm_mmu_get_child_sp() argument
2293 return kvm_mmu_get_shadow_page(vcpu, gfn, role); in kvm_mmu_get_child_sp()
2687 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page() argument
2693 pgprintk("%s: looking for gfn %llx\n", __func__, gfn); in kvm_mmu_unprotect_page()
2696 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) { in kvm_mmu_unprotect_page()
2697 pgprintk("%s: gfn %llx role %x\n", __func__, gfn, in kvm_mmu_unprotect_page()
2739 gfn_t gfn, bool can_unsync, bool prefetch) in mmu_try_to_unsync_pages() argument
2749 if (kvm_slot_page_track_is_active(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE)) in mmu_try_to_unsync_pages()
2758 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) { in mmu_try_to_unsync_pages()
2840 u64 *sptep, unsigned int pte_access, gfn_t gfn, in mmu_set_spte() argument
2857 *sptep, write_fault, gfn); in mmu_set_spte()
2861 mark_mmio_spte(vcpu, sptep, gfn, pte_access); in mmu_set_spte()
2886 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch, in mmu_set_spte()
2893 trace_kvm_mmu_set_spte(level, gfn, sptep); in mmu_set_spte()
2902 kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level); in mmu_set_spte()
2908 rmap_add(vcpu, slot, sptep, gfn, pte_access); in mmu_set_spte()
2925 gfn_t gfn; in direct_pte_prefetch_many() local
2927 gfn = kvm_mmu_page_get_gfn(sp, spte_index(start)); in direct_pte_prefetch_many()
2928 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); in direct_pte_prefetch_many()
2932 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); in direct_pte_prefetch_many()
2936 for (i = 0; i < ret; i++, gfn++, start++) { in direct_pte_prefetch_many()
2937 mmu_set_spte(vcpu, slot, start, access, gfn, in direct_pte_prefetch_many()
3022 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, in host_pfn_mapping_level() argument
3041 hva = __gfn_to_hva_memslot(slot, gfn); in host_pfn_mapping_level()
3087 const struct kvm_memory_slot *slot, gfn_t gfn, in kvm_mmu_max_mapping_level() argument
3095 linfo = lpage_info_slot(gfn, slot, max_level); in kvm_mmu_max_mapping_level()
3103 host_level = host_pfn_mapping_level(kvm, gfn, slot); in kvm_mmu_max_mapping_level()
3128 fault->gfn, fault->max_level); in kvm_mmu_hugepage_adjust()
3138 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask)); in kvm_mmu_hugepage_adjust()
3158 fault->pfn |= fault->gfn & page_mask; in disallowed_hugepage_adjust()
3168 gfn_t base_gfn = fault->gfn; in direct_map()
3181 base_gfn = gfn_round_for_level(fault->gfn, it.level); in direct_map()
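Source lines 3168/3181 (and again 4452) round the faulting gfn down to the base of the region covered by the mapping level before installing the SPTE. A sketch of that rounding, assuming a level-N mapping spans 2^((N-1)*9) small pages:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

/* Round a gfn down to the first gfn of its level-sized region. */
static gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
        return gfn & ~(PAGES_PER_HPAGE(level) - 1);
}

int main(void)
{
        /* A 2M mapping (level 2) covers 512 pages, so 0x1234 rounds to 0x1200. */
        printf("0x%llx\n", (unsigned long long)gfn_round_for_level(0x1234, 2));
        return 0;
}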
3207 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn) in kvm_send_hwpoison_signal() argument
3209 unsigned long hva = gfn_to_hva_memslot(slot, gfn); in kvm_send_hwpoison_signal()
3230 kvm_send_hwpoison_signal(fault->slot, fault->gfn); in kvm_handle_error_pfn()
3243 vcpu_cache_mmio_info(vcpu, gva, fault->gfn, in kvm_handle_noslot_fault()
3261 if (unlikely(fault->gfn > kvm_mmu_max_gfn())) in kvm_handle_noslot_fault()
3327 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn); in fast_pf_fix_direct_spte()
3600 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, in mmu_alloc_root() argument
3612 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role); in mmu_alloc_root()
4114 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault() local
4123 trace_handle_mmio_page_fault(addr, gfn, access); in handle_mmio_page_fault()
4124 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault()
4148 if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE)) in page_fault_handle_page_track()
4177 gfn_t gfn) in kvm_arch_setup_async_pf() argument
4182 arch.gfn = gfn; in kvm_arch_setup_async_pf()
4187 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); in kvm_arch_setup_async_pf()
4242 fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async, in __kvm_faultin_pfn()
4249 trace_kvm_try_async_get_page(fault->addr, fault->gfn); in __kvm_faultin_pfn()
4250 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) { in __kvm_faultin_pfn()
4251 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn); in __kvm_faultin_pfn()
4254 } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) { in __kvm_faultin_pfn()
4264 fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL, in __kvm_faultin_pfn()
4452 gfn_t base = gfn_round_for_level(fault->gfn, in kvm_tdp_page_fault()
4612 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
4616 if (gfn != get_mmio_spte_gfn(*sptep)) { in sync_mmio_spte()
4621 mark_mmio_spte(vcpu, sptep, gfn, access); in sync_mmio_spte()
5601 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mmu_pte_write() local
5623 for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) { in kvm_mmu_pte_write()
5838 iterator.gfn - start_gfn + 1); in slot_handle_level_range()
6285 gfn_t gfn; in shadow_mmu_get_sp_for_split() local
6287 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep)); in shadow_mmu_get_sp_for_split()
6303 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role); in shadow_mmu_get_sp_for_split()
6316 gfn_t gfn; in shadow_mmu_split_huge_page() local
6323 gfn = kvm_mmu_page_get_gfn(sp, index); in shadow_mmu_split_huge_page()
6346 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access); in shadow_mmu_split_huge_page()
6358 gfn_t gfn; in shadow_mmu_try_split_huge_page() local
6362 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep)); in shadow_mmu_try_split_huge_page()
6387 trace_kvm_mmu_split_huge_page(gfn, spte, level, r); in shadow_mmu_try_split_huge_page()
6528 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn, in kvm_mmu_zap_collapsible_spte()
6990 slot = gfn_to_memslot(kvm, sp->gfn); in kvm_recover_nx_huge_pages()