/linux/arch/x86/kvm/mmu/
mmutrace.h
      13  __field(__u64, gfn) \
      20  __entry->gfn = sp->gfn; \
     225  __entry->gfn = gfn;
     247  __entry->gfn = gfn;
     340  __field(u64, gfn)
     351  __entry->gfn = gfn;
     376  __field(u64, gfn)
     382  __entry->gfn = fault->gfn;
     398  __field(u64, gfn)
     408  __entry->gfn = gfn;
    [all …]
|
page_track.c
      80  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track()
      91  gfn_t gfn) in __kvm_write_track_add_gfn() argument
     101  update_gfn_write_track(slot, gfn, 1); in __kvm_write_track_add_gfn()
     107  kvm_mmu_gfn_disallow_lpage(slot, gfn); in __kvm_write_track_add_gfn()
     114  struct kvm_memory_slot *slot, gfn_t gfn) in __kvm_write_track_remove_gfn() argument
     124  update_gfn_write_track(slot, gfn, -1); in __kvm_write_track_remove_gfn()
     130  kvm_mmu_gfn_allow_lpage(slot, gfn); in __kvm_write_track_remove_gfn()
     326  slot = gfn_to_memslot(kvm, gfn); in kvm_write_track_add_gfn()
     333  __kvm_write_track_add_gfn(kvm, slot, gfn); in kvm_write_track_add_gfn()
     356  slot = gfn_to_memslot(kvm, gfn); in kvm_write_track_remove_gfn()
    [all …]
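Read together, these hits outline the x86 write-tracking scheme: __kvm_write_track_add_gfn() bumps a per-gfn counter (line 101) and then disallows large-page mappings for that gfn (line 107), while __kvm_write_track_remove_gfn() undoes both. Below is a minimal sketch of the counting idiom only, with a hypothetical fixed-size array standing in for the real per-memslot metadata; a counter rather than a single bit lets independent users track the same gfn and unwind in any order.

```c
/*
 * Hypothetical sketch of the counting idiom behind
 * update_gfn_write_track(): one counter per gfn in a slot,
 * +1 on track (line 101), -1 on untrack (line 124).  The fixed
 * array and its size are illustrative, not KVM's slot metadata.
 */
#include <assert.h>

#define SLOT_NPAGES 512

static unsigned short write_track[SLOT_NPAGES];

static void write_track_add(unsigned long rel_gfn)
{
	assert(rel_gfn < SLOT_NPAGES);
	assert(write_track[rel_gfn] < 0xffff);	/* counter would overflow */
	write_track[rel_gfn]++;			/* gfn is now write-tracked */
}

static void write_track_remove(unsigned long rel_gfn)
{
	assert(rel_gfn < SLOT_NPAGES);
	assert(write_track[rel_gfn] > 0);	/* unbalanced remove */
	write_track[rel_gfn]--;			/* last user untracks the gfn */
}
```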
|
tdp_mmu.c
     205  sp->gfn = gfn; in tdp_mmu_init_sp()
     346  gfn_t base_gfn = sp->gfn; in handle_removed_pt()
     675  iter->gfn, iter->level); in tdp_mmu_iter_set_spte()
    1118  tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { in kvm_tdp_mmu_map()
    1573  gfn + BITS_PER_LONG) { in clear_dirty_pt_masked()
    1581  !(mask & (1UL << (iter.gfn - gfn)))) in clear_dirty_pt_masked()
    1584  mask &= ~(1UL << (iter.gfn - gfn)); in clear_dirty_pt_masked()
    1653  if (iter.gfn < start || iter.gfn >= end) in zap_collapsible_spte_range()
    1699  for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { in write_protect_gfn()
    1754  tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { in kvm_tdp_mmu_get_walk()
    [all …]
|
mmu_internal.h
      81  gfn_t gfn; member
     161  static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) in gfn_round_for_level() argument
     163  return gfn & -KVM_PAGES_PER_HPAGE(level); in gfn_round_for_level()
     167  gfn_t gfn, bool can_unsync, bool prefetch);
     172  struct kvm_memory_slot *slot, u64 gfn,
     178  kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level), in kvm_flush_remote_tlbs_gfn()
     233  gfn_t gfn; member
     288  kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT, in kvm_mmu_prepare_memory_fault_exit()
     321  fault.gfn = fault.addr >> PAGE_SHIFT; in kvm_mmu_do_page_fault()
     322  fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn); in kvm_mmu_do_page_fault()
    [all …]
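gfn_round_for_level() (lines 161-163) is a one-line alignment trick: when KVM_PAGES_PER_HPAGE(level) is a power of two, its two's-complement negation is exactly the mask that clears the offset bits, so gfn & -N rounds gfn down to an N-page boundary. A self-contained sketch, with a 512-way fan-out per level hard-coded as an illustrative stand-in for the kernel's macro:

```c
/*
 * Sketch of the gfn_round_for_level() trick: for power-of-two N,
 * (gfn & -N) clears the low log2(N) bits, rounding gfn down to a
 * multiple of N.  The 9-bit-per-level fan-out below is illustrative,
 * not the kernel's KVM_PAGES_PER_HPAGE() definition.
 */
#include <stdio.h>

typedef unsigned long long gfn_t;

static gfn_t pages_per_hpage(int level)
{
	return 1ULL << ((level - 1) * 9);	/* 1, 512, 262144, ... */
}

static gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -pages_per_hpage(level);	/* as at line 163 */
}

int main(void)
{
	/* Round 0x12345 to a level-2 (512-page, i.e. 2M) boundary. */
	printf("%#llx\n", gfn_round_for_level(0x12345ULL, 2));	/* 0x12200 */
	return 0;
}
```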
|
tdp_iter.c
      15  SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level); in tdp_iter_refresh_sptep()
      29  iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in tdp_iter_restart()
      97  iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in try_step_down()
     116  if (SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level) == in try_step_side()
     120  iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); in try_step_side()
     121  iter->next_last_level_gfn = iter->gfn; in try_step_side()
     139  iter->gfn = gfn_round_for_level(iter->gfn, iter->level); in try_step_up()
|
mmu.c
     808  gfn_t gfn; in account_shadowed() local
     820  gfn = sp->gfn; in account_shadowed()
     865  gfn_t gfn; in unaccount_shadowed() local
     868  gfn = sp->gfn; in unaccount_shadowed()
    1087  gfn_t gfn; in rmap_remove() local
    1428  gfn_t gfn; member
    2149  if (sp->gfn != gfn) { in kvm_mmu_find_shadow_page()
    2248  sp->gfn = gfn; in kvm_mmu_alloc_shadow_page()
    4303  arch.gfn = fault->gfn; in kvm_arch_setup_async_pf()
    7639  for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) { in hugepage_has_attrs()
    [all …]
|
paging_tmpl.h
      91  gfn_t gfn; member
     322  gfn_t gfn; in FNAME() local
     445  gfn += pse36_gfn_delta(pte); in FNAME()
     538  gfn_t gfn; in FNAME() local
     544  gfn = gpte_to_gfn(gpte); in FNAME()
     634  gfn_t base_gfn = fault->gfn; in FNAME()
     636  WARN_ON_ONCE(gw->gfn != base_gfn); in FNAME()
     803  fault->gfn = walker.gfn; in FNAME()
     885  gpa = gfn_to_gpa(walker.gfn); in FNAME()
     913  gfn_t gfn; in FNAME() local
    [all …]
|
page_track.h
      19  gfn_t gfn);
      21  struct kvm_memory_slot *slot, gfn_t gfn);
      24  const struct kvm_memory_slot *slot, gfn_t gfn);
|
tdp_mmu.h
      41  gfn_t gfn, unsigned long mask,
      47  struct kvm_memory_slot *slot, gfn_t gfn,
      67  u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
|
/linux/drivers/gpu/drm/i915/gvt/
page_track.c
      35  struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_find_page_track() argument
      37  return radix_tree_lookup(&vgpu->page_track_tree, gfn); in intel_vgpu_find_page_track()
      56  track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_register_page_track()
      67  ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track); in intel_vgpu_register_page_track()
      83  unsigned long gfn) in intel_vgpu_unregister_page_track() argument
      87  track = radix_tree_delete(&vgpu->page_track_tree, gfn); in intel_vgpu_unregister_page_track()
      90  intel_gvt_page_track_remove(vgpu, gfn); in intel_vgpu_unregister_page_track()
     108  track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_enable_page_track()
     115  ret = intel_gvt_page_track_add(vgpu, gfn); in intel_vgpu_enable_page_track()
     135  track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_disable_page_track()
    [all …]
|
page_track.h
      45  struct intel_vgpu *vgpu, unsigned long gfn);
      48  unsigned long gfn, gvt_page_track_handler_t handler,
      51  unsigned long gfn);
      53  int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
      54  int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
|
kvmgt.c
      92  gfn_t gfn; member
     100  gfn_t gfn; member
     206  gvt_unpin_guest_page(vgpu, gfn, size); in gvt_dma_unmap_page()
     236  if (gfn < itr->gfn) in __gvt_cache_find_gfn()
     238  else if (gfn > itr->gfn) in __gvt_cache_find_gfn()
     257  new->gfn = gfn; in __gvt_cache_add()
     268  if (gfn < itr->gfn) in __gvt_cache_add()
     356  if (gfn == p->gfn) { in __kvmgt_protect_table_find()
     381  if (WARN(!p, "gfn: 0x%llx\n", gfn)) in kvmgt_protect_table_add()
     384  p->gfn = gfn; in kvmgt_protect_table_add()
    [all …]
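The comparisons at lines 236/238 and 268 are the classic ordered walk of a red-black tree keyed by gfn, here backing what appears to be KVMGT's cache of pinned guest pages. A kernel-style sketch of the lookup half, with a hypothetical entry struct in place of the driver's private one:

```c
/*
 * Illustrative sketch of the ordered-lookup pattern visible in
 * __gvt_cache_find_gfn(): walk an rb_tree whose nodes are sorted
 * by gfn.  The struct name and layout are hypothetical stand-ins,
 * not the driver's actual bookkeeping.
 */
#include <linux/rbtree.h>
#include <linux/types.h>

struct gfn_cache_entry {
	struct rb_node node;
	u64 gfn;
};

static struct gfn_cache_entry *gfn_cache_find(struct rb_root *root, u64 gfn)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct gfn_cache_entry *e =
			rb_entry(n, struct gfn_cache_entry, node);

		if (gfn < e->gfn)
			n = n->rb_left;		/* key sorts left of this node */
		else if (gfn > e->gfn)
			n = n->rb_right;	/* key sorts right of it */
		else
			return e;		/* exact hit */
	}
	return NULL;
}
```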
|
/linux/arch/powerpc/kvm/
book3s_hv_uvmem.c
     295  if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_mark_gfn()
     339  if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_gfn_is_uvmem_pfn()
     369  if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) { in kvmppc_next_nontransitioned_gfn()
     383  *gfn = i; in kvmppc_next_nontransitioned_gfn()
     617  unsigned long uvmem_pfn, gfn; in kvmppc_uvmem_drop_pages() local
     624  gfn = slot->base_gfn; in kvmppc_uvmem_drop_pages()
     904  pfn = gfn_to_pfn(kvm, gfn); in kvmppc_share_page()
     920  kvmppc_gfn_shared(gfn, kvm); in kvmppc_share_page()
     962  start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_in()
    1067  start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_out()
    [all …]
|
book3s_64_mmu_hv.c
     578  gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_hv_page_fault()
     859  gfn_t gfn; in kvm_unmap_gfn_range_hv() local
     862  for (gfn = range->start; gfn < range->end; gfn++) in kvm_unmap_gfn_range_hv()
     865  for (gfn = range->start; gfn < range->end; gfn++) in kvm_unmap_gfn_range_hv()
     875  unsigned long gfn; in kvmppc_core_flush_memslot_hv() local
     900  unsigned long gfn) in kvm_age_rmapp() argument
     956  gfn_t gfn; in kvm_age_gfn_hv() local
     960  for (gfn = range->start; gfn < range->end; gfn++) in kvm_age_gfn_hv()
     963  for (gfn = range->start; gfn < range->end; gfn++) in kvm_age_gfn_hv()
    1107  unsigned long gfn; in kvmppc_harvest_vpa_dirty() local
    [all …]
|
e500_mmu_host.c
     353  slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
     354  hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map()
     381  slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
     409  gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
     412  if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
     414  if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
     416  if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
     449  pfn = gfn_to_pfn_memslot(slot, gfn); in kvmppc_e500_shadow_map()
     453  __func__, (long)gfn); in kvmppc_e500_shadow_map()
     488  __func__, (long)gfn, pfn); in kvmppc_e500_shadow_map()
    [all …]
|
book3s_hv_rm_mmu.c
     104  gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
     113  unsigned long gfn; in kvmppc_set_dirty_from_hpte() local
     117  gfn = hpte_rpn(hpte_gr, psize); in kvmppc_set_dirty_from_hpte()
     131  unsigned long gfn; in revmap_for_hpte() local
     138  *gfnp = gfn; in revmap_for_hpte()
     156  unsigned long gfn; in remove_revmap_chain() local
     179  kvmppc_update_dirty_map(memslot, gfn, in remove_revmap_chain()
     188  unsigned long i, pa, gpa, gfn, psize; in kvmppc_do_h_enter() local
     227  gfn = gpa >> PAGE_SHIFT; in kvmppc_do_h_enter()
     242  slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
    [all …]
|
/linux/include/linux/
kvm_host.h
     296  kvm_pfn_t gfn; member
    1293  mark_page_dirty(kvm, gfn); \
    1591  gfn_t gfn, u64 nr_pages) in kvm_arch_flush_remote_tlbs_range() argument
    1730  if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) in try_get_memslot()
    1753  if (gfn >= slot->base_gfn) { in search_memslots()
    1770  slot = try_get_memslot(slot, gfn); in ____gfn_to_memslot()
    1810  return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
    1821  static inline gpa_t gfn_to_gpa(gfn_t gfn) in gfn_to_gpa() argument
    1823  return (gpa_t)gfn << PAGE_SHIFT; in gfn_to_gpa()
    2060  gfn_t gfn) in mmu_invalidate_retry_gfn() argument
    [all …]
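Two of these hits are complete one-liners worth restating together: gfn_to_gpa() (line 1823) turns a guest frame number into a byte address with a page shift, and try_get_memslot() (line 1730) tests membership in the half-open gfn range [base_gfn, base_gfn + npages). A standalone restatement, with the common 4K page shift hard-coded and a trimmed-down slot struct for illustration:

```c
/*
 * Standalone restatement of the two idioms above.  The 12-bit page
 * shift is the common 4K case, hard-coded here for illustration;
 * the slot struct keeps only the two fields the range test needs.
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;
typedef uint64_t gpa_t;

struct memslot {
	gfn_t base_gfn;		/* first guest frame covered by the slot */
	uint64_t npages;	/* number of frames covered */
};

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;	/* frame number -> byte address */
}

static inline bool slot_contains(const struct memslot *slot, gfn_t gfn)
{
	/* half-open interval [base_gfn, base_gfn + npages), as at line 1730 */
	return gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages;
}
```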
|
/linux/include/xen/
xen-ops.h
      66  xen_pfn_t *gfn, int nr,
      79  xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
     116  xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument
     122  return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
     130  return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array()
     176  xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument
     183  return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false); in xen_remap_domain_gfn_range()
|
/linux/arch/loongarch/kvm/
mmu.c
      68  offset = (addr >> PAGE_SHIFT) - ctx->gfn; in kvm_mkclean_pte()
     373  ctx.gfn = base_gfn; in kvm_arch_mmu_enable_log_dirty_pt_masked()
     557  gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_map_page_fast() local
     586  slot = gfn_to_memslot(kvm, gfn); in kvm_map_page_fast()
     612  mark_page_dirty(kvm, gfn); in kvm_map_page_fast()
     701  hva = __gfn_to_hva_memslot(slot, gfn); in host_pfn_mapping_level()
     766  return child + (gfn & (PTRS_PER_PTE - 1)); in kvm_split_huge()
     795  gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_map_page() local
     806  memslot = gfn_to_memslot(kvm, gfn); in kvm_map_page()
     896  gfn = gfn & ~(PTRS_PER_PTE - 1); in kvm_map_page()
    [all …]
|
/linux/drivers/xen/
xlate_mmu.c
      45  typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
      84  static void setup_hparams(unsigned long gfn, void *data) in setup_hparams() argument
      89  info->h_gpfns[info->h_iter] = gfn; in setup_hparams()
     145  xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
     158  data.fgfn = gfn; in xen_xlate_remap_gfn_array()
     174  static void unmap_gfn(unsigned long gfn, void *data) in unmap_gfn() argument
     179  xrp.gpfn = gfn; in unmap_gfn()
     197  static void setup_balloon_gfn(unsigned long gfn, void *data) in setup_balloon_gfn() argument
     201  info->pfns[info->idx++] = gfn; in setup_balloon_gfn()
|
/linux/virt/kvm/
guest_memfd.c
      33  gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff; in __kvm_gmem_prepare_folio() local
      37  index, gfn, pfn, rc); in __kvm_gmem_prepare_folio()
      57  gfn_t gfn, struct folio *folio) in kvm_gmem_prepare_folio() argument
      81  index = gfn - slot->base_gfn + slot->gmem.pgoff; in kvm_gmem_prepare_folio()
     556  gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared, in __kvm_gmem_get_pfn() argument
     559  pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff; in __kvm_gmem_get_pfn()
     593  gfn_t gfn, kvm_pfn_t *pfn, int *max_order) in kvm_gmem_get_pfn() argument
     610  r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio); in kvm_gmem_get_pfn()
     650  gfn_t gfn = start_gfn + i; in kvm_gmem_populate() local
     673  WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) || in kvm_gmem_populate()
    [all …]
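Lines 33, 81, and 559 show the same affine map in both directions: a guest_memfd binding ties the gfn range starting at slot->base_gfn to the file-page range starting at slot->gmem.pgoff, so index = gfn - base_gfn + pgoff and gfn = base_gfn + index - pgoff are exact inverses. A sketch of the pair; the struct is an illustrative stand-in, only the arithmetic is the point:

```c
/*
 * Sketch of the gfn <-> file-index mapping in the snippets above.
 * The slot layout is a hypothetical stand-in for the real memslot.
 */
#include <stdint.h>

struct gmem_slot {
	uint64_t base_gfn;	/* first gfn bound to the file */
	uint64_t pgoff;		/* file page offset where the binding starts */
};

static uint64_t gfn_to_index(const struct gmem_slot *s, uint64_t gfn)
{
	return gfn - s->base_gfn + s->pgoff;	/* as in __kvm_gmem_get_pfn() */
}

static uint64_t index_to_gfn(const struct gmem_slot *s, uint64_t index)
{
	return s->base_gfn + index - s->pgoff;	/* as in __kvm_gmem_prepare_folio() */
}
```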
|
dirty_ring.c
      92  static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_invalid() argument
      94  smp_store_release(&gfn->flags, 0); in kvm_dirty_gfn_set_invalid()
      97  static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_dirtied() argument
      99  gfn->flags = KVM_DIRTY_GFN_F_DIRTY; in kvm_dirty_gfn_set_dirtied()
     102  static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_harvested() argument
     104  return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET; in kvm_dirty_gfn_harvested()
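These three helpers carry the memory-ordering contract of a dirty-ring entry: resetting uses a release store (line 94) so the entry payload is ordered before the flag update, and harvest checking uses an acquire load of KVM_DIRTY_GFN_F_RESET (line 104) that pairs with the other side's release store. A user-space C11 analogue of that pairing, with the atomics standing in for smp_store_release()/smp_load_acquire() and an illustrative struct layout:

```c
/*
 * User-space analogue of the flag handshake above: C11 release store
 * and acquire load stand in for smp_store_release()/smp_load_acquire().
 * Flag names mirror the snippet; the layout is illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define DIRTY_GFN_F_DIRTY (1u << 0)
#define DIRTY_GFN_F_RESET (1u << 1)

struct dirty_gfn {
	_Atomic uint32_t flags;
	uint64_t slot;
	uint64_t offset;
};

static void dirty_gfn_set_invalid(struct dirty_gfn *g)
{
	/* release: entry payload is visible before the flags are cleared */
	atomic_store_explicit(&g->flags, 0, memory_order_release);
}

static bool dirty_gfn_harvested(struct dirty_gfn *g)
{
	/* acquire: pairs with the consumer's release store of F_RESET */
	return atomic_load_explicit(&g->flags, memory_order_acquire) &
	       DIRTY_GFN_F_RESET;
}
```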
|
kvm_main.c
    2698  gfn_t gfn) in gfn_to_hva_memslot() argument
    2706  return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
    2712  return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); in kvm_vcpu_gfn_to_hva()
    3040  return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn()
    3121  map->gfn = gfn; in kvm_vcpu_map()
    3305  ++gfn; in kvm_read_guest()
    3325  ++gfn; in kvm_vcpu_read_guest()
    3416  ++gfn; in kvm_write_guest()
    3437  ++gfn; in kvm_vcpu_write_guest()
    3581  ++gfn; in kvm_clear_guest()
    [all …]
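The chain at lines 2698-2712 resolves a gfn to a host virtual address: gfn_to_hva() looks up the memslot, then gfn_to_hva_many() reduces to slot arithmetic. A hedged sketch of that final step: the formula (userspace base plus page offset within the slot) matches the usual memslot layout, but it is a restatement rather than the kernel's exact helper:

```c
/*
 * Sketch of the slot arithmetic behind gfn_to_hva_memslot(): offset
 * the gfn within the slot, scale by page size, add the userspace base.
 * A real lookup also validates that the gfn lies inside the slot.
 * The 4K page size is hard-coded for illustration.
 */
#include <stdint.h>

#define PAGE_SIZE 4096ULL

struct memslot {
	uint64_t base_gfn;		/* first gfn covered by the slot */
	uint64_t npages;		/* frames covered */
	uint64_t userspace_addr;	/* hva of the slot's first page */
};

static uint64_t gfn_to_hva_slot(const struct memslot *slot, uint64_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
```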
|
/linux/arch/x86/include/asm/
kvm_page_track.h
      43  void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages,
      52  int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn);
      53  int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
|
/linux/include/trace/events/
kvm.h
     261  TP_PROTO(u64 gva, u64 gfn),
     263  TP_ARGS(gva, gfn),
     267  __field(u64, gfn)
     272  __entry->gfn = gfn;
     275  TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
     280  TP_PROTO(u64 gva, u64 gfn),
     282  TP_ARGS(gva, gfn)
     287  TP_PROTO(u64 gva, u64 gfn),
     289  TP_ARGS(gva, gfn)
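These fragments are the standard pieces of a TRACE_EVENT() definition: TP_PROTO/TP_ARGS declare the probe signature, __field() reserves ring-buffer space, TP_fast_assign fills it, and TP_printk formats it. A minimal assembled example using the same gva/gfn pair (the event name is made up, and the TRACE_SYSTEM/include-guard boilerplate a real trace header needs is omitted):

```c
/*
 * Minimal TRACE_EVENT() assembled from the pieces above.  The event
 * name is illustrative; a real trace header also needs the
 * TRACE_SYSTEM / TRACE_INCLUDE_FILE boilerplate around it.
 */
#include <linux/tracepoint.h>

TRACE_EVENT(sample_gva_gfn,
	TP_PROTO(u64 gva, u64 gfn),
	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(u64, gva)	/* reserve ring-buffer space */
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;	/* copy args into the entry */
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx",
		  __entry->gva, __entry->gfn)
);
```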
|