Searched refs:gfn_t (Results 1 – 25 of 39) sorted by relevance

/linux-6.3-rc2/arch/x86/kvm/mmu/
tdp_mmu.h
23 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
24 gfn_t end, bool can_yield, bool flush);
44 gfn_t gfn, unsigned long mask,
50 struct kvm_memory_slot *slot, gfn_t gfn,
55 gfn_t start, gfn_t end,
mmu_internal.h
73 gfn_t gfn;
159 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) in gfn_round_for_level()
165 gfn_t gfn, bool can_unsync, bool prefetch);
167 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
168 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
177 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) in kvm_flush_remote_tlbs_gfn()
233 gfn_t gfn;
332 const struct kvm_memory_slot *slot, gfn_t gfn,
tdp_mmu.c
428 gfn_t base_gfn = sp->gfn; in handle_removed_pt()
855 gfn_t start = 0; in __tdp_mmu_zap_root()
940 gfn_t start, gfn_t end, bool can_yield, bool flush) in tdp_mmu_zap_leafs()
979 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end, in kvm_tdp_mmu_zap_leafs()
1363 gfn_t start, gfn_t end, int min_level) in wrprot_gfn_range()
1514 gfn_t start, gfn_t end, in tdp_mmu_split_huge_pages_root()
1583 gfn_t start, gfn_t end, in kvm_tdp_mmu_try_split_huge_pages()
1608 gfn_t start, gfn_t end) in clear_dirty_gfn_range()
1735 gfn_t start = slot->base_gfn; in zap_collapsible_spte_range()
1863 gfn_t gfn = addr >> PAGE_SHIFT; in kvm_tdp_mmu_get_walk()
[all …]
tdp_iter.h
63 gfn_t next_last_level_gfn;
69 gfn_t yielded_gfn;
75 gfn_t gfn;
114 int min_level, gfn_t next_last_level_gfn);
mmu.c
813 gfn_t gfn; in account_shadowed()
862 gfn_t gfn; in unaccount_shadowed()
1089 gfn_t gfn; in rmap_remove()
1499 gfn_t gfn;
1519 int end_level, gfn_t start_gfn, gfn_t end_gfn) in slot_rmap_walk_init()
2925 gfn_t gfn; in direct_pte_prefetch_many()
5824 gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield, in slot_handle_level_range()
6285 gfn_t gfn; in shadow_mmu_get_sp_for_split()
6316 gfn_t gfn; in shadow_mmu_split_huge_page()
6358 gfn_t gfn; in shadow_mmu_try_split_huge_page()
[all …]
paging_tmpl.h
83 gfn_t table_gfn[PT_MAX_FULL_LEVELS];
91 gfn_t gfn;
96 static inline gfn_t pse36_gfn_delta(u32 gpte) in pse36_gfn_delta()
104 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) in gpte_to_gfn_lvl()
205 gfn_t table_gfn; in FNAME()
309 gfn_t table_gfn; in FNAME()
322 gfn_t gfn; in FNAME()
526 gfn_t gfn; in FNAME()
625 gfn_t base_gfn = fault->gfn; in FNAME()
646 gfn_t table_gfn; in FNAME()
[all …]
page_track.c
88 static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_track()
116 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_add_page()
155 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_remove_page()
180 gfn_t gfn, enum kvm_page_track_mode mode) in kvm_slot_page_track_is_active()
mmutrace.h
212 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
217 __field(gfn_t, gfn)
235 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
240 __field(gfn_t, gfn)
335 TP_PROTO(int level, gfn_t gfn, u64 *sptep),
393 TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
tdp_iter.c
40 int min_level, gfn_t next_last_level_gfn) in tdp_iter_start()
spte.h
463 unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
/linux-6.3-rc2/include/linux/
kvm_host.h
261 gfn_t start;
262 gfn_t end;
581 gfn_t base_gfn;
1030 gfn_t start) in kvm_memslot_iter_start()
1156 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
1239 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
1383 gfn_t gfn_offset,
1701 static inline gfn_t
1709 static inline gpa_t gfn_to_gpa(gfn_t gfn) in gfn_to_gpa()
1714 static inline gfn_t gpa_to_gfn(gpa_t gpa) in gpa_to_gfn()
[all …]
kvm_types.h
41 typedef u64 gfn_t;
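
The kvm_types.h hit above is the definition: a gfn_t is a 64-bit guest frame number, and the gfn_to_gpa()/gpa_to_gfn() hits in kvm_host.h convert between a frame number and a guest physical address by shifting by PAGE_SHIFT (the same `gpa >> PAGE_SHIFT` pattern shows up in several hits below). A minimal standalone sketch of that relationship, not kernel code; PAGE_SHIFT of 12 (4 KiB pages) is assumed here:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;   /* guest frame number: guest physical page index */
typedef uint64_t gpa_t;   /* guest physical address */

#define PAGE_SHIFT 12     /* assumed 4 KiB pages for this example */

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

int main(void)
{
	gpa_t gpa = 0x12345678;

	/* 0x12345678 lives in guest frame 0x12345, whose base is 0x12345000 */
	printf("gpa 0x%llx -> gfn 0x%llx -> page base 0x%llx\n",
	       (unsigned long long)gpa,
	       (unsigned long long)gpa_to_gfn(gpa),
	       (unsigned long long)gfn_to_gpa(gpa_to_gfn(gpa)));
	return 0;
}
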
/linux-6.3-rc2/arch/mips/kvm/
mmu.c
270 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) in kvm_mips_flush_gpa_pt()
396 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) in BUILD_PTE_RANGE_OP()
416 gfn_t gfn_offset, unsigned long mask) in kvm_arch_mmu_enable_log_dirty_pt_masked()
418 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked()
419 gfn_t start = base_gfn + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
420 gfn_t end = base_gfn + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
433 static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn, in BUILD_PTE_RANGE_OP()
434 gfn_t end_gfn) in BUILD_PTE_RANGE_OP()
515 gfn_t gfn = gpa >> PAGE_SHIFT; in _kvm_mips_map_page_fast()
592 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mips_map_page()
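
The kvm_arch_mmu_enable_log_dirty_pt_masked() hit above (lines 416-420) shows the usual dirty-logging pattern: a bitmap mask is turned into a gfn range relative to slot->base_gfn + gfn_offset via __ffs()/__fls(); the arm64 mmu.c hits near the end of this page have the same shape. A standalone sketch of that computation, with the kernel's __ffs()/__fls() approximated by compiler builtins (an assumption for this example; mask must be non-zero):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

static void mask_to_gfn_range(gfn_t slot_base_gfn, gfn_t gfn_offset,
			      uint64_t mask, gfn_t *start, gfn_t *end)
{
	gfn_t base_gfn = slot_base_gfn + gfn_offset;

	/* lowest and highest set bits pick out the first and last dirty page */
	*start = base_gfn + __builtin_ctzll(mask);          /* ~ __ffs(mask) */
	*end   = base_gfn + (63 - __builtin_clzll(mask));   /* ~ __fls(mask) */
}

int main(void)
{
	gfn_t start, end;

	/* bits 2 and 5 set: the dirty pages span base+2 .. base+5 */
	mask_to_gfn_range(0x1000, 0, 0x24, &start, &end);
	printf("start=0x%llx end=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
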
/linux-6.3-rc2/arch/x86/kvm/
mmu.h
66 static inline gfn_t kvm_mmu_max_gfn(void) in kvm_mmu_max_gfn()
214 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
243 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index()
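
The gfn_to_index() hit above maps a gfn to a slot-relative index at a given page-table level, which KVM uses to index per-slot metadata arrays. A rough standalone sketch of the idea; the 9-bits-per-level shift mirrors x86 page sizes (4 KiB / 2 MiB / 1 GiB) and is hard-coded here as an assumption:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* assumed x86-style levels: 1 = 4 KiB, 2 = 2 MiB, 3 = 1 GiB */
#define HPAGE_GFN_SHIFT(level)	(((level) - 1) * 9)

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	/* gfn 0x1234 in a slot whose base gfn is 0x1000:
	 * 0x234 small pages in, but only 1 two-megabyte region in.
	 */
	printf("level 1 index: 0x%llx\n",
	       (unsigned long long)gfn_to_index(0x1234, 0x1000, 1));
	printf("level 2 index: 0x%llx\n",
	       (unsigned long long)gfn_to_index(0x1234, 0x1000, 2));
	return 0;
}
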
x86.h
205 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info()
297 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
301 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
mtrr.c
310 gfn_t start, end; in update_mtrr()
616 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_mtrr_get_guest_memory_type()
692 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_mtrr_check_gfn_range_consistency()
/linux-6.3-rc2/arch/x86/include/asm/
kvm_page_track.h
61 struct kvm_memory_slot *slot, gfn_t gfn,
64 struct kvm_memory_slot *slot, gfn_t gfn,
68 gfn_t gfn, enum kvm_page_track_mode mode);
kvm_host.h
906 gfn_t mmio_gfn;
925 gfn_t gfns[ASYNC_PF_PER_VCPU];
1659 u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1779 gfn_t gfn;
2008 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
2149 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
/linux-6.3-rc2/virt/kvm/
kvm_main.c
1900 gfn_t start, gfn_t end) in kvm_check_memslot_overlap()
1927 gfn_t base_gfn; in __kvm_set_memory_region()
2163 gfn_t offset; in kvm_get_dirty_log_protect()
2232 gfn_t offset; in kvm_clear_dirty_log_protect()
2416 gfn_t *nr_pages) in gfn_to_hva_many()
2422 gfn_t gfn) in gfn_to_hva_memslot()
2779 gfn_t entry = 0; in gfn_to_page_many_atomic()
3020 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_read_guest()
3040 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_vcpu_read_guest()
3171 gfn_t nr_pages_avail; in __kvm_gfn_to_hva_cache_init()
[all …]
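
The kvm_read_guest()/kvm_vcpu_read_guest() hits above both start with gfn_t gfn = gpa >> PAGE_SHIFT because a guest-physical access has to be split into per-page chunks, each confined to one guest frame. A standalone sketch of that split; read_one_page() is a hypothetical stand-in for the per-gfn copy that KVM does through the memslot, and 4 KiB pages are assumed:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint64_t gpa_t;
typedef uint64_t gfn_t;

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12

/* hypothetical per-page reader; real KVM copies from the memslot's HVA */
static void read_one_page(gfn_t gfn, unsigned int offset, unsigned int len)
{
	printf("read gfn 0x%llx offset 0x%x len 0x%x\n",
	       (unsigned long long)gfn, offset, len);
}

static void read_guest(gpa_t gpa, size_t len)
{
	while (len) {
		gfn_t gfn = gpa >> PAGE_SHIFT;           /* as in the hits above */
		unsigned int offset = gpa & (PAGE_SIZE - 1);
		unsigned int chunk = PAGE_SIZE - offset;

		if (chunk > len)
			chunk = (unsigned int)len;
		read_one_page(gfn, offset, chunk);
		gpa += chunk;
		len -= chunk;
	}
}

int main(void)
{
	/* a 6000-byte read starting 0x800 into a page spans two gfns */
	read_guest(0x12345800, 6000);
	return 0;
}
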
pfncache.c
278 gfn_t gfn = gpa_to_gfn(gpa); in __kvm_gpc_refresh()
/linux-6.3-rc2/drivers/gpu/drm/i915/gvt/
kvmgt.c
92 gfn_t gfn;
100 gfn_t gfn;
230 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) in __gvt_cache_find_gfn()
248 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, in __gvt_cache_add()
351 __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) in __kvmgt_protect_table_find()
365 static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) in kvmgt_gfn_is_write_protected()
373 static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) in kvmgt_protect_table_add()
388 static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) in kvmgt_protect_table_del()
1624 gfn_t gfn; in kvmgt_page_track_flush_slot()
/linux-6.3-rc2/arch/mips/include/asm/
kvm_host.h
808 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
809 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
/linux-6.3-rc2/arch/powerpc/kvm/
e500_mmu_host.c
323 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map()
560 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_tlb1_map()
612 gfn_t gfn = gpaddr >> PAGE_SHIFT; in kvmppc_mmu_map()
/linux-6.3-rc2/arch/riscv/kvm/
vcpu_exit.c
19 gfn_t gfn; in gstage_page_fault()
/linux-6.3-rc2/arch/arm64/kvm/
mmu.c
985 gfn_t gfn_offset, unsigned long mask) in kvm_mmu_write_protect_pt_masked()
1003 gfn_t gfn_offset, unsigned long mask) in kvm_arch_mmu_enable_log_dirty_pt_masked()
1202 gfn_t gfn; in user_mem_abort()
1438 gfn_t gfn; in kvm_handle_guest_abort()
