Searched refs:PG_LEVEL_4K (Results 1 – 25 of 26) sorted by relevance

/arch/x86/include/asm/
kfence.h:33 if (level != PG_LEVEL_4K) in arch_kfence_init_pool()
46 if (WARN_ON(!pte || level != PG_LEVEL_4K)) in kfence_protect_page()
sev.h:132 #define RMP_TO_PG_LEVEL(level) (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
133 #define PG_LEVEL_TO_RMP(level) (((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
pgtable_types.h:546 PG_LEVEL_4K, enumerator
kvm_host.h:156 #define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
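
Note: pgtable_types.h:546 is the defining hit: PG_LEVEL_4K is an enumerator of enum pg_level, and the sev.h macros above translate between those levels and the RMP page-size encoding used for SEV-SNP. A minimal standalone sketch of that relationship follows; the enum layout and the RMP_PG_SIZE_* values (0 for 4K, 1 for 2M) are assumptions here rather than copies of the headers, while the two conversion macros are quoted from the sev.h:132-133 hits above.

    #include <stdio.h>

    /* Rough stand-in for enum pg_level from asm/pgtable_types.h. */
    enum pg_level {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
        PG_LEVEL_NUM,
    };

    /* Assumed RMP page-size encoding: 0 = 4K, 1 = 2M. */
    #define RMP_PG_SIZE_4K    0
    #define RMP_PG_SIZE_2M    1

    /* Conversion macros as quoted from the sev.h hits above. */
    #define RMP_TO_PG_LEVEL(level)  (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
    #define PG_LEVEL_TO_RMP(level)  (((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)

    int main(void)
    {
        /* A 4K mapping round-trips through the RMP encoding unchanged. */
        printf("PG_LEVEL_4K -> RMP %d -> PG level %d\n",
               PG_LEVEL_TO_RMP(PG_LEVEL_4K),
               RMP_TO_PG_LEVEL(PG_LEVEL_TO_RMP(PG_LEVEL_4K)));
        return 0;
    }
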
/arch/x86/coco/tdx/
tdx-shared.c:24 case PG_LEVEL_4K: in try_accept_one()
64 accept_size = try_accept_one(start, len, PG_LEVEL_4K); in tdx_accept_memory()
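
Note: the tdx-shared.c hits show the memory-acceptance fallback path: tdx_accept_memory() walks a range and, when nothing larger fits, accepts it one PG_LEVEL_4K page at a time via try_accept_one(). Below is a hedged standalone sketch of that largest-first, fall-back-to-4K loop; accept_page() is a stub standing in for the real TDX call, and the loop structure is illustrative, not copied from tdx-shared.c.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stddef.h>

    enum { PG_LEVEL_4K = 1, PG_LEVEL_2M, PG_LEVEL_1G };

    /* Stub for the real TDX accept call: pretend only 4K accepts succeed. */
    static bool accept_page(unsigned long start, int level)
    {
        (void)start;
        return level == PG_LEVEL_4K;
    }

    /* Bytes covered by one mapping at a given level: 4K, 2M or 1G. */
    static size_t level_size(int level)
    {
        return 1UL << (12 + 9 * (level - PG_LEVEL_4K));
    }

    /* Accept [start, start + len): try the largest level, fall back to 4K. */
    static size_t accept_memory_sketch(unsigned long start, size_t len)
    {
        size_t done = 0;

        while (len) {
            size_t accepted = 0;

            for (int level = PG_LEVEL_1G; level >= PG_LEVEL_4K; level--) {
                size_t size = level_size(level);

                if (size <= len && !(start & (size - 1)) &&
                    accept_page(start, level)) {
                    accepted = size;
                    break;
                }
            }
            if (!accepted)
                break;
            start += accepted;
            len -= accepted;
            done += accepted;
        }
        return done;
    }

    int main(void)
    {
        printf("accepted %zu bytes\n",
               accept_memory_sketch(0x100000, (size_t)2 << 20));
        return 0;
    }
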
/arch/x86/kvm/mmu/
spte.c:226 if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) && in make_spte()
239 if (level > PG_LEVEL_4K) in make_spte()
291 WARN_ON_ONCE(level > PG_LEVEL_4K); in make_spte()
351 if (role.level == PG_LEVEL_4K) { in make_small_spte()
370 KVM_BUG_ON(!is_shadow_present_pte(small_spte) || level == PG_LEVEL_4K, kvm); in make_huge_spte()
page_track.c:80 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track()
109 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in __kvm_write_track_add_gfn()
147 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in kvm_gfn_is_write_tracked()
paging_tmpl.h:74 #define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
163 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in FNAME()
295 gpte |= level - PG_LEVEL_4K - 1; in FNAME()
444 if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36()) in FNAME()
556 if (level == PG_LEVEL_4K) { in FNAME()
581 if (sp->role.level > PG_LEVEL_4K) in FNAME()
851 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K); in FNAME()
mmu.c:1388 PG_LEVEL_4K, slot); in kvm_mmu_write_protect_pt_masked()
1411 PG_LEVEL_4K, slot); in kvm_mmu_clear_dirty_pt_masked()
1630 return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield); in walk_slot_rmaps_4k()
2143 if (level == PG_LEVEL_4K) in mmu_pages_next()
3227 int level = PG_LEVEL_4K; in host_pfn_mapping_level()
3305 if (max_level == PG_LEVEL_4K) in __kvm_mmu_max_mapping_level()
3306 return PG_LEVEL_4K; in __kvm_mmu_max_mapping_level()
4520 return PG_LEVEL_4K; in kvm_max_level_for_order()
4529 return PG_LEVEL_4K; in kvm_max_private_mapping_level()
4533 return PG_LEVEL_4K; in kvm_max_private_mapping_level()
[all …]
mmu_internal.h:368 .req_level = PG_LEVEL_4K, in kvm_mmu_do_page_fault()
369 .goal_level = PG_LEVEL_4K, in kvm_mmu_do_page_fault()
tdp_iter.h:134 for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end)
spte.h:345 return (level == PG_LEVEL_4K) || is_large_pte(pte); in is_last_spte()
tdp_mmu.c:582 WARN_ON_ONCE(level < PG_LEVEL_4K); in handle_changed_spte()
919 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K); in tdp_mmu_zap_root()
967 for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) { in tdp_mmu_zap_leafs()
1707 if (iter.level > PG_LEVEL_4K || in clear_dirty_pt_masked()
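
Note: the page_track.c hits use gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K) to turn a guest frame number into a per-slot array index. At PG_LEVEL_4K the index is simply the gfn's offset from the slot base; each level above it collapses 9 more gfn bits, since an x86 page table holds 512 entries. A standalone sketch of that arithmetic follows; the helper and macro names mirror the KVM ones but are local definitions, not copies of the headers.

    #include <stdio.h>

    typedef unsigned long long gfn_t;

    enum { PG_LEVEL_4K = 1, PG_LEVEL_2M, PG_LEVEL_1G };

    /* 512 PTEs per x86 page table: 9 gfn bits collapse per extra level. */
    #define HPAGE_GFN_SHIFT(level)    (((level) - PG_LEVEL_4K) * 9)

    /* Per-slot index for a gfn at a given mapping level. */
    static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    int main(void)
    {
        gfn_t base = 0x1000, gfn = 0x1234;

        /* At PG_LEVEL_4K the index is just the gfn offset into the slot. */
        printf("4K index: %llu\n", gfn_to_index(gfn, base, PG_LEVEL_4K));
        /* At PG_LEVEL_2M, 512 consecutive gfns share one index. */
        printf("2M index: %llu\n", gfn_to_index(gfn, base, PG_LEVEL_2M));
        return 0;
    }
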
/arch/x86/xen/
p2m.c:240 BUG_ON(!ptep || level != PG_LEVEL_4K); in xen_build_mfn_list_list()
438 BUG_ON(!ptep || level != PG_LEVEL_4K); in get_phys_to_machine()
532 BUG_ON(!ptep || level != PG_LEVEL_4K); in xen_alloc_p2m_entry()
656 BUG_ON(!ptep || level != PG_LEVEL_4K); in __set_phys_to_machine()
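
Note: the four p2m.c hits, like the kfence.h and pti.c hits above, are the same idiom: resolve the kernel PTE for a virtual address with lookup_address() and refuse to proceed unless the mapping is a 4K PTE, since only then does the returned pointer refer to a single pte_t that can be read or rewritten in place. A kernel-context sketch of the pattern (not standalone; the helper name is made up here, while lookup_address() and PG_LEVEL_4K are the real interfaces):

    /* Kernel-context sketch of the shared p2m.c / kfence.h / pti.c pattern. */
    static pte_t *get_4k_pte_or_null(unsigned long addr)
    {
        unsigned int level;
        pte_t *ptep = lookup_address(addr, &level);

        if (!ptep || level != PG_LEVEL_4K)
            return NULL;    /* the p2m.c hits BUG_ON() here instead */
        return ptep;
    }
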
/arch/x86/mm/
mem_encrypt_amd.c:223 case PG_LEVEL_4K: in pg_level_to_pfn()
412 if (level == PG_LEVEL_4K) { in early_set_memory_enc_dec()
pti.c:295 if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) in pti_setup_vsyscall()
kmmio.c:174 case PG_LEVEL_4K: in clear_page_presence()
init_32.c:371 update_page_count(PG_LEVEL_4K, pages_4k); in kernel_physical_mapping_init()
init_64.c:497 update_page_count(PG_LEVEL_4K, pages); in phys_pte_init()
1123 update_page_count(PG_LEVEL_4K, -pages); in remove_pte_table()
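
Note: init_32.c and init_64.c call update_page_count(PG_LEVEL_4K, n) to account direct-map mappings per page size, and mem_encrypt_amd.c's pg_level_to_pfn() switches on the level to work out how many pfns one mapping spans. The common arithmetic is that each level above PG_LEVEL_4K covers 512 times more address space. A standalone sketch of that conversion; the helper names are chosen here, not taken from the kernel.

    #include <stdio.h>

    enum { PG_LEVEL_4K = 1, PG_LEVEL_2M, PG_LEVEL_1G };

    #define PAGE_SHIFT 12

    /* Bytes covered by one mapping at a given level: 4K, 2M, 1G. */
    static unsigned long level_to_size(int level)
    {
        return 1UL << (PAGE_SHIFT + 9 * (level - PG_LEVEL_4K));
    }

    /* Number of 4K pfns spanned by one mapping at a given level. */
    static unsigned long level_to_pfns(int level)
    {
        return level_to_size(level) >> PAGE_SHIFT;
    }

    int main(void)
    {
        printf("PG_LEVEL_4K: %lu bytes, %lu pfn(s)\n",
               level_to_size(PG_LEVEL_4K), level_to_pfns(PG_LEVEL_4K));
        printf("PG_LEVEL_2M: %lu bytes, %lu pfn(s)\n",
               level_to_size(PG_LEVEL_2M), level_to_pfns(PG_LEVEL_2M));
        return 0;
    }
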
/arch/x86/kvm/vmx/
capabilities.h:327 return PG_LEVEL_4K; in ept_caps_to_lpage_level()
tdx.c:1648 if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm)) in tdx_sept_set_private_spte()
1681 if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm)) in tdx_sept_drop_private_spte()
1785 WARN_ON_ONCE(level != PG_LEVEL_4K); in tdx_sept_zap_private_spte()
3159 u8 level = PG_LEVEL_4K; in tdx_gmem_post_populate()
3323 return PG_LEVEL_4K; in tdx_gmem_private_max_mapping_level()
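
Note: capabilities.h:327 is the EPT fallback: if the VMX capability MSR advertises neither 2M nor 1G EPT pages, the maximum large-page level collapses to PG_LEVEL_4K, and the tdx.c hits pin private mappings to 4K outright. A standalone sketch of that capability-to-level mapping; the bit positions assumed here (bit 16 for 2M, bit 17 for 1G in IA32_VMX_EPT_VPID_CAP) are stated as assumptions, not copied from the header.

    #include <stdio.h>
    #include <stdint.h>

    enum { PG_LEVEL_4K = 1, PG_LEVEL_2M, PG_LEVEL_1G };

    /* Assumed IA32_VMX_EPT_VPID_CAP bits: 2M pages bit 16, 1G pages bit 17. */
    #define EPT_2MB_PAGE_BIT    (1ULL << 16)
    #define EPT_1GB_PAGE_BIT    (1ULL << 17)

    /* Largest mapping level the EPT capabilities allow; 4K is the floor. */
    static int ept_caps_to_lpage_level(uint64_t ept_caps)
    {
        if (ept_caps & EPT_1GB_PAGE_BIT)
            return PG_LEVEL_1G;
        if (ept_caps & EPT_2MB_PAGE_BIT)
            return PG_LEVEL_2M;
        return PG_LEVEL_4K;
    }

    int main(void)
    {
        printf("no large-page caps -> level %d\n", ept_caps_to_lpage_level(0));
        printf("2M cap only -> level %d\n",
               ept_caps_to_lpage_level(EPT_2MB_PAGE_BIT));
        return 0;
    }
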
/arch/x86/kvm/svm/
sev.c:307 if (kvm_rmp_make_shared(kvm, pfn, PG_LEVEL_4K)) in snp_page_reclaim()
2269 fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K); in sev_gmem_post_populate()
2312 kvm_rmp_make_shared(kvm, pfn + i, PG_LEVEL_4K); in sev_gmem_post_populate()
4764 if (rmp_level == PG_LEVEL_4K) in sev_handle_rmp_fault()
4775 assigned && rmp_level == PG_LEVEL_4K) in sev_handle_rmp_fault()
4821 return PG_LEVEL_4K; in max_level_for_order()
4833 if (max_level_for_order(order) > PG_LEVEL_4K && in is_large_rmp_possible()
4869 level = PG_LEVEL_4K; in sev_gmem_prepare()
4907 rmp_level > PG_LEVEL_4K; in sev_gmem_invalidate()
4914 if (!use_2m_update && rmp_level > PG_LEVEL_4K) { in sev_gmem_invalidate()
[all …]
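
Note: the sev.c hits around lines 4821-4833 decide whether an RMP operation can use a 2M entry or must fall back to PG_LEVEL_4K, based on the guest_memfd allocation order and alignment. A standalone sketch of the order-to-level mapping; the order-9 threshold (512 contiguous 4K pages = 2M) is ordinary x86 arithmetic, but the function name and structure here are illustrative, not copied from sev.c.

    #include <stdio.h>

    enum { PG_LEVEL_4K = 1, PG_LEVEL_2M };

    #define ORDER_2M    9    /* 2^9 contiguous 4K pages = 2M on x86 */

    /* Highest level a guest_memfd folio of the given order can back. */
    static int max_level_for_order(int order)
    {
        return order >= ORDER_2M ? PG_LEVEL_2M : PG_LEVEL_4K;
    }

    int main(void)
    {
        printf("order 0 -> PG level %d\n", max_level_for_order(0));
        printf("order 9 -> PG level %d\n", max_level_for_order(9));
        return 0;
    }
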
/arch/x86/mm/pat/
cpa-test.c:206 if (level != PG_LEVEL_4K) { in pageattr_test()
set_memory.c:124 direct_pages_count[PG_LEVEL_4K] << 2); in arch_report_meminfo()
581 if (lookup_address(start, &level) && (level != PG_LEVEL_4K)) in protect_kernel_text_ro()
765 *level = PG_LEVEL_4K; in lookup_address_in_pgd_attr()
1853 if (level == PG_LEVEL_4K) { in __change_page_attr()
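
Note: set_memory.c:124 reports direct-map usage in /proc/meminfo; shifting the PG_LEVEL_4K page counter left by 2 converts a count of 4 KiB pages into KiB. The __change_page_attr() hit is the other recurring distinction in this directory: a PG_LEVEL_4K PTE can be updated in place, while anything larger must first be split. A trivial standalone check of the unit conversion, with a hypothetical counter value:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pages_4k = 123456;    /* hypothetical counter value */

        /* count of 4 KiB pages -> KiB: multiply by 4, i.e. shift left by 2 */
        printf("DirectMap4k: %8lu kB\n", pages_4k << 2);
        return 0;
    }
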
/arch/x86/virt/svm/
sev.c:947 if (level == PG_LEVEL_4K) in adjust_direct_map()
