/arch/x86/include/asm/

kfence.h
     33  if (level != PG_LEVEL_4K)    in arch_kfence_init_pool()
     46  if (WARN_ON(!pte || level != PG_LEVEL_4K))    in kfence_protect_page()
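Both kfence.h hits are instances of a pattern that recurs throughout this listing (xen/p2m.c, pti.c, set_memory.c below): look up the kernel PTE for an address and insist the mapping is 4K before manipulating a single page. A minimal sketch of that idiom, with illustrative variable names rather than kfence's own:

    unsigned int level;
    pte_t *pte;

    /* lookup_address() walks the kernel page tables for addr and
     * reports the mapping level through its out-parameter. */
    pte = lookup_address(addr, &level);

    /* Flipping protections one page at a time is only safe on a 4K
     * mapping; a 2M/1G mapping would have to be split first. */
    if (WARN_ON(!pte || level != PG_LEVEL_4K))
        return false;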
sev.h
    132  #define RMP_TO_PG_LEVEL(level)  (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
    133  #define PG_LEVEL_TO_RMP(level)  (((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
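The SEV-SNP Reverse Map Table (RMP) only distinguishes 4K and 2M entry sizes, which is why both macros above collapse every non-matching input to the 2M case. Expanding them by hand:

    int lvl = RMP_TO_PG_LEVEL(RMP_PG_SIZE_4K);  /* -> PG_LEVEL_4K */
    int rmp = PG_LEVEL_TO_RMP(PG_LEVEL_2M);     /* -> RMP_PG_SIZE_2M */

    /* Note the lossiness: PG_LEVEL_TO_RMP(PG_LEVEL_1G) also yields
     * RMP_PG_SIZE_2M, because the RMP has no 1G entry size. */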
pgtable_types.h
    546  PG_LEVEL_4K,    enumerator
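This enumerator anchors the scale every other hit in this listing compares against. A sketch of the enum's shape, assuming a recent kernel (exact members vary by version; PG_LEVEL_512G is only meaningful with 5-level paging):

    enum pg_level {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,    /* line 546 above: the base page size */
        PG_LEVEL_2M,
        PG_LEVEL_1G,
        PG_LEVEL_512G,
        PG_LEVEL_NUM
    };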
kvm_host.h
    156  #define KVM_NR_PAGE_SIZES  (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
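Because the pg_level values are consecutive, counting supported page sizes is plain arithmetic. Assuming KVM_MAX_HUGEPAGE_LEVEL is PG_LEVEL_1G (value 3 in the enum sketched above, with PG_LEVEL_4K == 1):

    /* KVM_NR_PAGE_SIZES = PG_LEVEL_1G - PG_LEVEL_4K + 1
     *                   = 3 - 1 + 1
     *                   = 3  (4K, 2M and 1G mappings) */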
/arch/x86/coco/tdx/

tdx-shared.c
     24  case PG_LEVEL_4K:    in try_accept_one()
     64  accept_size = try_accept_one(start, len, PG_LEVEL_4K);    in tdx_accept_memory()
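Line 64 is the last rung of a fallback ladder: TDX memory acceptance tries the largest granule first and drops to 4K only when bigger accepts fail. A hedged sketch of the loop around it, reusing the try_accept_one() named above (the kernel's exact control flow may differ):

    while (start < end) {
        unsigned long len = end - start;
        unsigned long accept_size;

        /* Prefer large accepts: fewer calls into the TDX module. */
        accept_size = try_accept_one(start, len, PG_LEVEL_1G);
        if (!accept_size)
            accept_size = try_accept_one(start, len, PG_LEVEL_2M);
        if (!accept_size)
            accept_size = try_accept_one(start, len, PG_LEVEL_4K);
        if (!accept_size)
            return false;
        start += accept_size;
    }
    return true;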
/arch/x86/kvm/mmu/

spte.c
    226  if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&    in make_spte()
    239  if (level > PG_LEVEL_4K)    in make_spte()
    291  WARN_ON_ONCE(level > PG_LEVEL_4K);    in make_spte()
    351  if (role.level == PG_LEVEL_4K) {    in make_small_spte()
    370  KVM_BUG_ON(!is_shadow_present_pte(small_spte) || level == PG_LEVEL_4K, kvm);    in make_huge_spte()

page_track.c
     80  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);    in update_gfn_write_track()
    109  if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))    in __kvm_write_track_add_gfn()
    147  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);    in kvm_gfn_is_write_tracked()
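Write tracking is bookkept per 4K frame, so all three hits index at PG_LEVEL_4K granularity. Reconstructed from memory (treat as a sketch, not verbatim), gfn_to_index() is a shifted subtraction that degenerates to a plain offset at the 4K level:

    static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        /* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) is 0, so at 4K granularity
         * this is simply gfn - base_gfn: the offset into the slot. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
    }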
paging_tmpl.h
     74  #define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
    163  if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))    in FNAME()
    295  gpte |= level - PG_LEVEL_4K - 1;    in FNAME()
    444  if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36())    in FNAME()
    556  if (level == PG_LEVEL_4K) {    in FNAME()
    581  if (sp->role.level > PG_LEVEL_4K)    in FNAME()
    851  WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);    in FNAME()

mmu.c
    1388  PG_LEVEL_4K, slot);    in kvm_mmu_write_protect_pt_masked()
    1411  PG_LEVEL_4K, slot);    in kvm_mmu_clear_dirty_pt_masked()
    1630  return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);    in walk_slot_rmaps_4k()
    2143  if (level == PG_LEVEL_4K)    in mmu_pages_next()
    3227  int level = PG_LEVEL_4K;    in host_pfn_mapping_level()
    3305  if (max_level == PG_LEVEL_4K)    in __kvm_mmu_max_mapping_level()
    3306  return PG_LEVEL_4K;    in __kvm_mmu_max_mapping_level()
    4520  return PG_LEVEL_4K;    in kvm_max_level_for_order()
    4529  return PG_LEVEL_4K;    in kvm_max_private_mapping_level()
    4533  return PG_LEVEL_4K;    in kvm_max_private_mapping_level()
    [all …]

mmu_internal.h
    368  .req_level = PG_LEVEL_4K,    in kvm_mmu_do_page_fault()
    369  .goal_level = PG_LEVEL_4K,    in kvm_mmu_do_page_fault()

tdp_iter.h
    134  for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end)
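Passing PG_LEVEL_4K as the minimum level means "descend all the way to leaf PTEs". Line 134 reads like the body of the unconstrained iterator, presumably wrapped along these lines (wrapper name assumed):

    #define for_each_tdp_pte(iter, kvm, root, start, end) \
        for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end)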
spte.h
    345  return (level == PG_LEVEL_4K) || is_large_pte(pte);    in is_last_spte()
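Line 345 is the entire leaf test: an SPTE terminates the walk either because it already sits at the lowest level or because it maps a large page. Filled out as the helper the listing names (signature assumed):

    static inline bool is_last_spte(u64 pte, int level)
    {
        /* 4K entries are always terminal; above 4K, only a
         * large-page SPTE ends the walk early. */
        return (level == PG_LEVEL_4K) || is_large_pte(pte);
    }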
tdp_mmu.c
     582  WARN_ON_ONCE(level < PG_LEVEL_4K);    in handle_changed_spte()
     919  __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);    in tdp_mmu_zap_root()
     967  for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) {    in tdp_mmu_zap_leafs()
    1707  if (iter.level > PG_LEVEL_4K ||    in clear_dirty_pt_masked()

/arch/x86/xen/

p2m.c
    240  BUG_ON(!ptep || level != PG_LEVEL_4K);    in xen_build_mfn_list_list()
    438  BUG_ON(!ptep || level != PG_LEVEL_4K);    in get_phys_to_machine()
    532  BUG_ON(!ptep || level != PG_LEVEL_4K);    in xen_alloc_p2m_entry()
    656  BUG_ON(!ptep || level != PG_LEVEL_4K);    in __set_phys_to_machine()
/arch/x86/mm/

mem_encrypt_amd.c
    223  case PG_LEVEL_4K:    in pg_level_to_pfn()
    412  if (level == PG_LEVEL_4K) {    in early_set_memory_enc_dec()

pti.c
    295  if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))    in pti_setup_vsyscall()

kmmio.c
    174  case PG_LEVEL_4K:    in clear_page_presence()

init_32.c
    371  update_page_count(PG_LEVEL_4K, pages_4k);    in kernel_physical_mapping_init()

init_64.c
     497  update_page_count(PG_LEVEL_4K, pages);    in phys_pte_init()
    1123  update_page_count(PG_LEVEL_4K, -pages);    in remove_pte_table()
/arch/x86/kvm/vmx/

capabilities.h
    327  return PG_LEVEL_4K;    in ept_caps_to_lpage_level()

tdx.c
    1648  if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))    in tdx_sept_set_private_spte()
    1681  if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))    in tdx_sept_drop_private_spte()
    1785  WARN_ON_ONCE(level != PG_LEVEL_4K);    in tdx_sept_zap_private_spte()
    3159  u8 level = PG_LEVEL_4K;    in tdx_gmem_post_populate()
    3323  return PG_LEVEL_4K;    in tdx_gmem_private_max_mapping_level()
/arch/x86/kvm/svm/

sev.c
     307  if (kvm_rmp_make_shared(kvm, pfn, PG_LEVEL_4K))    in snp_page_reclaim()
    2269  fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);    in sev_gmem_post_populate()
    2312  kvm_rmp_make_shared(kvm, pfn + i, PG_LEVEL_4K);    in sev_gmem_post_populate()
    4764  if (rmp_level == PG_LEVEL_4K)    in sev_handle_rmp_fault()
    4775  assigned && rmp_level == PG_LEVEL_4K)    in sev_handle_rmp_fault()
    4821  return PG_LEVEL_4K;    in max_level_for_order()
    4833  if (max_level_for_order(order) > PG_LEVEL_4K &&    in is_large_rmp_possible()
    4869  level = PG_LEVEL_4K;    in sev_gmem_prepare()
    4907  rmp_level > PG_LEVEL_4K;    in sev_gmem_invalidate()
    4914  if (!use_2m_update && rmp_level > PG_LEVEL_4K) {    in sev_gmem_invalidate()
    [all …]

/arch/x86/mm/pat/

cpa-test.c
    206  if (level != PG_LEVEL_4K) {    in pageattr_test()
set_memory.c
     124  direct_pages_count[PG_LEVEL_4K] << 2);    in arch_report_meminfo()
     581  if (lookup_address(start, &level) && (level != PG_LEVEL_4K))    in protect_kernel_text_ro()
     765  *level = PG_LEVEL_4K;    in lookup_address_in_pgd_attr()
    1853  if (level == PG_LEVEL_4K) {    in __change_page_attr()
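The << 2 at line 124 is a unit conversion: n 4K pages occupy n * 4 KiB, and multiplying by 4 is a left shift by 2. Assuming direct_pages_count[] tallies direct-map entries per level, the surrounding report line plausibly reads:

    /* n pages of 4 KiB each -> (n << 2) KiB */
    seq_printf(m, "DirectMap4k:    %8lu kB\n",
               direct_pages_count[PG_LEVEL_4K] << 2);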
/arch/x86/virt/svm/

sev.c
    947  if (level == PG_LEVEL_4K)    in adjust_direct_map()