| /linux/arch/x86/mm/ |
| init.c |
    235   page_size_mask |= 1 << PG_LEVEL_2M;                      in probe_page_size_mask()
    350   if ((page_size_mask & (1<<PG_LEVEL_2M)) &&               in adjust_range_page_size_mask()
    351   !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {            in adjust_range_page_size_mask()
    361   mr[i].page_size_mask |= 1<<PG_LEVEL_2M;                  in adjust_range_page_size_mask()
    390   mr->page_size_mask & (1<<PG_LEVEL_2M))                   in page_size_string()
    393   if (mr->page_size_mask & (1<<PG_LEVEL_2M))               in page_size_string()
    444   page_size_mask & (1<<PG_LEVEL_2M));                      in split_mem_range()
    455   ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));                    in split_mem_range()
    464   page_size_mask & (1<<PG_LEVEL_2M));                      in split_mem_range()
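The init.c hits above all follow the same pattern: which mapping sizes may be used is recorded in a bitmask indexed by page-table level. A minimal user-space sketch of that pattern, using a stand-in enum rather than the real one from pgtable_types.h (the actual enumerator values may differ):

	#include <stdio.h>

	/* Stand-in for the kernel's enum pg_level (cf. pgtable_types.h line 547). */
	enum pg_level {
		PG_LEVEL_NONE,
		PG_LEVEL_4K,
		PG_LEVEL_2M,
		PG_LEVEL_1G,
	};

	int main(void)
	{
		unsigned int page_size_mask = 0;

		/* Mirrors "page_size_mask |= 1 << PG_LEVEL_2M" in probe_page_size_mask():
		 * mark 2M mappings as usable. */
		page_size_mask |= 1 << PG_LEVEL_2M;

		/* Mirrors the checks in adjust_range_page_size_mask()/split_mem_range():
		 * test whether a range may use 2M (or 1G) mappings. */
		if (page_size_mask & (1 << PG_LEVEL_2M))
			printf("2M mappings allowed\n");
		if (page_size_mask & ((1 << PG_LEVEL_2M) | (1 << PG_LEVEL_1G)))
			printf("some large mapping allowed\n");

		return 0;
	}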
|
| mem_encrypt_amd.c |
    227   case PG_LEVEL_2M:                                        in pg_level_to_pfn()
    417   if (level == PG_LEVEL_2M)                                in early_set_memory_enc_dec()
    420   split_page_size_mask = 1 << PG_LEVEL_2M;                 in early_set_memory_enc_dec()
|
| init_32.c |
    254   int use_pse = page_size_mask == (1<<PG_LEVEL_2M);        in kernel_physical_mapping_init()
    370   update_page_count(PG_LEVEL_2M, pages_2m);                in kernel_physical_mapping_init()
|
| init_64.c |
    558   if (page_size_mask & (1 << PG_LEVEL_2M)) {               in phys_pmd_init()
    567   if (page_size_mask & (1<<PG_LEVEL_2M)) {                 in phys_pmd_init()
    585   update_page_count(PG_LEVEL_2M, pages);                   in phys_pmd_init()
   1158   update_page_count(PG_LEVEL_2M, -pages);                  in remove_pmd_table()
|
| mmio-mod.c |
     90   if (level == PG_LEVEL_2M) {                              in print_pte()
|
| kmmio.c |
    171   case PG_LEVEL_2M:                                        in clear_page_presence()
|
| /linux/arch/x86/coco/tdx/ |
| tdx-shared.c |
     27   case PG_LEVEL_2M:                                        in try_accept_one()
     62   accept_size = try_accept_one(start, len, PG_LEVEL_2M);   in tdx_accept_memory()
|
| /linux/arch/x86/virt/svm/ |
| sev.c |
    460   if (WARN_ON_ONCE(rmp_level > PG_LEVEL_2M))               in adjust_direct_map()
    466   if (rmp_level == PG_LEVEL_2M &&                          in adjust_direct_map()
    476   if (rmp_level == PG_LEVEL_2M)                            in adjust_direct_map()
|
| /linux/arch/x86/mm/pat/ |
| set_memory.c |
    102   if (level == PG_LEVEL_2M)                                in split_page_count()
    116   direct_pages_count[PG_LEVEL_2M] << 11);                  in arch_report_meminfo()
    119   direct_pages_count[PG_LEVEL_2M] << 12);                  in arch_report_meminfo()
    705   *level = PG_LEVEL_2M;                                    in lookup_address_in_pgd_attr()
    822   case PG_LEVEL_2M:                                        in slow_virt_to_phys()
    897   case PG_LEVEL_2M:                                        in __should_split_large_page()
   1102   case PG_LEVEL_2M:                                        in __split_large_page()
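The two arch_report_meminfo() hits convert a count of large direct-map pages into kilobytes: a 2 MiB page is 2048 KiB (shift by 11), while the << 12 variant presumably covers the 4 MiB pages used by 32-bit non-PAE kernels, which are accounted under the same level index. A quick arithmetic sketch (the count here is an illustrative value, not real data):

	#include <stdio.h>

	int main(void)
	{
		unsigned long direct_pages_2m = 3;	/* example count only */

		/* 2 MiB == 2048 KiB == 1 << 11 KiB, so "count << 11" yields KiB. */
		printf("DirectMap2M: %lu kB\n", direct_pages_2m << 11);	/* 6144 kB */

		/* 4 MiB == 4096 KiB == 1 << 12 KiB (the non-PAE 32-bit case). */
		printf("DirectMap4M: %lu kB\n", direct_pages_2m << 12);	/* 12288 kB */

		return 0;
	}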
|
| cpa-test.c |
     72   } else if (level == PG_LEVEL_2M) {                       in print_split()
|
| /linux/tools/testing/selftests/kvm/x86_64/ |
| nx_huge_pages_test.c |
    147   virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_2M);   in run_test()
|
| /linux/arch/x86/kvm/vmx/ |
| capabilities.h |
    327   return PG_LEVEL_2M;                                      in ept_caps_to_lpage_level()
|
| /linux/arch/x86/include/asm/ |
| sev.h |
    100   #define RMP_TO_PG_LEVEL(level)   (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
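The sev.h macro above translates an RMP entry's page-size field into a generic page-table level. A hedged usage sketch; the RMP_PG_SIZE_* and PG_LEVEL_* values below are stand-ins, not taken from the headers:

	#include <stdio.h>

	/* Assumed stand-in encodings, for illustration only. */
	#define RMP_PG_SIZE_4K	0
	#define RMP_PG_SIZE_2M	1
	#define PG_LEVEL_4K	1
	#define PG_LEVEL_2M	2

	/* As shown in the listing (arch/x86/include/asm/sev.h line 100). */
	#define RMP_TO_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)

	int main(void)
	{
		printf("%d\n", RMP_TO_PG_LEVEL(RMP_PG_SIZE_4K));	/* PG_LEVEL_4K */
		printf("%d\n", RMP_TO_PG_LEVEL(RMP_PG_SIZE_2M));	/* PG_LEVEL_2M */
		return 0;
	}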
|
| pgtable_types.h |
    547   PG_LEVEL_2M,                                             enumerator
|
| /linux/tools/testing/selftests/kvm/lib/x86_64/ |
| processor.c |
    222   pde = virt_create_upper_pte(vm, pdpe, vaddr, paddr, PG_LEVEL_2M, level);   in __virt_pg_map()
    310   pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);        in __vm_get_page_table_entry()
    311   if (vm_is_target_pte(pde, level, PG_LEVEL_2M))           in __vm_get_page_table_entry()
|
| /linux/arch/x86/kvm/mmu/ |
| mmu.c |
    785   for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {   in update_gfn_disallow_lpage_count()
   1353   kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
   1359   PG_LEVEL_2M);                                            in kvm_arch_mmu_enable_log_dirty_pt_masked()
   3136   level = PG_LEVEL_2M;                                     in host_pfn_mapping_level()
   4348   order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&             in kvm_max_level_for_order()
   4354   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))           in kvm_max_level_for_order()
   4355   return PG_LEVEL_2M;                                      in kvm_max_level_for_order()
   4626   fault->max_level = PG_LEVEL_2M;                          in nonpaging_page_fault()
   5101   if (huge_page_level < PG_LEVEL_2M)                       in __reset_rsvds_bits_mask_ept()
   6316   max_huge_page_level = PG_LEVEL_2M;                       in kvm_configure_mmu()
   [all …]
|
| tdp_mmu.c |
    812   __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);      in tdp_mmu_zap_root()
   1629   for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {   in zap_collapsible_spte_range()
|
| /linux/tools/testing/selftests/kvm/include/x86_64/ |
| processor.h |
   1336   PG_LEVEL_2M,                                             enumerator
   1346   #define PG_SIZE_2M   PG_LEVEL_SIZE(PG_LEVEL_2M)
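The selftest header derives PG_SIZE_2M from the page-table level. Assuming PG_LEVEL_SIZE() expands to a shift of the form 12 + 9*(level - 1), which is how such macros are commonly written, the 2M constant works out as below; this is a sketch, not the header's literal definition:

	#include <stdio.h>

	/* Assumption: each level adds 9 address bits on top of the 4K (12-bit) base. */
	#define PG_LEVEL_2M		2
	#define PG_LEVEL_SHIFT(level)	(((level) - 1) * 9 + 12)
	#define PG_LEVEL_SIZE(level)	(1ull << PG_LEVEL_SHIFT(level))
	#define PG_SIZE_2M		PG_LEVEL_SIZE(PG_LEVEL_2M)

	int main(void)
	{
		/* 1 << 21 == 2097152 bytes == 2 MiB */
		printf("PG_SIZE_2M = %llu\n", PG_SIZE_2M);
		return 0;
	}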
|
| /linux/arch/x86/kvm/svm/ |
| sev.c |
   3607   pfn = pfn & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1);     in snp_rmptable_psmash()
   4810   if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))           in max_level_for_order()
   4811   return PG_LEVEL_2M;                                      in max_level_for_order()
   4857   level = PG_LEVEL_2M;                                     in sev_gmem_prepare()
   4917   rc = rmp_make_shared(pfn, use_2m_update ? PG_LEVEL_2M : PG_LEVEL_4K);   in sev_gmem_invalidate()
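The snp_rmptable_psmash() hit aligns a pfn down to a 2M boundary before operating on the RMP entry. Assuming KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) is 512 (a 2M huge page covers 1 << 9 base 4K frames), the masking behaves as sketched below; the constant name here is a stand-in:

	#include <stdio.h>
	#include <stdint.h>

	/* Assumption: a 2M huge page spans 512 (1 << 9) 4K page frames. */
	#define PAGES_PER_2M_HPAGE	(1ull << 9)

	int main(void)
	{
		uint64_t pfn = 0x12345;

		/* Mirrors "pfn = pfn & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1)":
		 * round the frame number down to the start of its 2M-aligned block. */
		pfn &= ~(PAGES_PER_2M_HPAGE - 1);

		printf("aligned pfn = 0x%llx\n", (unsigned long long)pfn);	/* 0x12200 */
		return 0;
	}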
|
| /linux/arch/x86/kvm/ |
| x86.c |
  13133   kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);   in kvm_mmu_slot_apply_flags()
|