| /arch/riscv/mm/ |
| hugetlbpage.c |
|     51  if (sz == PUD_SIZE) {                                    in huge_pte_alloc()
|    104  if (sz == PUD_SIZE)                                      in huge_pte_offset()
|    134  case PUD_SIZE:                                           in hugetlb_mask_last_page()
|    135  return P4D_SIZE - PUD_SIZE;                              in hugetlb_mask_last_page()
|    138  return PUD_SIZE - PMD_SIZE;                              in hugetlb_mask_last_page()
|    226  else if (sz >= PUD_SIZE)                                 in num_contig_ptes_from_size()
|    420  else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)   in __hugetlb_valid_size()
|
| kasan_init.c |
|     97  if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&    in kasan_populate_pud()
|     98  (next - vaddr) >= PUD_SIZE) {                                     in kasan_populate_pud()
|     99  phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);              in kasan_populate_pud()
|    102  memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);             in kasan_populate_pud()
|    181  if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {  in kasan_early_clear_pud()
|    252  if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&    in kasan_early_populate_pud()
|    253  (next - vaddr) >= PUD_SIZE) {                                     in kasan_early_populate_pud()
|
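Every kasan_init.c hit above is an instance of the same pattern: if the shadow range to populate is PUD-aligned and spans at least PUD_SIZE, back the whole PUD entry with one large allocation; otherwise descend to the PMD level. Below is a minimal sketch of that shape, assuming kernel context; alloc_block() and populate_pmd_level() are hypothetical stand-ins, not kernel API.

```c
/* Hypothetical helpers standing in for the real RISC-V internals. */
static phys_addr_t alloc_block(phys_addr_t size, phys_addr_t align);
static void populate_pmd_level(pud_t *pudp, unsigned long vaddr,
			       unsigned long end);

/* Sketch of the "one PUD-sized block if possible, else split" pattern. */
static void populate_pud_range(pud_t *pudp, unsigned long vaddr,
			       unsigned long end)
{
	unsigned long next;

	do {
		next = pud_addr_end(vaddr, end);  /* clamp to this PUD entry */

		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			/* Whole entry covered: map one PUD_SIZE block. */
			phys_addr_t pa = alloc_block(PUD_SIZE, PUD_SIZE);

			memset(__va(pa), KASAN_SHADOW_INIT, PUD_SIZE);
			set_pud(pudp, pfn_pud(PFN_DOWN(pa), PAGE_KERNEL));
			continue;	/* advance via the loop condition */
		}
		/* Partial coverage: fall back to PMD granularity. */
		populate_pmd_level(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}
```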
| tlbflush.c |
|    190  else if (stride_size >= PUD_SIZE)  in flush_tlb_range()
|    191  stride_size = PUD_SIZE;            in flush_tlb_range()
|    221  start, end - start, PUD_SIZE);     in flush_pud_tlb_range()
|
| init.c |
|    645  if (sz == PUD_SIZE) {                                                           in create_pud_mapping()
|    757  !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)           in best_map_size()
|    758  return PUD_SIZE;                                                                in best_map_size()
|    880  PUD_SIZE, PAGE_TABLE);                                                          in set_satp_mode()
|   1105  nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;                                   in setup_vm()
|   1175  BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);  in setup_vm()
|   1194  (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);                                   in setup_vm()
|   1205  (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);                               in setup_vm()
|   1719  free_vmemmap_storage(pud_page(pud), PUD_SIZE, altmap);                          in remove_pud_mapping()
|
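The best_map_size() hit at init.c line 757 spells out the usual rule for picking a mapping size: the physical address, the virtual address, and the remaining length must all be PUD-aligned before a PUD-sized mapping is safe. A sketch of that decision, minus the config checks (IS_ENABLED(CONFIG_64BIT) and friends) the real function also applies:

```c
/* Pick the largest page size for which pa, va, and size all align. */
static uintptr_t pick_map_size(phys_addr_t pa, uintptr_t va, size_t size)
{
	if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) &&
	    size >= PUD_SIZE)
		return PUD_SIZE;	/* one PUD entry maps it all */
	if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) &&
	    size >= PMD_SIZE)
		return PMD_SIZE;
	return PAGE_SIZE;		/* fall back to base pages */
}
```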
| /arch/arm64/mm/ |
| hugetlbpage.c |
|     56  case PUD_SIZE:                        in __hugetlb_valid_size()
|    258  if (sz == PUD_SIZE) {                 in huge_pte_alloc()
|    299  if (sz != PUD_SIZE && pud_none(pud))  in huge_pte_offset()
|    329  case PUD_SIZE:                        in hugetlb_mask_last_page()
|    331  return PGDIR_SIZE - PUD_SIZE;         in hugetlb_mask_last_page()
|    335  return PUD_SIZE - CONT_PMD_SIZE;      in hugetlb_mask_last_page()
|    337  return PUD_SIZE - PMD_SIZE;           in hugetlb_mask_last_page()
|    353  case PUD_SIZE:                        in arch_make_huge_pte()
|
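The hugetlb_mask_last_page() returns above look odd until you do the arithmetic: for PUD-sized pages, PGDIR_SIZE - PUD_SIZE is exactly the set of PUD-index bits within a PGD entry. The generic hugetlb walker ORs this value into the address when a lookup finds an empty upper-level entry, so its next `addr += PUD_SIZE` step lands in the following PGD entry. A sketch of that skip, assuming arm64 with 4 KiB pages and 48-bit VAs (PUD_SIZE = 1 GiB, PGDIR_SIZE = 512 GiB):

```c
/* Skip the rest of a PGD entry whose PUD lookup came back empty. */
static unsigned long skip_empty_pgd(unsigned long addr)
{
	unsigned long last_addr_mask = PGDIR_SIZE - PUD_SIZE;  /* 0x7fc0000000 */

	addr |= last_addr_mask;	/* last PUD slot within this PGD entry */
	return addr + PUD_SIZE;	/* first address of the next PGD entry */
}
```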
| /arch/powerpc/include/asm/nohash/64/ |
| pgtable-4k.h |
|     36  #define PUD_SIZE (1UL << PUD_SHIFT)  macro
|     37  #define PUD_MASK (~(PUD_SIZE-1))
|
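This PUD_SIZE/PUD_MASK pair (repeated with minor spelling differences by the x86, arm64, um, arc, mips, and riscv headers below) encodes the standard rounding idiom: PUD_MASK clears the offset-within-PUD bits of an address. A self-contained sketch, assuming the common layout where PUD_SHIFT is 30, so PUD_SIZE is 1 GiB:

```c
#define PUD_SHIFT 30				/* assumed: 4 KiB pages, 4 levels */
#define PUD_SIZE  (1UL << PUD_SHIFT)		/* 0x40000000 = 1 GiB */
#define PUD_MASK  (~(PUD_SIZE - 1))		/* 0xffffffffc0000000 */

/* Round an address down, or up, to a PUD boundary. */
static inline unsigned long pud_round_down(unsigned long addr)
{
	return addr & PUD_MASK;
}

static inline unsigned long pud_round_up(unsigned long addr)
{
	return (addr + PUD_SIZE - 1) & PUD_MASK;
}

/*
 * pud_round_down(0x40123456) == 0x40000000
 * pud_round_up(0x40123456)   == 0x80000000
 */
```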
| /arch/s390/mm/ |
| hugetlbpage.c |
|    150  size = PUD_SIZE;                               in clear_huge_pte_skeys()
|    219  if (sz == PUD_SIZE)                            in huge_pte_alloc()
|    241  if (sz == PUD_SIZE)                            in huge_pte_offset()
|    254  else if (cpu_has_edat2() && size == PUD_SIZE)  in arch_hugetlb_valid_size()
|
| vmem.c |
|    328  if (IS_ALIGNED(addr, PUD_SIZE) &&  in modify_pud_table()
|    329  IS_ALIGNED(next, PUD_SIZE)) {      in modify_pud_table()
|    336  if (IS_ALIGNED(addr, PUD_SIZE) &&  in modify_pud_table()
|    337  IS_ALIGNED(next, PUD_SIZE) &&      in modify_pud_table()
|
| /arch/x86/include/asm/ |
| pgtable_64_types.h |
|     81  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)  macro
|     82  #define PUD_MASK (~(PUD_SIZE - 1))
|
| /arch/arm64/include/asm/ |
| hugetlb.h |
|     80  case PUD_SIZE:                                                in __flush_hugetlb_tlb_range()
|     81  __flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1);  in __flush_hugetlb_tlb_range()
|
| pgtable-hwdef.h |
|     66  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)  macro
|     67  #define PUD_MASK (~(PUD_SIZE-1))
|
| /arch/um/include/asm/ |
| pgtable-4level.h |
|     23  #define PUD_SIZE (1UL << PUD_SHIFT)  macro
|     24  #define PUD_MASK (~(PUD_SIZE-1))
|
| /arch/x86/mm/ |
| hugetlbpage.c |
|     28  else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))  in arch_hugetlb_valid_size()
|
| kasan_init_64.c |
|     82  ((end - addr) == PUD_SIZE) &&           in kasan_populate_pud()
|     83  IS_ALIGNED(addr, PUD_SIZE)) {           in kasan_populate_pud()
|     84  p = early_alloc(PUD_SIZE, nid, false);  in kasan_populate_pud()
|     87  memblock_free(p, PUD_SIZE);             in kasan_populate_pud()
|
| init.c |
|    367  unsigned long start = round_down(mr[i].start, PUD_SIZE);  in adjust_range_page_size_mask()
|    368  unsigned long end = round_up(mr[i].end, PUD_SIZE);        in adjust_range_page_size_mask()
|    439  end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));              in split_mem_range()
|    452  start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));            in split_mem_range()
|    453  end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));      in split_mem_range()
|
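split_mem_range() above applies the same rounding in page-frame units: PFN_DOWN(PUD_SIZE) is the number of base pages per PUD, so round_up(pfn, PFN_DOWN(PUD_SIZE)) is the next 1 GiB boundary expressed as a PFN. A runnable userspace check, with round_up()/round_down() written out for powers of two (the kernel's macros behave the same way in that case):

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PUD_SIZE   (1UL << 30)			/* x86-64: 1 GiB */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)		/* bytes -> page frames */

/* Same semantics as the kernel's helpers for power-of-two y. */
#define round_down(x, y) ((x) & ~((y) - 1))
#define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	unsigned long pfn = 0x12345;	/* some PFN inside the first GiB */

	/* 262144 base pages per PUD-sized (1 GiB) region */
	printf("%lu\n", PFN_DOWN(PUD_SIZE));
	/* next and previous 1 GiB boundary, in PFNs: 262144 and 0 */
	printf("%lu %lu\n", round_up(pfn, PFN_DOWN(PUD_SIZE)),
			    round_down(pfn, PFN_DOWN(PUD_SIZE)));
	return 0;
}
```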
| /arch/sparc/mm/ |
| hugetlbpage.c |
|    183  if (sz >= PUD_SIZE)    in huge_pte_alloc()
|    230  if (size >= PUD_SIZE)  in __set_huge_pte_at()
|    273  if (size >= PUD_SIZE)  in huge_ptep_get_and_clear()
|
| /arch/arc/include/asm/ |
| pgtable-levels.h |
|     75  #define PUD_SIZE BIT(PUD_SHIFT)  macro
|     76  #define PUD_MASK (~(PUD_SIZE - 1))
|
| /arch/s390/boot/ |
| vmem.c |
|    164  IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {  in kasan_pud_populate_zero_shadow()
|    320  !IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE))        in try_get_large_pud_pa()
|    324  if (!IS_ALIGNED(pa, PUD_SIZE))                           in try_get_large_pud_pa()
|
| /arch/powerpc/mm/book3s64/ |
| radix_hugetlbpage.c |
|     38  if (end - start >= PUD_SIZE)  in radix__flush_hugetlb_tlb_range()
|
| radix_pgtable.c |
|     92  if (map_page_size == PUD_SIZE) {                       in early_map_kernel_page()
|    158  if (map_page_size == PUD_SIZE) {                       in __map_kernel_page()
|    325  if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&   in create_physical_mapping()
|    327  mapping_size = PUD_SIZE;                               in create_physical_mapping()
|    875  if (!IS_ALIGNED(addr, PUD_SIZE) ||                     in remove_pud_table()
|    876  !IS_ALIGNED(next, PUD_SIZE)) {                         in remove_pud_table()
|   1643  flush_tlb_kernel_range(addr, addr + PUD_SIZE);         in pud_free_pmd_page()
|
| /arch/riscv/kvm/ |
| mmu.c |
|    287  WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);  in kvm_age_gfn()
|    310  WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);  in kvm_test_age_gfn()
|    373  if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)            in kvm_riscv_mmu_map()
|    387  if (vma_pagesize != PUD_SIZE &&                                       in kvm_riscv_mmu_map()
|
| /arch/mips/include/asm/ |
| pgtable-64.h |
|     60  #define PUD_SIZE (1UL << PUD_SHIFT)  macro
|     61  #define PUD_MASK (~(PUD_SIZE-1))
|
| /arch/x86/mm/pat/ |
| set_memory.c |
|   1510  if (start & (PUD_SIZE - 1)) {                                            in unmap_pud_range()
|   1511  unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;                 in unmap_pud_range()
|   1523  while (end - start >= PUD_SIZE) {                                        in unmap_pud_range()
|   1528  unmap_pmd_range(pud, start, start + PUD_SIZE);                           in unmap_pud_range()
|   1530  start += PUD_SIZE;                                                       in unmap_pud_range()
|   1673  if (start & (PUD_SIZE - 1)) {                                            in populate_pud()
|   1675  unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;                 in populate_pud()
|   1708  while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {   in populate_pud()
|   1712  start += PUD_SIZE;                                                       in populate_pud()
|   1713  cpa->pfn += PUD_SIZE >> PAGE_SHIFT;                                      in populate_pud()
| [all …]
|
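unmap_pud_range() and populate_pud() above are the clearest head/body/tail walks in this list: work at PMD granularity from an unaligned start up to the next PUD boundary, consume whole PUD_SIZE blocks in a loop, then finish the unaligned tail. A sketch of the shape, with hypothetical do_pmd_range()/do_pud_block() helpers standing in for the real operations:

```c
/* Hypothetical helpers for the PMD-granularity and whole-PUD steps. */
static void do_pmd_range(unsigned long start, unsigned long end);
static void do_pud_block(unsigned long start);

/* Head/body/tail walk over [start, end) at PUD granularity. */
static void walk_pud_range(unsigned long start, unsigned long end)
{
	/* Head: unaligned start, PMD granularity up to the next boundary. */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = end < next_page ? end : next_page;

		do_pmd_range(start, pre_end);
		start = pre_end;
	}

	/* Body: whole PUD_SIZE blocks. */
	while (end - start >= PUD_SIZE) {
		do_pud_block(start);
		start += PUD_SIZE;
	}

	/* Tail: whatever remains is smaller than PUD_SIZE. */
	if (start < end)
		do_pmd_range(start, end);
}
```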
| /arch/powerpc/include/asm/nohash/ |
| pgtable.h |
|     76  else if (sz < PUD_SIZE)  in pte_update()
|     79  pdsize = PUD_SIZE;       in pte_update()
|
| /arch/riscv/include/asm/ |
| pgtable-64.h |
|     36  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)  macro
|     37  #define PUD_MASK (~(PUD_SIZE - 1))
|