/linux-6.3-rc2/arch/m68k/mm/kmap.c
    50  #define IO_SIZE PMD_SIZE
    85  virtaddr += PMD_SIZE;  in __free_io_area()
    86  size -= PMD_SIZE;  in __free_io_area()
   254  if (!(virtaddr & (PMD_SIZE-1)))  in __ioremap()
   269  physaddr += PMD_SIZE;  in __ioremap()
   270  virtaddr += PMD_SIZE;  in __ioremap()
   271  size -= PMD_SIZE;  in __ioremap()
   379  virtaddr += PMD_SIZE;  in kernel_set_cachemode()
   380  size -= PMD_SIZE;  in kernel_set_cachemode()

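The m68k entry above shows the classic chunked walk: map or free one PMD-sized
piece, then advance both cursors and shrink the remaining size. A minimal
userspace sketch of that loop shape (not the kernel code), assuming
PMD_SHIFT = 21 and with a hypothetical map_one_section() standing in for the
real page-table update:

    #include <stdio.h>

    #define PMD_SHIFT 21                  /* assumption: 2 MiB sections */
    #define PMD_SIZE  (1UL << PMD_SHIFT)

    static void map_one_section(unsigned long virt, unsigned long phys)
    {
        printf("map %#lx -> %#lx\n", virt, phys);
    }

    /* Walk a PMD-multiple range one section at a time, as in __ioremap(). */
    static void map_range(unsigned long virt, unsigned long phys,
                          unsigned long size)
    {
        while (size > 0) {
            map_one_section(virt, phys);
            virt += PMD_SIZE;
            phys += PMD_SIZE;
            size -= PMD_SIZE;
        }
    }

    int main(void)
    {
        map_range(0xd0000000UL, 0x40000000UL, 3 * PMD_SIZE);
        return 0;
    }
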
/linux-6.3-rc2/arch/x86/mm/init.c
   341  unsigned long end = round_up(mr[i].end, PMD_SIZE);  in adjust_range_page_size_mask()
   407  end_pfn = PFN_DOWN(PMD_SIZE);  in split_mem_range()
   409  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
   411  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
   421  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
   448  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
   629  addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,  in memory_map_top_down()
   631  memblock_phys_free(addr, PMD_SIZE);  in memory_map_top_down()
   632  real_end = addr + PMD_SIZE;  in memory_map_top_down()
   635  step_size = PMD_SIZE;  in memory_map_top_down()
  [all …]

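split_mem_range() works in page-frame numbers, so PFN_DOWN(PMD_SIZE) is the
number of base pages per PMD: 512 under the assumed x86 values PAGE_SHIFT = 12
and PMD_SHIFT = 21. A sketch of the rounding, using the power-of-two form of
round_up() (equivalent to the kernel's when the step is a power of two):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PMD_SHIFT   21
    #define PMD_SIZE    (1UL << PMD_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    /* Round x up to a multiple of the power-of-two y. */
    #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

    int main(void)
    {
        unsigned long pfn  = 1000;                /* some 4 KiB frame   */
        unsigned long step = PFN_DOWN(PMD_SIZE);  /* 512 frames per PMD */

        /* First PMD-aligned frame at or above pfn: prints 1024. */
        printf("%lu\n", round_up(pfn, step));
        return 0;
    }
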
/linux-6.3-rc2/arch/x86/mm/init_64.c
   378  for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {  in __init_extra_mapping()
   441  for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {  in cleanup_highmap()
   887  if (likely(IS_ALIGNED(end, PMD_SIZE)))  in vmemmap_use_sub_pmd()
   918  if (!IS_ALIGNED(start, PMD_SIZE))  in vmemmap_use_new_sub_pmd()
   926  if (!IS_ALIGNED(end, PMD_SIZE))  in vmemmap_use_new_sub_pmd()
  1118  if (IS_ALIGNED(addr, PMD_SIZE) &&  in remove_pmd_table()
  1119  IS_ALIGNED(next, PMD_SIZE)) {  in remove_pmd_table()
  1514  addr_end = addr + PMD_SIZE;  in vmemmap_set_pmd()
  1515  p_end = p + PMD_SIZE;  in vmemmap_set_pmd()
  1517  if (!IS_ALIGNED(addr, PMD_SIZE) ||  in vmemmap_set_pmd()
  [all …]

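remove_pmd_table() (lines 1118-1119 above) treats a region as one whole PMD
block only when both ends sit on PMD boundaries; the vmemmap_use_*_sub_pmd()
helpers make the same test to decide whether a sub-PMD tail is in play. A
compilable sketch of the check, assuming the x86-64 PMD_SHIFT of 21;
covers_whole_pmd() is a hypothetical name:

    #include <stdbool.h>

    #define PMD_SHIFT 21
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* True when [addr, next) starts and ends on PMD boundaries. */
    static bool covers_whole_pmd(unsigned long addr, unsigned long next)
    {
        return IS_ALIGNED(addr, PMD_SIZE) && IS_ALIGNED(next, PMD_SIZE);
    }
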
/linux-6.3-rc2/arch/x86/mm/mem_encrypt_identity.c
    96  static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
   201  ppd->vaddr += PMD_SIZE;  in __sme_map_range_pmd()
   202  ppd->paddr += PMD_SIZE;  in __sme_map_range_pmd()
   228  ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);  in __sme_map_range()
   328  kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE);  in sme_encrypt_kernel()
   363  execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;  in sme_encrypt_kernel()
   386  workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);  in sme_encrypt_kernel()

/linux-6.3-rc2/arch/x86/mm/mem_encrypt_amd.c
   170  vaddr += PMD_SIZE;  in __sme_early_map_unmap_mem()
   171  paddr += PMD_SIZE;  in __sme_early_map_unmap_mem()
   172  size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;  in __sme_early_map_unmap_mem()

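Line 172 is worth noting: unlike the plain "size -= PMD_SIZE" loops elsewhere
in this listing, the SME helper clamps at zero, so a size that is not a PMD
multiple cannot wrap the unsigned counter on the last step. A sketch, with an
assumed 2 MiB PMD_SIZE and a hypothetical advance() helper:

    #define PMD_SIZE (1UL << 21)

    /* One loop step: consume up to PMD_SIZE bytes without underflowing. */
    static unsigned long advance(unsigned long size)
    {
        return (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
    }
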
/linux-6.3-rc2/arch/s390/mm/vmem.c
   113  if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))  in vmemmap_use_sub_pmd()
   123  unsigned long page = ALIGN_DOWN(start, PMD_SIZE);  in vmemmap_use_new_sub_pmd()
   131  if (!IS_ALIGNED(start, PMD_SIZE))  in vmemmap_use_new_sub_pmd()
   138  if (!IS_ALIGNED(end, PMD_SIZE))  in vmemmap_use_new_sub_pmd()
   145  unsigned long page = ALIGN_DOWN(start, PMD_SIZE);  in vmemmap_unuse_sub_pmd()
   229  if (IS_ALIGNED(addr, PMD_SIZE) &&  in modify_pmd_table()
   230  IS_ALIGNED(next, PMD_SIZE)) {  in modify_pmd_table()
   242  if (IS_ALIGNED(addr, PMD_SIZE) &&  in modify_pmd_table()
   243  IS_ALIGNED(next, PMD_SIZE) &&  in modify_pmd_table()
   262  if (!IS_ALIGNED(addr, PMD_SIZE) ||  in modify_pmd_table()
  [all …]

/linux-6.3-rc2/arch/s390/mm/kasan_init.c
   159  if (IS_ALIGNED(address, PMD_SIZE) &&  in kasan_early_pgtable_populate()
   160  end - address >= PMD_SIZE) {  in kasan_early_pgtable_populate()
   163  address = (address + PMD_SIZE) & PMD_MASK;  in kasan_early_pgtable_populate()
   172  address = (address + PMD_SIZE) & PMD_MASK;  in kasan_early_pgtable_populate()
   179  address = (address + PMD_SIZE) & PMD_MASK;  in kasan_early_pgtable_populate()

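Lines 163-179 all advance the cursor with "(address + PMD_SIZE) & PMD_MASK",
which lands on the first PMD boundary strictly above address, whether or not
address was aligned to begin with. A small sketch, assuming PMD_SHIFT = 21
(s390's real segment-table geometry may differ); next_pmd() is a hypothetical
name:

    #define PMD_SHIFT 21
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    /* First PMD boundary strictly above address. */
    static unsigned long next_pmd(unsigned long address)
    {
        return (address + PMD_SIZE) & PMD_MASK;
    }
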
/linux-6.3-rc2/arch/riscv/mm/init.c
   461  if (sz == PMD_SIZE) {  in create_pmd_mapping()
   675  base &= PMD_SIZE - 1;  in best_map_size()
   676  if (!base && size >= PMD_SIZE)  in best_map_size()
   677  return PMD_SIZE;  in best_map_size()
   770  PMD_SIZE, PAGE_KERNEL_EXEC);  in set_satp_mode()
   772  set_satp_mode_pmd + PMD_SIZE,  in set_satp_mode()
   774  PMD_SIZE, PAGE_KERNEL_EXEC);  in set_satp_mode()
   841  PMD_SIZE, PAGE_KERNEL);  in create_kernel_page_table()
   852  PMD_SIZE,  in create_kernel_page_table()
   883  pa, PMD_SIZE, PAGE_KERNEL);  in create_fdt_early_page_table()
  [all …]

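Lines 675-677 carry the size-selection idea: a PMD-sized mapping is usable only
when the physical base is PMD-aligned and at least one full PMD of length
remains. A hedged reconstruction under assumed 4 KiB pages and a 2 MiB PMD:

    #define PAGE_SIZE 4096UL
    #define PMD_SIZE  (1UL << 21)

    /* Pick the largest page size that fits both alignment and length. */
    static unsigned long best_map_size(unsigned long base, unsigned long size)
    {
        if (!(base & (PMD_SIZE - 1)) && size >= PMD_SIZE)
            return PMD_SIZE;
        return PAGE_SIZE;
    }
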
/linux-6.3-rc2/arch/sh/include/asm/pgtable-3level.h
    23  #define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    24  #define PMD_MASK (~(PMD_SIZE-1))
    26  #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)

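A worked example of the three definitions above, with assumed values
PMD_SHIFT = 21 and PGDIR_SHIFT = 30 (not necessarily sh's own): PMD_SIZE is
2 MiB, PMD_MASK clears the low 21 bits, and PTRS_PER_PMD comes out to 512.

    #include <stdio.h>

    #define PMD_SHIFT    21
    #define PGDIR_SHIFT  30
    #define PMD_SIZE     (1UL << PMD_SHIFT)                /* 0x200000     */
    #define PMD_MASK     (~(PMD_SIZE - 1))                 /* low bits off */
    #define PTRS_PER_PMD ((1UL << PGDIR_SHIFT) / PMD_SIZE) /* 512 entries  */

    int main(void)
    {
        unsigned long addr = 0x12345678UL;

        printf("%#lx\n", addr & PMD_MASK);   /* 0x12200000 */
        printf("%lu\n",  PTRS_PER_PMD);      /* 512        */
        return 0;
    }
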
/linux-6.3-rc2/arch/arm64/mm/hugetlbpage.c
    67  case PMD_SIZE:  in __hugetlb_valid_size()
   116  *pgsize = PMD_SIZE;  in find_num_contig()
   135  case PMD_SIZE:  in num_contig_ptes()
   139  *pgsize = PMD_SIZE;  in num_contig_ptes()
   318  } else if (sz == PMD_SIZE) {  in huge_pte_alloc()
   362  if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&  in huge_pte_offset()
   385  case PMD_SIZE:  in hugetlb_mask_last_page()
   386  return PUD_SIZE - PMD_SIZE;  in hugetlb_mask_last_page()
   388  return PMD_SIZE - CONT_PTE_SIZE;  in hugetlb_mask_last_page()
   405  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {  in arch_make_huge_pte()

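Putting assumed 4 KiB-granule numbers to hugetlb_mask_last_page() (lines
385-388): with PUD_SIZE = 1 GiB, PMD_SIZE = 2 MiB and CONT_PTE_SIZE = 64 KiB,
the returned offsets are 0x3fe00000 for PMD-sized pages and 0x1f0000 for
contiguous-PTE pages. Other granules give different values.

    #include <stdio.h>

    /* Assumed arm64 4 KiB-granule constants. */
    #define PMD_SIZE      (1UL << 21)      /* 2 MiB  */
    #define PUD_SIZE      (1UL << 30)      /* 1 GiB  */
    #define CONT_PTE_SIZE (16UL << 12)     /* 64 KiB */

    int main(void)
    {
        printf("%#lx\n", PUD_SIZE - PMD_SIZE);      /* 0x3fe00000 */
        printf("%#lx\n", PMD_SIZE - CONT_PTE_SIZE); /* 0x1f0000   */
        return 0;
    }
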
/linux-6.3-rc2/arch/parisc/kernel/pci-dma.c
    85  if (end > PMD_SIZE)  in map_pte_uncached()
    86  end = PMD_SIZE;  in map_pte_uncached()
   120  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in map_pmd_uncached()
   121  orig_vaddr += PMD_SIZE;  in map_pmd_uncached()
   170  if (end > PMD_SIZE)  in unmap_uncached_pte()
   171  end = PMD_SIZE;  in unmap_uncached_pte()
   210  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in unmap_uncached_pmd()
   211  orig_vaddr += PMD_SIZE;  in unmap_uncached_pmd()

/linux-6.3-rc2/arch/x86/include/asm/pgtable_32_types.h
    12  # define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    13  # define PMD_MASK (~(PMD_SIZE - 1))

/linux-6.3-rc2/arch/x86/include/asm/pgtable_64_types.h
    99  #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)  (macro)
   100  #define PMD_MASK (~(PMD_SIZE - 1))

/linux-6.3-rc2/arch/arm64/kvm/pkvm.c
    83  hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),  in kvm_hyp_reserve()
    84  PMD_SIZE);  in kvm_hyp_reserve()
    88  hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);  in kvm_hyp_reserve()

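kvm_hyp_reserve() rounds both the allocation and the recorded size up to a PMD
boundary so the hypervisor pool can later be covered by block mappings. For
reference, a power-of-two form equivalent to the kernel's ALIGN(), with a
worked value under an assumed 2 MiB PMD:

    #define PMD_SIZE (1UL << 21)
    /* Round x up to a multiple of the power-of-two a. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* ALIGN(0x356000, PMD_SIZE) == 0x400000: ~3.3 MiB rounds up to 4 MiB,
     * i.e. two whole PMDs. */
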
/linux-6.3-rc2/arch/nios2/mm/ioremap.c
    33  if (end > PMD_SIZE)  in remap_area_pte()
    34  end = PMD_SIZE;  in remap_area_pte()
    70  address = (address + PMD_SIZE) & PMD_MASK;  in remap_area_pmd()

/linux-6.3-rc2/include/asm-generic/pgtable-nopmd.h
    22  #define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    23  #define PMD_MASK (~(PMD_SIZE-1))

/linux-6.3-rc2/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
    31  #define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    32  #define PMD_MASK (~(PMD_SIZE-1))

/linux-6.3-rc2/arch/x86/kernel/vmlinux.lds.S
    69  #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
    70  #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
    81  . = ALIGN(PMD_SIZE); \
    86  . = ALIGN(PMD_SIZE); \

/linux-6.3-rc2/arch/loongarch/include/asm/pgtable.h
    29  #define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    30  #define PMD_MASK (~(PMD_SIZE-1))
    34  #define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    35  #define PMD_MASK (~(PMD_SIZE-1))
    91  …_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)
    93  #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))

/linux-6.3-rc2/arch/um/include/asm/pgtable-3level.h
    27  #define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    28  #define PMD_MASK (~(PMD_SIZE-1))

/linux-6.3-rc2/arch/m68k/include/asm/pgtable_mm.h
    41  #define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    42  #define PMD_MASK (~(PMD_SIZE-1))

/linux-6.3-rc2/arch/arm/mm/mmu.c
  1124  next = (addr + PMD_SIZE - 1) & PMD_MASK;  in fill_pmd_gaps()
  1216  if (!IS_ALIGNED(block_start, PMD_SIZE)) {  in adjust_lowmem_bounds()
  1219  len = round_up(block_start, PMD_SIZE) - block_start;  in adjust_lowmem_bounds()
  1252  if (!IS_ALIGNED(block_start, PMD_SIZE))  in adjust_lowmem_bounds()
  1254  else if (!IS_ALIGNED(block_end, PMD_SIZE))  in adjust_lowmem_bounds()
  1273  memblock_limit = round_down(memblock_limit, PMD_SIZE);  in adjust_lowmem_bounds()
  1303  for (addr = 0; addr < KASAN_SHADOW_START; addr += PMD_SIZE)  in prepare_page_table()
  1314  for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)  in prepare_page_table()
  1320  addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;  in prepare_page_table()
  1322  for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)  in prepare_page_table()
  [all …]

/linux-6.3-rc2/arch/x86/boot/compressed/ident_map_64.c
    98  start = round_down(start, PMD_SIZE);  in kernel_add_identity_map()
    99  end = round_up(end, PMD_SIZE);  in kernel_add_identity_map()
   369  end = address + PMD_SIZE;  in do_boot_page_fault()

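kernel_add_identity_map() (lines 98-99) grows the requested range outward to
whole PMD blocks before mapping, so each block can be a single 2 MiB entry. A
sketch with the usual power-of-two round helpers; expand_to_pmd() is a
hypothetical name:

    #define PMD_SIZE (1UL << 21)
    #define round_down(x, y) ((x) & ~((y) - 1))
    #define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))

    /* Widen [*start, *end) to the enclosing PMD-aligned range. */
    static void expand_to_pmd(unsigned long *start, unsigned long *end)
    {
        *start = round_down(*start, PMD_SIZE);
        *end   = round_up(*end, PMD_SIZE);
    }
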
/linux-6.3-rc2/arch/sparc/mm/hugetlbpage.c
   299  if (sz >= PMD_SIZE)  in huge_pte_alloc()
   343  else if (size >= PMD_SIZE)  in set_huge_pte_at()
   380  else if (size >= PMD_SIZE)  in huge_ptep_get_and_clear()
   511  addr += PMD_SIZE;  in hugetlb_free_pgd_range()
   521  end -= PMD_SIZE;  in hugetlb_free_pgd_range()

/linux-6.3-rc2/arch/arm/include/asm/pgtable-2level.h
    87  #define PMD_SIZE (1UL << PMD_SHIFT)  (macro)
    88  #define PMD_MASK (~(PMD_SIZE-1))