Searched refs:PMD_SIZE (Results 1 – 25 of 113) sorted by relevance

/arch/m68k/mm/
kmap.c
50 #define IO_SIZE PMD_SIZE
85 virtaddr += PMD_SIZE; in __free_io_area()
86 size -= PMD_SIZE; in __free_io_area()
254 if (!(virtaddr & (PMD_SIZE-1))) in __ioremap()
269 physaddr += PMD_SIZE; in __ioremap()
270 virtaddr += PMD_SIZE; in __ioremap()
271 size -= PMD_SIZE; in __ioremap()
379 virtaddr += PMD_SIZE; in kernel_set_cachemode()
380 size -= PMD_SIZE; in kernel_set_cachemode()
/arch/s390/mm/
vmem.c
125 if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE))) in vmemmap_use_sub_pmd()
135 unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_use_new_sub_pmd()
143 if (!IS_ALIGNED(start, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
150 if (!IS_ALIGNED(end, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
157 unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_unuse_sub_pmd()
237 if (IS_ALIGNED(addr, PMD_SIZE) && in modify_pmd_table()
238 IS_ALIGNED(next, PMD_SIZE)) { in modify_pmd_table()
250 if (IS_ALIGNED(addr, PMD_SIZE) && in modify_pmd_table()
251 IS_ALIGNED(next, PMD_SIZE) && in modify_pmd_table()
270 if (!IS_ALIGNED(addr, PMD_SIZE) || in modify_pmd_table()
[all …]
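
The s390 vmemmap code above leans heavily on the kernel's alignment helpers. Below is a minimal standalone sketch of how they interact with PMD_SIZE; the helper definitions are paraphrased, PMD_SHIFT = 21 is an assumption (the usual value with 4 KiB base pages), and both helpers require a power-of-two alignment:

#include <assert.h>

#define PMD_SIZE          (1UL << 21)              /* assumed: 2 MiB */
#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0) /* paraphrased helper */
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))       /* paraphrased helper */

int main(void)
{
    unsigned long start = 0x40300000UL;  /* mid-PMD address */

    assert(!IS_ALIGNED(start, PMD_SIZE));
    /* ALIGN_DOWN finds the PMD-sized page the address falls in, as
     * vmemmap_use_new_sub_pmd() does before tracking sub-PMD usage. */
    assert(ALIGN_DOWN(start, PMD_SIZE) == 0x40200000UL);
    return 0;
}
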
/arch/sh/include/asm/
pgtable-3level.h
23 #define PMD_SIZE (1UL << PMD_SHIFT) macro
24 #define PMD_MASK (~(PMD_SIZE-1))
26 #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
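
The sh header above shows the canonical derivation used by most architectures: PMD_SIZE is the span of virtual memory covered by one PMD entry, and PMD_MASK clears the offset within that span. A standalone illustration; PMD_SHIFT = 21 is an assumption here (the real value depends on the architecture and page-table layout):

#include <stdio.h>

#define PMD_SHIFT 21                  /* assumed: 2 MiB PMD entries */
#define PMD_SIZE  (1UL << PMD_SHIFT)  /* bytes covered by one PMD entry */
#define PMD_MASK  (~(PMD_SIZE - 1))   /* clears the in-PMD offset bits */

int main(void)
{
    unsigned long addr = 0x40321000UL;

    printf("PMD_SIZE = 0x%lx\n", PMD_SIZE);              /* 0x200000 */
    printf("base     = 0x%lx\n", addr & PMD_MASK);       /* 0x40200000 */
    printf("offset   = 0x%lx\n", addr & (PMD_SIZE - 1)); /* 0x121000 */
    return 0;
}
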
/arch/x86/mm/
init.c
355 unsigned long end = round_up(mr[i].end, PMD_SIZE); in adjust_range_page_size_mask()
421 end_pfn = PFN_DOWN(PMD_SIZE); in split_mem_range()
423 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
425 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
435 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
462 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
643 addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start, in memory_map_top_down()
649 memblock_phys_free(addr, PMD_SIZE); in memory_map_top_down()
650 real_end = addr + PMD_SIZE; in memory_map_top_down()
654 step_size = PMD_SIZE; in memory_map_top_down()
[all …]
init_64.c
378 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { in __init_extra_mapping()
441 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { in cleanup_highmap()
891 if (likely(IS_ALIGNED(end, PMD_SIZE))) in vmemmap_use_sub_pmd()
922 if (!IS_ALIGNED(start, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
930 if (!IS_ALIGNED(end, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
1142 if (IS_ALIGNED(addr, PMD_SIZE) && in remove_pmd_table()
1143 IS_ALIGNED(next, PMD_SIZE)) { in remove_pmd_table()
1526 addr_end = addr + PMD_SIZE; in vmemmap_set_pmd()
1527 p_end = p + PMD_SIZE; in vmemmap_set_pmd()
1529 if (!IS_ALIGNED(addr, PMD_SIZE) || in vmemmap_set_pmd()
[all …]
/arch/x86/include/asm/
pgtable_32_types.h
12 # define PMD_SIZE (1UL << PMD_SHIFT) macro
13 # define PMD_MASK (~(PMD_SIZE - 1))
pgtable_64_types.h
79 #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) macro
80 #define PMD_MASK (~(PMD_SIZE - 1))
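
Note that the 64-bit variant writes _AC(1, UL) where the 32-bit one writes 1UL. The _AC() helper from include/uapi/linux/const.h lets the same header be parsed by both the C compiler and the assembler, which does not understand type suffixes; a sketch of the idiom, with the exact spelling approximated from memory:

#ifdef __ASSEMBLY__
#define _AC(X, Y)   X            /* assembler: bare constant, no suffix */
#else
#define __AC(X, Y)  (X##Y)       /* C: token-paste the suffix -> (1UL) */
#define _AC(X, Y)   __AC(X, Y)
#endif

#define PMD_SHIFT 21                        /* assumed value */
#define PMD_SIZE  (_AC(1, UL) << PMD_SHIFT) /* (1UL) << 21 when compiled as C */
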
/arch/riscv/mm/
init.c
543 if (sz == PMD_SIZE) { in create_pmd_mapping()
761 !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) in best_map_size()
762 return PMD_SIZE; in best_map_size()
884 PMD_SIZE, PAGE_KERNEL_EXEC); in set_satp_mode()
886 set_satp_mode_pmd + PMD_SIZE, in set_satp_mode()
887 set_satp_mode_pmd + PMD_SIZE, in set_satp_mode()
888 PMD_SIZE, PAGE_KERNEL_EXEC); in set_satp_mode()
956 PMD_SIZE, PAGE_KERNEL); in create_kernel_page_table()
967 PMD_SIZE, in create_kernel_page_table()
993 pa, PMD_SIZE, PAGE_KERNEL); in create_fdt_early_page_table()
[all …]
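
The best_map_size() hit above is the usual granule-selection test: a PMD-sized block mapping is only legal when the physical and virtual addresses share PMD alignment and at least one full PMD of the range remains. A simplified reconstruction of that decision (not the full riscv function, which considers further levels as well):

#define PAGE_SIZE (1UL << 12)
#define PMD_SIZE  (1UL << 21)   /* assumed: 2 MiB PMD granule */

/* Fall back to 4 KiB pages whenever a 2 MiB block cannot be used. */
static unsigned long best_map_size(unsigned long pa, unsigned long va,
                                   unsigned long size)
{
    if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) &&
        size >= PMD_SIZE)
        return PMD_SIZE;
    return PAGE_SIZE;
}
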
hugetlbpage.c
56 if (sz == PMD_SIZE) { in huge_pte_alloc()
112 if (sz == PMD_SIZE) in huge_pte_offset()
137 case PMD_SIZE: in hugetlb_mask_last_page()
138 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
140 return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER); in hugetlb_mask_last_page()
228 else if (sz >= PMD_SIZE) in num_contig_ptes_from_size()
tlbflush.c
192 else if (stride_size >= PMD_SIZE) in flush_tlb_range()
193 stride_size = PMD_SIZE; in flush_tlb_range()
214 start, end - start, PMD_SIZE); in flush_pmd_tlb_range()
/arch/parisc/kernel/
pci-dma.c
85 if (end > PMD_SIZE) in map_pte_uncached()
86 end = PMD_SIZE; in map_pte_uncached()
120 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in map_pmd_uncached()
121 orig_vaddr += PMD_SIZE; in map_pmd_uncached()
170 if (end > PMD_SIZE) in unmap_uncached_pte()
171 end = PMD_SIZE; in unmap_uncached_pte()
210 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in unmap_uncached_pmd()
211 orig_vaddr += PMD_SIZE; in unmap_uncached_pmd()
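
The parisc lines above show the classic PMD-range walk: clamp the current iteration's end to the PMD boundary, then snap vaddr forward to the next PMD-aligned address. A hypothetical standalone loop skeleton of the same shape:

#define PMD_SIZE (1UL << 21)        /* assumed: 2 MiB */
#define PMD_MASK (~(PMD_SIZE - 1))

static void walk_range(unsigned long vaddr, unsigned long size)
{
    unsigned long end = vaddr + size;

    while (vaddr < end) {
        /* ... operate on [vaddr, min(end, next PMD boundary)) ... */
        vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  /* next PMD boundary */
    }
}
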
/arch/arm64/mm/
hugetlbpage.c
60 case PMD_SIZE: in __hugetlb_valid_size()
95 *pgsize = PMD_SIZE; in find_num_contig()
109 *pgsize = PMD_SIZE; in num_contig_ptes()
267 } else if (sz == PMD_SIZE) { in huge_pte_alloc()
311 if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && in huge_pte_offset()
336 case PMD_SIZE: in hugetlb_mask_last_page()
337 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
339 return PMD_SIZE - CONT_PTE_SIZE; in hugetlb_mask_last_page()
360 case PMD_SIZE: in arch_make_huge_pte()
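
hugetlb_mask_last_page() returning PUD_SIZE - PMD_SIZE above encodes how far a walker may skip when a whole PUD entry turns out to be empty: OR-ing that value into an address lands on the last PMD slot of the current PUD, so one more PMD_SIZE step crosses into the next PUD. A worked check, assuming the usual 4 KiB-page values:

#include <assert.h>

#define PMD_SIZE (1UL << 21)   /* 2 MiB, assumes 4 KiB base pages */
#define PUD_SIZE (1UL << 30)   /* 1 GiB */

int main(void)
{
    unsigned long mask = PUD_SIZE - PMD_SIZE;        /* 0x3fe00000 */
    unsigned long addr = 0x40000000UL + 0x1200000UL; /* inside some PUD */

    /* (addr | mask) is the start of the last PMD in addr's PUD region;
     * one more PMD_SIZE step reaches the next PUD boundary. */
    assert((addr | mask) == 0x40000000UL + PUD_SIZE - PMD_SIZE);
    assert((addr | mask) + PMD_SIZE == 0x40000000UL + PUD_SIZE);
    return 0;
}
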
/arch/arm64/kvm/hyp/nvhe/
mm.c
335 phys = ALIGN(hyp_memory[i].base, PMD_SIZE); in create_fixblock()
336 if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size)) in create_fixblock()
344 addr = ALIGN(__io_map_base, PMD_SIZE); in create_fixblock()
345 ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE); in create_fixblock()
349 ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP); in create_fixblock()
353 ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker); in create_fixblock()
367 *size = PMD_SIZE; in hyp_fixblock_map()
/arch/x86/boot/startup/
sme.c
92 static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
193 ppd->vaddr += PMD_SIZE; in __sme_map_range_pmd()
194 ppd->paddr += PMD_SIZE; in __sme_map_range_pmd()
220 ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE); in __sme_map_range()
319 kernel_end = ALIGN((unsigned long)rip_rel_ptr(_end), PMD_SIZE); in sme_encrypt_kernel()
346 execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE; in sme_encrypt_kernel()
369 workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE); in sme_encrypt_kernel()
map_kernel.c
53 for (; paddr < paddr_end; paddr += PMD_SIZE) { in sme_postprocess_startup()
179 for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) { in __startup_64()
182 pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; in __startup_64()
/arch/arm64/include/asm/
kernel-pgtable.h
21 #if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
hugetlb.h
85 case PMD_SIZE: in __flush_hugetlb_tlb_range()
86 __flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2); in __flush_hugetlb_tlb_range()
/arch/powerpc/include/asm/nohash/64/
pgtable-4k.h
31 #define PMD_SIZE (1UL << PMD_SHIFT) macro
32 #define PMD_MASK (~(PMD_SIZE-1))
/arch/nios2/mm/
ioremap.c
33 if (end > PMD_SIZE) in remap_area_pte()
34 end = PMD_SIZE; in remap_area_pte()
70 address = (address + PMD_SIZE) & PMD_MASK; in remap_area_pmd()
/arch/powerpc/mm/book3s64/
radix_pgtable.c
102 if (map_page_size == PMD_SIZE) { in early_map_kernel_page()
165 if (map_page_size == PMD_SIZE) { in __map_kernel_page()
329 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE && in create_physical_mapping()
331 mapping_size = PMD_SIZE; in create_physical_mapping()
742 return !vmemmap_populated(start, PMD_SIZE); in vmemmap_pmd_is_unused()
835 if (IS_ALIGNED(addr, PMD_SIZE) && in remove_pmd_table()
836 IS_ALIGNED(next, PMD_SIZE)) { in remove_pmd_table()
1006 VM_BUG_ON(!IS_ALIGNED(addr, PMD_SIZE)); in vmemmap_set_pmd()
1142 start = ALIGN_DOWN(start, PMD_SIZE); in radix__vmemmap_populate()
1337 addr_pfn += (PMD_SIZE >> PAGE_SHIFT); in vmemmap_populate_compound_pages()
[all …]
/arch/loongarch/kvm/
mmu.c
399 if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE) in kvm_arch_prepare_memory_region()
400 && IS_ALIGNED(hva_start, PMD_SIZE)) in kvm_arch_prepare_memory_region()
427 gpa_offset = gpa_start & (PMD_SIZE - 1); in kvm_arch_prepare_memory_region()
428 hva_offset = hva_start & (PMD_SIZE - 1); in kvm_arch_prepare_memory_region()
433 gpa_offset = PMD_SIZE; in kvm_arch_prepare_memory_region()
434 if ((size + gpa_offset) < (PMD_SIZE * 2)) in kvm_arch_prepare_memory_region()
638 return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE)); in fault_supports_huge_mapping()
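
fault_supports_huge_mapping() above admits a huge mapping only when the fault address lies in the PMD-aligned interior of the memslot; a block mapping installed at a ragged edge would map memory outside the slot. A self-contained restatement of the predicate, with ALIGN/ALIGN_DOWN paraphrased and a power-of-two alignment assumed:

#include <stdbool.h>

#define PMD_SIZE          (1UL << 21)
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
#define ALIGN(x, a)       ALIGN_DOWN((x) + (a) - 1, (a))

/* True only between the first PMD boundary at or above start and the
 * last PMD boundary at or below end, i.e. where a full 2 MiB block
 * fits entirely inside [start, end). */
static bool fault_supports_huge_mapping(unsigned long hva,
                                        unsigned long start,
                                        unsigned long end)
{
    return hva >= ALIGN(start, PMD_SIZE) && hva < ALIGN_DOWN(end, PMD_SIZE);
}
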
/arch/m68k/include/asm/
pgtable_mm.h
39 #define PMD_SIZE (1UL << PMD_SHIFT) macro
40 #define PMD_MASK (~(PMD_SIZE-1))
/arch/loongarch/include/asm/
pgtable.h
29 #define PMD_SIZE (1UL << PMD_SHIFT) macro
30 #define PMD_MASK (~(PMD_SIZE-1))
34 #define PMD_SIZE (1UL << PMD_SHIFT) macro
35 #define PMD_MASK (~(PMD_SIZE-1))
96 …R_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - …
100 …D * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - …
103 #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
/arch/x86/kernel/
vmlinux.lds.S
68 #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
69 #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
80 . = ALIGN(PMD_SIZE); \
86 . = ALIGN(PMD_SIZE); \
/arch/riscv/include/asm/
crash_reserve.h
5 #define CRASH_ALIGN PMD_SIZE
