/arch/sh/mm/

numa.c
     25  unsigned long start_pfn, end_pfn;                           in setup_bootmem_node() local
     30  start_pfn = PFN_DOWN(start);                                in setup_bootmem_node()
     38  __add_active_range(nid, start_pfn, end_pfn);                in setup_bootmem_node()
     48  NODE_DATA(nid)->node_start_pfn = start_pfn;                 in setup_bootmem_node()
     49  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;   in setup_bootmem_node()

init.c
    204  unsigned long start_pfn, end_pfn;                           in allocate_pgdat() local
    206  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);           in allocate_pgdat()
    212  NODE_DATA(nid)->node_start_pfn = start_pfn;                 in allocate_pgdat()
    213  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;   in allocate_pgdat()
    218  unsigned long start_pfn, end_pfn;                           in do_init_bootmem() local
    222  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)  in do_init_bootmem()
    223  __add_active_range(0, start_pfn, end_pfn);                  in do_init_bootmem()
    236  unsigned long start_pfn;                                    in early_reserve_mem() local
    244  start_pfn = PFN_UP(__pa(_end));                             in early_reserve_mem()
    252  memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);  in early_reserve_mem()
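
The `PFN_UP()`/`PFN_DOWN()`/`PFN_PHYS()` helpers that `early_reserve_mem()` leans on are plain shift-and-round conversions between physical addresses and page frame numbers. A minimal, self-contained sketch of their semantics, assuming 4 KiB pages (the kernel's actual definitions live in include/linux/pfn.h):

```c
#include <stdio.h>

/* Assumed 4 KiB pages; the kernel derives these from its PAGE_SHIFT. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Mirrors the include/linux/pfn.h converters. */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)                   /* round down */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* round up   */
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)                   /* pfn->phys  */

int main(void)
{
    unsigned long kernel_end = 0x1234567;   /* hypothetical __pa(_end) */

    /* PFN_UP() names the first page frame past the kernel image, so a
     * reservation running up to PFN_PHYS(start_pfn) covers _end. */
    unsigned long start_pfn = PFN_UP(kernel_end);

    printf("PFN_DOWN=%#lx PFN_UP=%#lx PFN_PHYS(up)=%#lx\n",
           PFN_DOWN(kernel_end), start_pfn, PFN_PHYS(start_pfn));
    return 0;
}
```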

/arch/x86/xen/

setup.c
     90  start_r = xen_extra_mem[i].start_pfn;                       in xen_del_extra_mem()
     94  if (start_r == start_pfn) {                                 in xen_del_extra_mem()
     96  xen_extra_mem[i].start_pfn += n_pfns;                       in xen_del_extra_mem()
    107  if (start_pfn > start_r && start_pfn < start_r + size_r) {  in xen_del_extra_mem()
    112  (start_pfn + n_pfns));                                      in xen_del_extra_mem()
    129  if (pfn >= xen_extra_mem[i].start_pfn &&                    in xen_chk_extra_mem()
    148  pfn_s = xen_extra_mem[i].start_pfn;                         in xen_inv_extra_mem()
    220  WARN_ON(start_pfn > end_pfn);                               in xen_set_identity_and_release_chunk()
    348  unsigned long n = end_pfn - start_pfn;                      in xen_set_identity_and_remap_chunk()
    402  if (start_pfn >= ini_nr_pages)                              in xen_count_remap_pages()
    [all …]
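
The `xen_del_extra_mem()` hits above are range-trimming arithmetic over `{start_pfn, n_pfns}` entries: a deletion at the front of an entry advances its `start_pfn` (line 96), while a deletion strictly inside it has to split the entry. A self-contained sketch of the two simple cases, with illustrative names (the middle-split path is elided):

```c
#include <stdio.h>

/* Illustrative stand-in for the xen_extra_mem[] entries. */
struct extra_mem {
    unsigned long start_pfn;
    unsigned long n_pfns;
};

/* Sketch: trim [start_pfn, start_pfn + n_pfns) off one entry. Deleting
 * from the front just advances start_pfn and shrinks n_pfns; deleting
 * from the tail only shrinks n_pfns. A hole in the middle would need
 * the entry split in two (not shown). */
static void del_from_entry(struct extra_mem *e,
                           unsigned long start_pfn, unsigned long n_pfns)
{
    if (e->start_pfn == start_pfn) {              /* front of the entry */
        e->start_pfn += n_pfns;
        e->n_pfns -= n_pfns;
    } else if (start_pfn + n_pfns == e->start_pfn + e->n_pfns) {
        e->n_pfns -= n_pfns;                      /* tail of the entry  */
    }
}

int main(void)
{
    struct extra_mem e = { .start_pfn = 0x100, .n_pfns = 0x80 };

    del_from_entry(&e, 0x100, 0x10);
    printf("entry now starts at %#lx, %#lx pfns\n", e.start_pfn, e.n_pfns);
    return 0;
}
```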

enlighten.c
    414  void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)  in xen_add_extra_mem() argument
    425  xen_extra_mem[i].start_pfn = start_pfn;                     in xen_add_extra_mem()
    430  if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==  in xen_add_extra_mem()
    431  start_pfn) {                                                in xen_add_extra_mem()
    439  memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));    in xen_add_extra_mem()
    462  pfn_to_page(xen_extra_mem[i].start_pfn + j);                in arch_xen_unpopulated_init()
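
Lines 430-431 above are the merge test: a new range whose `start_pfn` begins exactly where an existing entry ends just extends that entry instead of consuming a new slot. A self-contained sketch of that add path, assuming a fixed-size table and illustrative names:

```c
#include <stdio.h>

struct extra_mem {
    unsigned long start_pfn;
    unsigned long n_pfns;   /* 0 means the slot is free */
};

#define MAX_EXTRA 4

/* Sketch of the add path shown above: reuse a free slot, or extend an
 * entry that ends exactly where the new range begins. */
static int add_extra_mem(struct extra_mem *em,
                         unsigned long start_pfn, unsigned long n_pfns)
{
    for (int i = 0; i < MAX_EXTRA; i++) {
        if (em[i].n_pfns == 0) {                  /* empty slot        */
            em[i].start_pfn = start_pfn;
            em[i].n_pfns = n_pfns;
            return 0;
        }
        if (em[i].start_pfn + em[i].n_pfns == start_pfn) {
            em[i].n_pfns += n_pfns;               /* contiguous: merge */
            return 0;
        }
    }
    return -1;                                    /* table is full     */
}

int main(void)
{
    struct extra_mem em[MAX_EXTRA] = { 0 };

    add_extra_mem(em, 0x1000, 0x100);
    add_extra_mem(em, 0x1100, 0x40);  /* merges with the first entry */
    printf("[0] = %#lx + %#lx pfns\n", em[0].start_pfn, em[0].n_pfns);
    return 0;
}
```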

/arch/powerpc/platforms/powernv/

memtrace.c
     98  unsigned long pfn, start_pfn;                               in memtrace_alloc_node() local
    109  start_pfn = page_to_pfn(page);                              in memtrace_alloc_node()
    115  flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),  in memtrace_alloc_node()
    116  (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),          in memtrace_alloc_node()
    123  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)    in memtrace_alloc_node()
    126  arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);      in memtrace_alloc_node()
    128  return PFN_PHYS(start_pfn);                                 in memtrace_alloc_node()
    202  const unsigned long start_pfn = PHYS_PFN(start);            in memtrace_free() local
    210  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)    in memtrace_free()
    213  free_contig_range(start_pfn, nr_pages);                     in memtrace_free()

/arch/x86/mm/

init.c
    330  if (start_pfn < end_pfn) {                                  in save_mr()
    333  mr[nr_range].start = start_pfn<<PAGE_SHIFT;                 in save_mr()
    412  pfn = start_pfn = PFN_DOWN(start);                          in split_mem_range()
    429  if (start_pfn < end_pfn) {                                  in split_mem_range()
    444  if (start_pfn < end_pfn) {                                  in split_mem_range()
    454  if (start_pfn < end_pfn) {                                  in split_mem_range()
    464  if (start_pfn < end_pfn) {                                  in split_mem_range()
    472  start_pfn = pfn;                                            in split_mem_range()
    512  if (start_pfn < (1UL<<(32-PAGE_SHIFT)))                     in add_pfn_range_mapped()
    522  if ((start_pfn >= pfn_mapped[i].start) &&                   in pfn_range_is_mapped()
    [all …]
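
`split_mem_range()` is where the repeated `if (start_pfn < end_pfn)` guards come from: the [start, end) span is cut at the boundaries where a larger page size becomes usable, and each candidate sub-range is recorded only when it is non-empty. A simplified, self-contained sketch of the carve-up, assuming 4 KiB base pages and a single 2 MiB large-page size (the real code also handles 1 GiB pages and 32-bit quirks):

```c
#include <stdio.h>

#define PAGE_SHIFT   12                    /* assumed 4 KiB pages */
#define PMD_SHIFT    21                    /* assumed 2 MiB pages */
#define PFNS_PER_PMD (1UL << (PMD_SHIFT - PAGE_SHIFT))
#define PMD_PFN_MASK (~(PFNS_PER_PMD - 1))

int main(void)
{
    unsigned long start_pfn = 0x123, end_pfn = 0x1000; /* hypothetical */

    /* First 2 MiB boundary at or above start, last one at or below end. */
    unsigned long head = (start_pfn + PFNS_PER_PMD - 1) & PMD_PFN_MASK;
    unsigned long body = end_pfn & PMD_PFN_MASK;

    if (head > end_pfn)
        head = end_pfn;          /* range smaller than one 2 MiB chunk */
    if (body < head)
        body = head;

    /* Each sub-range is emitted only if non-empty, as in save_mr(). */
    if (start_pfn < head)
        printf("4K head: %#lx..%#lx\n", start_pfn, head);
    if (head < body)
        printf("2M body: %#lx..%#lx\n", head, body);
    if (body < end_pfn)
        printf("4K tail: %#lx..%#lx\n", body, end_pfn);
    return 0;
}
```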

numa.c
    128  unsigned long start_pfn, end_pfn;                           in numa_register_nodes() local
    134  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);           in numa_register_nodes()
    135  if (start_pfn >= end_pfn)                                   in numa_register_nodes()

/arch/mips/loongson64/

numa.c
     85  unsigned long start_pfn, end_pfn;                           in node_mem_init() local
     91  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);          in node_mem_init()
     93  node, start_pfn, end_pfn);                                  in node_mem_init()
     97  NODE_DATA(node)->node_start_pfn = start_pfn;                in node_mem_init()
     98  NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;  in node_mem_init()
    120  memblock_reserve(0, PAGE_SIZE * start_pfn);                 in node_mem_init()
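
This is the same idiom as the sh, x86 and SGI IP27 hits elsewhere in the list: ask `get_pfn_range_for_nid()` for the node's pfn span, then record the start and the spanned (hole-inclusive) page count in the node descriptor. A toy, self-contained model of that flow; the range table, types and values are made up:

```c
#include <stdio.h>

/* Toy model of the node_mem_init() pattern: derive a node's pfn span
 * from a table of memory ranges, then record it per node. */
struct mem_range { int nid; unsigned long start_pfn, end_pfn; };
struct node_data { unsigned long node_start_pfn, node_spanned_pages; };

static const struct mem_range ranges[] = {
    { 0, 0x0000, 0x8000 },
    { 1, 0x8000, 0xc000 },
    { 1, 0xd000, 0xe000 },   /* a node's span may contain holes */
};

static void get_pfn_range_for_nid(int nid,
                                  unsigned long *start_pfn,
                                  unsigned long *end_pfn)
{
    *start_pfn = ~0UL;
    *end_pfn = 0;
    for (unsigned i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
        if (ranges[i].nid != nid)
            continue;
        if (ranges[i].start_pfn < *start_pfn)
            *start_pfn = ranges[i].start_pfn;
        if (ranges[i].end_pfn > *end_pfn)
            *end_pfn = ranges[i].end_pfn;
    }
}

int main(void)
{
    struct node_data node[2];

    for (int nid = 0; nid < 2; nid++) {
        unsigned long start_pfn, end_pfn;

        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
        node[nid].node_start_pfn = start_pfn;
        node[nid].node_spanned_pages = end_pfn - start_pfn;
        printf("node %d: start pfn %#lx, %#lx spanned pages\n",
               nid, start_pfn, node[nid].node_spanned_pages);
    }
    return 0;
}
```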

/arch/sparc/mm/

init_32.c
     64  unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;  in calc_highpages() local
     70  if (start_pfn < max_low_pfn)                                in calc_highpages()
     71  start_pfn = max_low_pfn;                                    in calc_highpages()
     73  nr += end_pfn - start_pfn;                                  in calc_highpages()
    133  unsigned long start_pfn, bytes_avail, size;                 in bootmem_init() local
    171  start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));  in bootmem_init()
    174  start_pfn >>= PAGE_SHIFT;                                   in bootmem_init()
    192  size = (start_pfn << PAGE_SHIFT) - phys_base;               in bootmem_init()

/arch/parisc/mm/

init.c
    133  if (pmem_ranges[j-1].start_pfn <                            in setup_bootmem()
    134  pmem_ranges[j].start_pfn) {                                 in setup_bootmem()
    149  if (pmem_ranges[i].start_pfn -                              in setup_bootmem()
    150  (pmem_ranges[i-1].start_pfn +                               in setup_bootmem()
    155  pmem_ranges[i].start_pfn -                                  in setup_bootmem()
    156  (pmem_ranges[i-1].start_pfn +                               in setup_bootmem()
    249  unsigned long start_pfn;                                    in setup_bootmem() local
    254  start_pfn = pmem_ranges[i].start_pfn;                       in setup_bootmem()
    257  start = start_pfn << PAGE_SHIFT;                            in setup_bootmem()
    263  if ((start_pfn + npages) > max_pfn)                         in setup_bootmem()
    [all …]
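
The parisc `setup_bootmem()` hits are sorting and hole-detection over `pmem_ranges[]`: once the ranges are ordered by `start_pfn`, the gap before range i is `start_pfn[i] - (start_pfn[i-1] + pages[i-1])`, and oversized gaps cause ranges to be dropped. A self-contained sketch of just the gap computation, with made-up ranges and an assumed `pages` field name:

```c
#include <stdio.h>

struct pmem_range { unsigned long start_pfn, pages; };

int main(void)
{
    /* Hypothetical ranges, already sorted by start_pfn. */
    struct pmem_range pmem_ranges[] = {
        { 0x0000, 0x4000 },
        { 0x5000, 0x2000 },
        { 0x7000, 0x1000 },
    };
    int n = sizeof(pmem_ranges) / sizeof(pmem_ranges[0]);

    /* Gap between a range and its predecessor, as in the expressions
     * above. Zero means the two ranges are adjacent. */
    for (int i = 1; i < n; i++) {
        unsigned long hole = pmem_ranges[i].start_pfn -
                             (pmem_ranges[i - 1].start_pfn +
                              pmem_ranges[i - 1].pages);
        printf("hole before range %d: %#lx pfns\n", i, hole);
    }
    return 0;
}
```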

/arch/powerpc/mm/

mem.c
    128  int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,  in add_pages() argument
    133  ret = __add_pages(nid, start_pfn, nr_pages, params);        in add_pages()
    138  update_end_of_memory_vars(start_pfn << PAGE_SHIFT,          in add_pages()
    147  unsigned long start_pfn = start >> PAGE_SHIFT;              in arch_add_memory() local
    154  rc = add_pages(nid, start_pfn, nr_pages, params);           in arch_add_memory()
    162  unsigned long start_pfn = start >> PAGE_SHIFT;              in arch_remove_memory() local
    165  __remove_pages(start_pfn, nr_pages, altmap);                in arch_remove_memory()

init_64.c
     77  unsigned long start_pfn;                                    in vmemmap_subsection_start() local
     81  start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;  in vmemmap_subsection_start()
     82  return pfn_to_page(start_pfn);                              in vmemmap_subsection_start()
    192  unsigned long start_pfn = page_to_pfn((struct page *)start);  in altmap_cross_boundary() local
    194  if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)             in altmap_cross_boundary()
    197  if (start_pfn < altmap->base_pfn)                           in altmap_cross_boundary()
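
`vmemmap_subsection_start()` recovers a pfn from a byte offset into the virtual memmap: divide by `sizeof(struct page)`, then round down to a subsection boundary with `PAGE_SUBSECTION_MASK`. A sketch of the arithmetic under assumed sizes (512-page subsections, 64-byte struct page; both are configuration dependent):

```c
#include <stdio.h>

/* Assumed values; the kernel derives them from its configuration. */
#define PAGES_PER_SUBSECTION (1UL << 9)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION - 1))
#define STRUCT_PAGE_SIZE     64UL

int main(void)
{
    /* Byte offset of some struct page within the vmemmap array. */
    unsigned long offset = 0x123456;

    /* As in vmemmap_subsection_start(): offset -> pfn, then round the
     * pfn down to the start of its subsection with the mask. */
    unsigned long pfn = offset / STRUCT_PAGE_SIZE;
    unsigned long start_pfn = pfn & PAGE_SUBSECTION_MASK;

    printf("pfn %#lx -> subsection start pfn %#lx\n", pfn, start_pfn);
    return 0;
}
```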

numa.c
   1041  unsigned long start_pfn, end_pfn;                           in setup_nonnuma() local
   1048  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {  in setup_nonnuma()
   1050  memblock_set_node(PFN_PHYS(start_pfn),                      in setup_nonnuma()
   1051  PFN_PHYS(end_pfn - start_pfn),                              in setup_nonnuma()
   1093  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  in setup_node_data() argument
   1095  u64 spanned_pages = end_pfn - start_pfn;                    in setup_node_data()
   1100  NODE_DATA(nid)->node_start_pfn = start_pfn;                 in setup_node_data()
   1210  unsigned long start_pfn, end_pfn;                           in initmem_init() local
   1212  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);           in initmem_init()
   1213  setup_node_data(nid, start_pfn, end_pfn);                   in initmem_init()

/arch/loongarch/mm/

init.c
     85  unsigned long start_pfn = start >> PAGE_SHIFT;              in arch_add_memory() local
     89  ret = __add_pages(nid, start_pfn, nr_pages, params);        in arch_add_memory()
    100  unsigned long start_pfn = start >> PAGE_SHIFT;              in arch_remove_memory() local
    102  struct page *page = pfn_to_page(start_pfn);                 in arch_remove_memory()
    107  __remove_pages(start_pfn, nr_pages, altmap);                in arch_remove_memory()

/arch/s390/mm/

init.c
    242  mem_data.start = arg->start_pfn << PAGE_SHIFT;              in s390_cma_mem_notifier()
    264  unsigned long start_pfn = PFN_DOWN(start);                  in arch_add_memory() local
    276  rc = __add_pages(nid, start_pfn, size_pages, params);       in arch_add_memory()
    284  unsigned long start_pfn = start >> PAGE_SHIFT;              in arch_remove_memory() local
    287  __remove_pages(start_pfn, nr_pages, altmap);                in arch_remove_memory()

/arch/mips/mm/

ioremap.c
     25  static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,  in __ioremap_check_ram() argument
     31  if (pfn_valid(start_pfn + i) &&                             in __ioremap_check_ram()
     32  !PageReserved(pfn_to_page(start_pfn + i)))                  in __ioremap_check_ram()
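
`__ioremap_check_ram()` rejects remapping over real RAM: any pfn in the range that is valid and whose page is not reserved makes the range ordinary memory. A self-contained model with toy predicates standing in for `pfn_valid()` and `PageReserved()`:

```c
#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins; the kernel consults the memory map and page flags. */
static bool pfn_valid(unsigned long pfn)     { return pfn < 0x1000; }
static bool page_reserved(unsigned long pfn) { return pfn < 0x100; }

/* Mirrors the logic above: any valid, non-reserved page in the range
 * means ordinary RAM, which must not be ioremapped. */
static int ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages)
{
    for (unsigned long i = 0; i < nr_pages; i++)
        if (pfn_valid(start_pfn + i) && !page_reserved(start_pfn + i))
            return 1;   /* found RAM */
    return 0;
}

int main(void)
{
    printf("reserved low range: %d\n", ioremap_check_ram(0x10, 0x10));
    printf("ordinary RAM:       %d\n", ioremap_check_ram(0x200, 0x10));
    printf("beyond memory map:  %d\n", ioremap_check_ram(0x2000, 0x10));
    return 0;
}
```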

/arch/loongarch/kernel/

numa.c
    145  unsigned long start_pfn, end_pfn;                           in node_mem_init() local
    152  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);          in node_mem_init()
    154  node, start_pfn, end_pfn);                                  in node_mem_init()
    292  unsigned long start_pfn, end_pfn;                           in paging_init() local
    294  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);          in paging_init()

/arch/sh/kernel/

setup.c
    193  void __init __add_active_range(unsigned int nid, unsigned long start_pfn,  in __add_active_range() argument
    201  start = start_pfn << PAGE_SHIFT;                            in __add_active_range()
    211  start_pfn, end_pfn);                                        in __add_active_range()
    235  memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),  in __add_active_range()

/arch/x86/platform/efi/

efi_32.c
     38  u64 start_pfn, end_pfn, end;                                in efi_map_region() local
     42  start_pfn = PFN_DOWN(md->phys_addr);                        in efi_map_region()
     47  if (pfn_range_is_mapped(start_pfn, end_pfn)) {              in efi_map_region()

/arch/x86/virt/vmx/tdx/

tdx.c
    201  tmb->start_pfn = start_pfn;                                 in add_tdx_memblock()
    230  unsigned long start_pfn, end_pfn;                           in build_tdx_memlist() local
    233  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {  in build_tdx_memlist()
    240  start_pfn = max(start_pfn, PHYS_PFN(SZ_1M));                in build_tdx_memlist()
    241  if (start_pfn >= end_pfn)                                   in build_tdx_memlist()
    249  ret = add_tdx_memblock(tmb_list, start_pfn, end_pfn, nid);  in build_tdx_memlist()
    390  start = TDMR_ALIGN_DOWN(PFN_PHYS(tmb->start_pfn));          in fill_out_tdmrs()
    736  start = PFN_PHYS(tmb->start_pfn);                           in tdmr_populate_rsvd_holes()
   1344  static bool is_tdx_memory(unsigned long start_pfn, unsigned long end_pfn)  in is_tdx_memory() argument
   1358  if (start_pfn >= tmb->start_pfn && end_pfn <= tmb->end_pfn)  in is_tdx_memory()
    [all …]
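
In `build_tdx_memlist()`, line 240 clamps each memblock range so that the legacy first megabyte never lands in a TDX memblock, and line 241 skips ranges the clamp empties. A self-contained sketch of that filter, assuming 4 KiB pages and made-up ranges:

```c
#include <stdio.h>

#define PAGE_SHIFT  12                 /* assumed 4 KiB pages */
#define SZ_1M       0x100000UL
#define PHYS_PFN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
    /* Hypothetical memblock-style pfn ranges. */
    unsigned long ranges[][2] = {
        { 0x000, 0x09f },    /* entirely below 1 MiB: dropped */
        { 0x050, 0x800 },    /* straddles 1 MiB: clamped      */
        { 0x900, 0xf00 },    /* above 1 MiB: kept as-is       */
    };

    for (int i = 0; i < 3; i++) {
        unsigned long start_pfn = ranges[i][0], end_pfn = ranges[i][1];

        /* As above: keep the first 1 MiB out of the TDX memory list,
         * then skip ranges that became empty. */
        if (start_pfn < PHYS_PFN(SZ_1M))
            start_pfn = PHYS_PFN(SZ_1M);
        if (start_pfn >= end_pfn)
            continue;
        printf("tdx memblock: %#lx..%#lx\n", start_pfn, end_pfn);
    }
    return 0;
}
```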

/arch/mips/sgi-ip27/

ip27-memory.c
    354  unsigned long start_pfn, end_pfn;                           in node_mem_init() local
    356  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);          in node_mem_init()
    365  NODE_DATA(node)->node_start_pfn = start_pfn;                in node_mem_init()
    366  NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;  in node_mem_init()

/arch/x86/mm/pat/

memtype.c
    402  unsigned long start_pfn = start >> PAGE_SHIFT;              in pat_pagerange_is_ram() local
    404  struct pagerange_state state = {start_pfn, 0, 0};           in pat_pagerange_is_ram()
    413  if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)              in pat_pagerange_is_ram()
    414  start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;                  in pat_pagerange_is_ram()
    416  if (start_pfn < end_pfn) {                                  in pat_pagerange_is_ram()
    417  ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,  in pat_pagerange_is_ram()
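
`pat_pagerange_is_ram()` must answer three ways: the range is entirely RAM, entirely not, or an illegal mix. The kernel answers by walking the resource tree with `walk_system_ram_range()` and a small state struct; a toy model that instead counts how many of the requested pfns fall inside a made-up RAM map:

```c
#include <stdio.h>

struct ram_range { unsigned long start_pfn, end_pfn; };

/* Hypothetical system RAM map. */
static const struct ram_range system_ram[] = {
    { 0x100, 0x800 },
    { 0xa00, 0xc00 },
};

/* Returns 1 if [start_pfn, end_pfn) is entirely RAM, 0 if none of it
 * is, and -1 for a RAM/non-RAM mix. */
static int pagerange_is_ram(unsigned long start_pfn, unsigned long end_pfn)
{
    unsigned long found = 0;

    for (unsigned i = 0; i < sizeof(system_ram) / sizeof(system_ram[0]); i++) {
        unsigned long s = system_ram[i].start_pfn;
        unsigned long e = system_ram[i].end_pfn;

        if (s < start_pfn) s = start_pfn;   /* clip to the query range */
        if (e > end_pfn)   e = end_pfn;
        if (s < e)
            found += e - s;
    }
    if (found == 0)
        return 0;
    return found == end_pfn - start_pfn ? 1 : -1;
}

int main(void)
{
    printf("%d %d %d\n",
           pagerange_is_ram(0x200, 0x300),   /* all RAM ->  1 */
           pagerange_is_ram(0x800, 0xa00),   /* no RAM  ->  0 */
           pagerange_is_ram(0x700, 0x900));  /* mixed   -> -1 */
    return 0;
}
```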

/arch/sh/include/asm/

mmzone.h
     37  void __init __add_active_range(unsigned int nid, unsigned long start_pfn,

/arch/s390/include/asm/

diag.h
     61  static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)  in diag10_range() argument
     65  start_addr = pfn_to_phys(start_pfn);                        in diag10_range()
     66  end_addr = pfn_to_phys(start_pfn + num_pfn - 1);            in diag10_range()

/arch/x86/include/asm/

page_types.h
     63  bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);