Lines Matching refs:pfn

232 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)  in check_pfn_span()  argument
249 if (!IS_ALIGNED(pfn | nr_pages, min_align)) in check_pfn_span()
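
A note on the check in check_pfn_span(): OR-ing pfn and nr_pages lets a single IS_ALIGNED() test reject a misaligned start or a misaligned length, since either one would leave a low bit set under the mask. A standalone userspace sketch of the same trick (the subsection size below is a placeholder, not taken from this file):

    #include <stdio.h>

    #define SUBSECTION_SHIFT 15                     /* placeholder: 2^15 pages */
    #define MIN_ALIGN        (1UL << SUBSECTION_SHIFT)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            unsigned long pfn = 0x8000, nr_pages = 0x4000;  /* aligned start, short span */

            /* One test covers both operands: a stray low bit in either fails. */
            if (!IS_ALIGNED(pfn | nr_pages, MIN_ALIGN))
                    printf("misaligned span: start %#lx, %#lx pages\n", pfn, nr_pages);
            return 0;
    }
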
259 struct page *pfn_to_online_page(unsigned long pfn) in pfn_to_online_page() argument
261 unsigned long nr = pfn_to_section_nr(pfn); in pfn_to_online_page()
276 if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn)) in pfn_to_online_page()
279 if (!pfn_section_valid(ms, pfn)) in pfn_to_online_page()
283 return pfn_to_page(pfn); in pfn_to_online_page()
291 pgmap = get_dev_pagemap(pfn, NULL); in pfn_to_online_page()
298 return pfn_to_page(pfn); in pfn_to_online_page()
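
For orientation, pfn_to_online_page() narrows the lookup in two steps: the pfn selects a mem_section, then a bit in that section's subsection map confirms the pfn is actually backed before pfn_to_page() is trusted. A toy sketch of the index arithmetic only, with invented shift values (the real ones are config-dependent):

    #include <stdio.h>

    #define SECTION_SHIFT    15                     /* invented for this sketch */
    #define SUBSECTION_SHIFT 11

    int main(void)
    {
            unsigned long pfn = 0x123456;
            unsigned long subsecs = 1UL << (SECTION_SHIFT - SUBSECTION_SHIFT);

            /* Section number, then the subsection bit within that section. */
            printf("pfn %#lx: section %lu, subsection bit %lu\n", pfn,
                   pfn >> SECTION_SHIFT,
                   (pfn >> SUBSECTION_SHIFT) & (subsecs - 1));
            return 0;
    }
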
302 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
305 const unsigned long end_pfn = pfn + nr_pages; in __add_pages()
313 VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false)); in __add_pages()
319 if (altmap->base_pfn != pfn in __add_pages()
327 if (check_pfn_span(pfn, nr_pages)) { in __add_pages()
328 WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1); in __add_pages()
332 for (; pfn < end_pfn; pfn += cur_nr_pages) { in __add_pages()
334 cur_nr_pages = min(end_pfn - pfn, in __add_pages()
335 SECTION_ALIGN_UP(pfn + 1) - pfn); in __add_pages()
336 err = sparse_add_section(nid, pfn, cur_nr_pages, altmap, in __add_pages()
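
The loop in __add_pages() never lets one sparse_add_section() call cross a section boundary: SECTION_ALIGN_UP(pfn + 1) - pfn is the distance to the next boundary, and min() clamps that against what remains of the request. A runnable sketch of the chunking, with a placeholder section size:

    #include <stdio.h>

    #define PAGES_PER_SECTION (1UL << 15)           /* placeholder section size */
    #define SECTION_ALIGN_UP(pfn) \
            (((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))
    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned long pfn = 0x7f00, end_pfn = 0x18100, cur_nr_pages;

            for (; pfn < end_pfn; pfn += cur_nr_pages) {
                    /* Clamp to the range end or the next section boundary. */
                    cur_nr_pages = min(end_pfn - pfn,
                                       SECTION_ALIGN_UP(pfn + 1) - pfn);
                    printf("chunk: pfn %#lx, %#lx pages\n", pfn, cur_nr_pages);
            }
            return 0;
    }

__remove_pages() below walks the range with the same clamping expression.
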
372 unsigned long pfn; in find_biggest_section_pfn() local
375 pfn = end_pfn - 1; in find_biggest_section_pfn()
376 for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) { in find_biggest_section_pfn()
377 if (unlikely(!pfn_to_online_page(pfn))) in find_biggest_section_pfn()
380 if (unlikely(pfn_to_nid(pfn) != nid)) in find_biggest_section_pfn()
383 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
386 return pfn; in find_biggest_section_pfn()
395 unsigned long pfn; in shrink_zone_span() local
405 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
407 if (pfn) { in shrink_zone_span()
408 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
409 zone->zone_start_pfn = pfn; in shrink_zone_span()
421 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn, in shrink_zone_span()
423 if (pfn) in shrink_zone_span()
424 zone->spanned_pages = pfn - zone->zone_start_pfn + 1; in shrink_zone_span()
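
shrink_zone_span() keeps zone_start_pfn and spanned_pages consistent in both directions: shrinking from below recomputes the span against the old zone end before moving the start, while shrinking from above uses an inclusive last pfn, hence the + 1. A toy model of the two updates (the struct and values are invented, field names mirror struct zone):

    #include <stdio.h>

    struct zone_span {                              /* toy stand-in for struct zone */
            unsigned long zone_start_pfn;
            unsigned long spanned_pages;
    };

    static unsigned long zone_end_pfn(const struct zone_span *z)
    {
            return z->zone_start_pfn + z->spanned_pages;
    }

    int main(void)
    {
            struct zone_span z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x7000 };
            unsigned long pfn;

            /* Shrink from below: first remaining online pfn is now 0x2000.
             * Recompute the span against the old end before moving the start. */
            pfn = 0x2000;
            z.spanned_pages = zone_end_pfn(&z) - pfn;
            z.zone_start_pfn = pfn;

            /* Shrink from above: last remaining online pfn is 0x5fff,
             * an inclusive bound, hence the + 1. */
            pfn = 0x5fff;
            z.spanned_pages = pfn - z.zone_start_pfn + 1;

            printf("zone now spans [%#lx, %#lx)\n",
                   z.zone_start_pfn, zone_end_pfn(&z));
            return 0;
    }
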
466 unsigned long pfn, cur_nr_pages; in remove_pfn_range_from_zone() local
469 for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) { in remove_pfn_range_from_zone()
474 min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn); in remove_pfn_range_from_zone()
475 page_init_poison(pfn_to_page(pfn), in remove_pfn_range_from_zone()
495 static void __remove_section(unsigned long pfn, unsigned long nr_pages, in __remove_section() argument
499 struct mem_section *ms = __pfn_to_section(pfn); in __remove_section()
504 sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap); in __remove_section()
518 void __remove_pages(unsigned long pfn, unsigned long nr_pages, in __remove_pages() argument
521 const unsigned long end_pfn = pfn + nr_pages; in __remove_pages()
527 if (check_pfn_span(pfn, nr_pages)) { in __remove_pages()
528 WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1); in __remove_pages()
532 for (; pfn < end_pfn; pfn += cur_nr_pages) { in __remove_pages()
535 cur_nr_pages = min(end_pfn - pfn, in __remove_pages()
536 SECTION_ALIGN_UP(pfn + 1) - pfn); in __remove_pages()
537 __remove_section(pfn, cur_nr_pages, map_offset, altmap); in __remove_pages()
596 unsigned long pfn; in online_pages_range() local
607 for (pfn = start_pfn; pfn < end_pfn;) { in online_pages_range()
608 int order = min(MAX_ORDER - 1UL, __ffs(pfn)); in online_pages_range()
610 (*online_page_callback)(pfn_to_page(pfn), order); in online_pages_range()
611 pfn += (1UL << order); in online_pages_range()
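
online_pages_range() hands memory to the buddy allocator in the largest naturally aligned blocks it can: the lowest set bit of pfn bounds the alignment of the block starting there, clamped by MAX_ORDER. A userspace sketch, with the GCC/Clang builtin __builtin_ctzl() standing in for the kernel's __ffs() and an illustrative MAX_ORDER:

    #include <stdio.h>

    #define MAX_ORDER 11                            /* illustrative buddy limit */
    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned long pfn = 0x1234, end_pfn = 0x1300;

            while (pfn < end_pfn) {
                    /* Largest naturally aligned block at pfn: its lowest set
                     * bit, clamped by the buddy limit. pfn is never zero here. */
                    unsigned long order = min(MAX_ORDER - 1UL,
                                              (unsigned long)__builtin_ctzl(pfn));
                    printf("free pfn %#lx at order %lu\n", pfn, order);
                    pfn += 1UL << order;
            }
            return 0;
    }
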
666 static void section_taint_zone_device(unsigned long pfn) in section_taint_zone_device() argument
668 struct mem_section *ms = __pfn_to_section(pfn); in section_taint_zone_device()
673 static inline void section_taint_zone_device(unsigned long pfn) in section_taint_zone_device() argument
902 unsigned long pfn, in auto_movable_zone_for_pfn() argument
928 pfn = ALIGN_DOWN(pfn, group->d.unit_pages); in auto_movable_zone_for_pfn()
929 end_pfn = pfn + group->d.unit_pages; in auto_movable_zone_for_pfn()
930 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in auto_movable_zone_for_pfn()
931 page = pfn_to_online_page(pfn); in auto_movable_zone_for_pfn()
958 return default_kernel_zone_for_pfn(nid, pfn, nr_pages); in auto_movable_zone_for_pfn()
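
auto_movable_zone_for_pfn() must place a whole dynamic-group unit in a single zone, so it aligns the candidate pfn down to the unit and then inspects one pfn per section across it. A sketch of that walk (unit size and section size are hypothetical):

    #include <stdio.h>

    #define PAGES_PER_SECTION (1UL << 15)           /* both sizes hypothetical */
    #define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))

    int main(void)
    {
            unsigned long unit_pages = 4 * PAGES_PER_SECTION;
            unsigned long pfn = 0x23456, end_pfn;

            /* Visit one pfn per section across the unit containing pfn. */
            pfn = ALIGN_DOWN(pfn, unit_pages);
            end_pfn = pfn + unit_pages;
            for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
                    printf("inspect section starting at pfn %#lx\n", pfn);
            return 0;
    }
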
1026 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, in mhp_init_memmap_on_memory() argument
1029 unsigned long end_pfn = pfn + nr_pages; in mhp_init_memmap_on_memory()
1032 ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); in mhp_init_memmap_on_memory()
1036 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE); in mhp_init_memmap_on_memory()
1039 SetPageVmemmapSelfHosted(pfn_to_page(pfn + i)); in mhp_init_memmap_on_memory()
1047 online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); in mhp_init_memmap_on_memory()
1052 void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages) in mhp_deinit_memmap_on_memory() argument
1054 unsigned long end_pfn = pfn + nr_pages; in mhp_deinit_memmap_on_memory()
1062 offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); in mhp_deinit_memmap_on_memory()
1068 remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages); in mhp_deinit_memmap_on_memory()
1069 kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); in mhp_deinit_memmap_on_memory()
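
Both helpers above flip only whole sections online or offline, which is why end_pfn is aligned down to PAGES_PER_SECTION; a partially covered trailing section is left alone. A minimal illustration, with a placeholder section size:

    #include <stdio.h>

    #define PAGES_PER_SECTION (1UL << 15)           /* placeholder */
    #define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))

    int main(void)
    {
            unsigned long pfn = 0, end_pfn = 0xc123;

            /* Only sections covered through their last page are flipped. */
            printf("marking pfns [%#lx, %#lx) online\n",
                   pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
            return 0;
    }
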
1072 int __ref online_pages(unsigned long pfn, unsigned long nr_pages, in online_pages() argument
1088 if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) || in online_pages()
1089 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION))) in online_pages()
1095 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE); in online_pages()
1097 arg.start_pfn = pfn; in online_pages()
1124 online_pages_range(pfn, nr_pages); in online_pages()
1125 adjust_present_page_count(pfn_to_page(pfn), group, nr_pages); in online_pages()
1132 undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE); in online_pages()
1156 (unsigned long long) pfn << PAGE_SHIFT, in online_pages()
1157 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); in online_pages()
1159 remove_pfn_range_from_zone(zone, pfn, nr_pages); in online_pages()
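
The two shifted expressions in online_pages() simply print the pfn range as an inclusive physical byte range. A tiny sketch assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                           /* assuming 4 KiB pages */

    int main(void)
    {
            unsigned long pfn = 0x10000, nr_pages = 0x8000;

            /* Inclusive physical byte range backing [pfn, pfn + nr_pages). */
            printf("phys %#llx-%#llx\n",
                   (unsigned long long)pfn << PAGE_SHIFT,
                   (((unsigned long long)pfn + nr_pages) << PAGE_SHIFT) - 1);
            return 0;
    }
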
1579 unsigned long pfn; in scan_movable_pages() local
1581 for (pfn = start; pfn < end; pfn++) { in scan_movable_pages()
1585 if (!pfn_valid(pfn)) in scan_movable_pages()
1587 page = pfn_to_page(pfn); in scan_movable_pages()
1615 pfn += skip - 1; in scan_movable_pages()
1619 *movable_pfn = pfn; in scan_movable_pages()
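
scan_movable_pages() fast-forwards over compound pages: it computes how many pfns remain in the compound and adds skip - 1, letting the loop's own pfn++ supply the final step. A toy version with a hypothetical compound page (do_migrate_range() below skips tails the same way, via compound_nr()):

    #include <stdio.h>

    int main(void)
    {
            unsigned long start = 0x100, end = 0x120, pfn;
            unsigned long head_pfn = 0x108, compound_nr = 8;  /* hypothetical THP */

            for (pfn = start; pfn < end; pfn++) {
                    if (pfn == head_pfn) {
                            /* Remaining pfns in the compound; the loop's own
                             * pfn++ supplies the final step, hence skip - 1. */
                            unsigned long skip = compound_nr - (pfn - head_pfn);
                            pfn += skip - 1;
                            continue;
                    }
                    printf("scan pfn %#lx\n", pfn);
            }
            return 0;
    }
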
1625 unsigned long pfn; in do_migrate_range() local
1631 for (pfn = start_pfn; pfn < end_pfn; pfn++) { in do_migrate_range()
1635 if (!pfn_valid(pfn)) in do_migrate_range()
1637 page = pfn_to_page(pfn); in do_migrate_range()
1642 pfn = page_to_pfn(head) + compound_nr(head) - 1; in do_migrate_range()
1646 pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; in do_migrate_range()
1681 pr_warn("failed to isolate pfn %lx\n", pfn); in do_migrate_range()
1792 unsigned long pfn, system_ram_pages = 0; in offline_pages() local
1869 pfn = start_pfn; in offline_pages()
1879 ret = scan_movable_pages(pfn, end_pfn, &pfn); in offline_pages()
1885 do_migrate_range(pfn, end_pfn); in offline_pages()