Searched refs:pfn (Results 1 – 25 of 41) sorted by relevance

/mm/

page_isolation.c
38 unsigned long pfn; in has_unmovable_pages() local
55 for (pfn = start_pfn; pfn < end_pfn; pfn++) { in has_unmovable_pages()
319 unsigned long pfn; in isolate_single_pageblock() local
371 for (pfn = start_pfn; pfn < boundary_pfn;) { in isolate_single_pageblock()
372 struct page *page = __first_valid_page(pfn, boundary_pfn - pfn); in isolate_single_pageblock()
429 pfn++; in isolate_single_pageblock()
478 unsigned long pfn; in start_isolate_page_range() local
566 pfn++; in __test_page_isolated_in_pageblock()
574 pfn++; in __test_page_isolated_in_pageblock()
579 return pfn; in __test_page_isolated_in_pageblock()
[all …]
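
The page_isolation.c hits share one walk skeleton: advance through a pfn range while skipping holes in the memory map. A minimal sketch of that shape, reusing the file's internal helper __first_valid_page() shown in the hit at line 372:

/* Sketch of the walk in isolate_single_pageblock(): __first_valid_page()
 * returns the first struct page backing a valid pfn in the window, or
 * NULL if the remainder of the range is a hole. */
for (pfn = start_pfn; pfn < boundary_pfn;) {
        struct page *page = __first_valid_page(pfn, boundary_pfn - pfn);

        if (!page)
                break;          /* nothing valid left in the range */
        pfn = page_to_pfn(page);
        /* ... examine or isolate the page at pfn ... */
        pfn++;
}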

page_ext.c
313 pfn &= PAGE_SECTION_MASK; in init_section_page_ext()
381 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) in online_page_ext()
388 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in online_page_ext()
389 __free_page_ext(pfn); in online_page_ext()
411 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in offline_page_ext()
416 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in offline_page_ext()
417 __free_page_ext(pfn); in offline_page_ext()
450 unsigned long pfn; in page_ext_init() local
466 for (pfn = start_pfn; pfn < end_pfn; in page_ext_init()
467 pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) { in page_ext_init()
[all …]
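
All of the page_ext hits step through memory one sparse section at a time. A hedged sketch of the pattern from the page_ext_init() hit, assuming PAGES_PER_SECTION pages per memory section:

/* Sketch: visit every section intersecting [start_pfn, end_pfn).
 * ALIGN(pfn + 1, PAGES_PER_SECTION) rounds up to the next section
 * start, so even an unaligned start_pfn advances a full section. */
for (pfn = start_pfn; pfn < end_pfn;
     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
        if (!pfn_valid(pfn))
                continue;
        /* ... set up or tear down page_ext for this section ... */
}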

memory-failure.c
88 if (pfn != -1UL) in num_poisoned_pages_sub()
745 unsigned long pfn; member
769 if (!pfn || pfn != poisoned_pfn) in check_hwpoisoned_entry()
781 unsigned long pfn; in check_hwpoisoned_pmd_entry() local
786 pfn = pmd_pfn(pmd); in check_hwpoisoned_pmd_entry()
787 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) { in check_hwpoisoned_pmd_entry()
788 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT); in check_hwpoisoned_pmd_entry()
880 .pfn = pfn, in kill_accessing_process()
1598 pfn); in unmap_poisoned_folio()
2478 .pfn = pfn, in memory_failure_queue()
[all …]
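
The check_hwpoisoned_pmd_entry() hit recovers the user virtual address of a poisoned pfn inside a PMD-mapped huge page. The arithmetic stands on its own; here it is as a runnable sketch with hypothetical values (4 KiB base pages and 2 MiB huge pages assumed):

#include <stdio.h>

#define PAGE_SHIFT   12         /* assumption: 4 KiB base pages */
#define HPAGE_PMD_NR 512        /* 2 MiB huge page = 512 base pages */

int main(void)
{
        unsigned long addr     = 0x7f0000200000UL; /* hypothetical mapping base */
        unsigned long pfn      = 0x100000UL;       /* first pfn of the huge page */
        unsigned long poisoned = 0x100003UL;       /* hypothetical poisoned pfn */

        /* Same test and arithmetic as lines 787-788 above: if the poisoned
         * pfn lies in [pfn, pfn + HPAGE_PMD_NR), its virtual address is the
         * mapping base plus the page offset within the huge page. */
        if (pfn <= poisoned && poisoned < pfn + HPAGE_PMD_NR)
                printf("vaddr = %#lx\n",
                       addr + ((poisoned - pfn) << PAGE_SHIFT));
        return 0;
}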

memory_hotplug.c
419 for (; pfn < end_pfn; pfn += cur_nr_pages) { in __add_pages()
422 SECTION_ALIGN_UP(pfn + 1) - pfn); in __add_pages()
510 if (pfn) in shrink_zone_span()
556 for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) { in remove_pfn_range_from_zone()
561 min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn); in remove_pfn_range_from_zone()
604 for (; pfn < end_pfn; pfn += cur_nr_pages) { in __remove_pages()
608 SECTION_ALIGN_UP(pfn + 1) - pfn); in __remove_pages()
672 for (pfn = start_pfn; pfn < end_pfn;) { in online_pages_range()
992 pfn = ALIGN_DOWN(pfn, group->d.unit_pages); in auto_movable_zone_for_pfn()
1225 undo_isolate_page_range(pfn, pfn + nr_pages); in online_pages()
[all …]
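
__add_pages(), remove_pfn_range_from_zone() and __remove_pages() all cut the pfn range into chunks that never straddle a sparse-section boundary. A hedged sketch of that clamp, assuming the kernel's SECTION_ALIGN_UP() and min() helpers:

/* Sketch: SECTION_ALIGN_UP(pfn + 1) is the next section start, so each
 * chunk ends at the range end or the section edge, whichever comes
 * first. Every chunk thus stays inside one memory section. */
for (; pfn < end_pfn; pfn += cur_nr_pages) {
        cur_nr_pages = min(end_pfn - pfn,
                           SECTION_ALIGN_UP(pfn + 1) - pfn);
        /* ... add or remove cur_nr_pages pages starting at pfn ... */
}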

sparse.c
198 - (pfn & ~PAGE_SECTION_MASK)); in subsection_map_init()
206 pfn += pfns; in subsection_map_init()
219 unsigned long pfn; in memory_present() local
223 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { in memory_present()
633 unsigned long pfn; in online_mem_sections() local
635 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in online_mem_sections()
651 unsigned long pfn; in offline_mem_sections() local
653 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in offline_mem_sections()
708 pfn, nr_pages)) in clear_subsection_map()
754 kvfree(pfn_to_page(pfn)); in depopulate_section_memmap()
[all …]

page_idle.c
125 unsigned long pfn, end_pfn; in page_idle_bitmap_read() local
131 pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_read()
132 if (pfn >= max_pfn) in page_idle_bitmap_read()
139 for (; pfn < end_pfn; pfn++) { in page_idle_bitmap_read()
140 bit = pfn % BITMAP_CHUNK_BITS; in page_idle_bitmap_read()
143 folio = page_idle_get_folio(pfn); in page_idle_bitmap_read()
170 unsigned long pfn, end_pfn; in page_idle_bitmap_write() local
176 pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_write()
177 if (pfn >= max_pfn) in page_idle_bitmap_write()
184 for (; pfn < end_pfn; pfn++) { in page_idle_bitmap_write()
[all …]
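
In the page_idle bitmap every bit stands for one pfn, so a byte offset into the file covers BITS_PER_BYTE pfns. A runnable sketch of the position math from the read/write hits (a 64-bit BITMAP_CHUNK_BITS assumed, matching one u64 chunk):

#include <stdio.h>

#define BITS_PER_BYTE     8
#define BITMAP_CHUNK_BITS 64    /* assumption: one u64 chunk per word */

int main(void)
{
        unsigned long pos = 25;                  /* byte offset into the bitmap */
        unsigned long pfn = pos * BITS_PER_BYTE; /* first pfn covered by pos */

        /* One bit per pfn: the chunk-relative bit index selects the bit
         * inside the current 64-bit word, as in page_idle_bitmap_read(). */
        printf("pos %lu -> pfn %lu (bit %lu of its chunk)\n",
               pos, pfn, pfn % BITMAP_CHUNK_BITS);
        return 0;
}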

page_owner.c
436 pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES); in pagetypeinfo_showmixedcount_print()
445 for (; pfn < block_end_pfn; pfn++) { in pagetypeinfo_showmixedcount_print()
560 pfn, in print_page_owner()
663 pfn = *ppos; in read_page_owner()
665 while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) in read_page_owner()
666 pfn++; in read_page_owner()
669 for (; pfn < max_pfn; pfn++) { in read_page_owner()
682 if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) { in read_page_owner()
732 *ppos = pfn + 1; in read_page_owner()
775 pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES); in init_pages_in_zone()
[all …]

mm_init.c
622 if (state->last_start <= pfn && pfn < state->last_end) in __early_pfn_to_nid()
685 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); in __init_page_from_nid()
787 unsigned long pfn; in reserve_bootmem_region() local
852 unsigned long pfn; in init_unavailable_range() local
856 __init_single_page(pfn_to_page(pfn), pfn, zone, node); in init_unavailable_range()
905 for (pfn = start_pfn; pfn < end_pfn; ) { in memmap_init_range()
919 page = pfn_to_page(pfn); in memmap_init_range()
940 pfn++; in memmap_init_range()
1095 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { in memmap_init_compound()
1138 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { in memmap_init_zone_device()
[all …]

compaction.c
67 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
68 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
442 pfn = pageblock_end_pfn(pfn); in update_cached_migrate()
710 for (; pfn < end_pfn; pfn += isolated, in isolate_freepages_range()
757 return pfn; in isolate_freepages_range()
1333 for (; pfn < end_pfn; pfn = block_end_pfn, in isolate_migratepages_range()
1578 if (pfn >= min_pfn && pfn > high_pfn) { in fast_isolate_freepages()
1935 return pfn; in fast_find_migrateblock()
1942 return pfn; in fast_find_migrateblock()
1949 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) in fast_find_migrateblock()
[all …]
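
Lines 67-68 define the pageblock boundary helpers the rest of compaction.c leans on; they are plain power-of-two arithmetic. A runnable sketch (pageblock order 9, i.e. 2 MiB blocks of 4 KiB pages, assumed for the example):

#include <stdio.h>

#define round_down(x, a) ((x) & ~((unsigned long)(a) - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Same definitions as compaction.c lines 67-68 above. */
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)   ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
        unsigned long pfn = 1234567;

        /* block_end_pfn() aligns (pfn) + 1, so a pfn sitting exactly on a
         * boundary still reports the block it starts, not the one before. */
        printf("pageblock of pfn %lu: [%lu, %lu)\n", pfn,
               block_start_pfn(pfn, 9), block_end_pfn(pfn, 9));
        return 0;
}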

cma.c
166 for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count; in cma_activate_area()
192 for (pfn = early_pfn[r]; pfn < end_pfn; pfn++) in cma_activate_area()
784 unsigned long pfn = -1; in cma_range_alloc() local
826 ret = alloc_contig_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp); in cma_range_alloc()
838 __func__, pfn, pfn_to_page(pfn)); in cma_range_alloc()
840 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), in cma_range_alloc()
939 unsigned long pfn, end; in cma_pages_valid() local
947 pfn = page_to_pfn(pages); in cma_pages_valid()
953 if (pfn >= cmr->base_pfn && pfn < end) { in cma_pages_valid()
988 pfn = page_to_pfn(pages); in cma_release()
[all …]

page_vma_mapped.c
106 unsigned long pfn; in check_pte() local
118 pfn = swp_offset_pfn(entry); in check_pte()
128 pfn = swp_offset_pfn(entry); in check_pte()
133 pfn = pte_pfn(ptent); in check_pte()
136 if ((pfn + pte_nr - 1) < pvmw->pfn) in check_pte()
138 if (pfn > (pvmw->pfn + pvmw->nr_pages - 1)) in check_pte()
144 static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw) in check_pmd() argument
146 if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn) in check_pmd()
148 if (pfn > pvmw->pfn + pvmw->nr_pages - 1) in check_pmd()
348 .pfn = page_to_pfn(page), in page_mapped_in_vma()
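
check_pte() and check_pmd() end in the same interval test: does the mapped pfn range intersect the range the walk is looking for? A runnable restatement of those two comparisons:

#include <stdbool.h>
#include <stdio.h>

/* The comparisons from check_pte()/check_pmd() above: two ranges are
 * disjoint iff one ends before the other starts. Using the last element
 * (+ nr - 1) rather than one-past-the-end avoids wrapping at the top of
 * the pfn space. */
static bool pfn_ranges_overlap(unsigned long pfn, unsigned long nr,
                               unsigned long tpfn, unsigned long tnr)
{
        if ((pfn + nr - 1) < tpfn)
                return false;
        if (pfn > (tpfn + tnr - 1))
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", pfn_ranges_overlap(100, 512, 300, 4)); /* 1: overlap */
        printf("%d\n", pfn_ranges_overlap(100, 512, 700, 4)); /* 0: disjoint */
        return 0;
}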

page_alloc.c
351 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); in pfn_to_bitidx()
942 unsigned long pfn, in __free_one_page() argument
1514 if (pfn == end) in split_large_buddy()
1945 for (pfn = start_pfn; pfn < end_pfn;) { in __move_freepages_block()
1948 pfn++; in __move_freepages_block()
1994 for (pfn = start; pfn < end;) { in prep_move_freepages_block()
2000 pfn += nr; in prep_move_freepages_block()
2010 pfn++; in prep_move_freepages_block()
2052 return pfn; in find_large_buddy()
7166 pfn++; in __offline_isolated_pages()
[all …]

sparse-vmemmap.c
119 unsigned long pfn, nr_pfns, nr_align; in altmap_alloc_block_buf() local
127 pfn = vmem_altmap_next_pfn(altmap); in altmap_alloc_block_buf()
130 nr_align = ALIGN(pfn, nr_align) - pfn; in altmap_alloc_block_buf()
136 pfn += nr_align; in altmap_alloc_block_buf()
140 return __va(__pfn_to_phys(pfn)); in altmap_alloc_block_buf()
146 unsigned long pfn = pte_pfn(ptep_get(pte)); in vmemmap_verify() local
147 int actual_node = early_pfn_to_nid(pfn); in vmemmap_verify()
322 unsigned long maddr, pfn; in vmemmap_undo_hvo() local
348 pfn = pte_pfn(ptep_get(pte)); in vmemmap_undo_hvo()
350 memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE); in vmemmap_undo_hvo()
[all …]

bootmem_info.c
107 unsigned long i, pfn, end_pfn, nr_pages; in register_page_bootmem_info_node() local
117 pfn = pgdat->node_start_pfn; in register_page_bootmem_info_node()
121 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in register_page_bootmem_info_node()
128 if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node)) in register_page_bootmem_info_node()
129 register_page_bootmem_info_section(pfn); in register_page_bootmem_info_node()

hwpoison-inject.c
16 unsigned long pfn = val; in hwpoison_inject() local
24 if (!pfn_valid(pfn)) in hwpoison_inject()
27 p = pfn_to_page(pfn); in hwpoison_inject()
51 pr_info("Injecting memory failure at pfn %#lx\n", pfn); in hwpoison_inject()
52 err = memory_failure(pfn, MF_SW_SIMULATED); in hwpoison_inject()

memremap.c
51 unsigned long pfn = PHYS_PFN(range->start); in pfn_first() local
54 return pfn; in pfn_first()
55 return pfn + vmem_altmap_offset(pgmap_altmap(pgmap)); in pfn_first()
58 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) in pgmap_pfn_valid() argument
65 if (pfn >= PHYS_PFN(range->start) && in pgmap_pfn_valid()
66 pfn <= PHYS_PFN(range->end)) in pgmap_pfn_valid()
67 return pfn >= pfn_first(pgmap, i); in pgmap_pfn_valid()
402 struct dev_pagemap *get_dev_pagemap(unsigned long pfn, in get_dev_pagemap() argument
405 resource_size_t phys = PFN_PHYS(pfn); in get_dev_pagemap()
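
pfn_first() and get_dev_pagemap() lean on the PHYS_PFN()/PFN_PHYS() pair, which are simply inverse shifts by PAGE_SHIFT. A runnable sketch (4 KiB pages assumed; the address is hypothetical):

#include <stdio.h>

#define PAGE_SHIFT  12  /* assumption: 4 KiB pages */
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#define PFN_PHYS(x) ((unsigned long long)(x) << PAGE_SHIFT)

int main(void)
{
        unsigned long long phys = 0x100003000ULL; /* hypothetical phys addr */
        unsigned long pfn = PHYS_PFN(phys);

        /* A pfn is the physical address with the in-page offset shifted
         * away; round-tripping a page-aligned address is lossless. */
        printf("phys %#llx -> pfn %#lx -> phys %#llx\n",
               phys, pfn, PFN_PHYS(pfn));
        return 0;
}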

hmm.c
203 unsigned long pfn, npages, i; in hmm_vma_handle_pmd() local
214 pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in hmm_vma_handle_pmd()
215 for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) { in hmm_vma_handle_pmd()
217 hmm_pfns[i] |= pfn | cpu_flags; in hmm_vma_handle_pmd()
441 unsigned long i, npages, pfn; in hmm_vma_walk_pud() local
459 for (i = 0; i < npages; ++i, ++pfn) { in hmm_vma_walk_pud()
461 hmm_pfns[i] |= pfn | cpu_flags; in hmm_vma_walk_pud()
482 unsigned long addr = start, i, pfn; in hmm_vma_walk_hugetlb_entry() local
519 pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT); in hmm_vma_walk_hugetlb_entry()
520 for (; addr < end; addr += PAGE_SIZE, i++, pfn++) { in hmm_vma_walk_hugetlb_entry()
[all …]
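
The hmm_vma_handle_pmd() hit at line 214 works out which base pfn of a huge PMD mapping a given address falls on. The same arithmetic as a runnable sketch (x86-64-style 2 MiB PMDs assumed; the input values are hypothetical):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21                   /* assumption: 2 MiB PMD mappings */
#define PMD_MASK   (~((1UL << PMD_SHIFT) - 1))

int main(void)
{
        unsigned long first_pfn = 0x200000UL;       /* pmd_pfn() of the entry */
        unsigned long addr      = 0x7f0000212000UL; /* hypothetical address */

        /* addr & ~PMD_MASK is the byte offset into the 2 MiB mapping;
         * shifting right by PAGE_SHIFT turns it into a page index, which
         * is added to the mapping's first pfn, as line 214 above does. */
        unsigned long pfn = first_pfn + ((addr & ~PMD_MASK) >> PAGE_SHIFT);

        printf("pfn = %#lx\n", pfn);    /* prints 0x200012 */
        return 0;
}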

migrate_device.c
117 unsigned long mpfn = 0, pfn; in migrate_vma_collect_pmd() local
155 pfn = pte_pfn(pte); in migrate_vma_collect_pmd()
156 if (is_zero_pfn(pfn) && in migrate_vma_collect_pmd()
174 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; in migrate_vma_collect_pmd()
664 unsigned long pfn = pte_pfn(orig_pte); in migrate_vma_insert_page() local
666 if (!is_zero_pfn(pfn)) in migrate_vma_insert_page()
912 folio = folio_get_nontail_page(pfn_to_page(pfn)); in migrate_device_pfn_lock()
921 return migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; in migrate_device_pfn_lock()
946 unsigned long i, pfn; in migrate_device_range() local
948 for (pfn = start, i = 0; i < npages; pfn++, i++) in migrate_device_range()
[all …]

memory.c
610 if (is_zero_pfn(pfn)) in vm_normal_page()
621 if (!pfn_valid(pfn)) in vm_normal_page()
636 if (is_zero_pfn(pfn)) in vm_normal_page()
676 if (!pfn_valid(pfn)) in vm_normal_page_pmd()
2573 unsigned long pfn) in vmf_insert_pfn() argument
2588 if (is_zero_pfn(pfn)) in vm_mixed_ok()
2660 unsigned long pfn) in vmf_insert_mixed() argument
2701 pfn++; in remap_pte_range()
2808 vma->vm_pgoff = pfn; in remap_pfn_range_internal()
2864 ctx->pfn = pfn; in pfnmap_track_ctx_alloc()
[all …]

page_table_check.c
63 static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt) in page_table_check_clear() argument
70 if (!pfn_valid(pfn)) in page_table_check_clear()
73 page = pfn_to_page(pfn); in page_table_check_clear()
97 static void page_table_check_set(unsigned long pfn, unsigned long pgcnt, in page_table_check_set() argument
105 if (!pfn_valid(pfn)) in page_table_check_set()
108 page = pfn_to_page(pfn); in page_table_check_set()

rmap.c
1141 unsigned long pfn; member
1150 .pfn = state->pfn, in mapping_wrprotect_range_one()
1188 unsigned long pfn, unsigned long nr_pages) in mapping_wrprotect_range() argument
1193 .pfn = pfn, in mapping_wrprotect_range()
1228 .pfn = pfn, in pfn_mkclean_range()
1888 unsigned long pfn; in try_to_unmap_one() local
1970 pfn = pte_pfn(pteval); in try_to_unmap_one()
1972 pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); in try_to_unmap_one()
2290 unsigned long pfn; in try_to_migrate_one() local
2360 pfn = pte_pfn(pteval); in try_to_migrate_one()
[all …]

hugetlb_vmemmap.c
791 unsigned long ns, i, pnum, pfn, nr_pages; in hugetlb_vmemmap_init_early() local
816 pfn = PHYS_PFN(paddr); in hugetlb_vmemmap_init_early()
817 map = pfn_to_page(pfn); in hugetlb_vmemmap_init_early()
827 pnum = pfn_to_section_nr(pfn); in hugetlb_vmemmap_init_early()
845 unsigned long pfn, nr_mmap; in hugetlb_vmemmap_init_late() local
861 pfn = PHYS_PFN(phys); in hugetlb_vmemmap_init_late()
871 map = pfn_to_page(pfn); in hugetlb_vmemmap_init_late()

debug.c
71 unsigned long pfn, unsigned long idx) in __dump_folio() argument
82 folio->index + idx, pfn); in __dump_folio()
116 is_migrate_cma_folio(folio, pfn) ? " CMA" : ""); in __dump_folio()
138 __dump_folio(&ps.folio_snapshot, &ps.page_snapshot, ps.pfn, ps.idx); in __dump_page()

internal.h
707 unsigned long pfn, unsigned int order, unsigned long *buddy_pfn) in find_buddy_page_pfn() argument
709 unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order); in find_buddy_page_pfn()
712 buddy = page + (__buddy_pfn - pfn); in find_buddy_page_pfn()
745 extern void memblock_free_pages(struct page *page, unsigned long pfn,
1154 void init_deferred_page(unsigned long pfn, int nid);
1228 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
1251 static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) in unmap_poisoned_folio() argument
1561 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1563 void __meminit __init_page_from_nid(unsigned long pfn, int nid);
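
find_buddy_page_pfn() at line 707 converts a buddy pfn back into a struct page by pointer offset (line 712). The __find_buddy_pfn() it calls is the buddy allocator's one-line invariant: blocks of size 2^order that are buddies differ only in bit 'order' of their pfn. A runnable sketch:

#include <stdio.h>

/* Buddies at a given order differ only in that order's bit, so a single
 * XOR maps each block to its partner and back again. */
static unsigned long __find_buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);
}

int main(void)
{
        unsigned long pfn = 0x1000;
        unsigned long buddy = __find_buddy_pfn(pfn, 3);

        printf("buddy of %#lx at order 3: %#lx\n", pfn, buddy);
        printf("and back again:           %#lx\n",
               __find_buddy_pfn(buddy, 3));
        return 0;
}
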
/mm/damon/

ops-common.c
26 struct folio *damon_get_folio(unsigned long pfn) in damon_get_folio() argument
28 struct page *page = pfn_to_online_page(pfn); in damon_get_folio()
49 unsigned long pfn; in damon_ptep_mkold() local
52 pfn = pte_pfn(pteval); in damon_ptep_mkold()
54 pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); in damon_ptep_mkold()
56 folio = damon_get_folio(pfn); in damon_ptep_mkold()
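
damon_get_folio() starts from pfn_to_online_page() rather than a bare pfn_to_page(). A hedged sketch of why that matters when the pfn comes straight out of a PTE:

/* Sketch: pfn_to_online_page() returns NULL when the pfn is invalid or
 * its memory section is offline, so an arbitrary pfn taken from a page
 * table can be probed without a separate pfn_valid() check. */
struct page *page = pfn_to_online_page(pfn);

if (!page)
        return NULL;
/* ... take a reference on the page's folio and return it ... */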
