/mm/
memremap.c
      83  pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;      in pfn_len()
     120  percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));      in memunmap_pages()
     270  .pgmap = pgmap,                                           in memremap_pages()
     285  if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {         in memremap_pages()
     381  pgmap);                                                   in devm_memremap_pages()
     410  if (pgmap) {                                              in get_dev_pagemap()
     419  if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))    in get_dev_pagemap()
     423  return pgmap;                                             in get_dev_pagemap()
     429  struct dev_pagemap *pgmap = folio->pgmap;                 in free_zone_device_folio() local
     468  if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))  in free_zone_device_folio()
    [all …]
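These hits trace the dev_pagemap lifecycle: memremap_pages() rejects a MEMORY_DEVICE_PRIVATE mapping that lacks a migrate_to_ram op (line 285), and get_dev_pagemap() takes a percpu reference (line 419). A minimal registration sketch follows; the my_* names and the caller-supplied resource are hypothetical, not taken from this file:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/mm.h>

static void my_page_free(struct page *page)
{
	/* Return the page to the driver's private allocator; hypothetical. */
}

/* Defined in the memory.c sketch at the end of this listing. */
static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf);

static const struct dev_pagemap_ops my_pagemap_ops = {
	.page_free	= my_page_free,
	.migrate_to_ram	= my_migrate_to_ram,	/* checked at line 285 above */
};

static int my_register_devmem(struct device *dev, struct resource *res)
{
	struct dev_pagemap *pgmap;
	void *addr;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->nr_range = 1;
	pgmap->ops = &my_pagemap_ops;
	pgmap->owner = dev;	/* later matched against migrate->pgmap_owner */

	/* Allocates and initializes struct pages for the whole range. */
	addr = devm_memremap_pages(dev, pgmap);
	return PTR_ERR_OR_ZERO(addr);
}
```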
|
sparse-vmemmap.c
     480  struct dev_pagemap *pgmap)                                in reuse_compound_section() argument
     482  unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);         in reuse_compound_section()
     484  PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);           in reuse_compound_section()
     509  struct dev_pagemap *pgmap)                                in vmemmap_populate_compound_pages() argument
     515  if (reuse_compound_section(start_pfn, pgmap)) {           in vmemmap_populate_compound_pages()
     529  size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));  in vmemmap_populate_compound_pages()
     563  struct dev_pagemap *pgmap)                                in __populate_section_memmap() argument
     573  if (vmemmap_can_optimize(altmap, pgmap))                  in __populate_section_memmap()
     574  r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);  in __populate_section_memmap()
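pgmap_vmemmap_nr(pgmap) is 1UL << pgmap->vmemmap_shift, so a nonzero vmemmap_shift lets vmemmap_populate_compound_pages() back the tail struct pages of each compound device folio with shared vmemmap pages. A sketch of the opt-in, modeled on what device-dax does in-tree; my_enable_compound_folios is a hypothetical name:

```c
#include <linux/log2.h>
#include <linux/memremap.h>
#include <linux/sizes.h>

static void my_enable_compound_folios(struct dev_pagemap *pgmap)
{
	/* 2 MiB device folios: shift 9 with 4 KiB base pages, so each
	 * folio spans pgmap_vmemmap_nr() == 512 pages. */
	pgmap->vmemmap_shift = order_base_2(SZ_2M >> PAGE_SHIFT);
}
```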
|
sparse.c
     419  struct dev_pagemap *pgmap)                                in __populate_section_memmap() argument
     672  struct dev_pagemap *pgmap)                                in populate_section_memmap() argument
     674  return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);  in populate_section_memmap()
     745  struct dev_pagemap *pgmap)                                in populate_section_memmap() argument
     870  struct dev_pagemap *pgmap)                                in section_activate() argument
     902  memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);  in section_activate()
     932  struct dev_pagemap *pgmap)                                in sparse_add_section() argument
     943  memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);  in sparse_add_section()
|
mm_init.c
    1009  struct dev_pagemap *pgmap)                                in __init_zone_device_page() argument
    1028  page_folio(page)->pgmap = pgmap;                          in __init_zone_device_page()
    1055  switch (pgmap->type) {                                    in __init_zone_device_page()
    1077  struct dev_pagemap *pgmap)                                in compound_nr_pages() argument
    1079  if (!vmemmap_can_optimize(altmap, pgmap))                 in compound_nr_pages()
    1080  return pgmap_vmemmap_nr(pgmap);                           in compound_nr_pages()
    1088  struct dev_pagemap *pgmap,                                in memmap_init_compound() argument
    1092  unsigned int order = pgmap->vmemmap_shift;                in memmap_init_compound()
    1115  struct dev_pagemap *pgmap)                                in memmap_init_zone_device() argument
    1119  struct vmem_altmap *altmap = pgmap_altmap(pgmap);         in memmap_init_zone_device()
    [all …]
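__init_zone_device_page() stores the pgmap pointer in every folio (line 1028) and dispatches on pgmap->type (line 1055), so any ZONE_DEVICE page can later be mapped back to its pgmap. A sketch of that reverse lookup; my_page_is_device_private is a hypothetical helper:

```c
#include <linux/memremap.h>
#include <linux/mm.h>

/* page_pgmap() reads back the pointer stored at line 1028 above. */
static bool my_page_is_device_private(struct page *page)
{
	return is_zone_device_page(page) &&
	       page_pgmap(page)->type == MEMORY_DEVICE_PRIVATE;
}
```

In-tree code wraps the same test as is_device_private_page() in include/linux/memremap.h.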
|
memory-failure.c
    1768  struct dev_pagemap *pgmap)                                in mf_generic_kill_procs() argument
    1791  switch (pgmap->type) {                                    in mf_generic_kill_procs()
    2164  struct dev_pagemap *pgmap)                                in memory_failure_dev_pagemap() argument
    2169  if (!pgmap_pfn_valid(pgmap, pfn))                         in memory_failure_dev_pagemap()
    2176  if (pgmap_has_memory_failure(pgmap)) {                    in memory_failure_dev_pagemap()
    2177  rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);    in memory_failure_dev_pagemap()
    2189  put_dev_pagemap(pgmap);                                   in memory_failure_dev_pagemap()
    2240  struct dev_pagemap *pgmap;                                in memory_failure() local
    2261  pgmap = get_dev_pagemap(pfn, NULL);                       in memory_failure()
    2263  if (pgmap) {                                              in memory_failure()
    [all …]
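When a device-memory pfn is poisoned, memory_failure_dev_pagemap() prefers the driver's ->memory_failure() op (line 2177) and only falls back to the generic mf_generic_kill_procs() path when the op is absent or returns -EOPNOTSUPP. A sketch of such an op; struct my_device and the notification it performs are hypothetical:

```c
#include <linux/device.h>
#include <linux/memremap.h>

struct my_device {			/* hypothetical container */
	struct device *dev;
	struct dev_pagemap pgmap;
};

/* Invoked at line 2177 above; nr_pages is 1 there. */
static int my_pgmap_memory_failure(struct dev_pagemap *pgmap,
				   unsigned long pfn, unsigned long nr_pages,
				   int mf_flags)
{
	struct my_device *mydev = container_of(pgmap, struct my_device, pgmap);

	/* Translate the poisoned pfn range to a device offset and notify
	 * whoever holds this memory (filesystem, namespace, ...). */
	dev_warn(mydev->dev, "poison at pfn %#lx, %lu page(s)\n",
		 pfn, nr_pages);
	return 0;	/* handled; -EOPNOTSUPP would request the generic path */
}
```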
|
migrate_device.c
     116  struct dev_pagemap *pgmap;                                in migrate_vma_collect_pmd() local
     144  pgmap = page_pgmap(page);                                 in migrate_vma_collect_pmd()
     147  pgmap->owner != migrate->pgmap_owner)                     in migrate_vma_collect_pmd()
     167  pgmap = page_pgmap(page);                                 in migrate_vma_collect_pmd()
     171  pgmap->owner != migrate->pgmap_owner)                     in migrate_vma_collect_pmd()
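migrate_vma_collect_pmd() skips any device page whose pgmap->owner differs from migrate->pgmap_owner (lines 147 and 171), so a driver only ever collects its own pages. A single-page caller sketch; my_collect_one and the elided copy step are hypothetical:

```c
#include <linux/migrate.h>
#include <linux/mm.h>

static int my_collect_one(struct vm_area_struct *vma, unsigned long addr,
			  void *owner)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= owner,	/* compared at lines 147/171 */
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;
	/* ...allocate a destination page, copy the data, and store its
	 * migrate_pfn() in dst before committing: */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}
```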
|
gup.c
      32  struct dev_pagemap *pgmap;                                member
     817  struct dev_pagemap **pgmap)                               in follow_page_pte() argument
     929  return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);  in follow_pmd_mask()
     942  return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);  in follow_pmd_mask()
     949  follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);   in follow_pmd_mask()
    1527  if (ctx.pgmap)                                            in __get_user_pages()
    1528  put_dev_pagemap(ctx.pgmap);                               in __get_user_pages()
    2856  struct dev_pagemap *pgmap = NULL;                         in gup_fast_pte_range()  local
    2929  if (pgmap)                                                in gup_fast_pte_range()
    2930  put_dev_pagemap(pgmap);                                   in gup_fast_pte_range()
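gup caches the most recent pgmap (the struct member at line 32, or the local in gup_fast_pte_range()) and drops a single reference at the end (lines 1528 and 2930), because get_dev_pagemap() reuses a still-matching pgmap instead of repeating the slow lookup. A sketch of that pattern; my_walk_pfns is a hypothetical walker:

```c
#include <linux/memremap.h>
#include <linux/mm.h>

static void my_walk_pfns(unsigned long start, unsigned long end)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn++) {
		/* Passing the previous pgmap back in short-circuits the
		 * lookup while pfn stays within the same range. */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			break;	/* not ZONE_DEVICE, or the pgmap is dying */
		/* ... use pfn_to_page(pfn) ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}
```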
|
memory_hotplug.c
     349  struct dev_pagemap *pgmap;                                in pfn_to_online_page() local
     378  pgmap = get_dev_pagemap(pfn, NULL);                       in pfn_to_online_page()
     379  put_dev_pagemap(pgmap);                                   in pfn_to_online_page()
     382  if (pgmap)                                                in pfn_to_online_page()
     424  params->pgmap);                                           in __add_pages()
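pfn_to_online_page() consults get_dev_pagemap() (lines 378-382) so that pfns backed by a dev_pagemap are reported as not online: ZONE_DEVICE memory has a memmap but is never onlined. A caller-side sketch; my_scan_pfn is a hypothetical walker:

```c
#include <linux/memory_hotplug.h>
#include <linux/mm.h>

static void my_scan_pfn(unsigned long pfn)
{
	/* NULL for holes, offline sections, and ZONE_DEVICE pfns. */
	struct page *page = pfn_to_online_page(pfn);

	if (!page)
		return;		/* skip: not safe to treat as ordinary RAM */
	/* ... safe to inspect the page here ... */
}
```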
|
memory.c
    4508  struct dev_pagemap *pgmap;                                in do_swap_page() local
    4512  pgmap = page_pgmap(vmf->page);                            in do_swap_page()
    4513  ret = pgmap->ops->migrate_to_ram(vmf);                    in do_swap_page()
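When a CPU touches a device-private PTE, do_swap_page() hands the fault to the owning driver via the migrate_to_ram op (line 4513). A skeleton of such a callback, modeled loosely on in-tree users like lib/test_hmm.c; it completes the my_migrate_to_ram declared in the memremap.c sketch above, with the copy step elided:

```c
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= page_pgmap(vmf->page)->owner,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.fault_page	= vmf->page,
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;
	/* ...allocate a system RAM page, copy the device data into it,
	 * and store migrate_pfn(page_to_pfn(newpage)) in dst... */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}
```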
|