/mm/  (cross-references for the identifier "nid")
hugetlb_cma.c
      23  int nid = folio_nid(folio);  in hugetlb_cma_free_folio()  (local)
      36  if (hugetlb_cma[nid])  in hugetlb_cma_alloc_folio()
      61  int node = *nid;  in hugetlb_cma_alloc_bootmem()
      63  cma = hugetlb_cma[*nid];  in hugetlb_cma_alloc_bootmem()
      75  *nid = node;  in hugetlb_cma_alloc_bootmem()
      94  int nid, count = 0;  in cmdline_parse_hugetlb_cma()  (local)
     142  int nid;  in hugetlb_cma_reserve()  (local)
     158  for (nid = 0; nid < MAX_NUMNODES; nid++) {  in hugetlb_cma_reserve()
     225  &hugetlb_cma[nid], nid);  in hugetlb_cma_reserve()
     228  res, nid);  in hugetlb_cma_reserve()
     [all …]
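The pattern these references outline: one CMA area per node (hugetlb_cma[nid]), consulted for the folio's own node first and only then for any other allowed node. A minimal kernel-style sketch of that lookup, assuming the per-node array shown above (the helper name is hypothetical; this is not the in-tree code):

    #include <linux/cma.h>
    #include <linux/nodemask.h>

    static struct cma *hugetlb_cma[MAX_NUMNODES];

    static struct cma *cma_for_node(int nid, nodemask_t *allowed)
    {
            int node;

            /* preferred node first, as in "if (hugetlb_cma[nid])" above */
            if (nid >= 0 && nid < MAX_NUMNODES && hugetlb_cma[nid])
                    return hugetlb_cma[nid];

            /* otherwise fall back to any allowed node with a CMA area */
            for_each_node_mask(node, *allowed)
                    if (hugetlb_cma[node])
                            return hugetlb_cma[node];

            return NULL;
    }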
|
mm_init.c
      62  int nid;  in mminit_verify_zonelist()  (local)
     318  int i, nid;  in early_calculate_totalpages()  (local)
     359  int i, nid;  in find_zone_movable_pfns_for_nodes()  (local)
     383  zone_movable_pfn[nid] = zone_movable_pfn[nid] ?  in find_zone_movable_pfns_for_nodes()
     420  zone_movable_pfn[nid] = zone_movable_pfn[nid] ?  in find_zone_movable_pfns_for_nodes()
     620  int nid;  in __early_pfn_to_nid()  (local)
     632  return nid;  in __early_pfn_to_nid()
     638  int nid;  in early_pfn_to_nid()  (local)
     642  if (nid < 0)  in early_pfn_to_nid()
     646  return nid;  in early_pfn_to_nid()
     [all …]
|
sparse.c
      73  nid);  in sparse_index_alloc()
     247  int i, nid;  in memblocks_present()  (local)
     326  int nid;  in sparse_early_usemaps_alloc_pgdat_section()  (local)
     377  if (usemap_nid != nid) {  in check_usemap_section_nr()
     379  nid, usemap_snr);  in check_usemap_section_nr()
     389  usemap_snr, pgdat_snr, nid);  in check_usemap_section_nr()
     516  NODE_DATA(nid), size);  in sparse_usage_init()
     561  nid, NULL, NULL);  in sparse_init_nid()
     564  __func__, nid);  in sparse_init_nid()
     613  if (nid == nid_begin) {  in sparse_init()
     [all …]
|
shrinker.c
      66  int nid;  in free_shrinker_info()  (local)
      68  for_each_node(nid) {  in free_shrinker_info()
      79  int nid, ret = 0;  in alloc_shrinker_info()  (local)
     107  int nid)  in shrinker_info_protected()  (argument)
     118  int nid;  in expand_one_shrinker_info()  (local)
     339  int nid = sc->nid;  in xchg_nr_deferred()  (local)
     342  nid = 0;  in xchg_nr_deferred()
     356  int nid = sc->nid;  in add_nr_deferred()  (local)
     359  nid = 0;  in add_nr_deferred()
     530  .nid = nid,  in shrink_slab_memcg()
     [all …]
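The xchg_nr_deferred()/add_nr_deferred() lines show the indexing convention for deferred shrinker work: per-node slots exist only for SHRINKER_NUMA_AWARE shrinkers, and everything else collapses onto slot 0. A hedged sketch of that convention (the helper name is mine, not the kernel's):

    #include <linux/shrinker.h>

    static int deferred_slot(struct shrinker *shrinker, struct shrink_control *sc)
    {
            int nid = sc->nid;

            /* non-NUMA-aware shrinkers share a single counter at slot 0 */
            if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
                    nid = 0;
            return nid;
    }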
|
list_lru.c
      58  return mlru ? &mlru->node[nid] : NULL;  in list_lru_from_memcg_idx()
      60  return &lru->node[nid].lru;  in list_lru_from_memcg_idx()
     134  return &lru->node[nid].lru;  in list_lru_from_memcg_idx()
     186  int nid = page_to_nid(virt_to_page(item));  in list_lru_add_obj()  (local)
     193  ret = list_lru_add(lru, item, nid, NULL);  in list_lru_add_obj()
     223  int nid = page_to_nid(virt_to_page(item));  in list_lru_del_obj()  (local)
     253  int nid, struct mem_cgroup *memcg)  in list_lru_count_one()  (argument)
     274  nlru = &lru->node[nid];  in list_lru_count_node()
     407  int nid;  in memcg_init_list_lru_one()  (local)
     414  for_each_node(nid)  in memcg_init_list_lru_one()
     [all …]
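list_lru_add_obj() derives the object's home node from its backing page, so the entry lands on the per-node list matching where the memory physically lives. A sketch of that derivation, mirroring lines 186 and 193 above (the wrapper name is hypothetical):

    #include <linux/list_lru.h>
    #include <linux/mm.h>

    static bool lru_add_on_home_node(struct list_lru *lru, struct list_head *item)
    {
            /* the object belongs to whichever node backs its page */
            int nid = page_to_nid(virt_to_page(item));

            return list_lru_add(lru, item, nid, NULL);
    }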
|
memory_hotplug.c
    1161  .nid = NUMA_NO_NODE,  in online_pages()
    1186  node_arg.nid = nid;  in online_pages()
    1238  kswapd_run(nid);  in online_pages()
    1239  kcompactd_run(nid);  in online_pages()
    1302  if (node_online(nid))  in __try_online_node()
    1522  nid = group->nid;  in add_memory_resource()
    1909  .nid = NUMA_NO_NODE,  in offline_pages()
    1975  node_arg.nid = node;  in offline_pages()
    2104  int *nid = arg;  in check_memblock_offlined_cb()  (local)
    2106  *nid = mem->nid;  in check_memblock_offlined_cb()
     [all …]
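The kswapd_run()/kcompactd_run() calls in online_pages() reflect the rule that a node's reclaim and compaction daemons are started once the node has memory to manage. A sketch of that pairing (the helper is hypothetical; the real onlining path does considerably more):

    #include <linux/swap.h>         /* kswapd_run() */
    #include <linux/compaction.h>   /* kcompactd_run() */

    static void node_daemons_start(int nid)
    {
            kswapd_run(nid);        /* per-node reclaim thread */
            kcompactd_run(nid);     /* per-node compaction thread */
    }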
|
numa_memblks.c
     141  if (start > end || nid < 0 || nid >= MAX_NUMNODES) {  in numa_add_memblk_to()
     143  nid, start, end - 1);  in numa_add_memblk_to()
     154  mi->blk[mi->nr_blks].nid = nid;  in numa_add_memblk_to()
     281  if (bi->nid != bj->nid) {  in numa_cleanup_meminfo()
     297  if (bi->nid != bj->nid)  in numa_cleanup_meminfo()
     304  if (bi->nid == bk->nid)  in numa_cleanup_meminfo()
     564  return mi->blk[i].nid;  in meminfo_to_nid()
     576  if (nid != NUMA_NO_NODE)  in phys_to_target_node()
     577  return nid;  in phys_to_target_node()
     588  nid = numa_meminfo.blk[0].nid;  in memory_add_physaddr_to_nid()
     [all …]
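numa_add_memblk_to() (line 141) validates a block before recording it: the range must be non-empty and the node id must be in bounds. A standalone sketch of that check (the function name is mine):

    #include <linux/numa.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    static bool numa_block_args_ok(u64 start, u64 end, int nid)
    {
            if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
                    pr_warn("invalid NUMA block: nid=%d [mem %#llx-%#llx]\n",
                            nid, start, end - 1);
                    return false;
            }
            return true;
    }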
|
numa_emulation.c
      30  if (mi->blk[i].nid == nid)  in emu_find_memblk_by_nid()
      64  eb->nid = nid;  in emu_setup_memblk()
      67  emu_nid_to_phys[nid] = pb->nid;  in emu_setup_memblk()
      93  int nid = 0;  in split_nodes_interleave()  (local)
     144  if (nid < big)  in split_nodes_interleave()
     223  int nid)  in split_nodes_size_interleave_uniform()  (argument)
     320  return nid;  in split_nodes_size_interleave_uniform()
     401  int nid = 0;  in numa_emulation()  (local)
     425  nid = ret;  in numa_emulation()
     549  int physnid, nid;  in numa_add_cpu()  (local)
     [all …]
|
page_ext.c
     212  MEMBLOCK_ALLOC_ACCESSIBLE, nid);  in alloc_node_page_ext()
     215  NODE_DATA(nid)->node_page_ext = base;  in alloc_node_page_ext()
     224  int nid, fail;  in page_ext_init_flatmem()  (local)
     229  for_each_online_node(nid) {  in page_ext_init_flatmem()
     230  fail = alloc_node_page_ext(nid);  in page_ext_init_flatmem()
     275  addr = vzalloc_node(size, nid);  in alloc_page_ext()
     374  int nid = pfn_to_nid(start_pfn);  in online_page_ext()  (local)
     451  int nid;  in page_ext_init()  (local)
     459  start_pfn = node_start_pfn(nid);  in page_ext_init()
     460  end_pfn = node_end_pfn(nid);  in page_ext_init()
     [all …]
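page_ext_init_flatmem() walks every online node and allocates that node's extension array, bailing out on the first failure. A hedged sketch of the loop shape (the alloc callback stands in for alloc_node_page_ext()):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/nodemask.h>

    static int __init init_each_online_node(int (*alloc_one)(int nid))
    {
            int nid, fail;

            for_each_online_node(nid) {
                    fail = alloc_one(nid);
                    if (fail)
                            return -ENOMEM; /* stop at the first failing node */
            }
            return 0;
    }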
|
mempolicy.c
     189  int nid;  in reduce_interleave_weights()  (local)
    2163  nid = next_node_in(nid, nodemask);  in weighted_interleave_nid()
    2179  int nid;  in interleave_nid()  (local)
    2187  nid = next_node(nid, nodemask);  in interleave_nid()
    2257  int nid;  in huge_node()  (local)
    2705  int nid;  in alloc_pages_bulk_mempolicy_noprof()  (local)
    3672  if (nid < 0 || nid >= nr_node_ids)  in sysfs_wi_node_delete()
    3694  for (nid = 0; nid < nr_node_ids; nid++)  in sysfs_wi_node_delete_all()
    3740  if (nid < 0 || nid >= nr_node_ids) {  in sysfs_wi_node_add()
    3760  new_attr->nid = nid;  in sysfs_wi_node_add()
     [all …]
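Both interleave paths advance a cursor through the policy's nodemask; next_node_in() wraps back to the first set node when the cursor runs off the end, which is what turns the walk into round-robin interleave. Minimal sketch:

    #include <linux/nodemask.h>

    static int interleave_next(int nid, nodemask_t allowed)
    {
            /* wraps to the first set node after the last one */
            return next_node_in(nid, allowed);
    }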
|
memblock.c
     577  int nid,  in memblock_insert_region()  (argument)
     774  int nid, i;  in memblock_validate_numa_coverage()  (local)
     966  int nid;  in memmap_init_kho_scratch_pages()  (local)
    1151  if (numa_valid_node(nid) && nid != m_nid)  in should_skip_region()
    1393  if (!numa_valid_node(nid) || nid == r_nid)  in __next_mem_pfn_range()
    1713  int nid)  in memblock_alloc_exact_nid_raw()  (argument)
    1745  int nid)  in memblock_alloc_try_nid_raw()  (argument)
    1775  int nid)  in memblock_alloc_try_nid()  (argument)
    1864  if (nid == memblock_get_region_node(r) || !numa_valid_node(nid))  in memblock_reserved_kern_size()
    2302  int nid;  in memmap_init_reserved_pages()  (local)
     [all …]
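The memblock_alloc_try_nid*() signatures above embody the boot-time convention that nid is a placement preference, not a hard requirement: if the preferred node has no suitable memory, memblock falls back to any node. Sketch of a caller, assuming only the API named in the listing plus the standard memblock limit constants:

    #include <linux/cache.h>
    #include <linux/memblock.h>

    static void * __init alloc_pref_node(phys_addr_t size, int nid)
    {
            /* "try_nid": node-local if possible, any node otherwise */
            return memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
                                          MEMBLOCK_LOW_LIMIT,
                                          MEMBLOCK_ALLOC_ACCESSIBLE, nid);
    }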
|
numa.c
      12  void __init alloc_node_data(int nid)  in alloc_node_data()  (argument)
      19  nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);  in alloc_node_data()
      22  nd_size, nid);  in alloc_node_data()
      25  pr_info("NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,  in alloc_node_data()
      28  if (tnid != nid)  in alloc_node_data()
      29  pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);  in alloc_node_data()
      31  node_data[nid] = __va(nd_pa);  in alloc_node_data()
      32  memset(NODE_DATA(nid), 0, sizeof(pg_data_t));  in alloc_node_data()
      35  void __init alloc_offline_node_data(int nid)  in alloc_offline_node_data()  (argument)
      38  node_data[nid] = memblock_alloc_or_panic(sizeof(*pgdat), SMP_CACHE_BYTES);  in alloc_offline_node_data()
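The references above cover nearly all of alloc_node_data(): place the node's pg_data_t on the node itself if possible, report where it actually landed, then publish and zero it. The reconstruction below fills the elided lines (locals, the failure path, the tnid lookup) with plausible glue and omits headers; treat it as a sketch, not the authoritative source:

    void __init alloc_node_data(int nid)
    {
            /* filled in: size and locals are reconstructed, not from the listing */
            const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
            u64 nd_pa;
            int tnid;

            /* prefer node-local placement for the node's own pg_data_t */
            nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
            if (!nd_pa)
                    panic("Cannot allocate %zu bytes for node %d data\n",
                          nd_size, nid);

            pr_info("NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
                    nd_pa, nd_pa + nd_size - 1);

            /* note when the allocation spilled onto another node */
            tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
            if (tnid != nid)
                    pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);

            node_data[nid] = __va(nd_pa);
            memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
    }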
|
shrinker_debug.c
      23  int nid;  in shrinker_count_objects()  (local)
      25  for_each_node(nid) {  in shrinker_count_objects()
      26  if (nid == 0 || (shrinker->flags & SHRINKER_NUMA_AWARE)) {  in shrinker_count_objects()
      29  .nid = nid,  in shrinker_count_objects()
      40  count_per_node[nid] = nr;  in shrinker_count_objects()
      54  int ret = 0, nid;  in shrinker_debugfs_count_show()  (local)
      74  for_each_node(nid)  in shrinker_debugfs_count_show()
      75  seq_printf(m, " %lu", count_per_node[nid]);  in shrinker_debugfs_count_show()
     114  int nid;  in shrinker_debugfs_scan_write()  (local)
     125  if (nid < 0 || nid >= nr_node_ids)  in shrinker_debugfs_scan_write()
     [all …]
|
hugetlb.c
    1372  int nid;  in dequeue_hugetlb_folio_vma()  (local)
    1412  nid = next_node_in(nid, *nodes_allowed);  in next_node_allowed()
    1421  nid = next_node_allowed(nid, nodes_allowed);  in get_valid_node_allowed()
    1434  int nid;  in hstate_next_node_to_alloc()  (local)
    3462  for (nid = start; nid < end; nid++)  in gather_bootmem_prealloc_parallel()
    3513  h->max_huge_pages_node[nid], buf, nid, i);  in hugetlb_hstate_alloc_pages_onenode()
    4518  for (nid = 0; nid < nr_node_ids; nid++) {  in kobj_to_node_hstate()
    5247  nid, h->nr_huge_pages_node[nid],  in hugetlb_report_node_meminfo()
    5248  nid, h->free_huge_pages_node[nid],  in hugetlb_report_node_meminfo()
    5249  nid, h->surplus_huge_pages_node[nid]);  in hugetlb_report_node_meminfo()
     [all …]
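next_node_allowed() (line 1412) is the core of hugetlb's node round-robin: advance the cursor through nodes_allowed and wrap at the end, so successive huge page allocations spread across the permitted nodes. Sketch of that single step (the name mirrors the listing, but the body is a simplification):

    #include <linux/nodemask.h>

    static int next_allowed(int nid, const nodemask_t *nodes_allowed)
    {
            /* wraps past the highest set node back to the lowest */
            return next_node_in(nid, *nodes_allowed);
    }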
|
ksm.c
     183  int nid;  (member)
    1146  int nid;  in remove_all_stable_nodes()  (local)
    1149  for (nid = 0; nid < ksm_nr_node_ids; nid++) {  in remove_all_stable_nodes()
    1921  DO_NUMA(page_node->nid = nid);  in stable_tree_search()
    1947  DO_NUMA(page_node->nid = nid);  in stable_tree_search()
    1965  DO_NUMA(page_node->nid = nid);  in stable_tree_search()
    2005  DO_NUMA(page_node->nid = nid);  in stable_tree_search()
    2075  DO_NUMA(stable_node_dup->nid = nid);  in stable_tree_insert()
    2170  DO_NUMA(rmap_item->nid = nid);  in unstable_tree_search_insert()
    2511  for (nid = 0; nid < ksm_nr_node_ids; nid++)  in scan_get_next_rmap_item()
     [all …]
|
cma.c
     299  cma->nid = NUMA_NO_NODE;  in cma_init_reserved_mem()
     381  phys_addr_t align, phys_addr_t limit, int nid)  in cma_alloc_mem()  (argument)
     397  nid, true);  in cma_alloc_mem()
     417  limit, nid, true);  in cma_alloc_mem()
     433  int nid)  in __cma_declare_contiguous_nid()  (argument)
     454  nid = NUMA_NO_NODE;  in __cma_declare_contiguous_nid()
     512  (*res_cma)->nid = nid;  in __cma_declare_contiguous_nid()
     546  order_per_bit, false, name, res_cma, nid);  in cma_declare_contiguous_multi()
     699  cma->nid = nid;  in cma_declare_contiguous_multi()
     736  int nid)  in cma_declare_contiguous_nid()  (argument)
     [all …]
|
memremap.c
     145  int range_id, int nid)  in pagemap_range()  (argument)
     185  if (nid < 0)  in pagemap_range()
     186  nid = numa_mem_id();  in pagemap_range()
     212  error = add_pages(nid, PHYS_PFN(range->start),  in pagemap_range()
     221  error = arch_add_memory(nid, range->start, range_len(range),  in pagemap_range()
     228  zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];  in pagemap_range()
     242  memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],  in pagemap_range()
     266  void *memremap_pages(struct dev_pagemap *pgmap, int nid)  in memremap_pages()  (argument)
     335  error = pagemap_range(pgmap, &params, i, nid);  in memremap_pages()
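pagemap_range() treats a negative nid as "no preference" and substitutes the caller's local memory node before hotplugging the device pages. The normalization on lines 185-186, as a tiny standalone helper (the name is mine):

    #include <linux/topology.h>

    static int normalize_nid(int nid)
    {
            return nid < 0 ? numa_mem_id() : nid;   /* default to local node */
    }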
|
show_mem.c
      89  void si_meminfo_node(struct sysinfo *val, int nid)  in si_meminfo_node()  (argument)
      95  pg_data_t *pgdat = NODE_DATA(nid);  in si_meminfo_node()
     108  val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);  in si_meminfo_node()
     119  static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)  in show_mem_node_skip()  (argument)
     132  return !node_isset(nid, *nodemask);  in show_mem_node_skip()
     183  int cpu, nid;  in show_free_areas()  (local)
     384  for_each_online_node(nid) {  in show_free_areas()
     385  if (show_mem_node_skip(filter, nid, nodemask))  in show_free_areas()
     387  hugetlb_show_meminfo_node(nid);  in show_free_areas()
|
hugetlb_vmemmap.c
     330  int nid = page_to_nid((struct page *)reuse);  in vmemmap_remap_free()  (local)
     341  walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);  in vmemmap_remap_free()
     391  int nid = page_to_nid((struct page *)start);  in alloc_vmemmap_page_list()  (local)
     396  page = alloc_pages_node(nid, gfp_mask, 0);  in alloc_vmemmap_page_list()
     788  void __init hugetlb_vmemmap_init_early(int nid)  in hugetlb_vmemmap_init_early()  (argument)
     809  list_for_each_entry(m, &huge_boot_pages[nid], list) {  in hugetlb_vmemmap_init_early()
     821  if (vmemmap_populate_hvo(start, end, nid,  in hugetlb_vmemmap_init_early()
     831  sparse_init_early_section(nid, map, pnum,  in hugetlb_vmemmap_init_early()
     841  void __init hugetlb_vmemmap_init_late(int nid)  in hugetlb_vmemmap_init_late()  (argument)
     864  if (!hugetlb_bootmem_page_zones_valid(nid, m)) {  in hugetlb_vmemmap_init_late()
     [all …]
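Both vmemmap_remap_free() and alloc_vmemmap_page_list() read the node of an existing vmemmap page and allocate replacements there, keeping the remapped vmemmap node-local. Sketch of that idiom (the helper name is hypothetical):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *alloc_same_node(void *vmemmap_addr, gfp_t gfp_mask)
    {
            int nid = page_to_nid(virt_to_page(vmemmap_addr));

            return alloc_pages_node(nid, gfp_mask, 0);      /* order-0, same node */
    }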
|
hugetlb_cma.h
       8  int nid, nodemask_t *nodemask);
       9  struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
      22  gfp_t gfp_mask, int nid, nodemask_t *nodemask)  in hugetlb_cma_alloc_folio()  (argument)
      28  struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,  in hugetlb_cma_alloc_bootmem()  (argument)
|
memcontrol-v1.c
     187  lru_gen_soft_reclaim(memcg, nid);  in memcg1_update_tree()
     199  mz = memcg->nodeinfo[nid];  in memcg1_update_tree()
     226  int nid;  in memcg1_remove_from_trees()  (local)
     228  for_each_node(nid) {  in memcg1_remove_from_trees()
     229  mz = memcg->nodeinfo[nid];  in memcg1_remove_from_trees()
     570  memcg1_update_tree(memcg, nid);  in memcg1_check_events()
     694  memcg1_check_events(memcg, nid);  in memcg1_uncharge_batch()
    1792  int nid;  in memcg_numa_stat_show()  (local)
    1801  for_each_node_state(nid, N_MEMORY)  in memcg_numa_stat_show()
    1802  seq_printf(m, " N%d=%lu", nid,  in memcg_numa_stat_show()
     [all …]
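memcg_numa_stat_show() emits one " N<nid>=<pages>" token per node that has memory, using for_each_node_state(nid, N_MEMORY) rather than a raw 0..MAX_NUMNODES loop so memoryless nodes are skipped. Hedged sketch of that output loop (the stat callback is a stand-in, not a kernel API):

    #include <linux/nodemask.h>
    #include <linux/seq_file.h>

    static void numa_stat_line(struct seq_file *m, unsigned long (*pages_on)(int nid))
    {
            int nid;

            for_each_node_state(nid, N_MEMORY)
                    seq_printf(m, " N%d=%lu", nid, pages_on(nid));
            seq_putc(m, '\n');
    }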
|
hugetlb_vmemmap.h
      31  void hugetlb_vmemmap_init_early(int nid);
      32  void hugetlb_vmemmap_init_late(int nid);
      80  static inline void hugetlb_vmemmap_init_early(int nid)  in hugetlb_vmemmap_init_early()  (argument)
      84  static inline void hugetlb_vmemmap_init_late(int nid)  in hugetlb_vmemmap_init_late()  (argument)
|
vmscan.c
     449  int nid;  in drop_slab()  (local)
    2245  int nid;  in reclaim_pages()  (local)
    2933  int nid;  in lru_gen_add_mm()  (local)
    2960  int nid;  in lru_gen_del_mm()  (local)
    4388  int nid;  in lru_gen_online_memcg()  (local)
    4412  int nid;  in lru_gen_offline_memcg()  (local)
    4424  int nid;  in lru_gen_release_memcg()  (local)
    5368  nid = next_memory_node(nid);  in lru_gen_seq_next()
    5545  if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))  in run_cmd()
    5741  int nid;  in lru_gen_exit_memcg()  (local)
     [all …]
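lru_gen_seq_next() steps with next_memory_node(), i.e. it visits N_MEMORY nodes only and stops when the walk returns MAX_NUMNODES. A sketch of that traversal (the visitor callback is hypothetical):

    #include <linux/nodemask.h>

    static void for_each_memory_node_call(void (*visit)(int nid))
    {
            int nid;

            /* next_memory_node() returns MAX_NUMNODES when exhausted */
            for (nid = first_memory_node; nid < MAX_NUMNODES;
                 nid = next_memory_node(nid))
                    visit(nid);
    }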
|
memory-tiers.c
     699  int nid;  in memory_tier_late_init()  (local)
     706  for_each_node_state(nid, N_MEMORY) {  in memory_tier_late_init()
     713  if (node_memory_types[nid].memtype)  in memory_tier_late_init()
     716  memtier = set_node_memory_tier(nid);  in memory_tier_late_init()
     736  int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,  in mt_set_default_dram_perf()  (argument)
     749  default_dram_perf_ref_nid = nid;  in mt_set_default_dram_perf()
     770  "DRAM node %d.\n", nid, default_dram_perf_ref_nid);  in mt_set_default_dram_perf()
     774  pr_info(" performance of DRAM node %d from %s:\n", nid, source);  in mt_set_default_dram_perf()
     880  if (clear_node_memory_tier(nn->nid))  in memtier_hotplug_callback()
     886  memtier = set_node_memory_tier(nn->nid);  in memtier_hotplug_callback()
|
/mm/damon/
ops-common.c
     307  .nid = target_nid,  in __damon_migrate_folio_list()
     378  int nid;  in damon_migrate_pages()  (local)
     392  nid = folio_nid(lru_to_folio(folio_list));  in damon_migrate_pages()
     396  if (nid == folio_nid(folio)) {  in damon_migrate_pages()
     402  NODE_DATA(nid),  in damon_migrate_pages()
     404  nid = folio_nid(lru_to_folio(folio_list));  in damon_migrate_pages()
     408  NODE_DATA(nid),  in damon_migrate_pages()
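damon_migrate_pages() appears to peel same-node runs off the head of folio_list, flushing each run to its node's pgdat before starting the next (lines 392-408). A hedged sketch of that grouping, with a hypothetical process_batch() standing in for the per-node migration call (assumed to empty the batch list it is handed):

    #include <linux/list.h>
    #include <linux/mm.h>

    static void migrate_grouped_by_node(struct list_head *folio_list,
                    void (*process_batch)(struct list_head *, pg_data_t *))
    {
            LIST_HEAD(batch);
            int nid;

            if (list_empty(folio_list))
                    return;
            nid = folio_nid(lru_to_folio(folio_list));

            while (!list_empty(folio_list)) {
                    struct folio *folio = lru_to_folio(folio_list);

                    if (nid == folio_nid(folio)) {
                            /* same node: extend the current batch */
                            list_move(&folio->lru, &batch);
                            continue;
                    }
                    /* node changed: flush the batch, start a new one */
                    process_batch(&batch, NODE_DATA(nid));
                    nid = folio_nid(lru_to_folio(folio_list));
            }
            if (!list_empty(&batch))
                    process_batch(&batch, NODE_DATA(nid));
    }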
|