/mm/
mm_init.c
   674  pg_data_t *pgdat;    in __init_page_from_nid() (local)
  1302  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {    in reset_memoryless_node_totalpages()
  1464  pgdat->node_id,    in init_currently_empty_zone()
  1570  pgdat->nr_zones = 0;    in free_area_init_core_hotplug()
  1674  __func__, pgdat->node_id, (unsigned long)pgdat,    in alloc_node_mem_map()
  1725  WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);    in free_area_init_node()
  1729  pgdat->node_id = nid;    in free_area_init_node()
  1904  pg_data_t *pgdat;    in free_area_init() (local)
  2188  BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);    in deferred_init_memmap()
  2189  BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));    in deferred_init_memmap()
  [all …]
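
The loop at mm_init.c:1302 is the stock way to visit every zone of a node through its pgdat. A minimal sketch of the same pattern, assuming a kernel context (the printed fields are illustrative, not what mm_init.c reports):

```c
#include <linux/mmzone.h>
#include <linux/printk.h>

/* Walk every zone of one node, populated or not: the pattern used by
 * reset_memoryless_node_totalpages() at line 1302 above. */
static void walk_node_zones(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
		if (!populated_zone(z))
			continue;
		pr_info("node %d zone %s: %lu spanned pages\n",
			pgdat->node_id, z->name, z->spanned_pages);
	}
}
```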
|
vmscan.c
  2748  lruvec->pgdat = pgdat;    in get_lruvec()
  3444  if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))    in get_pte_pfn()
  3466  if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))    in get_pmd_pfn()
  4378  WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);    in lru_gen_rotate_memcg()
  4441  WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);    in lru_gen_release_memcg()
  5995  .pgdat = pgdat,    in shrink_node_memcgs()
  6580  if (!pgdat)    in throttle_direct_reclaim()
  7407  pg_data_t *pgdat;    in wakeup_kswapd() (local)
  7658  if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||    in __node_reclaim()
  7706  if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&    in node_reclaim()
  [all …]
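
get_pte_pfn() and get_pmd_pfn() (lines 3444 and 3466) both reject pfns outside the node's span; the test is a half-open interval check. A sketch, with pfn_in_node_span() as a hypothetical helper name:

```c
#include <linux/types.h>
#include <linux/mmzone.h>

/* Hypothetical helper mirroring the bounds check at lines 3444/3466:
 * a pfn belongs to this node iff it lies in
 * [node_start_pfn, pgdat_end_pfn). */
static bool pfn_in_node_span(unsigned long pfn, pg_data_t *pgdat)
{
	return pfn >= pgdat->node_start_pfn && pfn < pgdat_end_pfn(pgdat);
}
```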
|
show_mem.c
    95  pg_data_t *pgdat = NODE_DATA(nid);    in si_meminfo_node() (local)
   185  pg_data_t *pgdat;    in show_free_areas() (local)
   226  for_each_online_pgdat(pgdat) {    in show_free_areas()
   258  pgdat->node_id,    in show_free_areas()
   267  K(node_page_state(pgdat, NR_FILE_DIRTY)),    in show_free_areas()
   268  K(node_page_state(pgdat, NR_WRITEBACK)),    in show_free_areas()
   269  K(node_page_state(pgdat, NR_SHMEM)),    in show_free_areas()
   271  K(node_page_state(pgdat, NR_SHMEM_THPS)),    in show_free_areas()
   273  K(node_page_state(pgdat, NR_ANON_THPS)),    in show_free_areas()
   277  node_page_state(pgdat, NR_KERNEL_SCS_KB),    in show_free_areas()
  [all …]
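
show_free_areas() walks the online nodes with for_each_online_pgdat() and prints node_page_state() counters in KiB via a K() macro. A sketch of that reporting loop, assuming a kernel context (report_node_file_pages() and the counter subset are illustrative):

```c
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vmstat.h>
#include <linux/printk.h>

/* Mirrors show_mem.c's K(): page count to KiB. */
static unsigned long pages_to_kb(unsigned long pages)
{
	return pages << (PAGE_SHIFT - 10);
}

/* One line of file/writeback counters per online node, in the spirit
 * of show_free_areas(). */
static void report_node_file_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		pr_info("Node %d dirty:%lukB writeback:%lukB shmem:%lukB\n",
			pgdat->node_id,
			pages_to_kb(node_page_state(pgdat, NR_FILE_DIRTY)),
			pages_to_kb(node_page_state(pgdat, NR_WRITEBACK)),
			pages_to_kb(node_page_state(pgdat, NR_SHMEM)));
}
```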
|
vmstat.c
   277  struct pglist_data *pgdat;    in refresh_zone_stat_thresholds() (local)
   752  struct pglist_data *pgdat;    in inc_node_page_state() (local)
   754  pgdat = page_pgdat(page);    in inc_node_page_state()
  1506  pg_data_t *pgdat;    in frag_start() (local)
  1510  pgdat && node;    in frag_start()
  1511  pgdat = next_online_pgdat(pgdat))    in frag_start()
  1514  return pgdat;    in frag_start()
  1547  print(m, pgdat, zone);    in walk_zones_in_node()
  1587  pgdat->node_id,    in pagetypeinfo_showfree_print()
  2327  pgdat->node_id,    in unusable_show_print()
  [all …]
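
The frag_start() fragments (1506-1514) are a seq_file cursor: step through the online pgdats until the requested position is consumed. Reassembled as a sketch, with nth_online_pgdat() as a hypothetical name:

```c
#include <linux/types.h>
#include <linux/mmzone.h>

/* Returns the node'th online pgdat, or NULL when the position runs
 * past the last online node: the logic of frag_start() above. */
static pg_data_t *nth_online_pgdat(loff_t node)
{
	pg_data_t *pgdat;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}
```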
|
compaction.c
  2156  pgdat_kswapd_lock(pgdat);    in kswapd_is_running()
  2157  running = pgdat->kswapd && task_is_running(pgdat->kswapd);    in kswapd_is_running()
  2158  pgdat_kswapd_unlock(pgdat);    in kswapd_is_running()
  2262  pg_data_t *pgdat;    in __compact_finished() (local)
  3132  pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;    in kcompactd_do_work()
  3175  pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;    in kcompactd()
  3192  kcompactd_do_work(pgdat);    in kcompactd()
  3214  compact_node(pgdat, true);    in kcompactd()
  3241  if (pgdat->kcompactd)    in kcompactd_run()
  3244  pgdat->kcompactd = kthread_create_on_node(kcompactd, pgdat, nid, "kcompactd%d", nid);    in kcompactd_run()
  [all …]
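
Lines 2156-2158 reassemble into a small lock/test/unlock helper: pgdat->kswapd can be started and stopped by memory hotplug, so it is only dereferenced under pgdat_kswapd_lock(). A sketch:

```c
#include <linux/types.h>
#include <linux/mmzone.h>
#include <linux/sched.h>

/* kswapd_is_running(), reassembled from the fragments above: sample
 * the kswapd task pointer and its run state under the pgdat's
 * kswapd lock. */
static bool node_kswapd_running(pg_data_t *pgdat)
{
	bool running;

	pgdat_kswapd_lock(pgdat);
	running = pgdat->kswapd && task_is_running(pgdat->kswapd);
	pgdat_kswapd_unlock(pgdat);

	return running;
}
```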
|
workingset.c
   225  *pgdat = NODE_DATA(nid);    in unpack_shadow()
   245  struct pglist_data *pgdat = folio_pgdat(folio);    in lru_gen_eviction() (local)
   249  lruvec = mem_cgroup_lruvec(memcg, pgdat);    in lru_gen_eviction()
   270  struct pglist_data *pgdat;    in lru_gen_test_recent() (local)
   275  *lruvec = mem_cgroup_lruvec(memcg, pgdat);    in lru_gen_test_recent()
   383  struct pglist_data *pgdat = folio_pgdat(folio);    in workingset_eviction() (local)
   402  return pack_shadow(memcgid, pgdat, eviction,    in workingset_eviction()
   427  struct pglist_data *pgdat;    in workingset_test_recent() (local)
   537  struct pglist_data *pgdat;    in workingset_refault() (local)
   561  pgdat = folio_pgdat(folio);    in workingset_refault()
  [all …]
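
The recurring two-step in workingset.c is folio to pgdat to lruvec: resolve the folio's memcg and node, then ask mem_cgroup_lruvec() for the combined LRU vector. A sketch (the kernel's own folio_lruvec() wraps these same steps):

```c
#include <linux/mm.h>
#include <linux/memcontrol.h>

/* Resolve the lruvec a folio is accounted to, as lru_gen_eviction()
 * and workingset_refault() do above. */
static struct lruvec *folio_to_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = folio_pgdat(folio);

	return mem_cgroup_lruvec(memcg, pgdat);
}
```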
|
mmzone.c
    18  struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)    in next_online_pgdat() (argument)
    20  int nid = next_online_node(pgdat->node_id);    in next_online_pgdat()
    32  pg_data_t *pgdat = zone->zone_pgdat;    in next_zone() (local)
    34  if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)    in next_zone()
    37  pgdat = next_online_pgdat(pgdat);    in next_zone()
    38  if (pgdat)    in next_zone()
    39  zone = pgdat->node_zones;    in next_zone()
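
next_online_pgdat() and next_zone() are the cursors behind the for_each_online_pgdat() and for_each_zone() iterators in mmzone.h: next_zone() advances within a node's zone array, then hops to the next online pgdat. A usage sketch (count_managed_zones() is a hypothetical name):

```c
#include <linux/mmzone.h>

/* Visit every zone of every online node, counting the ones with
 * managed pages; for_each_zone() advances via next_zone() above. */
static unsigned long count_managed_zones(void)
{
	struct zone *zone;
	unsigned long nr = 0;

	for_each_zone(zone)
		if (managed_zone(zone))
			nr++;

	return nr;
}
```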
|
memory-tiers.c
   262  pg_data_t *pgdat;    in __node_get_memory_tier() (local)
   264  pgdat = NODE_DATA(node);    in __node_get_memory_tier()
   265  if (!pgdat)    in __node_get_memory_tier()
   280  pg_data_t *pgdat;    in node_is_toptier() (local)
   283  pgdat = NODE_DATA(node);    in node_is_toptier()
   284  if (!pgdat)    in node_is_toptier()
   288  memtier = rcu_dereference(pgdat->memtier);    in node_is_toptier()
   537  pg_data_t *pgdat = NODE_DATA(node);    in set_node_memory_tier() (local)
   573  pg_data_t *pgdat;    in clear_node_memory_tier() (local)
   576  pgdat = NODE_DATA(node);    in clear_node_memory_tier()
  [all …]
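
node_is_toptier() samples pgdat->memtier with rcu_dereference(), so the read must sit inside an RCU read-side critical section. A sketch assuming a CONFIG_NUMA kernel where pgdat->memtier exists (node_has_memory_tier() and the NULL test are illustrative):

```c
#include <linux/types.h>
#include <linux/mmzone.h>
#include <linux/rcupdate.h>

/* True iff the node currently has a memory tier assigned; the access
 * pattern mirrors node_is_toptier() at lines 280-288 above. */
static bool node_has_memory_tier(int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	bool assigned;

	if (!pgdat)
		return false;

	rcu_read_lock();
	assigned = rcu_dereference(pgdat->memtier) != NULL;
	rcu_read_unlock();

	return assigned;
}
```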
|
memory_hotplug.c
   524  for (zone = pgdat->node_zones;    in update_pgdat_span()
   577  update_pgdat_span(pgdat);    in remove_pfn_range_from_zone()
   719  if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)    in resize_pgdat_range()
   722  pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;    in resize_pgdat_range()
   754  int nid = pgdat->node_id;    in move_pfn_range_to_zone()
  1264  struct pglist_data *pgdat;    in hotadd_init_pgdat() (local)
  1272  pgdat = NODE_DATA(nid);    in hotadd_init_pgdat()
  1281  build_all_zonelists(pgdat);    in hotadd_init_pgdat()
  1283  return pgdat;    in hotadd_init_pgdat()
  1299  pg_data_t *pgdat;    in __try_online_node() (local)
  [all …]
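
Lines 719 and 722 are the heart of resize_pgdat_range(): the node start may only move down, and the span is recomputed against whichever end is larger, so hot-adding memory never shrinks a node. Reassembled as a sketch:

```c
#include <linux/mmzone.h>
#include <linux/minmax.h>

/* Grow-only span update, reassembled from the resize_pgdat_range()
 * fragments above. */
static void grow_node_span(pg_data_t *pgdat, unsigned long start_pfn,
			   unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	/* An empty node, or a range below the current start, moves the start. */
	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) -
				    pgdat->node_start_pfn;
}
```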
|
shuffle.h
    11  extern void __shuffle_free_memory(pg_data_t *pgdat);
    13  static inline void __meminit shuffle_free_memory(pg_data_t *pgdat)    in shuffle_free_memory() (argument)
    17  __shuffle_free_memory(pgdat);    in shuffle_free_memory()
    40  static inline void shuffle_free_memory(pg_data_t *pgdat)    in shuffle_free_memory() (argument)
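
The two shuffle_free_memory() definitions (lines 13 and 40) are the usual config-gated pair: a real inline wrapper when the feature is built in, an empty stub otherwise, so callers never carry #ifdefs. A generic sketch of the shape, with shuffle_enabled() as a hypothetical stand-in for the header's actual runtime gate:

```c
#include <linux/mmzone.h>

#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
extern void __shuffle_free_memory(pg_data_t *pgdat);
extern bool shuffle_enabled(void);	/* hypothetical runtime gate */

static inline void shuffle_free_memory(pg_data_t *pgdat)
{
	if (shuffle_enabled())
		__shuffle_free_memory(pgdat);
}
#else
/* Feature compiled out: the call sites optimize away entirely. */
static inline void shuffle_free_memory(pg_data_t *pgdat)
{
}
#endif
```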
|
bootmem_info.c
   105  void __init register_page_bootmem_info_node(struct pglist_data *pgdat)    in register_page_bootmem_info_node() (argument)
   108  int node = pgdat->node_id;    in register_page_bootmem_info_node()
   112  page = virt_to_page(pgdat);    in register_page_bootmem_info_node()
   117  pfn = pgdat->node_start_pfn;    in register_page_bootmem_info_node()
   118  end_pfn = pgdat_end_pfn(pgdat);    in register_page_bootmem_info_node()
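
Lines 117-118 set up a walk over the node's full pfn span. A sketch of that walk, assuming a SPARSEMEM build (PAGES_PER_SECTION) and with the per-section work elided:

```c
#include <linux/mmzone.h>

/* Step section-by-section across a node's spanned pfns, as
 * register_page_bootmem_info_node() does above. */
static void walk_node_pfns(pg_data_t *pgdat)
{
	unsigned long pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = pgdat_end_pfn(pgdat);

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_valid(pfn))
			continue;
		/* per-section bookkeeping would go here */
	}
}
```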
|
page_owner.c
   414  pg_data_t *pgdat, struct zone *zone)    in pagetypeinfo_showmixedcount_print() (argument)
   490  seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);    in pagetypeinfo_showmixedcount_print()
   760  static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)    in init_pages_in_zone() (argument)
   827  pgdat->node_id, zone->name, count);    in init_pages_in_zone()
   830  static void init_zones_in_node(pg_data_t *pgdat)    in init_zones_in_node() (argument)
   833  struct zone *node_zones = pgdat->node_zones;    in init_zones_in_node() (local)
   839  init_pages_in_zone(pgdat, zone);    in init_zones_in_node()
   845  pg_data_t *pgdat;    in init_early_allocated_pages() (local)
   847  for_each_online_pgdat(pgdat)    in init_early_allocated_pages()
   848  init_zones_in_node(pgdat);    in init_early_allocated_pages()
|
sparse.c
   310  static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)    in pgdat_to_phys() (argument)
   313  VM_BUG_ON(pgdat != &contig_page_data);    in pgdat_to_phys()
   316  return __pa(pgdat);    in pgdat_to_phys()
   321  sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,    in sparse_early_usemaps_alloc_pgdat_section() (argument)
   337  goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);    in sparse_early_usemaps_alloc_pgdat_section()
   355  struct pglist_data *pgdat = NODE_DATA(nid);    in check_usemap_section_nr() (local)
   365  pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);    in check_usemap_section_nr()
   393  sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,    in sparse_early_usemaps_alloc_pgdat_section() (argument)
   396  return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);    in sparse_early_usemaps_alloc_pgdat_section()
|
shuffle.c
   153  void __meminit __shuffle_free_memory(pg_data_t *pgdat)    in __shuffle_free_memory() (argument)
   157  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)    in __shuffle_free_memory()
|
page_alloc.c
  5596  local_node = pgdat->node_id;    in build_zonelists()
  5719  build_zonelists(pgdat);    in __build_all_zonelists()
  6068  struct pglist_data *pgdat;    in setup_per_cpu_pageset() (local)
  6089  for_each_online_pgdat(pgdat)    in setup_per_cpu_pageset()
  6090  pgdat->per_cpu_nodestats =    in setup_per_cpu_pageset()
  6225  struct pglist_data *pgdat;    in calculate_totalreserve_pages() (local)
  6267  struct pglist_data *pgdat;    in setup_per_zone_lowmem_reserve() (local)
  6481  pg_data_t *pgdat;    in setup_min_unmapped_ratio() (local)
  6509  pg_data_t *pgdat;    in setup_min_slab_ratio() (local)
  6513  pgdat->min_slab_pages = 0;    in setup_min_slab_ratio()
  [all …]
|
internal.h
   409  void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
   413  pg_data_t *pgdat = folio_pgdat(folio);    in acct_reclaim_writeback() (local)
   414  int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);    in acct_reclaim_writeback()
   417  __acct_reclaim_writeback(pgdat, folio, nr_throttled);    in acct_reclaim_writeback()
   420  static inline void wake_throttle_isolated(pg_data_t *pgdat)    in wake_throttle_isolated() (argument)
   424  wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];    in wake_throttle_isolated()
   540  extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
   543  struct mem_cgroup *memcg, pg_data_t *pgdat);
   546  struct mem_cgroup *memcg, pg_data_t *pgdat)    in user_proactive_reclaim() (argument)
  1207  static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,    in node_reclaim() (argument)
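
acct_reclaim_writeback() (lines 413-417) is a classic inline fast path: one atomic_read() in the common case, an out-of-line call only when writers are actually throttled. A sketch of the split, with hypothetical stand-in names for the pair:

```c
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Out-of-line slow path, only entered when someone is throttled
 * (stands in for __acct_reclaim_writeback()). */
void __slow_path_account(pg_data_t *pgdat, struct folio *folio,
			 int nr_throttled);

/* Inline fast path (stands in for acct_reclaim_writeback()): a
 * single atomic read decides whether the call is needed at all. */
static inline void fast_path_account(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__slow_path_account(pgdat, folio, nr_throttled);
}
```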
|
memcontrol.c
  1022  int nid = reclaim->pgdat->node_id;    in mem_cgroup_iter()
  2517  struct pglist_data *pgdat,    in account_slab_nmi_safe() (argument)
  2538  struct pglist_data *pgdat,    in account_slab_nmi_safe() (argument)
  2549  struct pglist_data *pgdat,    in mod_objcg_mlstate() (argument)
  2882  if (stock->cached_pgdat != pgdat) {    in __account_obj_stock()
  2896  stock->cached_pgdat = pgdat;    in __account_obj_stock()
  2935  if (pgdat)    in consume_obj_stock()
  3031  if (pgdat)    in refill_obj_stock()
  3051  if (pgdat)    in refill_obj_stock()
  3104  if (!ret && (nr_bytes || pgdat))    in obj_cgroup_charge_account()
  [all …]
|
numa.c
    37  pg_data_t *pgdat;    in alloc_offline_node_data() (local)
    38  node_data[nid] = memblock_alloc_or_panic(sizeof(*pgdat), SMP_CACHE_BYTES);    in alloc_offline_node_data()
|
memcontrol-v1.c
   273  pg_data_t *pgdat,    in mem_cgroup_soft_reclaim() (argument)
   283  .pgdat = pgdat,    in mem_cgroup_soft_reclaim()
   313  pgdat, &nr_scanned);    in mem_cgroup_soft_reclaim()
   322  unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,    in memcg1_soft_limit_reclaim() (argument)
   339  mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];    in memcg1_soft_limit_reclaim()
   362  reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,    in memcg1_soft_limit_reclaim()
  1925  pg_data_t *pgdat;    in memcg1_stat_format() (local)
  1930  for_each_online_pgdat(pgdat) {    in memcg1_stat_format()
  1931  mz = memcg->nodeinfo[pgdat->node_id];    in memcg1_stat_format()
|
page_ext.c
   163  void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)    in pgdat_page_ext_init() (argument)
   165  pgdat->node_page_ext = NULL;    in pgdat_page_ext_init()
   493  void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)    in pgdat_page_ext_init() (argument)
|
page-writeback.c
   252  static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)    in node_dirtyable_memory() (argument)
   258  struct zone *zone = pgdat->node_zones + z;    in node_dirtyable_memory()
   271  nr_pages -= min(nr_pages, pgdat->totalreserve_pages);    in node_dirtyable_memory()
   273  nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);    in node_dirtyable_memory()
   274  nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);    in node_dirtyable_memory()
   446  static unsigned long node_dirty_limit(struct pglist_data *pgdat)    in node_dirty_limit() (argument)
   448  unsigned long node_memory = node_dirtyable_memory(pgdat);    in node_dirty_limit()
   475  bool node_dirty_ok(struct pglist_data *pgdat)    in node_dirty_ok() (argument)
   477  unsigned long limit = node_dirty_limit(pgdat);    in node_dirty_ok()
   480  nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);    in node_dirty_ok()
  [all …]
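
node_dirtyable_memory() computes how much of a node may hold dirty pages: free pages summed over its populated zones, minus the dirty-exempt kernel reserve, plus the file LRU, which can be cleaned and reused. A sketch reassembled from the fragments above (the zone loop structure is an assumption consistent with them):

```c
#include <linux/mmzone.h>
#include <linux/vmstat.h>
#include <linux/minmax.h>

/* Pages on this node that could, in principle, become dirty. */
static unsigned long sketch_node_dirtyable(struct pglist_data *pgdat)
{
	unsigned long nr_pages = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;
		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/* Kernel reserves are exempt from dirtying. */
	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

	return nr_pages;
}
```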
|
migrate.c
  2581  static bool migrate_balanced_pgdat(struct pglist_data *pgdat,    in migrate_balanced_pgdat() (argument)
  2586  for (z = pgdat->nr_zones - 1; z >= 0; z--) {    in migrate_balanced_pgdat()
  2587  struct zone *zone = pgdat->node_zones + z;    in migrate_balanced_pgdat()
  2628  pg_data_t *pgdat = NODE_DATA(node);    in migrate_misplaced_folio_prepare() (local)
  2652  if (!migrate_balanced_pgdat(pgdat, nr_pages)) {    in migrate_misplaced_folio_prepare()
  2657  for (z = pgdat->nr_zones - 1; z >= 0; z--) {    in migrate_misplaced_folio_prepare()
  2658  if (managed_zone(pgdat->node_zones + z))    in migrate_misplaced_folio_prepare()
  2669  wakeup_kswapd(pgdat->node_zones + z, 0,    in migrate_misplaced_folio_prepare()
  2691  pg_data_t *pgdat = NODE_DATA(node);    in migrate_misplaced_folio() (local)
  2696  struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);    in migrate_misplaced_folio()
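
Lines 2657-2658 search a node's zones from the highest index down for one with managed pages; NUMA-balancing migration targets the highest usable zone of the destination node. A sketch of that search (highest_managed_zone() is a hypothetical name):

```c
#include <linux/mmzone.h>

/* Highest zone of the node that actually has managed pages, or NULL:
 * the top-down walk used in migrate_misplaced_folio_prepare() above. */
static struct zone *highest_managed_zone(pg_data_t *pgdat)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (managed_zone(zone))
			return zone;
	}

	return NULL;
}
```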
|
memblock.c
  2371  static void __init reset_node_managed_pages(pg_data_t *pgdat)    in reset_node_managed_pages() (argument)
  2375  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)    in reset_node_managed_pages()
  2381  struct pglist_data *pgdat;    in reset_all_zones_managed_pages() (local)
  2386  for_each_online_pgdat(pgdat)    in reset_all_zones_managed_pages()
  2387  reset_node_managed_pages(pgdat);    in reset_all_zones_managed_pages()
|
huge_memory.c
  1064  struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));    in get_deferred_split_queue() (local)
  1069  return &pgdat->deferred_split_queue;    in get_deferred_split_queue()
  1075  struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));    in get_deferred_split_queue() (local)
  1077  return &pgdat->deferred_split_queue;    in get_deferred_split_queue()
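
The two get_deferred_split_queue() bodies are the CONFIG_MEMCG and !CONFIG_MEMCG variants; in the latter the queue simply lives in the folio's node. A sketch of that shape, assuming CONFIG_TRANSPARENT_HUGEPAGE (node_split_queue() is a hypothetical name):

```c
#include <linux/mm.h>
#include <linux/mmzone.h>

/* !CONFIG_MEMCG shape of get_deferred_split_queue(): the deferred
 * THP split queue is per-node, hanging off the folio's pgdat. */
static struct deferred_split *node_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
```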
|
/mm/damon/
ops-common.c
   295  struct list_head *migrate_folios, struct pglist_data *pgdat,    in __damon_migrate_folio_list() (argument)
   310  if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)    in __damon_migrate_folio_list()
   325  struct pglist_data *pgdat,    in damon_migrate_folio_list() (argument)
   355  &migrate_folios, pgdat, target_nid);    in damon_migrate_folio_list()
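
Line 310 is the early-exit test: migration is a no-op when the folio already sits on the target node, or when no target node was requested. As a sketch (migration_is_noop() is a hypothetical name):

```c
#include <linux/types.h>
#include <linux/mmzone.h>
#include <linux/numa.h>

/* Mirrors the check at line 310 above: nothing to migrate if the
 * folio's node is the target, or no target was given. */
static bool migration_is_noop(struct pglist_data *pgdat, int target_nid)
{
	return pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE;
}
```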
|