/mm/
shuffle.c
      84  unsigned long end_pfn = zone_end_pfn(z);  in __shuffle_zone()
      88  spin_lock_irqsave(&z->lock, flags);  in __shuffle_zone()
     101  page_i = shuffle_valid_page(z, i, order);  in __shuffle_zone()
     112  j = z->zone_start_pfn +  in __shuffle_zone()
     115  page_j = shuffle_valid_page(z, j, order);  in __shuffle_zone()
     141  spin_unlock_irqrestore(&z->lock, flags);  in __shuffle_zone()
     143  spin_lock_irqsave(&z->lock, flags);  in __shuffle_zone()
     146  spin_unlock_irqrestore(&z->lock, flags);  in __shuffle_zone()
     155  struct zone *z;  in __shuffle_free_memory() (local)
     157  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in __shuffle_free_memory()
    [all …]
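The __shuffle_zone() hits above implement free-list entropy: under the zone lock, each candidate free page i is paired with a random page j inside the same zone span (line 112) and the two are swapped, with the lock periodically dropped and retaken (lines 141/143). A minimal userspace C sketch of that shape, where a plain array stands in for the zone's free pages; free_pfn and shuffle_range() are hypothetical stand-ins, and the kernel additionally validates each candidate via shuffle_valid_page() and uses a better randomness source than rand():

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define NPAGES 16

    /* Stand-in for a zone's span of free pages; purely illustrative. */
    static unsigned long free_pfn[NPAGES];

    /*
     * For each entry i, pick a random partner j in the whole range and
     * swap them, roughly what __shuffle_zone() does with order-sized
     * buddy pages under the zone lock.
     */
    static void shuffle_range(unsigned long *pfns, int n)
    {
        for (int i = 0; i < n; i++) {
            int j = rand() % n;        /* kernel: unbiased random in span */
            unsigned long tmp = pfns[i];
            pfns[i] = pfns[j];
            pfns[j] = tmp;
        }
    }

    int main(void)
    {
        srand((unsigned)time(NULL));
        for (int i = 0; i < NPAGES; i++)
            free_pfn[i] = 0x1000 + i;
        shuffle_range(free_pfn, NPAGES);
        for (int i = 0; i < NPAGES; i++)
            printf("%lx\n", free_pfn[i]);
        return 0;
    }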
|
mmzone.c
      56  struct zoneref *__next_zones_zonelist(struct zoneref *z,  in __next_zones_zonelist() (argument)
      65  while (zonelist_zone_idx(z) > highest_zoneidx)  in __next_zones_zonelist()
      66  z++;  in __next_zones_zonelist()
      68  while (zonelist_zone_idx(z) > highest_zoneidx ||  in __next_zones_zonelist()
      69  (zonelist_zone(z) && !zref_in_nodemask(z, nodes)))  in __next_zones_zonelist()
      70  z++;  in __next_zones_zonelist()
      72  return z;  in __next_zones_zonelist()
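__next_zones_zonelist() is shown nearly in full above: line 65 is the fast path when no nodemask is given, while lines 68-69 additionally reject zones whose node is outside the mask. A compilable userspace model of the same cursor advance; struct zoneref is reduced to hypothetical fields, and the real helpers are zonelist_zone_idx(), zonelist_zone() and zref_in_nodemask():

    #include <stdbool.h>

    /* Simplified stand-ins for struct zoneref and the nodemask check. */
    struct zoneref { int zone_idx; int node; bool valid; };

    static bool in_nodemask(const struct zoneref *z, const unsigned long *nodes)
    {
        return !nodes || (*nodes & (1UL << z->node));
    }

    /*
     * Advance the cursor until it points at a zone whose index is allowed
     * and, when a mask is given, whose node is in the mask. The list is
     * terminated by an invalid entry (the kernel uses a NULL zone).
     */
    static struct zoneref *next_zones_zonelist(struct zoneref *z,
                                               int highest_zoneidx,
                                               const unsigned long *nodes)
    {
        while (z->valid &&
               (z->zone_idx > highest_zoneidx || !in_nodemask(z, nodes)))
            z++;
        return z;
    }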
|
shuffle.h
      20  extern void __shuffle_zone(struct zone *z);
      21  static inline void __meminit shuffle_zone(struct zone *z)  in shuffle_zone() (argument)
      25  __shuffle_zone(z);  in shuffle_zone()
      44  static inline void shuffle_zone(struct zone *z)  in shuffle_zone() (argument)
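shuffle.h carries the two build variants of shuffle_zone(): line 21's real wrapper, which tests whether shuffling is enabled and then calls out to the __shuffle_zone() worker, and line 44's empty stub for kernels built without the feature. In the kernel the enable test is a static key checked with static_branch_unlikely(); the header-shaped sketch below swaps that for a plain bool so it stays self-contained:

    #include <stdbool.h>

    struct zone;                            /* opaque here */
    extern void __shuffle_zone(struct zone *z);

    /* In the kernel this is a jump-label (static key), not a plain bool. */
    static bool page_shuffle_enabled;

    /*
     * Enabled variant: cheap inline test, out-of-line worker. The
     * disabled build replaces this with an empty static inline stub,
     * as at line 44 above.
     */
    static inline void shuffle_zone(struct zone *z)
    {
        if (!page_shuffle_enabled)
            return;
        __shuffle_zone(z);
    }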
|
page_alloc.c
    2700  struct zone *z;  in __drain_all_pages() (local)
    3412  struct zoneref *z;  in unreserve_highatomic_pageblock() (local)
    3722  struct zoneref *z;  in get_page_from_freelist() (local)
    4191  struct zoneref *z;  in should_compact_retry() (local)
    4359  struct zoneref *z;  in wake_all_kswapds() (local)
    4491  struct zoneref *z;  in should_reclaim_retry() (local)
    4668  if (!zonelist_zone(z))  in __alloc_pages_slowpath()
    4965  struct zoneref *z;  in alloc_pages_bulk_noprof() (local)
    5391  struct zoneref *z;  in nr_free_zone_pages() (local)
    5631  struct zoneref *z;  in local_memory_node() (local)
    [all …]
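Nearly every allocator path in page_alloc.c declares a struct zoneref *z cursor for walking the zonelist in preference order; get_page_from_freelist() (line 3722) is the central user. Its shape is: visit each allowed zone, check the watermark, and allocate from the first zone that passes. A self-contained sketch under that reading, where struct myzone and try_alloc_from() are hypothetical stand-ins for the watermark check and rmqueue():

    #include <stdbool.h>
    #include <stddef.h>

    struct myzone { long free_pages; long watermark; };

    /* Hypothetical allocation attempt: succeed only above the watermark. */
    static bool try_alloc_from(struct myzone *zone, int order)
    {
        long need = 1L << order;

        if (zone->free_pages - need < zone->watermark)
            return false;
        zone->free_pages -= need;
        return true;
    }

    /*
     * Walk a NULL-terminated, preference-ordered zone array, the shape
     * of the for_each_zone_zonelist_nodemask() loop in
     * get_page_from_freelist().
     */
    static struct myzone *alloc_first_fit(struct myzone **zonelist, int order)
    {
        for (struct myzone **z = zonelist; *z; z++)
            if (try_alloc_from(*z, order))
                return *z;
        return NULL;        /* caller falls back to the slow path */
    }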
|
mm_init.c
      70  struct zoneref *z;  in mminit_verify_zonelist() (local)
    1300  struct zone *z;  in reset_memoryless_node_totalpages() (local)
    1302  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {  in reset_memoryless_node_totalpages()
    1303  z->zone_start_pfn = 0;  in reset_memoryless_node_totalpages()
    1304  z->spanned_pages = 0;  in reset_memoryless_node_totalpages()
    1305  z->present_pages = 0;  in reset_memoryless_node_totalpages()
    1307  z->present_early_pages = 0;  in reset_memoryless_node_totalpages()
    1557  enum zone_type z;  in free_area_init_core_hotplug() (local)
    1588  for (z = 0; z < MAX_NR_ZONES; z++) {  in free_area_init_core_hotplug()
    1589  struct zone *zone = pgdat->node_zones + z;  in free_area_init_core_hotplug()
    [all …]
|
page-writeback.c
     255  int z;  in node_dirtyable_memory() (local)
     257  for (z = 0; z < MAX_NR_ZONES; z++) {  in node_dirtyable_memory()
     258  struct zone *zone = pgdat->node_zones + z;  in node_dirtyable_memory()
     288  struct zone *z;  in highmem_dirtyable_memory() (local)
     294  z = &NODE_DATA(node)->node_zones[i];  in highmem_dirtyable_memory()
     295  if (!populated_zone(z))  in highmem_dirtyable_memory()
     298  nr_pages = zone_page_state(z, NR_FREE_PAGES);  in highmem_dirtyable_memory()
     300  nr_pages -= min(nr_pages, high_wmark_pages(z));  in highmem_dirtyable_memory()
     301  nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);  in highmem_dirtyable_memory()
     302  nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);  in highmem_dirtyable_memory()
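Lines 298-302 compute how much of a populated highmem zone may hold dirty page cache: free pages above the high watermark, plus both file LRU lists. The same arithmetic as a standalone function; struct zstat is a hypothetical stand-in for the zone_page_state() counters:

    /*
     * Per-zone dirtyable estimate, per highmem_dirtyable_memory():
     * free pages above the high watermark plus file-backed LRU pages.
     */
    struct zstat {
        unsigned long nr_free;
        unsigned long high_wmark;
        unsigned long nr_inactive_file;
        unsigned long nr_active_file;
    };

    static unsigned long zone_dirtyable(const struct zstat *z)
    {
        unsigned long nr_pages = z->nr_free;

        /* Never count the reserve below the high watermark as dirtyable
         * (the min() at line 300 keeps this from underflowing). */
        nr_pages -= (nr_pages < z->high_wmark) ? nr_pages : z->high_wmark;
        nr_pages += z->nr_inactive_file;
        nr_pages += z->nr_active_file;
        return nr_pages;
    }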
|
migrate.c
    2584  int z;  in migrate_balanced_pgdat() (local)
    2586  for (z = pgdat->nr_zones - 1; z >= 0; z--) {  in migrate_balanced_pgdat()
    2587  struct zone *zone = pgdat->node_zones + z;  in migrate_balanced_pgdat()
    2653  int z;  in migrate_misplaced_folio_prepare() (local)
    2657  for (z = pgdat->nr_zones - 1; z >= 0; z--) {  in migrate_misplaced_folio_prepare()
    2658  if (managed_zone(pgdat->node_zones + z))  in migrate_misplaced_folio_prepare()
    2666  if (z < 0)  in migrate_misplaced_folio_prepare()
    2669  wakeup_kswapd(pgdat->node_zones + z, 0,  in migrate_misplaced_folio_prepare()
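migrate_misplaced_folio_prepare() (lines 2657-2669) scans the target node's zones from highest to lowest, gives up if no zone manages pages, and otherwise kicks kswapd on the zone it found. A compact model of that scan; the types and wakeup_kswapd_stub() are hypothetical stand-ins:

    #include <stdbool.h>

    struct zinfo { bool managed; };
    struct pg { int nr_zones; struct zinfo zones[4]; };

    static void wakeup_kswapd_stub(struct pg *pgdat, int zone_idx)
    {
        /* kernel: wakeup_kswapd(pgdat->node_zones + z, 0, order, ...) */
        (void)pgdat;
        (void)zone_idx;
    }

    /*
     * Mirror of the migrate_misplaced_folio_prepare() scan: start at the
     * highest zone and stop at the first one that actually manages
     * pages; if none does, the node is unusable as a migration target.
     */
    static bool node_has_managed_zone(struct pg *pgdat)
    {
        int z;

        for (z = pgdat->nr_zones - 1; z >= 0; z--)
            if (pgdat->zones[z].managed)
                break;
        if (z < 0)
            return false;
        wakeup_kswapd_stub(pgdat, z);
        return true;
    }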
|
vmstat.c
     565  long n, t, z;  in mod_zone_state() (local)
     570  z = 0;  /* overflow to zone counters */  in mod_zone_state()
     590  z = n + os;  in mod_zone_state()
     595  if (z)  in mod_zone_state()
     596  zone_page_state_add(z, zone, item);  in mod_zone_state()
     623  long n, t, z;  in mod_node_state() (local)
     639  z = 0;  /* overflow to node counters */  in mod_node_state()
     659  z = n + os;  in mod_node_state()
     664  if (z)  in mod_node_state()
     665  node_page_state_add(z, pgdat, item);  in mod_node_state()
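mod_zone_state() and mod_node_state() share one idea: accumulate small deltas in a per-CPU counter and, once a threshold t is crossed, flush the batch plus an overstep into the shared counter (the z assignments at lines 590/659), so the per-CPU slot does not refill immediately. A model of that threshold-and-flush shape using C11 atomics and a thread-local slot in place of per-CPU data; the kernel version also picks the overstep's sign from the delta's direction and retries with a this_cpu cmpxchg loop:

    #include <stdatomic.h>
    #include <stdlib.h>

    #define THRESHOLD 32

    static _Thread_local long pcp_count;   /* per-CPU delta in the kernel */
    static atomic_long global_count;       /* the zone/node-wide counter  */

    /*
     * Batch small deltas locally and only fold them into the shared
     * counter once they exceed the threshold, keeping an overstep so
     * the local slot starts half-empty rather than at zero.
     */
    static void mod_state(long delta)
    {
        long n = pcp_count + delta;

        if (labs(n) > THRESHOLD) {
            long os = THRESHOLD / 2;       /* overstep, as in t >> 1 */

            atomic_fetch_add(&global_count, n + os);   /* "z = n + os" */
            n = -os;
        }
        pcp_count = n;
    }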
|
mempolicy.c
    2094  struct zoneref *z;  in mempolicy_slab_node() (local)
    2103  z = first_zones_zonelist(zonelist, highest_zoneidx,  in mempolicy_slab_node()
    2105  return zonelist_zone(z) ? zonelist_node_idx(z) : node;  in mempolicy_slab_node()
    2914  struct zoneref *z;  in mpol_misplaced() (local)
    2978  z = first_zones_zonelist(  in mpol_misplaced()
    2982  polnid = zonelist_node_idx(z);  in mpol_misplaced()
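Both call sites reduce to the same move: take the policy's zonelist, find the first zoneref at or below the request's highest allowed zone index with first_zones_zonelist(), and read the node id out of it (zonelist_node_idx() at lines 2105 and 2982), falling back to the local node if the walk finds nothing. A minimal self-contained model; struct zref and policy_node() are hypothetical:

    #include <stdbool.h>

    struct zref { int zone_idx; int node; bool valid; };

    static int policy_node(struct zref *zl, int highest_zoneidx,
                           int local_node)
    {
        struct zref *z = zl;

        /* first_zones_zonelist(): skip zones above the allowed index */
        while (z->valid && z->zone_idx > highest_zoneidx)
            z++;
        return z->valid ? z->node : local_node;
    }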
|
memblock.c
    2373  struct zone *z;  in reset_node_managed_pages() (local)
    2375  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in reset_node_managed_pages()
    2376  atomic_long_set(&z->managed_pages, 0);  in reset_node_managed_pages()
|
oom_kill.c
     255  struct zoneref *z;  in constrained_alloc() (local)
     295  for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,  in constrained_alloc()
|
vmscan.c
    2423  int z;  in prepare_scan_control() (local)
    2430  for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) {  in prepare_scan_control()
    5945  int z;  in should_continue_reclaim() (local)
    5966  for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {  in should_continue_reclaim()
    6259  struct zoneref *z;  in shrink_zones() (local)
    6278  for_each_zone_zonelist_nodemask(zone, z, zonelist,  in shrink_zones()
    6385  struct zoneref *z;  in do_try_to_free_pages() (local)
    6532  struct zoneref *z;  in throttle_direct_reclaim() (local)
    6567  for_each_zone_zonelist_nodemask(zone, z, zonelist,  in throttle_direct_reclaim()
    6925  int z;  in kswapd_shrink_node() (local)
    [all …]
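for_each_managed_zone_pgdat() (lines 2430 and 5966) iterates one node's zones up to a cap, skipping zones that manage no pages. Its body is not shown in this listing, so the compilable model below is a guess at the shape; the field names are hypothetical and the real macro is built around managed_zone():

    #include <stdbool.h>

    struct mzone { bool managed; };
    struct mpgdat { struct mzone node_zones[4]; };

    /*
     * Model of for_each_managed_zone_pgdat(): visit zones 0..idx of one
     * node, skipping unmanaged zones. The dangling if/else keeps the
     * caller's loop body attached safely.
     */
    #define for_each_managed_zone_pgdat(zone, pgdat, z, idx)        \
        for ((z) = 0, (zone) = (pgdat)->node_zones;                 \
             (z) <= (idx);                                          \
             (z)++, (zone)++)                                       \
            if (!(zone)->managed) {} else

    static int count_managed(struct mpgdat *pgdat, int idx)
    {
        struct mzone *zone;
        int z, n = 0;

        for_each_managed_zone_pgdat(zone, pgdat, z, idx)
            n++;
        return n;
    }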
|
compaction.c
    2436  struct zoneref *z;  in compaction_zonelist_suitable() (local)
    2442  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,  in compaction_zonelist_suitable()
    2818  struct zoneref *z;  in try_to_compact_pages() (local)
    2828  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,  in try_to_compact_pages()
|
hugetlb.c
    1325  struct zoneref *z;  in dequeue_hugetlb_folio_nodemask() (local)
    1336  for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {  in dequeue_hugetlb_folio_nodemask()
|
slub.c
    2945  struct zoneref *z;  in get_any_partial() (local)
    2976  for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {  in get_any_partial()
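get_any_partial() uses the same zonelist walk for NUMA-aware slab reuse: when the local node has no partial slabs, it visits remote nodes in zonelist order and takes a slab from the first node holding more partials than the cache's keep-around minimum. Only two lines of it appear above, so the self-contained sketch below is a loose model; the min_partial comparison mirrors the kernel's s->min_partial check, and cpuset filtering is omitted:

    #include <stddef.h>

    struct knode { int nr_partial; };

    /*
     * Walk a NULL-terminated, zonelist-ordered node array and pick the
     * first node with enough partial slabs to be worth stealing from,
     * the shape of get_any_partial()'s fallback loop.
     */
    static struct knode *pick_partial_node(struct knode **nodes,
                                           int min_partial)
    {
        for (struct knode **n = nodes; *n; n++)
            if ((*n)->nr_partial > min_partial)
                return *n;
        return NULL;    /* no remote node worth defragmenting against */
    }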
|