Lines Matching refs:pgdat

284 #define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx)	\  argument
285 for ((idx) = 0, (zone) = (pgdat)->node_zones; \
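
The two matches above come from the zone-iteration helper used throughout the rest of this listing (skip_throttle_noprogress, prepare_scan_control, pgdat_balanced, and others). As a minimal userspace sketch of the pattern, assuming toy struct definitions and a managed_pages field in place of the kernel's managed_zone() check, a macro of this shape walks the node's embedded zone array up to a cap and skips unmanaged entries via the dangling-else idiom:

/* Simplified userspace sketch of a "for each managed zone" style macro.
 * struct zone/struct pgdat here are toy stand-ins, not the kernel types. */
#include <stdio.h>

#define MAX_NR_ZONES 4

struct zone { unsigned long managed_pages; };
struct pgdat { struct zone node_zones[MAX_NR_ZONES]; };

/* Skip zones with no managed pages, mirroring a managed_zone() filter. */
#define for_each_managed_zone_sketch(zone, pgdat, idx, highidx)	\
	for ((idx) = 0, (zone) = (pgdat)->node_zones;			\
	     (idx) <= (highidx);					\
	     (idx)++, (zone)++)						\
		if (!(zone)->managed_pages)				\
			continue;					\
		else

int main(void)
{
	struct pgdat node = { .node_zones = { {0}, {128}, {256}, {0} } };
	struct zone *z;
	int i;

	for_each_managed_zone_sketch(z, &node, i, MAX_NR_ZONES - 1)
		printf("zone %d: %lu managed pages\n", i, z->managed_pages);
	return 0;
}

The trailing else lets the statement written after the macro act as the loop body only for zones that pass the filter.
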
519 static bool skip_throttle_noprogress(pg_data_t *pgdat) in skip_throttle_noprogress() argument
528 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
536 for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) { in skip_throttle_noprogress()
547 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) in reclaim_throttle() argument
549 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
578 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
579 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
580 node_page_state(pgdat, NR_THROTTLED_WRITTEN)); in reclaim_throttle()
587 if (skip_throttle_noprogress(pgdat)) { in reclaim_throttle()
609 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
611 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
621 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, in __acct_reclaim_writeback() argument
635 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
636 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
639 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
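
reclaim_throttle() and __acct_reclaim_writeback() in the entries above work as a pair: the first reclaimer to stall snapshots a throttled-writeback counter and sleeps, and writeback completion wakes the waiters once enough pages have been written since the snapshot. A minimal sketch of that pairing, assuming simplified field names and a SWAP_CLUSTER_MAX-sized wake threshold, could look like:

/* Simplified sketch of the throttle/wake accounting suggested by
 * reclaim_throttle() and __acct_reclaim_writeback(): the throttled side
 * snapshots a "pages written while throttled" counter, and the writeback
 * completion side signals progress once a batch has finished.  Names and
 * the threshold are assumptions for illustration. */
#include <stdbool.h>
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

struct node_state {
	unsigned long nr_throttled_written;	/* pages completed while throttled */
	unsigned long nr_reclaim_start;		/* snapshot taken when throttling began */
	int nr_writeback_throttled;		/* how many reclaimers are waiting */
};

static void throttle_start(struct node_state *n)
{
	if (++n->nr_writeback_throttled == 1)
		n->nr_reclaim_start = n->nr_throttled_written;
	/* ...the real code then sleeps on a waitqueue with a timeout... */
}

static bool writeback_completed(struct node_state *n, unsigned long nr_pages)
{
	n->nr_throttled_written += nr_pages;
	/* Report a wake once a batch worth of throttled writeback finished. */
	return n->nr_writeback_throttled &&
	       n->nr_throttled_written - n->nr_reclaim_start >= SWAP_CLUSTER_MAX;
}

int main(void)
{
	struct node_state n = { 0 };

	throttle_start(&n);
	printf("wake after 8 pages?  %d\n", writeback_completed(&n, 8));
	printf("wake after 32 more? %d\n", writeback_completed(&n, 32));
	return 0;
}

In the listing itself, the sleep side waits on pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK] with a timeout rather than relying on the wake alone.
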
1044 struct pglist_data *pgdat) in demote_folio_list() argument
1046 int target_nid = next_demotion_node(pgdat->node_id); in demote_folio_list()
1069 node_get_allowed_targets(pgdat, &allowed_mask); in demote_folio_list()
1099 struct pglist_data *pgdat, struct scan_control *sc, in shrink_folio_list() argument
1114 do_demote_pass = can_demote(pgdat->node_id, sc, memcg); in shrink_folio_list()
1233 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_folio_list()
1425 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_folio_list()
1603 nr_demoted = demote_folio_list(&demote_folios, pgdat); in shrink_folio_list()
1880 static bool too_many_isolated(struct pglist_data *pgdat, int file, in too_many_isolated() argument
1893 inactive = node_page_state(pgdat, NR_INACTIVE_FILE); in too_many_isolated()
1894 isolated = node_page_state(pgdat, NR_ISOLATED_FILE); in too_many_isolated()
1896 inactive = node_page_state(pgdat, NR_INACTIVE_ANON); in too_many_isolated()
1897 isolated = node_page_state(pgdat, NR_ISOLATED_ANON); in too_many_isolated()
1912 wake_throttle_isolated(pgdat); in too_many_isolated()
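
too_many_isolated() above compares the node's NR_ISOLATED_* counters against the matching NR_INACTIVE_* counters to decide whether a direct reclaimer should back off. A rough sketch of that ratio check, with the one-half scaling chosen here purely for illustration (the kernel scales by allocation context), could be:

/* Rough sketch of the pressure check implied by too_many_isolated():
 * reclaimers back off when the pages already isolated from an LRU list
 * rival the pages still on it.  The one-half limit is an assumption. */
#include <stdbool.h>
#include <stdio.h>

static bool too_many_isolated_sketch(unsigned long inactive,
				     unsigned long isolated)
{
	return isolated > inactive / 2;
}

int main(void)
{
	printf("%d\n", too_many_isolated_sketch(1000, 100));	/* 0: keep going */
	printf("%d\n", too_many_isolated_sketch(1000, 600));	/* 1: throttle   */
	return 0;
}
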
2015 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_inactive_list() local
2018 while (unlikely(too_many_isolated(pgdat, file, sc))) { in shrink_inactive_list()
2024 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); in shrink_inactive_list()
2038 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_inactive_list()
2050 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false, in shrink_inactive_list()
2058 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2091 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); in shrink_inactive_list()
2103 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
2139 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_active_list() local
2148 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_active_list()
2211 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2214 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2219 struct pglist_data *pgdat) in reclaim_folio_list() argument
2232 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &stat, true, NULL); in reclaim_folio_list()
2238 trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat); in reclaim_folio_list()
2344 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) in prepare_scan_control() argument
2352 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in prepare_scan_control()
2426 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in prepare_scan_control()
2427 file = node_page_state(pgdat, NR_ACTIVE_FILE) + in prepare_scan_control()
2428 node_page_state(pgdat, NR_INACTIVE_FILE); in prepare_scan_control()
2430 for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) { in prepare_scan_control()
2439 anon = node_page_state(pgdat, NR_INACTIVE_ANON); in prepare_scan_control()
2558 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_scan_count() local
2567 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
2740 struct pglist_data *pgdat = NODE_DATA(nid); in get_lruvec() local
2747 if (!lruvec->pgdat) in get_lruvec()
2748 lruvec->pgdat = pgdat; in get_lruvec()
2755 return &pgdat->__lruvec; in get_lruvec()
2761 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_swappiness() local
2766 if (!can_demote(pgdat->node_id, sc, memcg) && in get_swappiness()
2917 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in get_next_mm() local
2921 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); in get_next_mm()
3426 struct pglist_data *pgdat) in get_pte_pfn() argument
3444 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pte_pfn()
3451 struct pglist_data *pgdat) in get_pmd_pfn() argument
3466 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pmd_pfn()
3473 struct pglist_data *pgdat) in get_pfn_folio() argument
3480 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
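
get_pte_pfn(), get_pmd_pfn(), and get_pfn_folio() above all filter candidate pages by node: the pfn must fall inside the pgdat's spanned range, and the resulting folio must actually belong to that node. A small sketch of the range check, with toy stand-in types, might read:

/* Illustrative sketch of the filtering done by get_pte_pfn()/get_pmd_pfn():
 * a candidate pfn is rejected unless it lies in the node's
 * [node_start_pfn, node_end_pfn) span; get_pfn_folio() additionally
 * rejects folios that do not reside on that node.  Types are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct pgdat_sketch {
	unsigned long node_start_pfn;
	unsigned long node_spanned_pages;
	int node_id;
};

static bool pfn_in_node(unsigned long pfn, const struct pgdat_sketch *pgdat)
{
	unsigned long end = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	return pfn >= pgdat->node_start_pfn && pfn < end;
}

int main(void)
{
	struct pgdat_sketch node0 = { .node_start_pfn = 0x1000,
				      .node_spanned_pages = 0x1000,
				      .node_id = 0 };

	printf("%d\n", pfn_in_node(0x1800, &node0));	/* 1: inside the node */
	printf("%d\n", pfn_in_node(0x2400, &node0));	/* 0: beyond the span */
	return 0;
}
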
3534 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range() local
3563 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat); in walk_pte_range()
3567 folio = get_pfn_folio(pfn, memcg, pgdat); in walk_pte_range()
3610 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked() local
3654 pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat); in walk_pmd_range_locked()
3658 folio = get_pfn_folio(pfn, memcg, pgdat); in walk_pmd_range_locked()
3723 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range() local
3724 unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat); in walk_pmd_range()
3841 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc) in set_mm_walk() argument
3845 if (pgdat && current_is_kswapd()) { in set_mm_walk()
3848 walk = &pgdat->mm_walk; in set_mm_walk()
4106 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) in set_initial_priority() argument
4118 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); in set_initial_priority()
4119 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in set_initial_priority()
4120 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); in set_initial_priority()
4181 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) in lru_gen_age_node() argument
4189 set_initial_priority(pgdat, sc); in lru_gen_age_node()
4193 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_age_node()
4242 struct pglist_data *pgdat = folio_pgdat(folio); in lru_gen_look_around() local
4243 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_look_around()
4289 pfn = get_pte_pfn(ptent, vma, addr, pgdat); in lru_gen_look_around()
4293 folio = get_pfn_folio(pfn, memcg, pgdat); in lru_gen_look_around()
4343 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in lru_gen_rotate_memcg() local
4345 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4358 new = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_rotate_memcg()
4360 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4370 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4372 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4374 pgdat->memcg_lru.nr_memcgs[old]--; in lru_gen_rotate_memcg()
4375 pgdat->memcg_lru.nr_memcgs[new]++; in lru_gen_rotate_memcg()
4377 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_rotate_memcg()
4378 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4380 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4392 struct pglist_data *pgdat = NODE_DATA(nid); in lru_gen_online_memcg() local
4395 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4399 gen = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_online_memcg()
4403 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4404 pgdat->memcg_lru.nr_memcgs[gen]++; in lru_gen_online_memcg()
4406 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4427 struct pglist_data *pgdat = NODE_DATA(nid); in lru_gen_release_memcg() local
4430 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4438 pgdat->memcg_lru.nr_memcgs[gen]--; in lru_gen_release_memcg()
4440 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_release_memcg()
4441 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_release_memcg()
4443 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
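
lru_gen_rotate_memcg(), lru_gen_online_memcg(), and lru_gen_release_memcg() above maintain per-node generation bins of memcgs, with nr_memcgs[] counters and a seq that only advances once the oldest generation drains. A simplified sketch of that bookkeeping, using plain counters instead of the kernel's hlist_nulls fifo[gen][bin] lists, is:

/* Simplified sketch of the per-node memcg LRU bookkeeping visible above:
 * each node tracks how many memcgs sit in each generation, and the
 * sequence counter only advances when the current (oldest) generation
 * becomes empty.  The two-generation layout is an illustrative choice. */
#include <stdio.h>

#define MEMCG_NR_GENS 2

struct memcg_lru_sketch {
	unsigned long seq;
	int nr_memcgs[MEMCG_NR_GENS];
};

static int get_gen(unsigned long seq)
{
	return seq % MEMCG_NR_GENS;
}

/* Move one memcg from generation @old to @new and advance seq if the
 * current generation drained. */
static void rotate(struct memcg_lru_sketch *lru, int old, int new)
{
	lru->nr_memcgs[old]--;
	lru->nr_memcgs[new]++;

	if (!lru->nr_memcgs[old] && old == get_gen(lru->seq))
		lru->seq++;
}

int main(void)
{
	struct memcg_lru_sketch lru = { .seq = 0, .nr_memcgs = { 2, 0 } };

	rotate(&lru, 0, 1);
	printf("seq=%lu nr=[%d,%d]\n", lru.seq, lru.nr_memcgs[0], lru.nr_memcgs[1]);
	rotate(&lru, 0, 1);	/* old gen now empty -> seq advances */
	printf("seq=%lu nr=[%d,%d]\n", lru.seq, lru.nr_memcgs[0], lru.nr_memcgs[1]);
	return 0;
}
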
4728 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in evict_folios() local
4744 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg); in evict_folios()
4747 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in evict_folios()
4938 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_one() local
4954 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); in shrink_one()
4973 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) in shrink_many() argument
4984 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); in shrink_many()
4992 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { in shrink_many()
5064 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) in lru_gen_shrink_node() argument
5083 set_mm_walk(pgdat, sc->proactive); in lru_gen_shrink_node()
5085 set_initial_priority(pgdat, sc); in lru_gen_shrink_node()
5091 shrink_one(&pgdat->__lruvec, sc); in lru_gen_shrink_node()
5093 shrink_many(pgdat, sc); in lru_gen_shrink_node()
5103 pgdat->kswapd_failures = 0; in lru_gen_shrink_node()
5693 void lru_gen_init_pgdat(struct pglist_data *pgdat) in lru_gen_init_pgdat() argument
5697 spin_lock_init(&pgdat->memcg_lru.lock); in lru_gen_init_pgdat()
5701 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); in lru_gen_init_pgdat()
5786 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) in lru_gen_age_node() argument
5796 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) in lru_gen_shrink_node() argument
5939 static inline bool should_continue_reclaim(struct pglist_data *pgdat, in should_continue_reclaim() argument
5966 for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { in should_continue_reclaim()
5984 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); in should_continue_reclaim()
5985 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
5986 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); in should_continue_reclaim()
5991 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) in shrink_node_memcgs() argument
5995 .pgdat = pgdat, in shrink_node_memcgs()
6014 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in shrink_node_memcgs()
6053 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
6070 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) in shrink_node() argument
6078 lru_gen_shrink_node(pgdat, sc); in shrink_node()
6082 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
6090 prepare_scan_control(pgdat, sc); in shrink_node()
6092 shrink_node_memcgs(pgdat, sc); in shrink_node()
6125 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
6130 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
6140 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); in shrink_node()
6168 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); in shrink_node()
6170 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc)) in shrink_node()
6180 pgdat->kswapd_failures = 0; in shrink_node()
6219 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) in consider_reclaim_throttle() argument
6228 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
6246 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS); in consider_reclaim_throttle()
6349 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) in snapshot_refaults() argument
6357 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); in snapshot_refaults()
6484 static bool allow_direct_reclaim(pg_data_t *pgdat) in allow_direct_reclaim() argument
6492 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
6495 for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) { in allow_direct_reclaim()
6510 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
6511 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
6512 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
6514 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
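
allow_direct_reclaim() above sums free pages across the node's lower zones and weighs them against the min watermarks before letting a direct reclaimer proceed, waking kswapd otherwise. A sketch of that check, where the one-half factor and field names are assumptions for illustration, could be:

/* Sketch of the pfmemalloc-style check in allow_direct_reclaim(): free
 * pages are summed across the node's lower zones and compared against a
 * fraction of the summed min watermarks; if the node looks too depleted,
 * the caller throttles and kswapd is woken instead. */
#include <stdbool.h>
#include <stdio.h>

struct zone_sketch {
	unsigned long free_pages;
	unsigned long min_wmark;
};

static bool allow_direct_reclaim_sketch(const struct zone_sketch *zones, int nr)
{
	unsigned long free = 0, reserve = 0;
	int i;

	for (i = 0; i < nr; i++) {
		free += zones[i].free_pages;
		reserve += zones[i].min_wmark;
	}

	if (!reserve)		/* nothing managed below the cutoff zone */
		return true;

	return free > reserve / 2;
}

int main(void)
{
	struct zone_sketch ok[2]    = { { 500, 200 }, { 400, 200 } };
	struct zone_sketch tight[2] = { {  50, 200 }, {  60, 200 } };

	printf("%d\n", allow_direct_reclaim_sketch(ok, 2));	/* 1 */
	printf("%d\n", allow_direct_reclaim_sketch(tight, 2));	/* 0 */
	return 0;
}
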
6534 pg_data_t *pgdat = NULL; in throttle_direct_reclaim() local
6573 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
6574 if (allow_direct_reclaim(pgdat)) in throttle_direct_reclaim()
6580 if (!pgdat) in throttle_direct_reclaim()
6595 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6596 allow_direct_reclaim(pgdat), HZ); in throttle_direct_reclaim()
6600 allow_direct_reclaim(pgdat)); in throttle_direct_reclaim()
6657 pg_data_t *pgdat, in mem_cgroup_shrink_node() argument
6660 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in mem_cgroup_shrink_node()
6745 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) in kswapd_age_node() argument
6751 lru_gen_age_node(pgdat, sc); in kswapd_age_node()
6755 lruvec = mem_cgroup_lruvec(NULL, pgdat); in kswapd_age_node()
6764 lruvec = mem_cgroup_lruvec(memcg, pgdat); in kswapd_age_node()
6771 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) in pgdat_watermark_boosted() argument
6784 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
6799 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) in pgdat_balanced() argument
6809 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { in pgdat_balanced()
6866 static void clear_pgdat_congested(pg_data_t *pgdat) in clear_pgdat_congested() argument
6868 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); in clear_pgdat_congested()
6872 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
6873 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
6882 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, in prepare_kswapd_sleep() argument
6898 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
6899 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
6902 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
6905 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { in prepare_kswapd_sleep()
6906 clear_pgdat_congested(pgdat); in prepare_kswapd_sleep()
6921 static bool kswapd_shrink_node(pg_data_t *pgdat, in kswapd_shrink_node() argument
6930 for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { in kswapd_shrink_node()
6938 shrink_node(pgdat, sc); in kswapd_shrink_node()
6956 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) in update_reclaim_active() argument
6961 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { in update_reclaim_active()
6970 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) in set_reclaim_active() argument
6972 update_reclaim_active(pgdat, highest_zoneidx, true); in set_reclaim_active()
6976 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) in clear_reclaim_active() argument
6978 update_reclaim_active(pgdat, highest_zoneidx, false); in clear_reclaim_active()
6994 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) in balance_pgdat() argument
7022 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { in balance_pgdat()
7029 set_reclaim_active(pgdat, highest_zoneidx); in balance_pgdat()
7052 zone = pgdat->node_zones + i; in balance_pgdat()
7068 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); in balance_pgdat()
7100 kswapd_age_node(pgdat, &sc); in balance_pgdat()
7112 nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order, in balance_pgdat()
7121 if (kswapd_shrink_node(pgdat, &sc)) in balance_pgdat()
7129 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
7130 allow_direct_reclaim(pgdat)) in balance_pgdat()
7131 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
7170 pgdat->kswapd_failures++; in balance_pgdat()
7173 clear_reclaim_active(pgdat, highest_zoneidx); in balance_pgdat()
7184 zone = pgdat->node_zones + i; in balance_pgdat()
7194 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); in balance_pgdat()
7197 snapshot_refaults(NULL, pgdat); in balance_pgdat()
7218 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, in kswapd_highest_zoneidx() argument
7221 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
7226 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, in kswapd_try_to_sleep() argument
7235 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7244 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { in kswapd_try_to_sleep()
7251 reset_isolation_suitable(pgdat); in kswapd_try_to_sleep()
7257 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); in kswapd_try_to_sleep()
7267 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
7268 kswapd_highest_zoneidx(pgdat, in kswapd_try_to_sleep()
7271 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
7272 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
7275 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7276 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7284 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { in kswapd_try_to_sleep()
7285 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
7295 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); in kswapd_try_to_sleep()
7300 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); in kswapd_try_to_sleep()
7307 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7327 pg_data_t *pgdat = (pg_data_t *)p; in kswapd() local
7345 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7346 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7347 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
7351 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7352 highest_zoneidx = kswapd_highest_zoneidx(pgdat, in kswapd()
7356 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, in kswapd()
7360 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7361 highest_zoneidx = kswapd_highest_zoneidx(pgdat, in kswapd()
7363 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7364 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7384 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
7386 reclaim_order = balance_pgdat(pgdat, alloc_order, in kswapd()
7407 pg_data_t *pgdat; in wakeup_kswapd() local
7416 pgdat = zone->zone_pgdat; in wakeup_kswapd()
7417 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
7420 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
7422 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
7423 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
7425 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
7429 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
7430 (pgdat_balanced(pgdat, order, highest_zoneidx) && in wakeup_kswapd()
7431 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { in wakeup_kswapd()
7440 wakeup_kcompactd(pgdat, order, highest_zoneidx); in wakeup_kswapd()
7444 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
7446 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
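
wakeup_kswapd() and the kswapd() entries above form a small producer/consumer handshake: wakers publish the largest order and highest zone index they need in the pgdat, and kswapd consumes and resets both before each balancing pass. A plain-variable sketch of that handshake (standing in for the READ_ONCE/WRITE_ONCE-annotated fields) might look like:

/* Sketch of the kswapd wakeup handshake: wakers only ever raise the
 * pending request, and kswapd takes it and resets it for the next round.
 * Plain variables stand in for the annotated pg_data_t fields. */
#include <stdio.h>

#define MAX_NR_ZONES 4

struct kswapd_request {
	int order;		/* highest allocation order requested   */
	int highest_zoneidx;	/* highest zone index requested, or MAX */
};

/* Waker side: only ever raise the pending request. */
static void publish_request(struct kswapd_request *rq, int order, int zoneidx)
{
	if (rq->highest_zoneidx == MAX_NR_ZONES || zoneidx > rq->highest_zoneidx)
		rq->highest_zoneidx = zoneidx;
	if (order > rq->order)
		rq->order = order;
}

/* kswapd side: take the pending request and reset it. */
static struct kswapd_request consume_request(struct kswapd_request *rq)
{
	struct kswapd_request taken = *rq;

	rq->order = 0;
	rq->highest_zoneidx = MAX_NR_ZONES;
	return taken;
}

int main(void)
{
	struct kswapd_request rq = { 0, MAX_NR_ZONES };
	struct kswapd_request taken;

	publish_request(&rq, 3, 1);
	publish_request(&rq, 1, 2);
	taken = consume_request(&rq);
	printf("balance order=%d highest_zoneidx=%d\n",
	       taken.order, taken.highest_zoneidx);
	return 0;
}
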
7493 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_run() local
7495 pgdat_kswapd_lock(pgdat); in kswapd_run()
7496 if (!pgdat->kswapd) { in kswapd_run()
7497 pgdat->kswapd = kthread_create_on_node(kswapd, pgdat, nid, "kswapd%d", nid); in kswapd_run()
7498 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
7501 nid, PTR_ERR(pgdat->kswapd)); in kswapd_run()
7503 pgdat->kswapd = NULL; in kswapd_run()
7505 wake_up_process(pgdat->kswapd); in kswapd_run()
7508 pgdat_kswapd_unlock(pgdat); in kswapd_run()
7517 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_stop() local
7520 pgdat_kswapd_lock(pgdat); in kswapd_stop()
7521 kswapd = pgdat->kswapd; in kswapd_stop()
7524 pgdat->kswapd = NULL; in kswapd_stop()
7526 pgdat_kswapd_unlock(pgdat); in kswapd_stop()
7592 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) in node_unmapped_file_pages() argument
7594 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); in node_unmapped_file_pages()
7595 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + in node_unmapped_file_pages()
7596 node_page_state(pgdat, NR_ACTIVE_FILE); in node_unmapped_file_pages()
7607 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) in node_pagecache_reclaimable() argument
7619 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); in node_pagecache_reclaimable()
7621 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); in node_pagecache_reclaimable()
7625 delta += node_page_state(pgdat, NR_FILE_DIRTY); in node_pagecache_reclaimable()
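
node_unmapped_file_pages() and node_pagecache_reclaimable() above estimate how much page cache a node could plausibly reclaim: the file LRU minus mapped file pages, clamped at zero, with dirty pages deducted when the reclaim mode may not write them back. A worked sketch of that arithmetic, omitting the node_reclaim_mode handling of the real code, is:

/* Worked sketch of the reclaimable page cache estimate: unmapped file
 * pages are the file LRU minus mapped file pages (clamped at zero), and
 * dirty pages are deducted when writeback is not allowed.  Mode flags and
 * swappiness handling from the real code are omitted. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long unmapped_file_pages(unsigned long file_lru,
					 unsigned long file_mapped)
{
	/* Mapped file pages can exceed the LRU count due to shared mappings. */
	return file_lru > file_mapped ? file_lru - file_mapped : 0;
}

static unsigned long pagecache_reclaimable(unsigned long file_lru,
					   unsigned long file_mapped,
					   unsigned long file_dirty,
					   bool may_write)
{
	unsigned long reclaimable = unmapped_file_pages(file_lru, file_mapped);

	if (!may_write && reclaimable > file_dirty)
		reclaimable -= file_dirty;
	else if (!may_write)
		reclaimable = 0;

	return reclaimable;
}

int main(void)
{
	/* 1000 file pages on the LRUs, 300 mapped, 100 dirty, no writeback. */
	printf("%lu\n", pagecache_reclaimable(1000, 300, 100, false)); /* 600 */
	return 0;
}
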
7637 static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, in __node_reclaim() argument
7645 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, sc->order, in __node_reclaim()
7658 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || in __node_reclaim()
7659 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { in __node_reclaim()
7665 shrink_node(pgdat, sc); in __node_reclaim()
7680 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in node_reclaim() argument
7706 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
7707 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= in node_reclaim()
7708 pgdat->min_slab_pages) in node_reclaim()
7723 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
7726 if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
7729 ret = __node_reclaim(pgdat, gfp_mask, nr_pages, &sc) >= nr_pages; in node_reclaim()
7730 clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
7752 struct mem_cgroup *memcg, pg_data_t *pgdat) in user_proactive_reclaim() argument
7761 if (!buf || (!memcg && !pgdat) || (memcg && pgdat)) in user_proactive_reclaim()
7830 &pgdat->flags)) in user_proactive_reclaim()
7833 reclaimed = __node_reclaim(pgdat, gfp_mask, in user_proactive_reclaim()
7835 clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in user_proactive_reclaim()