Lines matching refs: pgdat (the leading number on each entry is the line number in the referencing source file)
487 static bool skip_throttle_noprogress(pg_data_t *pgdat) in skip_throttle_noprogress() argument
496 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
505 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
520 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) in reclaim_throttle() argument
522 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
551 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
552 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
553 node_page_state(pgdat, NR_THROTTLED_WRITTEN)); in reclaim_throttle()
560 if (skip_throttle_noprogress(pgdat)) { in reclaim_throttle()
582 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
584 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
594 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, in __acct_reclaim_writeback() argument
608 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
609 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
612 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
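The three helpers above implement reclaim throttling: reclaim_throttle() parks a task on pgdat->reclaim_wait[reason], the first waiter snapshots NR_THROTTLED_WRITTEN into pgdat->nr_reclaim_start, and __acct_reclaim_writeback() wakes the queue once enough throttled writeback has completed, while skip_throttle_noprogress() opts out entirely once kswapd_failures reaches MAX_RECLAIM_RETRIES. The sketch below is a minimal user-space model of that bookkeeping only; the struct, the SWAP_CLUSTER_MAX batching threshold and the reset-on-wake are simplified assumptions, not the kernel's definitions.

    /*
     * Minimal model of the throttle/wake bookkeeping above. The struct,
     * SWAP_CLUSTER_MAX and the reset-on-wake are stand-ins for the kernel's
     * counters and wait queue.
     */
    #include <stdio.h>
    #include <stdbool.h>

    #define SWAP_CLUSTER_MAX 32UL

    struct node_model {
        unsigned long nr_throttled_written;   /* models NR_THROTTLED_WRITTEN */
        unsigned long nr_reclaim_start;       /* snapshot taken by the 1st throttler */
        unsigned int  nr_writeback_throttled; /* tasks currently throttled */
    };

    /* The first task to throttle snapshots the "written while throttled" counter. */
    static void throttle_on_writeback(struct node_model *node)
    {
        if (++node->nr_writeback_throttled == 1)
            node->nr_reclaim_start = node->nr_throttled_written;
        printf("throttled: %u task(s) waiting\n", node->nr_writeback_throttled);
    }

    /*
     * Called as throttled writeback completes: wake everyone once enough pages
     * were cleaned since the snapshot.
     */
    static bool account_writeback(struct node_model *node, unsigned long nr_pages)
    {
        unsigned long written;

        node->nr_throttled_written += nr_pages;
        if (!node->nr_writeback_throttled)
            return false;                     /* nobody is waiting */

        written = node->nr_throttled_written - node->nr_reclaim_start;
        if (written > SWAP_CLUSTER_MAX * node->nr_writeback_throttled) {
            node->nr_writeback_throttled = 0; /* models wake_up(&reclaim_wait[...]) */
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct node_model node = { 0 };

        throttle_on_writeback(&node);
        throttle_on_writeback(&node);
        for (int i = 0; i < 4; i++)
            if (account_writeback(&node, 32))
                printf("woke waiters after batch %d\n", i);
        return 0;
    }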
988 struct pglist_data *pgdat) in demote_folio_list() argument
990 int target_nid = next_demotion_node(pgdat->node_id); in demote_folio_list()
1013 node_get_allowed_targets(pgdat, &allowed_mask); in demote_folio_list()
1043 struct pglist_data *pgdat, struct scan_control *sc, in shrink_folio_list() argument
1057 do_demote_pass = can_demote(pgdat->node_id, sc); in shrink_folio_list()
1162 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_folio_list()
1340 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_folio_list()
1518 stat->nr_demoted = demote_folio_list(&demote_folios, pgdat); in shrink_folio_list()
1790 static bool too_many_isolated(struct pglist_data *pgdat, int file, in too_many_isolated() argument
1803 inactive = node_page_state(pgdat, NR_INACTIVE_FILE); in too_many_isolated()
1804 isolated = node_page_state(pgdat, NR_ISOLATED_FILE); in too_many_isolated()
1806 inactive = node_page_state(pgdat, NR_INACTIVE_ANON); in too_many_isolated()
1807 isolated = node_page_state(pgdat, NR_ISOLATED_ANON); in too_many_isolated()
1822 wake_throttle_isolated(pgdat); in too_many_isolated()
1925 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_inactive_list() local
1928 while (unlikely(too_many_isolated(pgdat, file, sc))) { in shrink_inactive_list()
1934 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); in shrink_inactive_list()
1948 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_inactive_list()
1960 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false); in shrink_inactive_list()
1967 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2000 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); in shrink_inactive_list()
2012 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
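too_many_isolated() is the back-pressure valve for the isolation counters updated in shrink_inactive_list(): a direct reclaimer spins (and eventually calls reclaim_throttle() with VMSCAN_THROTTLE_ISOLATED) while more pages are isolated than sit on the matching inactive list. A rough model of that check follows; the node_counters struct, the /8 scaling and the batch size are illustrative stand-ins.

    /* Simplified model of the isolation back-off shown above. */
    #include <stdio.h>
    #include <stdbool.h>

    struct node_counters {
        unsigned long inactive; /* NR_INACTIVE_FILE or NR_INACTIVE_ANON */
        unsigned long isolated; /* NR_ISOLATED_FILE or NR_ISOLATED_ANON */
    };

    static bool too_many_isolated(const struct node_counters *nc, bool can_do_fs_io)
    {
        unsigned long inactive = nc->inactive;

        /*
         * Callers that may write pages back themselves hit the limit sooner,
         * leaving headroom for GFP_NOFS/GFP_NOIO reclaimers that cannot.
         */
        if (can_do_fs_io)
            inactive /= 8;

        return nc->isolated > inactive;
    }

    int main(void)
    {
        struct node_counters nc = { .inactive = 1024, .isolated = 0 };

        /* mimics the loop in shrink_inactive_list(): isolate until told to back off */
        while (!too_many_isolated(&nc, true)) {
            nc.isolated += 32;   /* __mod_node_page_state(..., +nr_taken) */
            printf("isolated batch, total=%lu\n", nc.isolated);
        }
        printf("backing off: would reclaim_throttle(VMSCAN_THROTTLE_ISOLATED)\n");
        return 0;
    }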
2048 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_active_list() local
2057 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_active_list()
2120 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2125 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2130 struct pglist_data *pgdat) in reclaim_folio_list() argument
2143 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, true); in reclaim_folio_list()
2254 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) in prepare_scan_control() argument
2262 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in prepare_scan_control()
2335 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in prepare_scan_control()
2336 file = node_page_state(pgdat, NR_ACTIVE_FILE) + in prepare_scan_control()
2337 node_page_state(pgdat, NR_INACTIVE_FILE); in prepare_scan_control()
2340 struct zone *zone = &pgdat->node_zones[z]; in prepare_scan_control()
2353 anon = node_page_state(pgdat, NR_INACTIVE_ANON); in prepare_scan_control()
2372 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_scan_count() local
2383 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
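prepare_scan_control() and get_scan_count() decide where reclaim pressure should fall by summing node counters (free, active+inactive file, inactive anon) against the zones' high watermarks; with no usable swap the scan is file-only. The sketch below shows only the counter arithmetic of that decision; the enum values, struct layout and the single threshold are assumptions, and the kernel applies several further conditions.

    /* Rough model of the scan-balance decision. */
    #include <stdio.h>
    #include <stdbool.h>

    enum scan_balance { SCAN_EQUAL, SCAN_FILE, SCAN_ANON };

    struct zone_model { unsigned long high_wmark; bool populated; };

    struct node_model {
        unsigned long free, active_file, inactive_file, inactive_anon;
        struct zone_model zones[4];
    };

    static enum scan_balance pick_balance(const struct node_model *n, bool may_swap)
    {
        unsigned long file = n->active_file + n->inactive_file;
        unsigned long total_high_wmark = 0;

        if (!may_swap)          /* no usable swap: only file pages can go */
            return SCAN_FILE;

        for (int z = 0; z < 4; z++)
            if (n->zones[z].populated)
                total_high_wmark += n->zones[z].high_wmark;

        /* file cache nearly gone: pressure has to fall on anon */
        if (file + n->free <= total_high_wmark && n->inactive_anon)
            return SCAN_ANON;

        return SCAN_EQUAL;
    }

    int main(void)
    {
        struct node_model n = {
            .free = 512, .active_file = 128, .inactive_file = 64,
            .inactive_anon = 4096,
            .zones = { { .high_wmark = 1024, .populated = true } },
        };

        printf("balance=%d (2 means SCAN_ANON)\n", pick_balance(&n, true));
        return 0;
    }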
2570 static bool can_age_anon_pages(struct pglist_data *pgdat, in can_age_anon_pages() argument
2578 return can_demote(pgdat->node_id, sc); in can_age_anon_pages()
2626 struct pglist_data *pgdat = NODE_DATA(nid); in get_lruvec() local
2633 if (!lruvec->pgdat) in get_lruvec()
2634 lruvec->pgdat = pgdat; in get_lruvec()
2641 return &pgdat->__lruvec; in get_lruvec()
2647 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_swappiness() local
2652 if (!can_demote(pgdat->node_id, sc) && in get_swappiness()
2797 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in get_next_mm() local
2801 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); in get_next_mm()
3299 struct pglist_data *pgdat) in get_pte_pfn() argument
3317 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pte_pfn()
3324 struct pglist_data *pgdat) in get_pmd_pfn() argument
3342 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pmd_pfn()
3349 struct pglist_data *pgdat, bool can_swap) in get_pfn_folio() argument
3354 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
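get_pte_pfn(), get_pmd_pfn() and get_pfn_folio() all filter PFNs to the node being walked: anything outside [node_start_pfn, pgdat_end_pfn) or backed by a folio on another node is skipped. A compact sketch of that guard, with an invented node_span type and a -1 sentinel standing in for the kernel's error returns.

    #include <stdio.h>

    struct node_span {
        int           node_id;
        unsigned long start_pfn;
        unsigned long end_pfn;  /* exclusive, like pgdat_end_pfn() */
    };

    /* Return the pfn if it belongs to this node, or -1UL to tell callers to skip. */
    static unsigned long pfn_if_on_node(unsigned long pfn, const struct node_span *n)
    {
        if (pfn < n->start_pfn || pfn >= n->end_pfn)
            return -1UL;
        return pfn;
    }

    int main(void)
    {
        struct node_span node1 = { .node_id = 1,
                                   .start_pfn = 0x100000, .end_pfn = 0x180000 };

        printf("%lx\n", pfn_if_on_node(0x123456, &node1));   /* kept */
        printf("%lx\n", pfn_if_on_node(0x050000, &node1));   /* skipped (-1) */
        return 0;
    }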
3386 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range() local
3408 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat); in walk_pte_range()
3412 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pte_range()
3449 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked() local
3493 pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat); in walk_pmd_range_locked()
3497 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pmd_range_locked()
3559 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range() local
3560 unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat); in walk_pmd_range()
3685 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc) in set_mm_walk() argument
3689 if (pgdat && current_is_kswapd()) { in set_mm_walk()
3692 walk = &pgdat->mm_walk; in set_mm_walk()
3922 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) in set_initial_priority() argument
3934 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); in set_initial_priority()
3935 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in set_initial_priority()
3936 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); in set_initial_priority()
3997 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) in lru_gen_age_node() argument
4005 set_initial_priority(pgdat, sc); in lru_gen_age_node()
4009 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_age_node()
4057 struct pglist_data *pgdat = folio_pgdat(folio); in lru_gen_look_around() local
4058 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_look_around()
4108 pfn = get_pte_pfn(ptent, vma, addr, pgdat); in lru_gen_look_around()
4112 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); in lru_gen_look_around()
4170 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in lru_gen_rotate_memcg() local
4172 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4185 new = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_rotate_memcg()
4187 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4197 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4199 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4201 pgdat->memcg_lru.nr_memcgs[old]--; in lru_gen_rotate_memcg()
4202 pgdat->memcg_lru.nr_memcgs[new]++; in lru_gen_rotate_memcg()
4204 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_rotate_memcg()
4205 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4207 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4219 struct pglist_data *pgdat = NODE_DATA(nid); in lru_gen_online_memcg() local
4222 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4226 gen = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_online_memcg()
4230 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4231 pgdat->memcg_lru.nr_memcgs[gen]++; in lru_gen_online_memcg()
4233 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4254 struct pglist_data *pgdat = NODE_DATA(nid); in lru_gen_release_memcg() local
4257 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4265 pgdat->memcg_lru.nr_memcgs[gen]--; in lru_gen_release_memcg()
4267 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_release_memcg()
4268 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_release_memcg()
4270 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
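lru_gen_rotate_memcg(), lru_gen_online_memcg() and lru_gen_release_memcg() keep per-node bookkeeping under pgdat->memcg_lru.lock: each memcg's lruvec sits on one generation's FIFO, nr_memcgs[] counts the population per generation, and memcg_lru.seq only advances once the currently oldest generation drains. The model below captures just the counters and that lazy seq advance; MEMCG_NR_GENS and the helper names are assumptions.

    #include <stdio.h>

    #define MEMCG_NR_GENS 3

    struct memcg_lru_model {
        unsigned long seq;
        int nr_memcgs[MEMCG_NR_GENS];
    };

    static int gen_of(unsigned long seq) { return seq % MEMCG_NR_GENS; }

    /* Move one memcg from generation @old to @new, advancing seq if @old drained. */
    static void rotate(struct memcg_lru_model *lru, int old, int new)
    {
        lru->nr_memcgs[old]--;
        lru->nr_memcgs[new]++;

        if (!lru->nr_memcgs[old] && old == gen_of(lru->seq))
            lru->seq++;                 /* WRITE_ONCE() in the kernel */
    }

    int main(void)
    {
        struct memcg_lru_model lru = { .seq = 0 };
        int gen = gen_of(lru.seq);

        lru.nr_memcgs[gen] = 2;         /* two memcgs came online */

        rotate(&lru, gen, gen_of(lru.seq + 1));
        printf("seq=%lu\n", lru.seq);   /* 0: old generation not empty yet */
        rotate(&lru, gen, gen_of(lru.seq + 1));
        printf("seq=%lu\n", lru.seq);   /* 1: old generation drained */
        return 0;
    }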
4573 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in evict_folios() local
4589 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false); in evict_folios()
4591 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in evict_folios()
4808 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_one() local
4824 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); in shrink_one()
4843 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) in shrink_many() argument
4854 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); in shrink_many()
4862 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { in shrink_many()
4934 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) in lru_gen_shrink_node() argument
4953 set_mm_walk(pgdat, sc->proactive); in lru_gen_shrink_node()
4955 set_initial_priority(pgdat, sc); in lru_gen_shrink_node()
4961 shrink_one(&pgdat->__lruvec, sc); in lru_gen_shrink_node()
4963 shrink_many(pgdat, sc); in lru_gen_shrink_node()
4973 pgdat->kswapd_failures = 0; in lru_gen_shrink_node()
5561 void lru_gen_init_pgdat(struct pglist_data *pgdat) in lru_gen_init_pgdat() argument
5565 spin_lock_init(&pgdat->memcg_lru.lock); in lru_gen_init_pgdat()
5569 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); in lru_gen_init_pgdat()
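lru_gen_init_pgdat() initialises that structure: the spinlock plus a 2-D array of hlist_nulls heads, where each head's nulls value is its generation index i, which is what lets the RCU walk in shrink_many() notice that it strayed onto a different generation's list. Below is a toy model of encoding a generation index in a list terminator; the sizes, the tagging scheme and the names are illustrative, not the kernel's hlist_nulls implementation.

    #include <stdio.h>
    #include <stdint.h>

    #define NR_GENS 3
    #define NR_BINS 4

    /* a "nulls" terminator: odd tagged value carrying the generation index */
    static inline uintptr_t make_nulls(int gen) { return ((uintptr_t)gen << 1) | 1; }
    static inline int nulls_gen(uintptr_t p)    { return (int)(p >> 1); }

    struct memcg_lru_model {
        uintptr_t fifo[NR_GENS][NR_BINS];   /* empty lists hold their nulls value */
    };

    static void init_memcg_lru(struct memcg_lru_model *lru)
    {
        for (int i = 0; i < NR_GENS; i++)
            for (int j = 0; j < NR_BINS; j++)
                lru->fifo[i][j] = make_nulls(i);
    }

    int main(void)
    {
        struct memcg_lru_model lru;

        init_memcg_lru(&lru);
        /* a walker hitting the terminator can recover which generation it was on */
        printf("gen of fifo[2][0] terminator: %d\n", nulls_gen(lru.fifo[2][0]));
        return 0;
    }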
5652 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) in lru_gen_age_node() argument
5662 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) in lru_gen_shrink_node() argument
5805 static inline bool should_continue_reclaim(struct pglist_data *pgdat, in should_continue_reclaim() argument
5832 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
5850 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); in should_continue_reclaim()
5851 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
5852 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); in should_continue_reclaim()
5857 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) in shrink_node_memcgs() argument
5861 .pgdat = pgdat, in shrink_node_memcgs()
5880 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in shrink_node_memcgs()
5919 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
5936 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) in shrink_node() argument
5943 lru_gen_shrink_node(pgdat, sc); in shrink_node()
5947 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
5955 prepare_scan_control(pgdat, sc); in shrink_node()
5957 shrink_node_memcgs(pgdat, sc); in shrink_node()
5990 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
5994 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
6004 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); in shrink_node()
6032 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); in shrink_node()
6034 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc)) in shrink_node()
6044 pgdat->kswapd_failures = 0; in shrink_node()
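shrink_node() (on the classic, non-MGLRU path) wraps shrink_node_memcgs() in a retry loop: it keeps going while should_continue_reclaim() reports that compaction still lacks free pages and the inactive lists are not empty, sets the PGDAT_WRITEBACK/PGDAT_DIRTY hints used by later throttling decisions, and clears pgdat->kswapd_failures once the node proves reclaimable. A condensed model of that loop; compaction_gap() and the numbers are placeholders, not the kernel's formulas.

    #include <stdio.h>
    #include <stdbool.h>

    struct node_model {
        unsigned long inactive_lru_pages;
        unsigned long free_pages;
        int kswapd_failures;
    };

    static unsigned long compaction_gap(unsigned int order)
    {
        return 2UL << order;                    /* placeholder for compact_gap() */
    }

    static bool should_continue_reclaim(const struct node_model *n,
                                        unsigned long nr_reclaimed, unsigned int order)
    {
        if (!nr_reclaimed)
            return false;                       /* no progress this round */
        if (n->free_pages >= compaction_gap(order))
            return false;                       /* compaction has enough room */
        return n->inactive_lru_pages > 0;       /* something left to scan */
    }

    int main(void)
    {
        struct node_model n = { .inactive_lru_pages = 300, .kswapd_failures = 3 };
        unsigned int order = 9;                 /* e.g. a THP-sized allocation */
        unsigned long reclaimed;

        do {    /* the body stands in for shrink_node_memcgs() */
            reclaimed = n.inactive_lru_pages >= 100 ? 100 : n.inactive_lru_pages;
            n.inactive_lru_pages -= reclaimed;
            n.free_pages += reclaimed;
            printf("pass: reclaimed=%lu free=%lu\n", reclaimed, n.free_pages);
        } while (should_continue_reclaim(&n, reclaimed, order));

        if (n.free_pages)                       /* node proved reclaimable */
            n.kswapd_failures = 0;
        printf("kswapd_failures=%d\n", n.kswapd_failures);
        return 0;
    }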
6084 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) in consider_reclaim_throttle() argument
6093 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
6111 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS); in consider_reclaim_throttle()
6214 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) in snapshot_refaults() argument
6222 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); in snapshot_refaults()
6349 static bool allow_direct_reclaim(pg_data_t *pgdat) in allow_direct_reclaim() argument
6357 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
6361 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
6379 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
6380 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
6381 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
6383 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
6403 pg_data_t *pgdat = NULL; in throttle_direct_reclaim() local
6442 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
6443 if (allow_direct_reclaim(pgdat)) in throttle_direct_reclaim()
6449 if (!pgdat) in throttle_direct_reclaim()
6464 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6465 allow_direct_reclaim(pgdat), HZ); in throttle_direct_reclaim()
6469 allow_direct_reclaim(pgdat)); in throttle_direct_reclaim()
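allow_direct_reclaim() sums the min-watermark reserves and free pages of the node's lower zones and lets direct reclaim proceed while free memory stays above roughly half of that reserve (or once kswapd has already failed MAX_RECLAIM_RETRIES times); otherwise throttle_direct_reclaim() parks the allocating task on pgdat->pfmemalloc_wait and kswapd is woken. A sketch of that watermark arithmetic with an invented three-zone layout.

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_RECLAIM_RETRIES 16

    struct zone_model { unsigned long min_wmark, free_pages; bool managed; };

    struct node_model {
        struct zone_model zones[3];     /* pretend: DMA, DMA32, NORMAL */
        int kswapd_failures;
    };

    static bool allow_direct_reclaim(const struct node_model *n)
    {
        unsigned long reserve = 0, free = 0;

        if (n->kswapd_failures >= MAX_RECLAIM_RETRIES)
            return true;                /* kswapd gave up; don't throttle forever */

        for (int i = 0; i < 3; i++) {
            if (!n->zones[i].managed)
                continue;
            reserve += n->zones[i].min_wmark;
            free += n->zones[i].free_pages;
        }
        if (!reserve)
            return true;

        return free > reserve / 2;      /* wmark_ok */
    }

    int main(void)
    {
        struct node_model n = {
            .zones = { { 128, 20, true }, { 512, 100, true }, { 1024, 300, true } },
        };

        if (!allow_direct_reclaim(&n))
            printf("would sleep on pfmemalloc_wait and wake kswapd\n");
        else
            printf("direct reclaim may proceed\n");
        return 0;
    }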
6526 pg_data_t *pgdat, in mem_cgroup_shrink_node() argument
6529 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in mem_cgroup_shrink_node()
6605 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) in kswapd_age_node() argument
6611 lru_gen_age_node(pgdat, sc); in kswapd_age_node()
6615 if (!can_age_anon_pages(pgdat, sc)) in kswapd_age_node()
6618 lruvec = mem_cgroup_lruvec(NULL, pgdat); in kswapd_age_node()
6624 lruvec = mem_cgroup_lruvec(memcg, pgdat); in kswapd_age_node()
6631 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) in pgdat_watermark_boosted() argument
6644 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
6659 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) in pgdat_balanced() argument
6670 zone = pgdat->node_zones + i; in pgdat_balanced()
6695 static void clear_pgdat_congested(pg_data_t *pgdat) in clear_pgdat_congested() argument
6697 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); in clear_pgdat_congested()
6701 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
6702 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
6711 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, in prepare_kswapd_sleep() argument
6727 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
6728 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
6731 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
6734 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { in prepare_kswapd_sleep()
6735 clear_pgdat_congested(pgdat); in prepare_kswapd_sleep()
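pgdat_balanced() walks the zones up to highest_zoneidx and declares the node balanced as soon as one eligible zone clears its high watermark for the requested order; prepare_kswapd_sleep() lets kswapd doze either when that holds (clearing the congestion bits) or when the node is hopeless, i.e. kswapd_failures already reached MAX_RECLAIM_RETRIES, in which case it is left to direct reclaim. Condensed model below; the zone_ok() test is a stand-in for zone_watermark_ok_safe().

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_RECLAIM_RETRIES 16

    struct zone_model { unsigned long free, high_wmark; bool managed; };

    struct node_model {
        struct zone_model zones[4];
        int kswapd_failures;
    };

    static bool zone_ok(const struct zone_model *z, unsigned int order)
    {
        /* stand-in for zone_watermark_ok_safe(zone, order, high_wmark, ...) */
        return z->free > z->high_wmark + (1UL << order);
    }

    static bool pgdat_balanced(const struct node_model *n, unsigned int order,
                               int highest_zoneidx)
    {
        for (int i = 0; i <= highest_zoneidx; i++) {
            if (!n->zones[i].managed)
                continue;
            if (zone_ok(&n->zones[i], order))
                return true;            /* one balanced zone is enough */
        }
        return false;                   /* nothing eligible met the watermark */
    }

    static bool prepare_kswapd_sleep(struct node_model *n, unsigned int order,
                                     int highest_zoneidx)
    {
        /* hopeless node: let kswapd sleep and leave it to direct reclaim */
        if (n->kswapd_failures >= MAX_RECLAIM_RETRIES)
            return true;

        return pgdat_balanced(n, order, highest_zoneidx);
    }

    int main(void)
    {
        struct node_model n = { .zones = { { 4096, 1024, true } } };

        printf("%s\n", prepare_kswapd_sleep(&n, 0, 0) ? "sleep" : "keep reclaiming");
        return 0;
    }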
6750 static bool kswapd_shrink_node(pg_data_t *pgdat, in kswapd_shrink_node() argument
6760 zone = pgdat->node_zones + z; in kswapd_shrink_node()
6771 shrink_node(pgdat, sc); in kswapd_shrink_node()
6789 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) in update_reclaim_active() argument
6795 zone = pgdat->node_zones + i; in update_reclaim_active()
6808 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) in set_reclaim_active() argument
6810 update_reclaim_active(pgdat, highest_zoneidx, true); in set_reclaim_active()
6814 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) in clear_reclaim_active() argument
6816 update_reclaim_active(pgdat, highest_zoneidx, false); in clear_reclaim_active()
6832 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) in balance_pgdat() argument
6861 zone = pgdat->node_zones + i; in balance_pgdat()
6871 set_reclaim_active(pgdat, highest_zoneidx); in balance_pgdat()
6894 zone = pgdat->node_zones + i; in balance_pgdat()
6910 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); in balance_pgdat()
6942 kswapd_age_node(pgdat, &sc); in balance_pgdat()
6954 nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order, in balance_pgdat()
6963 if (kswapd_shrink_node(pgdat, &sc)) in balance_pgdat()
6971 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
6972 allow_direct_reclaim(pgdat)) in balance_pgdat()
6973 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
7012 pgdat->kswapd_failures++; in balance_pgdat()
7015 clear_reclaim_active(pgdat, highest_zoneidx); in balance_pgdat()
7026 zone = pgdat->node_zones + i; in balance_pgdat()
7036 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); in balance_pgdat()
7039 snapshot_refaults(NULL, pgdat); in balance_pgdat()
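balance_pgdat() is kswapd's workhorse: it raises reclaim priority step by step until the node is balanced for the requested order, wakes direct reclaimers parked on pfmemalloc_wait whenever allow_direct_reclaim() turns true, bumps pgdat->kswapd_failures when a whole run makes no progress, and finally pokes kcompactd and snapshots refault counters. The sketch keeps only the priority loop and the failure accounting; the helper bodies are fakes, and the progress reset actually happens inside shrink_node() in the kernel.

    #include <stdio.h>
    #include <stdbool.h>

    #define DEF_PRIORITY 12

    struct node_model {
        unsigned long inactive;         /* stand-in for the LRU lists */
        unsigned long free;
        unsigned long balanced_at;      /* free level that counts as balanced */
        int kswapd_failures;
    };

    static bool pgdat_balanced(const struct node_model *n)
    {
        return n->free >= n->balanced_at;
    }

    /* fake shrink pass: reclaims more aggressively as priority drops toward 0 */
    static unsigned long kswapd_shrink_node(struct node_model *n, int priority)
    {
        unsigned long batch = n->inactive >> priority;

        n->inactive -= batch;
        n->free += batch;
        return batch;
    }

    static void balance_pgdat(struct node_model *n)
    {
        unsigned long total = 0;

        for (int priority = DEF_PRIORITY; priority >= 0; priority--) {
            if (pgdat_balanced(n))
                break;
            total += kswapd_shrink_node(n, priority);
            printf("priority=%d free=%lu\n", priority, n->free);
        }

        if (!total)
            n->kswapd_failures++;       /* nothing reclaimed this whole run */
        else
            n->kswapd_failures = 0;     /* the kernel resets this in shrink_node() */
    }

    int main(void)
    {
        struct node_model n = { .inactive = 1 << 16, .balanced_at = 8192 };

        balance_pgdat(&n);
        printf("kswapd_failures=%d\n", n.kswapd_failures);
        return 0;
    }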
7060 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, in kswapd_highest_zoneidx() argument
7063 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
7068 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, in kswapd_try_to_sleep() argument
7077 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7086 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { in kswapd_try_to_sleep()
7093 reset_isolation_suitable(pgdat); in kswapd_try_to_sleep()
7099 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); in kswapd_try_to_sleep()
7109 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
7110 kswapd_highest_zoneidx(pgdat, in kswapd_try_to_sleep()
7113 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
7114 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
7117 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7118 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7126 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { in kswapd_try_to_sleep()
7127 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
7137 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); in kswapd_try_to_sleep()
7142 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); in kswapd_try_to_sleep()
7149 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7169 pg_data_t *pgdat = (pg_data_t *)p; in kswapd() local
7171 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kswapd()
7191 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7192 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7193 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
7197 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7198 highest_zoneidx = kswapd_highest_zoneidx(pgdat, in kswapd()
7202 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, in kswapd()
7206 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7207 highest_zoneidx = kswapd_highest_zoneidx(pgdat, in kswapd()
7209 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7210 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7230 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
7232 reclaim_order = balance_pgdat(pgdat, alloc_order, in kswapd()
7253 pg_data_t *pgdat; in wakeup_kswapd() local
7262 pgdat = zone->zone_pgdat; in wakeup_kswapd()
7263 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
7266 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
7268 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
7269 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
7271 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
7275 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
7276 (pgdat_balanced(pgdat, order, highest_zoneidx) && in wakeup_kswapd()
7277 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { in wakeup_kswapd()
7286 wakeup_kcompactd(pgdat, order, highest_zoneidx); in wakeup_kswapd()
7290 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
7292 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
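wakeup_kswapd() and the kswapd main loop talk through three per-node fields: kswapd_order and kswapd_highest_zoneidx accumulate the most demanding pending request (kswapd re-reads and resets them around each sleep), and kswapd_wait is where the wakeup lands. The wakeup is suppressed when kswapd is not actually sleeping, has exhausted MAX_RECLAIM_RETRIES, or the node is already balanced and not watermark-boosted (then only kcompactd is kicked). A single-threaded model of that merging and gating; the bool return and the stub fields are illustrative.

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_NR_ZONES        5
    #define MAX_RECLAIM_RETRIES 16

    struct node_model {
        unsigned int kswapd_order;
        int kswapd_highest_zoneidx;
        bool kswapd_sleeping;           /* stands in for waitqueue_active() */
        int kswapd_failures;
        bool balanced, boosted;
    };

    /* returns true if kswapd was actually woken */
    static bool wakeup_kswapd(struct node_model *n, unsigned int order, int zoneidx)
    {
        /* merge this request with whatever is already pending */
        if (n->kswapd_highest_zoneidx == MAX_NR_ZONES ||
            n->kswapd_highest_zoneidx < zoneidx)
            n->kswapd_highest_zoneidx = zoneidx;
        if (n->kswapd_order < order)
            n->kswapd_order = order;

        if (!n->kswapd_sleeping)
            return false;               /* kswapd is running; it will see the update */

        if (n->kswapd_failures >= MAX_RECLAIM_RETRIES ||
            (n->balanced && !n->boosted))
            return false;               /* would wakeup_kcompactd() instead */

        n->kswapd_sleeping = false;     /* wake_up_interruptible(&kswapd_wait) */
        return true;
    }

    int main(void)
    {
        struct node_model n = {
            .kswapd_highest_zoneidx = MAX_NR_ZONES,
            .kswapd_sleeping = true,
        };

        wakeup_kswapd(&n, 0, 2);                /* small request wakes kswapd */
        bool woken = wakeup_kswapd(&n, 3, 1);   /* larger order merged, no new wake */

        printf("pending order=%u zoneidx=%d woken=%d\n",
               n.kswapd_order, n.kswapd_highest_zoneidx, woken);
        return 0;
    }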
7339 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_run() local
7341 pgdat_kswapd_lock(pgdat); in kswapd_run()
7342 if (!pgdat->kswapd) { in kswapd_run()
7343 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); in kswapd_run()
7344 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
7347 nid, PTR_ERR(pgdat->kswapd)); in kswapd_run()
7349 pgdat->kswapd = NULL; in kswapd_run()
7352 pgdat_kswapd_unlock(pgdat); in kswapd_run()
7361 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_stop() local
7364 pgdat_kswapd_lock(pgdat); in kswapd_stop()
7365 kswapd = pgdat->kswapd; in kswapd_stop()
7368 pgdat->kswapd = NULL; in kswapd_stop()
7370 pgdat_kswapd_unlock(pgdat); in kswapd_stop()
7413 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) in node_unmapped_file_pages() argument
7415 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); in node_unmapped_file_pages()
7416 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + in node_unmapped_file_pages()
7417 node_page_state(pgdat, NR_ACTIVE_FILE); in node_unmapped_file_pages()
7428 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) in node_pagecache_reclaimable() argument
7440 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); in node_pagecache_reclaimable()
7442 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); in node_pagecache_reclaimable()
7446 delta += node_page_state(pgdat, NR_FILE_DIRTY); in node_pagecache_reclaimable()
7458 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in __node_reclaim() argument
7476 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
7489 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || in __node_reclaim()
7490 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { in __node_reclaim()
7496 shrink_node(pgdat, &sc); in __node_reclaim()
7511 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in node_reclaim() argument
7525 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
7526 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= in node_reclaim()
7527 pgdat->min_slab_pages) in node_reclaim()
7542 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
7545 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
7548 ret = __node_reclaim(pgdat, gfp_mask, order); in node_reclaim()
7549 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
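node_reclaim() is the allocator's opportunistic node-local reclaim: it only runs when node_pagecache_reclaimable() exceeds min_unmapped_pages or reclaimable slab exceeds min_slab_pages, it leaves remote nodes that have their own CPUs alone, and the PGDAT_RECLAIM_LOCKED bit ensures only one task reclaims a given node at a time. A sketch of that gate; the return codes and field layout are simplified.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdatomic.h>

    struct node_model {
        unsigned long pagecache_reclaimable, slab_reclaimable;
        unsigned long min_unmapped_pages, min_slab_pages;
        bool has_cpus;
        int node_id;
        atomic_flag reclaim_locked;     /* stands in for PGDAT_RECLAIM_LOCKED */
    };

    static int node_reclaim(struct node_model *n, int this_node)
    {
        int ret;

        if (n->pagecache_reclaimable <= n->min_unmapped_pages &&
            n->slab_reclaimable <= n->min_slab_pages)
            return 0;                   /* nothing worth reclaiming here */

        if (n->has_cpus && n->node_id != this_node)
            return 0;                   /* leave remote nodes to their own kswapd */

        if (atomic_flag_test_and_set(&n->reclaim_locked))
            return 0;                   /* someone else is already reclaiming */

        ret = 1;                        /* "__node_reclaim() made progress" */
        atomic_flag_clear(&n->reclaim_locked);
        return ret;
    }

    int main(void)
    {
        struct node_model n = {
            .pagecache_reclaimable = 4096, .min_unmapped_pages = 1024,
            .has_cpus = true, .node_id = 0,
            .reclaim_locked = ATOMIC_FLAG_INIT,
        };

        printf("local : %d\n", node_reclaim(&n, 0));    /* allowed */
        printf("remote: %d\n", node_reclaim(&n, 1));    /* skipped */
        return 0;
    }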