Lines matching refs: lruvec (all hits are in mm/vmscan.c)

418 static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,  in lruvec_lru_size()  argument
425 for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) { in lruvec_lru_size()
427 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); in lruvec_lru_size()
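
Note: source lines 418-427 are the body of lruvec_lru_size(), which sums the per-memcg LRU counts across the node's managed zones up to zone_idx. A minimal sketch of that shape, built only from the hits above (the for_each_managed_zone_pgdat() iterator and mem_cgroup_get_zone_lru_size() accessor are both visible there); any fallback paths the listing does not show are elided:

    static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                         int zone_idx)
    {
            struct zone *zone;
            unsigned long size = 0;
            int zid;

            /* Only managed zones contribute; zones above zone_idx are skipped. */
            for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx)
                    size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);

            return size;
    }
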
1702 static __always_inline void update_lru_sizes(struct lruvec *lruvec, in update_lru_sizes() argument
1711 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
1738 struct lruvec *lruvec, struct list_head *dst, in isolate_lru_folios() argument
1742 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_folios()
1827 update_lru_sizes(lruvec, lru, nr_zone_taken); in isolate_lru_folios()
1861 struct lruvec *lruvec; in folio_isolate_lru() local
1864 lruvec = folio_lruvec_lock_irq(folio); in folio_isolate_lru()
1865 lruvec_del_folio(lruvec, folio); in folio_isolate_lru()
1866 unlock_page_lruvec_irq(lruvec); in folio_isolate_lru()
1922 static unsigned int move_folios_to_lru(struct lruvec *lruvec, in move_folios_to_lru() argument
1935 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1937 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1959 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1962 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1972 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); in move_folios_to_lru()
1973 lruvec_add_folio(lruvec, folio); in move_folios_to_lru()
1977 workingset_age_nonresident(lruvec, nr_pages); in move_folios_to_lru()
1981 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1984 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
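
Note: the alternating spin_unlock_irq()/spin_lock_irq() pairs in move_folios_to_lru() (source lines 1935-1984) are the standard pattern of dropping lruvec->lru_lock around work that must not run under it (putting back unevictable folios, freeing folios whose refcount hit zero) and retaking it before touching the LRU lists again. A hedged sketch of that loop shape; folio_putback_lru() and folio_put_testzero() are real kernel helpers, but the stat accounting and the batched freeing details are elided:

    while (!list_empty(list)) {
            struct folio *folio = lru_to_folio(list);

            list_del(&folio->lru);
            if (unlikely(!folio_evictable(folio))) {
                    /* Putback must not run under lru_lock. */
                    spin_unlock_irq(&lruvec->lru_lock);
                    folio_putback_lru(folio);
                    spin_lock_irq(&lruvec->lru_lock);
                    continue;
            }

            if (unlikely(folio_put_testzero(folio))) {
                    /* Last reference: uncharge and free outside the lock. */
                    spin_unlock_irq(&lruvec->lru_lock);
                    /* ... free the folio ... */
                    spin_lock_irq(&lruvec->lru_lock);
                    continue;
            }

            VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
            lruvec_add_folio(lruvec, folio);
    }
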
2005 struct lruvec *lruvec, struct scan_control *sc, in shrink_inactive_list() argument
2015 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_inactive_list()
2033 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2035 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list, in shrink_inactive_list()
2042 count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); in shrink_inactive_list()
2045 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2051 lruvec_memcg(lruvec)); in shrink_inactive_list()
2053 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2054 move_folios_to_lru(lruvec, &folio_list); in shrink_inactive_list()
2056 __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc), in shrink_inactive_list()
2062 count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); in shrink_inactive_list()
2065 lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout, in shrink_inactive_list()
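
Note: taken together, the shrink_inactive_list() hits (source lines 2005-2065) trace the classic reclaim round trip: isolate folios under lruvec->lru_lock, drop the lock for the actual reclaim, then retake it to return survivors. Condensed to its control flow as a sketch (counters and tracepoints elided; the shrink_folio_list() argument list is inferred from the continuation at source line 2051 and may differ in detail):

    spin_lock_irq(&lruvec->lru_lock);
    nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
                                  &nr_scanned, sc, lru);
    spin_unlock_irq(&lruvec->lru_lock);

    /* Reclaim proper runs without the LRU lock held. */
    nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat,
                                     false, lruvec_memcg(lruvec));

    spin_lock_irq(&lruvec->lru_lock);
    move_folios_to_lru(lruvec, &folio_list);

The final lru_note_cost_unlock_irq() at source line 2065 then charges the reclaim cost and releases the lock; its trailing argument is truncated in the listing, so it is not reproduced here.
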
2126 struct lruvec *lruvec, in shrink_active_list() argument
2139 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_active_list()
2143 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2145 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold, in shrink_active_list()
2152 count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); in shrink_active_list()
2154 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2203 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2205 nr_activate = move_folios_to_lru(lruvec, &l_active); in shrink_active_list()
2206 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive); in shrink_active_list()
2209 count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); in shrink_active_list()
2213 lru_note_cost_unlock_irq(lruvec, file, 0, nr_rotated); in shrink_active_list()
2277 struct lruvec *lruvec, struct scan_control *sc) in shrink_list() argument
2281 shrink_active_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
2287 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
2318 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) in inactive_is_low() argument
2325 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); in inactive_is_low()
2326 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); in inactive_is_low()
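
Note: inactive_is_low() (source lines 2318-2326) decides whether the active list should be deflated. The two lruvec_page_state() reads above feed a well-known heuristic: the target inactive:active ratio grows with the square root of the lruvec size. A sketch of the full function, reconstructed approximately as in mainline (the listing only shows the two lines referencing lruvec):

    static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
    {
            enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
            unsigned long inactive, active;
            unsigned long inactive_ratio;
            unsigned long gb;

            inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
            active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);

            /* Target ratio scales as sqrt(10 * size-in-GB):
             * ~1:1 below 1GB, ~3:1 at 1GB, ~10:1 at 10GB, and so on. */
            gb = (inactive + active) >> (30 - PAGE_SHIFT);
            if (gb)
                    inactive_ratio = int_sqrt(10 * gb);
            else
                    inactive_ratio = 1;

            return inactive * inactive_ratio < active;
    }
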
2347 struct lruvec *target_lruvec; in prepare_scan_control()
2555 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, in get_scan_count() argument
2558 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_scan_count()
2559 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in get_scan_count()
2628 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
2675 static bool can_age_anon_pages(struct lruvec *lruvec, in can_age_anon_pages() argument
2683 return can_demote(lruvec_pgdat(lruvec)->node_id, sc, in can_age_anon_pages()
2684 lruvec_memcg(lruvec)); in can_age_anon_pages()
2711 #define DEFINE_MAX_SEQ(lruvec) \ argument
2712 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2714 #define DEFINE_MIN_SEQ(lruvec) \ argument
2716 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
2717 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
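
Note: DEFINE_MAX_SEQ() and DEFINE_MIN_SEQ() are multi-line macro definitions, and the listing only shows their lines that contain "lruvec" (the array-declaration line of DEFINE_MIN_SEQ at source line 2715 does not match and is absent). Reconstructed for readability, the definitions read approximately:

    #define DEFINE_MAX_SEQ(lruvec)                                          \
            unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)

    #define DEFINE_MIN_SEQ(lruvec)                                          \
            unsigned long min_seq[ANON_AND_FILE] = {                        \
                    READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]),      \
                    READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]),      \
            }
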
2738 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) in get_lruvec()
2744 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; in get_lruvec() local
2747 if (!lruvec->pgdat) in get_lruvec()
2748 lruvec->pgdat = pgdat; in get_lruvec()
2750 return lruvec; in get_lruvec()
2758 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) in get_swappiness() argument
2760 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in get_swappiness()
2761 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_swappiness()
2773 static int get_nr_gens(struct lruvec *lruvec, int type) in get_nr_gens() argument
2775 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; in get_nr_gens()
2778 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) in seq_is_valid() argument
2783 int n = get_nr_gens(lruvec, type); in seq_is_valid()
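
Note: get_nr_gens() (source line 2775) derives the number of live generations from the sequence counters, and seq_is_valid() (source line 2778) asserts the MGLRU invariant that each type keeps between MIN_NR_GENS and MAX_NR_GENS generations. A sketch of the validity check built on the hits above:

    static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
    {
            int type;

            for (type = 0; type < ANON_AND_FILE; type++) {
                    /* nr_gens = max_seq - min_seq[type] + 1 (source line 2775). */
                    int n = get_nr_gens(lruvec, type);

                    if (n < MIN_NR_GENS || n > MAX_NR_GENS)
                            return false;
            }

            return true;
    }
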
2908 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) in get_mm_state() argument
2910 return &lruvec->mm_state; in get_mm_state()
2917 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in get_next_mm()
2918 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in get_next_mm()
2945 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_add_mm() local
2946 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in lru_gen_add_mm()
2975 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_del_mm() local
2976 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in lru_gen_del_mm()
3034 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) in get_mm_state() argument
3050 struct lruvec *lruvec = walk->lruvec; in reset_mm_stats() local
3051 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in reset_mm_stats()
3053 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); in reset_mm_stats()
3076 struct lruvec *lruvec = walk->lruvec; in iterate_mm_list() local
3077 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in iterate_mm_list()
3079 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in iterate_mm_list()
3135 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq) in iterate_mm_list_nowalk() argument
3138 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in iterate_mm_list_nowalk()
3140 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in iterate_mm_list_nowalk()
3187 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, in read_ctrl_pos() argument
3191 struct lru_gen_folio *lrugen = &lruvec->lrugen; in read_ctrl_pos()
3206 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) in reset_ctrl_pos() argument
3209 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_ctrl_pos()
3213 lockdep_assert_held(&lruvec->lru_lock); in reset_ctrl_pos()
3283 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) in folio_inc_gen() argument
3286 struct lru_gen_folio *lrugen = &lruvec->lrugen; in folio_inc_gen()
3307 lru_gen_update_size(lruvec, folio, old_gen, new_gen); in folio_inc_gen()
3331 struct lruvec *lruvec = walk->lruvec; in reset_batch_size() local
3332 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_batch_size()
3347 if (lru_gen_is_active(lruvec, gen)) in reset_batch_size()
3349 __update_lru_size(lruvec, lru, zone, delta); in reset_batch_size()
3533 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pte_range()
3534 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range()
3535 DEFINE_MAX_SEQ(walk->lruvec); in walk_pte_range()
3609 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pmd_range_locked()
3610 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked()
3611 DEFINE_MAX_SEQ(walk->lruvec); in walk_pmd_range_locked()
3699 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in walk_pmd_range()
3723 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range()
3811 struct lruvec *lruvec = walk->lruvec; in walk_mm() local
3816 DEFINE_MAX_SEQ(lruvec); in walk_mm()
3832 spin_lock_irq(&lruvec->lru_lock); in walk_mm()
3834 spin_unlock_irq(&lruvec->lru_lock); in walk_mm()
3873 static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness) in inc_min_seq() argument
3877 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_min_seq()
3903 new_gen = folio_inc_gen(lruvec, folio, false); in inc_min_seq()
3920 reset_ctrl_pos(lruvec, type, true); in inc_min_seq()
3926 static bool try_to_inc_min_seq(struct lruvec *lruvec, int swappiness) in try_to_inc_min_seq() argument
3931 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_min_seq()
3932 DEFINE_MIN_SEQ(lruvec); in try_to_inc_min_seq()
3934 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); in try_to_inc_min_seq()
3975 reset_ctrl_pos(lruvec, type, true); in try_to_inc_min_seq()
3983 static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness) in inc_max_seq() argument
3988 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_max_seq()
3993 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
3995 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); in inc_max_seq()
4002 if (get_nr_gens(lruvec, type) != MAX_NR_GENS) in inc_max_seq()
4005 if (inc_min_seq(lruvec, type, swappiness)) in inc_max_seq()
4008 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4031 __update_lru_size(lruvec, lru, zone, delta); in inc_max_seq()
4032 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
4037 reset_ctrl_pos(lruvec, type, false); in inc_max_seq()
4043 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4048 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, in try_to_inc_max_seq() argument
4054 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_max_seq()
4055 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in try_to_inc_max_seq()
4060 return inc_max_seq(lruvec, seq, swappiness); in try_to_inc_max_seq()
4073 success = iterate_mm_list_nowalk(lruvec, seq); in try_to_inc_max_seq()
4079 success = iterate_mm_list_nowalk(lruvec, seq); in try_to_inc_max_seq()
4083 walk->lruvec = lruvec; in try_to_inc_max_seq()
4095 success = inc_max_seq(lruvec, seq, swappiness); in try_to_inc_max_seq()
4132 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) in lruvec_is_sizable() argument
4136 int swappiness = get_swappiness(lruvec, sc); in lruvec_is_sizable()
4137 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lruvec_is_sizable()
4138 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in lruvec_is_sizable()
4139 DEFINE_MAX_SEQ(lruvec); in lruvec_is_sizable()
4140 DEFINE_MIN_SEQ(lruvec); in lruvec_is_sizable()
4157 static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc, in lruvec_is_reclaimable() argument
4162 int swappiness = get_swappiness(lruvec, sc); in lruvec_is_reclaimable()
4163 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in lruvec_is_reclaimable()
4164 DEFINE_MIN_SEQ(lruvec); in lruvec_is_reclaimable()
4169 if (!lruvec_is_sizable(lruvec, sc)) in lruvec_is_reclaimable()
4173 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lruvec_is_reclaimable()
4193 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_age_node() local
4198 reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl); in lru_gen_age_node()
4243 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_look_around() local
4244 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in lru_gen_look_around()
4245 DEFINE_MAX_SEQ(lruvec); in lru_gen_look_around()
4337 static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op) in lru_gen_rotate_memcg() argument
4343 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in lru_gen_rotate_memcg()
4347 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_rotate_memcg()
4350 new = old = lruvec->lrugen.gen; in lru_gen_rotate_memcg()
4364 WRITE_ONCE(lruvec->lrugen.seg, seg); in lru_gen_rotate_memcg()
4365 WRITE_ONCE(lruvec->lrugen.gen, new); in lru_gen_rotate_memcg()
4367 hlist_nulls_del_rcu(&lruvec->lrugen.list); in lru_gen_rotate_memcg()
4370 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4372 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4393 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_online_memcg() local
4397 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_online_memcg()
4401 lruvec->lrugen.gen = gen; in lru_gen_online_memcg()
4403 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4415 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_offline_memcg() local
4417 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD); in lru_gen_offline_memcg()
4428 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_release_memcg() local
4432 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) in lru_gen_release_memcg()
4435 gen = lruvec->lrugen.gen; in lru_gen_release_memcg()
4437 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); in lru_gen_release_memcg()
4449 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_soft_reclaim() local
4452 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) in lru_gen_soft_reclaim()
4453 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD); in lru_gen_soft_reclaim()
4462 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, in sort_folio() argument
4474 struct lru_gen_folio *lrugen = &lruvec->lrugen; in sort_folio()
4480 success = lru_gen_del_folio(lruvec, folio, true); in sort_folio()
4483 lruvec_add_folio(lruvec, folio); in sort_folio()
4496 gen = folio_inc_gen(lruvec, folio, false); in sort_folio()
4511 gen = folio_inc_gen(lruvec, folio, false); in sort_folio()
4526 gen = folio_inc_gen(lruvec, folio, true); in sort_folio()
4534 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) in isolate_folio() argument
4561 success = lru_gen_del_folio(lruvec, folio, true); in isolate_folio()
4567 static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec, in scan_folios() argument
4579 struct lru_gen_folio *lrugen = &lruvec->lrugen; in scan_folios()
4580 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in scan_folios()
4584 if (get_nr_gens(lruvec, type) == MIN_NR_GENS) in scan_folios()
4606 if (sort_folio(lruvec, folio, sc, tier)) in scan_folios()
4608 else if (isolate_folio(lruvec, folio, sc)) { in scan_folios()
4650 static int get_tier_idx(struct lruvec *lruvec, int type) in get_tier_idx() argument
4660 read_ctrl_pos(lruvec, type, 0, 2, &sp); in get_tier_idx()
4662 read_ctrl_pos(lruvec, type, tier, 3, &pv); in get_tier_idx()
4670 static int get_type_to_scan(struct lruvec *lruvec, int swappiness) in get_type_to_scan() argument
4683 read_ctrl_pos(lruvec, LRU_GEN_ANON, MAX_NR_TIERS, swappiness, &sp); in get_type_to_scan()
4684 read_ctrl_pos(lruvec, LRU_GEN_FILE, MAX_NR_TIERS, MAX_SWAPPINESS - swappiness, &pv); in get_type_to_scan()
4689 static int isolate_folios(unsigned long nr_to_scan, struct lruvec *lruvec, in isolate_folios() argument
4694 int type = get_type_to_scan(lruvec, swappiness); in isolate_folios()
4698 int tier = get_tier_idx(lruvec, type); in isolate_folios()
4702 scanned = scan_folios(nr_to_scan, lruvec, sc, type, tier, list); in isolate_folios()
4712 static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec, in evict_folios() argument
4726 struct lru_gen_folio *lrugen = &lruvec->lrugen; in evict_folios()
4727 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in evict_folios()
4728 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in evict_folios()
4730 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4732 scanned = isolate_folios(nr_to_scan, lruvec, sc, swappiness, &type, &list); in evict_folios()
4734 scanned += try_to_inc_min_seq(lruvec, swappiness); in evict_folios()
4739 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4752 DEFINE_MIN_SEQ(lruvec); in evict_folios()
4768 if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type]) in evict_folios()
4772 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4774 move_folios_to_lru(lruvec, &list); in evict_folios()
4778 walk->lruvec = lruvec; in evict_folios()
4782 __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc), in evict_folios()
4791 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4803 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, in should_run_aging() argument
4808 struct lru_gen_folio *lrugen = &lruvec->lrugen; in should_run_aging()
4809 DEFINE_MIN_SEQ(lruvec); in should_run_aging()
4837 static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness) in get_nr_to_scan() argument
4841 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in get_nr_to_scan()
4842 DEFINE_MAX_SEQ(lruvec); in get_nr_to_scan()
4847 success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan); in get_nr_to_scan()
4860 return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0; in get_nr_to_scan()
4863 static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc) in should_abort_scan() argument
4883 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
4894 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) in try_to_shrink_lruvec() argument
4898 int swappiness = get_swappiness(lruvec, sc); in try_to_shrink_lruvec()
4903 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); in try_to_shrink_lruvec()
4907 delta = evict_folios(nr_to_scan, lruvec, sc, swappiness); in try_to_shrink_lruvec()
4915 if (should_abort_scan(lruvec, sc)) in try_to_shrink_lruvec()
4932 static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) in shrink_one() argument
4937 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in shrink_one()
4938 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_one()
4946 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) in shrink_one()
4952 success = try_to_shrink_lruvec(lruvec, sc); in shrink_one()
4965 if (!success && lruvec_is_sizable(lruvec, sc)) in shrink_one()
4969 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? in shrink_one()
4979 struct lruvec *lruvec; in shrink_many() local
4994 lru_gen_rotate_memcg(lruvec, op); in shrink_many()
5004 lruvec = container_of(lrugen, struct lruvec, lrugen); in shrink_many()
5005 memcg = lruvec_memcg(lruvec); in shrink_many()
5015 op = shrink_one(lruvec, sc); in shrink_many()
5019 if (should_abort_scan(lruvec, sc)) in shrink_many()
5026 lru_gen_rotate_memcg(lruvec, op); in shrink_many()
5043 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) in lru_gen_shrink_lruvec() argument
5056 if (try_to_shrink_lruvec(lruvec, sc)) in lru_gen_shrink_lruvec()
5057 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG); in lru_gen_shrink_lruvec()
5110 static bool __maybe_unused state_is_valid(struct lruvec *lruvec) in state_is_valid() argument
5112 struct lru_gen_folio *lrugen = &lruvec->lrugen; in state_is_valid()
5118 if (!list_empty(&lruvec->lists[lru])) in state_is_valid()
5133 static bool fill_evictable(struct lruvec *lruvec) in fill_evictable() argument
5141 struct list_head *head = &lruvec->lists[lru]; in fill_evictable()
5152 lruvec_del_folio(lruvec, folio); in fill_evictable()
5153 success = lru_gen_add_folio(lruvec, folio, false); in fill_evictable()
5164 static bool drain_evictable(struct lruvec *lruvec) in drain_evictable() argument
5170 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; in drain_evictable()
5181 success = lru_gen_del_folio(lruvec, folio, false); in drain_evictable()
5183 lruvec_add_folio(lruvec, folio); in drain_evictable()
5217 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_change_state() local
5219 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5221 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); in lru_gen_change_state()
5222 VM_WARN_ON_ONCE(!state_is_valid(lruvec)); in lru_gen_change_state()
5224 lruvec->lrugen.enabled = enabled; in lru_gen_change_state()
5226 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) { in lru_gen_change_state()
5227 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5229 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5232 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5380 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, in lru_gen_seq_show_full() argument
5387 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show_full()
5388 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in lru_gen_seq_show_full()
5439 struct lruvec *lruvec = v; in lru_gen_seq_show() local
5440 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show()
5441 int nid = lruvec_pgdat(lruvec)->node_id; in lru_gen_seq_show()
5442 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in lru_gen_seq_show()
5443 DEFINE_MAX_SEQ(lruvec); in lru_gen_seq_show()
5444 DEFINE_MIN_SEQ(lruvec); in lru_gen_seq_show()
5468 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lru_gen_seq_show()
5485 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); in lru_gen_seq_show()
5498 static int run_aging(struct lruvec *lruvec, unsigned long seq, in run_aging() argument
5501 DEFINE_MAX_SEQ(lruvec); in run_aging()
5506 return try_to_inc_max_seq(lruvec, max_seq, swappiness, force_scan) ? 0 : -EEXIST; in run_aging()
5509 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, in run_eviction() argument
5512 DEFINE_MAX_SEQ(lruvec); in run_eviction()
5520 DEFINE_MIN_SEQ(lruvec); in run_eviction()
5528 if (!evict_folios(nr_to_reclaim - sc->nr_reclaimed, lruvec, sc, in run_eviction()
5541 struct lruvec *lruvec; in run_cmd() local
5564 lruvec = get_lruvec(memcg, nid); in run_cmd()
5567 swappiness = get_swappiness(lruvec, sc); in run_cmd()
5573 err = run_aging(lruvec, seq, swappiness, opt); in run_cmd()
5576 err = run_eviction(lruvec, seq, sc, swappiness, opt); in run_cmd()
5705 void lru_gen_init_lruvec(struct lruvec *lruvec) in lru_gen_init_lruvec() argument
5709 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_init_lruvec()
5710 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in lru_gen_init_lruvec()
5747 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_exit_memcg() local
5748 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); in lru_gen_exit_memcg()
5750 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, in lru_gen_exit_memcg()
5751 sizeof(lruvec->lrugen.nr_pages))); in lru_gen_exit_memcg()
5753 lruvec->lrugen.list.next = LIST_POISON1; in lru_gen_exit_memcg()
5791 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) in lru_gen_shrink_lruvec() argument
5803 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) in shrink_lruvec() argument
5815 lru_gen_shrink_lruvec(lruvec, sc); in shrink_lruvec()
5819 get_scan_count(lruvec, sc, nr); in shrink_lruvec()
5850 lruvec, sc); in shrink_lruvec()
5915 if (can_age_anon_pages(lruvec, sc) && in shrink_lruvec()
5916 inactive_is_low(lruvec, LRU_INACTIVE_ANON)) in shrink_lruvec()
5917 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, in shrink_lruvec()
6014 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in shrink_node_memcgs() local
6051 shrink_lruvec(lruvec, sc); in shrink_node_memcgs()
6073 struct lruvec *target_lruvec; in shrink_node()
6351 struct lruvec *target_lruvec; in snapshot_refaults()
6424 struct lruvec *lruvec; in do_try_to_free_pages() local
6426 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, in do_try_to_free_pages()
6428 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in do_try_to_free_pages()
6660 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in mem_cgroup_shrink_node() local
6685 shrink_lruvec(lruvec, &sc); in mem_cgroup_shrink_node()
6748 struct lruvec *lruvec; in kswapd_age_node() local
6755 lruvec = mem_cgroup_lruvec(NULL, pgdat); in kswapd_age_node()
6756 if (!can_age_anon_pages(lruvec, sc)) in kswapd_age_node()
6759 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON)) in kswapd_age_node()
6764 lruvec = mem_cgroup_lruvec(memcg, pgdat); in kswapd_age_node()
6765 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, in kswapd_age_node()
6868 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); in clear_pgdat_congested() local
6870 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6871 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
7860 struct lruvec *lruvec = NULL; in check_move_unevictable_folios() local
7875 lruvec = folio_lruvec_relock_irq(folio, lruvec); in check_move_unevictable_folios()
7877 lruvec_del_folio(lruvec, folio); in check_move_unevictable_folios()
7879 lruvec_add_folio(lruvec, folio); in check_move_unevictable_folios()
7885 if (lruvec) { in check_move_unevictable_folios()
7888 unlock_page_lruvec_irq(lruvec); in check_move_unevictable_folios()
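
Note: check_move_unevictable_folios() (source lines 7860-7888) shows the batched relocking idiom: folio_lruvec_relock_irq() returns the lock for the folio's lruvec, keeping the currently held lock when consecutive folios share a lruvec, and a single trailing unlock_page_lruvec_irq() releases whatever lock the batch ended on. A hedged sketch of the loop; the per-folio statistics are elided and the unevictable test is condensed:

    struct lruvec *lruvec = NULL;
    int i;

    for (i = 0; i < folio_batch_count(fbatch); i++) {
            struct folio *folio = fbatch->folios[i];

            /* Drops/retakes the lock only when the lruvec changes. */
            lruvec = folio_lruvec_relock_irq(folio, lruvec);

            if (folio_evictable(folio) && folio_test_unevictable(folio)) {
                    lruvec_del_folio(lruvec, folio);
                    folio_clear_unevictable(folio);
                    lruvec_add_folio(lruvec, folio);
            }
    }

    if (lruvec)
            unlock_page_lruvec_irq(lruvec);
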