Lines Matching refs: folio

189 			struct folio *prev;				\
487 static inline int is_page_cache_freeable(struct folio *folio) in is_page_cache_freeable() argument
494 return folio_ref_count(folio) - folio_test_private(folio) == in is_page_cache_freeable()
495 1 + folio_nr_pages(folio); in is_page_cache_freeable()
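
The is_page_cache_freeable() hits above amount to one reference-count comparison: a folio can be freed once the only references left are the page cache's (one per page) plus the caller's, with the buffer-head reference implied by PG_private discounted. A minimal sketch of that check, using only the accessors shown; the helper name is hypothetical:

	static inline bool cache_only_refs(struct folio *folio)
	{
		/* one reference held by the caller, plus one per page from the cache */
		long expected = 1 + folio_nr_pages(folio);

		/* folio_test_private() accounts for the buffer-head reference */
		return folio_ref_count(folio) - folio_test_private(folio) == expected;
	}
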
511 struct folio *folio, int error) in handle_write_error() argument
513 folio_lock(folio); in handle_write_error()
514 if (folio_mapping(folio) == mapping) in handle_write_error()
516 folio_unlock(folio); in handle_write_error()
621 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, in __acct_reclaim_writeback() argument
626 node_stat_add_folio(folio, NR_THROTTLED_WRITTEN); in __acct_reclaim_writeback()
654 static pageout_t writeout(struct folio *folio, struct address_space *mapping, in writeout() argument
659 folio_set_reclaim(folio); in writeout()
667 res = shmem_writeout(folio, plug, folio_list); in writeout()
669 res = swap_writeout(folio, plug); in writeout()
672 handle_write_error(mapping, folio, res); in writeout()
674 folio_clear_reclaim(folio); in writeout()
679 if (!folio_test_writeback(folio)) in writeout()
680 folio_clear_reclaim(folio); in writeout()
682 trace_mm_vmscan_write_folio(folio); in writeout()
683 node_stat_add_folio(folio, NR_VMSCAN_WRITE); in writeout()
690 static pageout_t pageout(struct folio *folio, struct address_space *mapping, in pageout() argument
707 if (!is_page_cache_freeable(folio)) in pageout()
714 if (folio_test_private(folio)) { in pageout()
715 if (try_to_free_buffers(folio)) { in pageout()
716 folio_clear_dirty(folio); in pageout()
724 if (!shmem_mapping(mapping) && !folio_test_anon(folio)) in pageout()
726 if (!folio_clear_dirty_for_io(folio)) in pageout()
728 return writeout(folio, mapping, plug, folio_list); in pageout()
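
Read together, the pageout() lines form a decision ladder: keep the folio if it is not freeable, strip buffer heads from a truncated folio that still carries private data, refuse to write regular file pages from reclaim context (they are left to the flusher threads), and only then clear the dirty bit for I/O and hand the folio to writeout(). A condensed reading of that ladder, assuming vmscan's pageout_t codes (PAGE_KEEP, PAGE_ACTIVATE, PAGE_SUCCESS, PAGE_CLEAN); tracepoints and warnings are omitted and lines not matched above may differ in order:

	static pageout_t pageout_sketch(struct folio *folio, struct address_space *mapping,
					struct swap_iocb **plug, struct list_head *folio_list)
	{
		if (!is_page_cache_freeable(folio))
			return PAGE_KEEP;

		if (!mapping) {
			/* truncated folio that still carries buffer heads */
			if (folio_test_private(folio) && try_to_free_buffers(folio)) {
				folio_clear_dirty(folio);
				return PAGE_CLEAN;
			}
			return PAGE_KEEP;
		}

		/* reclaim only writes anon and shmem folios; file pages go via flushers */
		if (!shmem_mapping(mapping) && !folio_test_anon(folio))
			return PAGE_ACTIVATE;

		if (!folio_clear_dirty_for_io(folio))
			return PAGE_CLEAN;

		return writeout(folio, mapping, plug, folio_list);
	}
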
735 static int __remove_mapping(struct address_space *mapping, struct folio *folio, in __remove_mapping() argument
741 BUG_ON(!folio_test_locked(folio)); in __remove_mapping()
742 BUG_ON(mapping != folio_mapping(folio)); in __remove_mapping()
744 if (!folio_test_swapcache(folio)) in __remove_mapping()
772 refcount = 1 + folio_nr_pages(folio); in __remove_mapping()
773 if (!folio_ref_freeze(folio, refcount)) in __remove_mapping()
776 if (unlikely(folio_test_dirty(folio))) { in __remove_mapping()
777 folio_ref_unfreeze(folio, refcount); in __remove_mapping()
781 if (folio_test_swapcache(folio)) { in __remove_mapping()
782 swp_entry_t swap = folio->swap; in __remove_mapping()
785 shadow = workingset_eviction(folio, target_memcg); in __remove_mapping()
786 __delete_from_swap_cache(folio, swap, shadow); in __remove_mapping()
787 memcg1_swapout(folio, swap); in __remove_mapping()
789 put_swap_folio(folio, swap); in __remove_mapping()
791 void (*free_folio)(struct folio *); in __remove_mapping()
810 if (reclaimed && folio_is_file_lru(folio) && in __remove_mapping()
812 shadow = workingset_eviction(folio, target_memcg); in __remove_mapping()
813 __filemap_remove_folio(folio, shadow); in __remove_mapping()
820 free_folio(folio); in __remove_mapping()
827 if (!folio_test_swapcache(folio)) in __remove_mapping()
844 long remove_mapping(struct address_space *mapping, struct folio *folio) in remove_mapping() argument
846 if (__remove_mapping(mapping, folio, false, NULL)) { in remove_mapping()
852 folio_ref_unfreeze(folio, 1); in remove_mapping()
853 return folio_nr_pages(folio); in remove_mapping()
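
The __remove_mapping() hits show the freeze-then-delete protocol: with the mapping's xarray locked, the expected references (one per page for the cache, plus the caller's) are frozen in a single step; if freezing fails, or the folio was dirtied in the meantime, the references are handed back and the folio is kept. remove_mapping() reuses the same path and unfreezes to a single reference for its caller. A stripped-down sketch of just the refcount dance; the helper name is hypothetical and all locking, swap-cache and shadow-entry handling is elided:

	static bool freeze_cache_refs(struct folio *folio)
	{
		long refcount = 1 + folio_nr_pages(folio);

		if (!folio_ref_freeze(folio, refcount))
			return false;		/* someone else still holds a reference */

		if (unlikely(folio_test_dirty(folio))) {
			/* dirtied under us: give the references back, keep the folio */
			folio_ref_unfreeze(folio, refcount);
			return false;
		}
		return true;			/* safe to delete from the mapping */
	}
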
867 void folio_putback_lru(struct folio *folio) in folio_putback_lru() argument
869 folio_add_lru(folio); in folio_putback_lru()
870 folio_put(folio); /* drop ref from isolate */ in folio_putback_lru()
887 static bool lru_gen_set_refs(struct folio *folio) in lru_gen_set_refs() argument
890 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) { in lru_gen_set_refs()
891 set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); in lru_gen_set_refs()
895 set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset)); in lru_gen_set_refs()
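
lru_gen_set_refs() (the CONFIG_LRU_GEN variant; the definition at line 899 just below is presumably the stub for kernels without it) promotes a folio in two steps: a folio seen for the first time only gets PG_referenced and is not promoted, while a folio that was already referenced or in the working set gets PG_workingset and is reported as worth activating. Reconstructed from the matched lines; intervening lines that do not mention "folio" are not visible in this listing:

	static bool lru_gen_set_refs(struct folio *folio)
	{
		/* first access: remember it via PG_referenced, do not activate yet */
		if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
			set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
			return false;
		}

		/* repeated access: mark as working set and tell the caller to activate */
		set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset));
		return true;
	}
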
899 static bool lru_gen_set_refs(struct folio *folio) in lru_gen_set_refs() argument
905 static enum folio_references folio_check_references(struct folio *folio, in folio_check_references() argument
911 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, in folio_check_references()
934 return lru_gen_set_refs(folio) ? FOLIOREF_ACTIVATE : FOLIOREF_KEEP; in folio_check_references()
937 referenced_folio = folio_test_clear_referenced(folio); in folio_check_references()
954 folio_set_referenced(folio); in folio_check_references()
962 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) in folio_check_references()
969 if (referenced_folio && folio_is_file_lru(folio)) in folio_check_references()
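
folio_check_references() combines the rmap walk (folio_referenced()) with the folio's own PG_referenced bit to classify a reclaim candidate: on an MGLRU kernel referenced PTEs defer to lru_gen_set_refs(); on the classic LRU a folio referenced again is activated, a folio referenced for the first time is kept with PG_referenced set for a second trip around the inactive list, executable file folios are activated after first use, and a clean unreferenced file folio may be reclaimed without writeback. One plausible condensation of the classic-LRU branch, based on the matched lines and the usual layout of this function (the VM_LOCKED and rmap-lock-contention cases are omitted, and the helper name is hypothetical):

	static enum folio_references check_refs_sketch(struct folio *folio,
						       struct scan_control *sc)
	{
		unsigned long vm_flags;
		int referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
						       &vm_flags);
		bool referenced_folio = folio_test_clear_referenced(folio);

		if (referenced_ptes) {
			/* spare it for another trip around the inactive list */
			folio_set_referenced(folio);

			if (referenced_folio || referenced_ptes > 1)
				return FOLIOREF_ACTIVATE;

			/* activate file-backed executable folios after first use */
			if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
				return FOLIOREF_ACTIVATE;

			return FOLIOREF_KEEP;
		}

		/* reclaim if clean; dirty folios are deferred to writeback */
		if (referenced_folio && folio_is_file_lru(folio))
			return FOLIOREF_RECLAIM_CLEAN;

		return FOLIOREF_RECLAIM;
	}
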
976 static void folio_check_dirty_writeback(struct folio *folio, in folio_check_dirty_writeback() argument
988 if (!folio_is_file_lru(folio) || in folio_check_dirty_writeback()
989 (folio_test_anon(folio) && !folio_test_swapbacked(folio))) { in folio_check_dirty_writeback()
996 *dirty = folio_test_dirty(folio); in folio_check_dirty_writeback()
997 *writeback = folio_test_writeback(folio); in folio_check_dirty_writeback()
1000 if (!folio_test_private(folio)) in folio_check_dirty_writeback()
1003 mapping = folio_mapping(folio); in folio_check_dirty_writeback()
1005 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); in folio_check_dirty_writeback()
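
folio_check_dirty_writeback() is the stall-accounting probe used early in shrink_folio_list(): anonymous folios without swap backing cannot be cleaned by the flusher threads, so they always report clean; everything else reports the folio's dirty and writeback bits, optionally refined by the filesystem's is_dirty_writeback() hook when the folio carries private data. Reconstructed from the matched lines; the NULL checks on lines that do not mention "folio" are filled in as assumptions:

	static void dirty_writeback_sketch(struct folio *folio,
					   bool *dirty, bool *writeback)
	{
		struct address_space *mapping;

		/* anon folios without swap backing cannot be written back here */
		if (!folio_is_file_lru(folio) ||
		    (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
			*dirty = false;
			*writeback = false;
			return;
		}

		*dirty = folio_test_dirty(folio);
		*writeback = folio_test_writeback(folio);

		/* some filesystems track these states in folio private data */
		if (!folio_test_private(folio))
			return;

		mapping = folio_mapping(folio);
		if (mapping && mapping->a_ops->is_dirty_writeback)
			mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
	}
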
1008 static struct folio *alloc_demote_folio(struct folio *src, in alloc_demote_folio()
1011 struct folio *dst; in alloc_demote_folio()
1079 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) in may_enter_fs() argument
1083 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO)) in may_enter_fs()
1092 return !data_race(folio_swap_flags(folio) & SWP_FS_OPS); in may_enter_fs()
1119 struct folio *folio; in shrink_folio_list() local
1126 folio = lru_to_folio(folio_list); in shrink_folio_list()
1127 list_del(&folio->lru); in shrink_folio_list()
1129 if (!folio_trylock(folio)) in shrink_folio_list()
1132 if (folio_contain_hwpoisoned_page(folio)) { in shrink_folio_list()
1138 if (folio_test_large(folio)) in shrink_folio_list()
1141 unmap_poisoned_folio(folio, folio_pfn(folio), false); in shrink_folio_list()
1142 folio_unlock(folio); in shrink_folio_list()
1143 folio_put(folio); in shrink_folio_list()
1147 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); in shrink_folio_list()
1149 nr_pages = folio_nr_pages(folio); in shrink_folio_list()
1154 if (unlikely(!folio_evictable(folio))) in shrink_folio_list()
1157 if (!sc->may_unmap && folio_mapped(folio)) in shrink_folio_list()
1165 folio_check_dirty_writeback(folio, &dirty, &writeback); in shrink_folio_list()
1178 if (writeback && folio_test_reclaim(folio)) in shrink_folio_list()
1227 if (folio_test_writeback(folio)) { in shrink_folio_list()
1228 mapping = folio_mapping(folio); in shrink_folio_list()
1232 folio_test_reclaim(folio) && in shrink_folio_list()
1239 !folio_test_reclaim(folio) || in shrink_folio_list()
1240 !may_enter_fs(folio, sc->gfp_mask) || in shrink_folio_list()
1257 folio_set_reclaim(folio); in shrink_folio_list()
1263 folio_unlock(folio); in shrink_folio_list()
1264 folio_wait_writeback(folio); in shrink_folio_list()
1266 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1272 references = folio_check_references(folio, sc); in shrink_folio_list()
1290 (thp_migration_supported() || !folio_test_large(folio))) { in shrink_folio_list()
1291 list_add(&folio->lru, &demote_folios); in shrink_folio_list()
1292 folio_unlock(folio); in shrink_folio_list()
1301 if (folio_test_anon(folio) && folio_test_swapbacked(folio)) { in shrink_folio_list()
1302 if (!folio_test_swapcache(folio)) { in shrink_folio_list()
1305 if (folio_maybe_dma_pinned(folio)) in shrink_folio_list()
1307 if (folio_test_large(folio)) { in shrink_folio_list()
1309 if (!can_split_folio(folio, 1, NULL)) in shrink_folio_list()
1315 if (data_race(!list_empty(&folio->_deferred_list) && in shrink_folio_list()
1316 folio_test_partially_mapped(folio)) && in shrink_folio_list()
1317 split_folio_to_list(folio, folio_list)) in shrink_folio_list()
1320 if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) { in shrink_folio_list()
1321 int __maybe_unused order = folio_order(folio); in shrink_folio_list()
1323 if (!folio_test_large(folio)) in shrink_folio_list()
1326 if (split_folio_to_list(folio, folio_list)) in shrink_folio_list()
1330 count_memcg_folio_events(folio, in shrink_folio_list()
1336 if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) in shrink_folio_list()
1350 folio_mark_dirty(folio); in shrink_folio_list()
1359 if ((nr_pages > 1) && !folio_test_large(folio)) { in shrink_folio_list()
1368 if (folio_mapped(folio)) { in shrink_folio_list()
1370 bool was_swapbacked = folio_test_swapbacked(folio); in shrink_folio_list()
1372 if (folio_test_pmd_mappable(folio)) in shrink_folio_list()
1386 if (folio_test_large(folio)) in shrink_folio_list()
1389 try_to_unmap(folio, flags); in shrink_folio_list()
1390 if (folio_mapped(folio)) { in shrink_folio_list()
1393 folio_test_swapbacked(folio)) in shrink_folio_list()
1406 if (folio_maybe_dma_pinned(folio)) in shrink_folio_list()
1409 mapping = folio_mapping(folio); in shrink_folio_list()
1410 if (folio_test_dirty(folio)) { in shrink_folio_list()
1422 if (folio_is_file_lru(folio) && in shrink_folio_list()
1424 !folio_test_reclaim(folio) || in shrink_folio_list()
1432 node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE, in shrink_folio_list()
1434 folio_set_reclaim(folio); in shrink_folio_list()
1441 if (!may_enter_fs(folio, sc->gfp_mask)) in shrink_folio_list()
1452 switch (pageout(folio, mapping, &plug, folio_list)) { in shrink_folio_list()
1461 if (nr_pages > 1 && !folio_test_large(folio)) { in shrink_folio_list()
1467 if (nr_pages > 1 && !folio_test_large(folio)) { in shrink_folio_list()
1473 if (folio_test_writeback(folio)) in shrink_folio_list()
1475 if (folio_test_dirty(folio)) in shrink_folio_list()
1482 if (!folio_trylock(folio)) in shrink_folio_list()
1484 if (folio_test_dirty(folio) || in shrink_folio_list()
1485 folio_test_writeback(folio)) in shrink_folio_list()
1487 mapping = folio_mapping(folio); in shrink_folio_list()
1517 if (folio_needs_release(folio)) { in shrink_folio_list()
1518 if (!filemap_release_folio(folio, sc->gfp_mask)) in shrink_folio_list()
1520 if (!mapping && folio_ref_count(folio) == 1) { in shrink_folio_list()
1521 folio_unlock(folio); in shrink_folio_list()
1522 if (folio_put_testzero(folio)) in shrink_folio_list()
1538 if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { in shrink_folio_list()
1540 if (!folio_ref_freeze(folio, 1)) in shrink_folio_list()
1551 count_memcg_folio_events(folio, PGLAZYFREED, nr_pages); in shrink_folio_list()
1552 } else if (!mapping || !__remove_mapping(mapping, folio, true, in shrink_folio_list()
1556 folio_unlock(folio); in shrink_folio_list()
1564 folio_unqueue_deferred_split(folio); in shrink_folio_list()
1565 if (folio_batch_add(&free_folios, folio) == 0) { in shrink_folio_list()
1583 if (folio_test_swapcache(folio) && in shrink_folio_list()
1584 (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio))) in shrink_folio_list()
1585 folio_free_swap(folio); in shrink_folio_list()
1586 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); in shrink_folio_list()
1587 if (!folio_test_mlocked(folio)) { in shrink_folio_list()
1588 int type = folio_is_file_lru(folio); in shrink_folio_list()
1589 folio_set_active(folio); in shrink_folio_list()
1591 count_memcg_folio_events(folio, PGACTIVATE, nr_pages); in shrink_folio_list()
1594 folio_unlock(folio); in shrink_folio_list()
1596 list_add(&folio->lru, &ret_folios); in shrink_folio_list()
1597 VM_BUG_ON_FOLIO(folio_test_lru(folio) || in shrink_folio_list()
1598 folio_test_unevictable(folio), folio); in shrink_folio_list()
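
The shrink_folio_list() hits above trace the spine of reclaim: each folio is unlinked from the list, trylocked, screened (hwpoison, evictability, sc->may_unmap), classified by folio_check_references(), given swap space and possibly split if anonymous, unmapped with try_to_unmap(), written out if dirty, stripped of private data, and finally detached from its mapping and batched for freeing; anything that fails along the way is activated or put back on the return list. A heavily condensed, hypothetical skeleton of that loop; splitting, swap allocation, dirty handling, demotion and statistics are all elided, and the happy path is collapsed into one condition:

	static unsigned int reclaim_loop_sketch(struct list_head *folio_list,
						struct scan_control *sc)
	{
		unsigned int nr_reclaimed = 0;
		struct folio_batch free_folios;
		LIST_HEAD(ret_folios);

		folio_batch_init(&free_folios);

		while (!list_empty(folio_list)) {
			struct folio *folio = lru_to_folio(folio_list);
			struct address_space *mapping;

			list_del(&folio->lru);
			if (!folio_trylock(folio)) {
				list_add(&folio->lru, &ret_folios);	/* keep */
				continue;
			}

			mapping = folio_mapping(folio);
			if (folio_evictable(folio) && mapping &&
			    folio_check_references(folio, sc) == FOLIOREF_RECLAIM &&
			    !folio_mapped(folio) && !folio_test_dirty(folio) &&
			    __remove_mapping(mapping, folio, true,
					     sc->target_mem_cgroup)) {
				/* refcount is frozen at zero: unlock and batch-free */
				folio_unlock(folio);
				nr_reclaimed += folio_nr_pages(folio);
				folio_unqueue_deferred_split(folio);
				if (folio_batch_add(&free_folios, folio) == 0) {
					mem_cgroup_uncharge_folios(&free_folios);
					free_unref_folios(&free_folios);
				}
				continue;
			}

			/* could not reclaim this time: keep it for a later pass */
			folio_unlock(folio);
			list_add(&folio->lru, &ret_folios);
		}

		mem_cgroup_uncharge_folios(&free_folios);
		free_unref_folios(&free_folios);
		list_splice(&ret_folios, folio_list);
		return nr_reclaimed;
	}
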
1656 struct folio *folio, *next; in reclaim_clean_pages_from_list() local
1660 list_for_each_entry_safe(folio, next, folio_list, lru) { in reclaim_clean_pages_from_list()
1662 if (page_has_movable_ops(&folio->page)) in reclaim_clean_pages_from_list()
1664 if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) && in reclaim_clean_pages_from_list()
1665 !folio_test_dirty(folio) && !folio_test_unevictable(folio)) { in reclaim_clean_pages_from_list()
1666 folio_clear_active(folio); in reclaim_clean_pages_from_list()
1667 list_move(&folio->lru, &clean_folios); in reclaim_clean_pages_from_list()
1753 struct folio *folio; in isolate_lru_folios() local
1755 folio = lru_to_folio(src); in isolate_lru_folios()
1756 prefetchw_prev_lru_folio(folio, src, flags); in isolate_lru_folios()
1758 nr_pages = folio_nr_pages(folio); in isolate_lru_folios()
1763 (folio_zonenum(folio) > sc->reclaim_idx)) { in isolate_lru_folios()
1764 nr_skipped[folio_zonenum(folio)] += nr_pages; in isolate_lru_folios()
1779 if (!folio_test_lru(folio)) in isolate_lru_folios()
1781 if (!sc->may_unmap && folio_mapped(folio)) in isolate_lru_folios()
1789 if (unlikely(!folio_try_get(folio))) in isolate_lru_folios()
1792 if (!folio_test_clear_lru(folio)) { in isolate_lru_folios()
1794 folio_put(folio); in isolate_lru_folios()
1799 nr_zone_taken[folio_zonenum(folio)] += nr_pages; in isolate_lru_folios()
1802 list_move(&folio->lru, move_to); in isolate_lru_folios()
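
isolate_lru_folios() shows the safe way to take a folio off an LRU under the lruvec lock: folios from zones above sc->reclaim_idx are skipped and counted, then the scanner takes a speculative reference with folio_try_get() and claims exclusive ownership by clearing PG_lru; only when both succeed is the folio moved onto the private list. The core claim step as a sketch; the helper name is hypothetical and the caller is assumed to hold the lruvec lock:

	static bool claim_from_lru(struct folio *folio, struct list_head *dst)
	{
		/* speculative reference: the folio may be freed or migrated under us */
		if (unlikely(!folio_try_get(folio)))
			return false;

		/* only one caller can win the PG_lru bit */
		if (!folio_test_clear_lru(folio)) {
			folio_put(folio);
			return false;
		}

		list_move(&folio->lru, dst);
		return true;
	}
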
1854 bool folio_isolate_lru(struct folio *folio) in folio_isolate_lru() argument
1858 VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio); in folio_isolate_lru()
1860 if (folio_test_clear_lru(folio)) { in folio_isolate_lru()
1863 folio_get(folio); in folio_isolate_lru()
1864 lruvec = folio_lruvec_lock_irq(folio); in folio_isolate_lru()
1865 lruvec_del_folio(lruvec, folio); in folio_isolate_lru()
1930 struct folio *folio = lru_to_folio(list); in move_folios_to_lru() local
1932 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in move_folios_to_lru()
1933 list_del(&folio->lru); in move_folios_to_lru()
1934 if (unlikely(!folio_evictable(folio))) { in move_folios_to_lru()
1936 folio_putback_lru(folio); in move_folios_to_lru()
1952 folio_set_lru(folio); in move_folios_to_lru()
1954 if (unlikely(folio_put_testzero(folio))) { in move_folios_to_lru()
1955 __folio_clear_lru_flags(folio); in move_folios_to_lru()
1957 folio_unqueue_deferred_split(folio); in move_folios_to_lru()
1958 if (folio_batch_add(&free_folios, folio) == 0) { in move_folios_to_lru()
1972 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); in move_folios_to_lru()
1973 lruvec_add_folio(lruvec, folio); in move_folios_to_lru()
1974 nr_pages = folio_nr_pages(folio); in move_folios_to_lru()
1976 if (folio_test_active(folio)) in move_folios_to_lru()
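
move_folios_to_lru() undoes the isolation: every folio gets PG_lru back, the isolation reference is dropped, and if that was the last reference the folio is freed through the folio batch instead of going back on a list; folios that became unevictable are rerouted through folio_putback_lru(). A hypothetical single-folio version of the put-back step; in the real loop the lruvec lock is dropped around folio_putback_lru() and the batch freeing, which is not shown here:

	static unsigned int putback_one(struct lruvec *lruvec, struct folio *folio,
					struct folio_batch *free_folios)
	{
		if (unlikely(!folio_evictable(folio))) {
			/* belongs on the unevictable list; drops the isolation ref */
			folio_putback_lru(folio);
			return 0;
		}

		folio_set_lru(folio);

		/* drop the isolation reference taken by folio_try_get() */
		if (unlikely(folio_put_testzero(folio))) {
			__folio_clear_lru_flags(folio);
			folio_unqueue_deferred_split(folio);
			if (folio_batch_add(free_folios, folio) == 0) {
				mem_cgroup_uncharge_folios(free_folios);
				free_unref_folios(free_folios);
			}
			return 0;
		}

		VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
		lruvec_add_folio(lruvec, folio);
		return folio_nr_pages(folio);
	}
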
2157 struct folio *folio; in shrink_active_list() local
2160 folio = lru_to_folio(&l_hold); in shrink_active_list()
2161 list_del(&folio->lru); in shrink_active_list()
2163 if (unlikely(!folio_evictable(folio))) { in shrink_active_list()
2164 folio_putback_lru(folio); in shrink_active_list()
2169 if (folio_needs_release(folio) && in shrink_active_list()
2170 folio_trylock(folio)) { in shrink_active_list()
2171 filemap_release_folio(folio, 0); in shrink_active_list()
2172 folio_unlock(folio); in shrink_active_list()
2177 if (folio_referenced(folio, 0, sc->target_mem_cgroup, in shrink_active_list()
2188 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) { in shrink_active_list()
2189 nr_rotated += folio_nr_pages(folio); in shrink_active_list()
2190 list_add(&folio->lru, &l_active); in shrink_active_list()
2195 folio_clear_active(folio); /* we are de-activating */ in shrink_active_list()
2196 folio_set_workingset(folio); in shrink_active_list()
2197 list_add(&folio->lru, &l_inactive); in shrink_active_list()
2223 struct folio *folio; in reclaim_folio_list() local
2234 folio = lru_to_folio(folio_list); in reclaim_folio_list()
2235 list_del(&folio->lru); in reclaim_folio_list()
2236 folio_putback_lru(folio); in reclaim_folio_list()
2257 struct folio *folio = lru_to_folio(folio_list); in reclaim_pages() local
2259 if (nid == folio_nid(folio)) { in reclaim_pages()
2260 folio_clear_active(folio); in reclaim_pages()
2261 list_move(&folio->lru, &node_folio_list); in reclaim_pages()
3258 static int folio_update_gen(struct folio *folio, int gen) in folio_update_gen() argument
3260 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_update_gen()
3265 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) { in folio_update_gen()
3266 set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); in folio_update_gen()
3277 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_update_gen()
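
folio_update_gen() stores the folio's MGLRU generation directly in folio->flags with a lock-free retry loop: the flags are re-read on every failed try_cmpxchg(), a folio whose generation field is zero (already isolated) is left alone, and the previous generation is returned so the walker can batch the lruvec size updates. A simplified sketch of the retry pattern; the exact mask handling and the PG_referenced fallback for not-yet-workingset folios are glossed over, and the generation is assumed to be stored as gen + 1 in the LRU_GEN_MASK field:

	static int update_gen_sketch(struct folio *folio, int gen)
	{
		unsigned long new_flags, old_flags = READ_ONCE(folio->flags);

		do {
			/* field is zero when lru_gen_del_folio() isolated the folio */
			if (!(old_flags & LRU_GEN_MASK))
				return -1;

			new_flags = old_flags & ~LRU_GEN_MASK;
			new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
			/* retry if another CPU changed the flags in the meantime */
		} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));

		/* return the previous generation so the caller can batch updates */
		return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
	}
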
3283 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) in folio_inc_gen() argument
3285 int type = folio_is_file_lru(folio); in folio_inc_gen()
3288 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_gen()
3290 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); in folio_inc_gen()
3305 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_gen()
3307 lru_gen_update_size(lruvec, folio, old_gen, new_gen); in folio_inc_gen()
3312 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio, in update_batch_size() argument
3315 int type = folio_is_file_lru(folio); in update_batch_size()
3316 int zone = folio_zonenum(folio); in update_batch_size()
3317 int delta = folio_nr_pages(folio); in update_batch_size()
3472 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, in get_pfn_folio()
3475 struct folio *folio = pfn_folio(pfn); in get_pfn_folio() local
3477 if (folio_lru_gen(folio) < 0) in get_pfn_folio()
3480 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
3483 if (folio_memcg(folio) != memcg) in get_pfn_folio()
3486 return folio; in get_pfn_folio()
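
get_pfn_folio() is the cheap filter used by the MGLRU page-table walkers: a PFN's folio is only worth aging if it is still on an MGLRU list (folio_lru_gen() >= 0), sits on the node being walked, and is charged to the memcg under reclaim; anything else is skipped. Reconstructed from the matched lines; parameters or checks on lines that do not mention "folio" are not visible here and may be missing:

	static struct folio *get_pfn_folio_sketch(unsigned long pfn,
						  struct mem_cgroup *memcg,
						  struct pglist_data *pgdat)
	{
		struct folio *folio = pfn_folio(pfn);

		/* not on an MGLRU list: isolated, or never added */
		if (folio_lru_gen(folio) < 0)
			return NULL;

		/* the walk is per node and per memcg */
		if (folio_nid(folio) != pgdat->node_id)
			return NULL;
		if (folio_memcg(folio) != memcg)
			return NULL;

		return folio;
	}
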
3497 static void walk_update_folio(struct lru_gen_mm_walk *walk, struct folio *folio, in walk_update_folio() argument
3502 if (!folio) in walk_update_folio()
3505 if (dirty && !folio_test_dirty(folio) && in walk_update_folio()
3506 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && in walk_update_folio()
3507 !folio_test_swapcache(folio))) in walk_update_folio()
3508 folio_mark_dirty(folio); in walk_update_folio()
3511 old_gen = folio_update_gen(folio, new_gen); in walk_update_folio()
3513 update_batch_size(walk, folio, old_gen, new_gen); in walk_update_folio()
3514 } else if (lru_gen_set_refs(folio)) { in walk_update_folio()
3515 old_gen = folio_lru_gen(folio); in walk_update_folio()
3517 folio_activate(folio); in walk_update_folio()
3531 struct folio *last = NULL; in walk_pte_range()
3557 struct folio *folio; in walk_pte_range() local
3567 folio = get_pfn_folio(pfn, memcg, pgdat); in walk_pte_range()
3568 if (!folio) in walk_pte_range()
3574 if (last != folio) { in walk_pte_range()
3577 last = folio; in walk_pte_range()
3607 struct folio *last = NULL; in walk_pmd_range_locked()
3639 struct folio *folio; in walk_pmd_range_locked() local
3658 folio = get_pfn_folio(pfn, memcg, pgdat); in walk_pmd_range_locked()
3659 if (!folio) in walk_pmd_range_locked()
3665 if (last != folio) { in walk_pmd_range_locked()
3668 last = folio; in walk_pmd_range_locked()
3894 struct folio *folio = lru_to_folio(head); in inc_min_seq() local
3895 int refs = folio_lru_refs(folio); in inc_min_seq()
3896 bool workingset = folio_test_workingset(folio); in inc_min_seq()
3898 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in inc_min_seq()
3899 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); in inc_min_seq()
3900 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); in inc_min_seq()
3901 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); in inc_min_seq()
3903 new_gen = folio_inc_gen(lruvec, folio, false); in inc_min_seq()
3904 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); in inc_min_seq()
3909 int delta = folio_nr_pages(folio); in inc_min_seq()
4235 struct folio *last = NULL; in lru_gen_look_around()
4240 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around() local
4241 struct mem_cgroup *memcg = folio_memcg(folio); in lru_gen_look_around()
4242 struct pglist_data *pgdat = folio_pgdat(folio); in lru_gen_look_around()
4249 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); in lru_gen_look_around()
4293 folio = get_pfn_folio(pfn, memcg, pgdat); in lru_gen_look_around()
4294 if (!folio) in lru_gen_look_around()
4300 if (last != folio) { in lru_gen_look_around()
4303 last = folio; in lru_gen_look_around()
4462 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, in sort_folio() argument
4467 int gen = folio_lru_gen(folio); in sort_folio()
4468 int type = folio_is_file_lru(folio); in sort_folio()
4469 int zone = folio_zonenum(folio); in sort_folio()
4470 int delta = folio_nr_pages(folio); in sort_folio()
4471 int refs = folio_lru_refs(folio); in sort_folio()
4472 bool workingset = folio_test_workingset(folio); in sort_folio()
4476 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); in sort_folio()
4479 if (!folio_evictable(folio)) { in sort_folio()
4480 success = lru_gen_del_folio(lruvec, folio, true); in sort_folio()
4481 VM_WARN_ON_ONCE_FOLIO(!success, folio); in sort_folio()
4482 folio_set_unevictable(folio); in sort_folio()
4483 lruvec_add_folio(lruvec, folio); in sort_folio()
4490 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4496 gen = folio_inc_gen(lruvec, folio, false); in sort_folio()
4497 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4510 if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { in sort_folio()
4511 gen = folio_inc_gen(lruvec, folio, false); in sort_folio()
4512 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4516 dirty = folio_test_dirty(folio); in sort_folio()
4517 writeback = folio_test_writeback(folio); in sort_folio()
4526 gen = folio_inc_gen(lruvec, folio, true); in sort_folio()
4527 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
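
sort_folio() decides at eviction time whether a candidate can stay where it is instead of being isolated: unevictable folios are moved to the unevictable LRU, folios whose reference tier is still hot or that come from an ineligible zone are bumped a generation and kept, and dirty or under-writeback folios are marked for reclaim and deferred. The step it keeps repeating is "bump the generation, then re-queue the folio on the list for its new generation"; a hypothetical helper for that step, assuming lruvec->lrugen is the struct lru_gen_folio whose folios[][][] lists appear in the matched lines:

	static void keep_and_age(struct lruvec *lruvec, struct folio *folio,
				 bool reclaiming, bool tail)
	{
		struct lru_gen_folio *lrugen = &lruvec->lrugen;
		int type = folio_is_file_lru(folio);
		int zone = folio_zonenum(folio);
		int gen = folio_inc_gen(lruvec, folio, reclaiming);

		/* rotated folios go to the tail, promoted ones to the head */
		if (tail)
			list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
		else
			list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
	}
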
4534 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) in isolate_folio() argument
4540 (folio_test_dirty(folio) || in isolate_folio()
4541 (folio_test_anon(folio) && !folio_test_swapcache(folio)))) in isolate_folio()
4545 if (!folio_try_get(folio)) in isolate_folio()
4549 if (!folio_test_clear_lru(folio)) { in isolate_folio()
4550 folio_put(folio); in isolate_folio()
4555 if (!folio_test_referenced(folio)) in isolate_folio()
4556 set_mask_bits(&folio->flags, LRU_REFS_MASK, 0); in isolate_folio()
4559 folio_clear_reclaim(folio); in isolate_folio()
4561 success = lru_gen_del_folio(lruvec, folio, true); in isolate_folio()
4562 VM_WARN_ON_ONCE_FOLIO(!success, folio); in isolate_folio()
4596 struct folio *folio = lru_to_folio(head); in scan_folios() local
4597 int delta = folio_nr_pages(folio); in scan_folios()
4599 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in scan_folios()
4600 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); in scan_folios()
4601 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); in scan_folios()
4602 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); in scan_folios()
4606 if (sort_folio(lruvec, folio, sc, tier)) in scan_folios()
4608 else if (isolate_folio(lruvec, folio, sc)) { in scan_folios()
4609 list_add(&folio->lru, list); in scan_folios()
4612 list_move(&folio->lru, &moved); in scan_folios()
4720 struct folio *folio; in evict_folios() local
4721 struct folio *next; in evict_folios()
4751 list_for_each_entry_safe_reverse(folio, next, &list, lru) { in evict_folios()
4754 if (!folio_evictable(folio)) { in evict_folios()
4755 list_del(&folio->lru); in evict_folios()
4756 folio_putback_lru(folio); in evict_folios()
4761 if (!skip_retry && !folio_test_active(folio) && !folio_mapped(folio) && in evict_folios()
4762 !folio_test_dirty(folio) && !folio_test_writeback(folio)) { in evict_folios()
4763 list_move(&folio->lru, &clean); in evict_folios()
4768 if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type]) in evict_folios()
4769 set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active)); in evict_folios()
5145 struct folio *folio = lru_to_folio(head); in fill_evictable() local
5147 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in fill_evictable()
5148 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio); in fill_evictable()
5149 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); in fill_evictable()
5150 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); in fill_evictable()
5152 lruvec_del_folio(lruvec, folio); in fill_evictable()
5153 success = lru_gen_add_folio(lruvec, folio, false); in fill_evictable()
5174 struct folio *folio = lru_to_folio(head); in drain_evictable() local
5176 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in drain_evictable()
5177 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); in drain_evictable()
5178 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); in drain_evictable()
5179 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); in drain_evictable()
5181 success = lru_gen_del_folio(lruvec, folio, false); in drain_evictable()
5183 lruvec_add_folio(lruvec, folio); in drain_evictable()
7866 struct folio *folio = fbatch->folios[i]; in check_move_unevictable_folios() local
7867 int nr_pages = folio_nr_pages(folio); in check_move_unevictable_folios()
7872 if (!folio_test_clear_lru(folio)) in check_move_unevictable_folios()
7875 lruvec = folio_lruvec_relock_irq(folio, lruvec); in check_move_unevictable_folios()
7876 if (folio_evictable(folio) && folio_test_unevictable(folio)) { in check_move_unevictable_folios()
7877 lruvec_del_folio(lruvec, folio); in check_move_unevictable_folios()
7878 folio_clear_unevictable(folio); in check_move_unevictable_folios()
7879 lruvec_add_folio(lruvec, folio); in check_move_unevictable_folios()
7882 folio_set_lru(folio); in check_move_unevictable_folios()
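
check_move_unevictable_folios() rescues folios that became evictable while parked on the unevictable LRU (for example after an munlock): for each folio in the batch it claims PG_lru, relocks the appropriate lruvec, and if the folio is evictable again moves it from the unevictable list back onto its normal LRU. The per-folio step as a sketch; the helper name is hypothetical, and the statistics and final unlock stay with the caller as in the real batch loop:

	static bool rescue_unevictable(struct folio *folio, struct lruvec **lruvec)
	{
		if (!folio_test_clear_lru(folio))
			return false;		/* someone else owns the folio right now */

		*lruvec = folio_lruvec_relock_irq(folio, *lruvec);
		if (folio_evictable(folio) && folio_test_unevictable(folio)) {
			lruvec_del_folio(*lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(*lruvec, folio);
		}
		folio_set_lru(folio);
		return true;
	}
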