Lines Matching refs:folio

56 struct folio *folio = folio_get_nontail_page(page); in isolate_movable_page() local
68 if (!folio) in isolate_movable_page()
71 if (unlikely(folio_test_slab(folio))) in isolate_movable_page()
80 if (unlikely(!__folio_test_movable(folio))) in isolate_movable_page()
84 if (unlikely(folio_test_slab(folio))) in isolate_movable_page()
98 if (unlikely(!folio_trylock(folio))) in isolate_movable_page()
101 if (!folio_test_movable(folio) || folio_test_isolated(folio)) in isolate_movable_page()
104 mops = folio_movable_ops(folio); in isolate_movable_page()
105 VM_BUG_ON_FOLIO(!mops, folio); in isolate_movable_page()
107 if (!mops->isolate_page(&folio->page, mode)) in isolate_movable_page()
111 WARN_ON_ONCE(folio_test_isolated(folio)); in isolate_movable_page()
112 folio_set_isolated(folio); in isolate_movable_page()
113 folio_unlock(folio); in isolate_movable_page()
118 folio_unlock(folio); in isolate_movable_page()
120 folio_put(folio); in isolate_movable_page()
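
The fragments above list only the statements that reference the folio variable (the functions and line numbers appear to come from the kernel's mm/migrate.c). A condensed sketch of how those fragments fit together in isolate_movable_page() follows; the goto labels, the bool return type, and the error paths are reconstructed from context rather than quoted from the source.

static bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	if (!folio)					/* freed under us, or a tail page */
		goto out;
	if (unlikely(folio_test_slab(folio)))		/* slab pages are never movable */
		goto out_putfolio;
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	if (unlikely(folio_test_slab(folio)))		/* re-check after the movable test */
		goto out_putfolio;

	if (unlikely(!folio_trylock(folio)))		/* racing with release or migration */
		goto out_putfolio;
	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);
	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	WARN_ON_ONCE(folio_test_isolated(folio));	/* drivers must not use the flag */
	folio_set_isolated(folio);
	folio_unlock(folio);
	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}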
125 static void putback_movable_folio(struct folio *folio) in putback_movable_folio() argument
127 const struct movable_operations *mops = folio_movable_ops(folio); in putback_movable_folio()
129 mops->putback_page(&folio->page); in putback_movable_folio()
130 folio_clear_isolated(folio); in putback_movable_folio()
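
The two body fragments are essentially the whole helper; reassembled, with nothing added beyond the braces, it reads:

static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}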
143 struct folio *folio; in putback_movable_pages() local
144 struct folio *folio2; in putback_movable_pages()
146 list_for_each_entry_safe(folio, folio2, l, lru) { in putback_movable_pages()
147 if (unlikely(folio_test_hugetlb(folio))) { in putback_movable_pages()
148 folio_putback_active_hugetlb(folio); in putback_movable_pages()
151 list_del(&folio->lru); in putback_movable_pages()
157 if (unlikely(__folio_test_movable(folio))) { in putback_movable_pages()
158 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio); in putback_movable_pages()
159 folio_lock(folio); in putback_movable_pages()
160 if (folio_test_movable(folio)) in putback_movable_pages()
161 putback_movable_folio(folio); in putback_movable_pages()
163 folio_clear_isolated(folio); in putback_movable_pages()
164 folio_unlock(folio); in putback_movable_pages()
165 folio_put(folio); in putback_movable_pages()
167 node_stat_mod_folio(folio, NR_ISOLATED_ANON + in putback_movable_pages()
168 folio_is_file_lru(folio), -folio_nr_pages(folio)); in putback_movable_pages()
169 folio_putback_lru(folio); in putback_movable_pages()
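
A sketch of putback_movable_pages() assembled from the fragments above; the continue after the hugetlb case and the if/else structure between the driver-movable and LRU paths are reconstructed from context.

void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		if (unlikely(__folio_test_movable(folio))) {
			/* Driver-movable (non-LRU) folio: return it via its
			 * movable_operations unless the driver already freed it. */
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}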
175 bool isolate_folio_to_list(struct folio *folio, struct list_head *list) in isolate_folio_to_list() argument
179 if (folio_test_hugetlb(folio)) in isolate_folio_to_list()
180 return isolate_hugetlb(folio, list); in isolate_folio_to_list()
182 lru = !__folio_test_movable(folio); in isolate_folio_to_list()
184 isolated = folio_isolate_lru(folio); in isolate_folio_to_list()
186 isolated = isolate_movable_page(&folio->page, in isolate_folio_to_list()
192 list_add(&folio->lru, list); in isolate_folio_to_list()
194 node_stat_add_folio(folio, NR_ISOLATED_ANON + in isolate_folio_to_list()
195 folio_is_file_lru(folio)); in isolate_folio_to_list()
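
The fragments suggest the following shape for isolate_folio_to_list(); the local variable declarations, the if/else around the two isolation paths, and the ISOLATE_UNEVICTABLE argument are assumptions filled in from context.

bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	bool isolated, lru;

	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	lru = !__folio_test_movable(folio);
	if (lru)
		isolated = folio_isolate_lru(folio);
	else
		isolated = isolate_movable_page(&folio->page,
						ISOLATE_UNEVICTABLE);
	if (!isolated)
		return false;

	list_add(&folio->lru, list);
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	return true;
}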
201 struct folio *folio, in try_to_map_unused_to_zeropage() argument
204 struct page *page = folio_page(folio, idx); in try_to_map_unused_to_zeropage()
215 if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) || in try_to_map_unused_to_zeropage()
235 dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio)); in try_to_map_unused_to_zeropage()
240 struct folio *folio; member
247 static bool remove_migration_pte(struct folio *folio, in remove_migration_pte() argument
251 DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION); in remove_migration_pte()
262 if (folio_test_large(folio) && !folio_test_hugetlb(folio)) in remove_migration_pte()
264 new = folio_page(folio, idx); in remove_migration_pte()
269 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || in remove_migration_pte()
270 !folio_test_pmd_mappable(folio), folio); in remove_migration_pte()
276 try_to_map_unused_to_zeropage(&pvmw, folio, idx)) in remove_migration_pte()
279 folio_get(folio); in remove_migration_pte()
286 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) in remove_migration_pte()
298 if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) in remove_migration_pte()
316 if (folio_test_hugetlb(folio)) { in remove_migration_pte()
322 if (folio_test_anon(folio)) in remove_migration_pte()
323 hugetlb_add_anon_rmap(folio, vma, pvmw.address, in remove_migration_pte()
326 hugetlb_add_file_rmap(folio); in remove_migration_pte()
332 if (folio_test_anon(folio)) in remove_migration_pte()
333 folio_add_anon_rmap_pte(folio, new, vma, in remove_migration_pte()
336 folio_add_file_rmap_pte(folio, new, vma); in remove_migration_pte()
356 void remove_migration_ptes(struct folio *src, struct folio *dst, int flags) in remove_migration_ptes()
359 .folio = src, in remove_migration_ptes()
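
remove_migration_ptes() itself contains few folio references, so most of its body is absent from the listing. A sketch of the presumed body is below; the rmap_walk_control setup, the map_unused_to_zeropage member, and the RMP_LOCKED / RMP_USE_SHARED_ZEROPAGE flag names are reconstructed from memory of the surrounding code and should be treated as assumptions.

void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
{
	struct rmap_walk_arg rmap_walk_arg = {
		.folio = src,
		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = &rmap_walk_arg,
	};

	if (flags & RMP_LOCKED)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}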
457 struct folio *folio) in folio_expected_refs() argument
463 refs += folio_nr_pages(folio); in folio_expected_refs()
464 if (folio_test_private(folio)) in folio_expected_refs()
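
folio_expected_refs() computes how many references a folio should hold if nobody else is using it. A sketch with the non-matching lines (the initial refs = 1 and the !mapping early return) filled in as assumptions:

static int folio_expected_refs(struct address_space *mapping,
			       struct folio *folio)
{
	int refs = 1;				/* the caller's own reference */

	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);		/* one per page-cache slot */
	if (folio_test_private(folio))
		refs++;				/* fs-private data, e.g. buffer heads */

	return refs;
}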
479 struct folio *newfolio, struct folio *folio, int expected_count) in __folio_migrate_mapping() argument
481 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); in __folio_migrate_mapping()
484 long nr = folio_nr_pages(folio); in __folio_migrate_mapping()
489 if (folio_test_large(folio) && in __folio_migrate_mapping()
490 folio_test_large_rmappable(folio)) { in __folio_migrate_mapping()
491 if (!folio_ref_freeze(folio, expected_count)) in __folio_migrate_mapping()
493 folio_unqueue_deferred_split(folio); in __folio_migrate_mapping()
494 folio_ref_unfreeze(folio, expected_count); in __folio_migrate_mapping()
498 newfolio->index = folio->index; in __folio_migrate_mapping()
499 newfolio->mapping = folio->mapping; in __folio_migrate_mapping()
500 if (folio_test_anon(folio) && folio_test_large(folio)) in __folio_migrate_mapping()
501 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in __folio_migrate_mapping()
502 if (folio_test_swapbacked(folio)) in __folio_migrate_mapping()
508 oldzone = folio_zone(folio); in __folio_migrate_mapping()
512 if (!folio_ref_freeze(folio, expected_count)) { in __folio_migrate_mapping()
518 folio_unqueue_deferred_split(folio); in __folio_migrate_mapping()
524 newfolio->index = folio->index; in __folio_migrate_mapping()
525 newfolio->mapping = folio->mapping; in __folio_migrate_mapping()
526 if (folio_test_anon(folio) && folio_test_large(folio)) in __folio_migrate_mapping()
527 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in __folio_migrate_mapping()
529 if (folio_test_swapbacked(folio)) { in __folio_migrate_mapping()
531 if (folio_test_swapcache(folio)) { in __folio_migrate_mapping()
533 newfolio->private = folio_get_private(folio); in __folio_migrate_mapping()
537 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); in __folio_migrate_mapping()
542 dirty = folio_test_dirty(folio); in __folio_migrate_mapping()
544 folio_clear_dirty(folio); in __folio_migrate_mapping()
559 folio_ref_unfreeze(folio, expected_count - nr); in __folio_migrate_mapping()
578 memcg = folio_memcg(folio); in __folio_migrate_mapping()
584 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { in __folio_migrate_mapping()
588 if (folio_test_pmd_mappable(folio)) { in __folio_migrate_mapping()
594 if (folio_test_swapcache(folio)) { in __folio_migrate_mapping()
612 struct folio *newfolio, struct folio *folio, int extra_count) in folio_migrate_mapping() argument
614 int expected_count = folio_expected_refs(mapping, folio) + extra_count; in folio_migrate_mapping()
616 if (folio_ref_count(folio) != expected_count) in folio_migrate_mapping()
619 return __folio_migrate_mapping(mapping, newfolio, folio, expected_count); in folio_migrate_mapping()
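
The wrapper is short enough to reconstruct almost entirely from the fragments; only the -EAGAIN return value is an assumption:

int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;

	if (folio_ref_count(folio) != expected_count)
		return -EAGAIN;		/* unexpected references: caller will retry */

	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
}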
628 struct folio *dst, struct folio *src) in migrate_huge_page_move_mapping()
663 void folio_migrate_flags(struct folio *newfolio, struct folio *folio) in folio_migrate_flags() argument
667 if (folio_test_referenced(folio)) in folio_migrate_flags()
669 if (folio_test_uptodate(folio)) in folio_migrate_flags()
671 if (folio_test_clear_active(folio)) { in folio_migrate_flags()
672 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); in folio_migrate_flags()
674 } else if (folio_test_clear_unevictable(folio)) in folio_migrate_flags()
676 if (folio_test_workingset(folio)) in folio_migrate_flags()
678 if (folio_test_checked(folio)) in folio_migrate_flags()
686 if (folio_test_mappedtodisk(folio)) in folio_migrate_flags()
690 if (folio_test_dirty(folio)) in folio_migrate_flags()
693 if (folio_test_young(folio)) in folio_migrate_flags()
695 if (folio_test_idle(folio)) in folio_migrate_flags()
702 cpupid = folio_xchg_last_cpupid(folio, -1); in folio_migrate_flags()
709 bool f_toptier = node_is_toptier(folio_nid(folio)); in folio_migrate_flags()
717 folio_migrate_ksm(newfolio, folio); in folio_migrate_flags()
723 if (folio_test_swapcache(folio)) in folio_migrate_flags()
724 folio_clear_swapcache(folio); in folio_migrate_flags()
725 folio_clear_private(folio); in folio_migrate_flags()
728 if (!folio_test_hugetlb(folio)) in folio_migrate_flags()
729 folio->private = NULL; in folio_migrate_flags()
743 if (folio_test_readahead(folio)) in folio_migrate_flags()
746 folio_copy_owner(newfolio, folio); in folio_migrate_flags()
747 pgalloc_tag_copy(newfolio, folio); in folio_migrate_flags()
749 mem_cgroup_migrate(folio, newfolio); in folio_migrate_flags()
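
folio_migrate_flags() is mostly a long run of test-on-source / set-on-destination pairs; the listing shows only the folio side of each pair. An illustrative excerpt, with the newfolio setters filled in as assumptions:

	/* Excerpt: each flag is tested on the source folio and, when set,
	 * transferred to newfolio. */
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);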
757 static int __migrate_folio(struct address_space *mapping, struct folio *dst, in __migrate_folio()
758 struct folio *src, void *src_private, in __migrate_folio()
794 int migrate_folio(struct address_space *mapping, struct folio *dst, in migrate_folio()
795 struct folio *src, enum migrate_mode mode) in migrate_folio()
837 struct folio *dst, struct folio *src, enum migrate_mode mode, in __buffer_migrate_folio()
921 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio()
942 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio_norefs()
950 struct folio *dst, struct folio *src, enum migrate_mode mode) in filemap_migrate_folio()
959 static int writeout(struct address_space *mapping, struct folio *folio) in writeout() argument
974 if (!folio_clear_dirty_for_io(folio)) in writeout()
986 remove_migration_ptes(folio, folio, 0); in writeout()
988 rc = mapping->a_ops->writepage(&folio->page, &wbc); in writeout()
992 folio_lock(folio); in writeout()
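
A sketch of writeout(), the fallback used when a dirty folio has no migration method; the writeback_control fields and the -EINVAL / -EIO / -EAGAIN return conventions are reconstructed from context and may differ in detail from the source.

static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	int rc;

	if (!mapping->a_ops->writepage)
		return -EINVAL;			/* no write method for this mapping */

	if (!folio_clear_dirty_for_io(folio))
		return -EAGAIN;			/* someone else already wrote it back */

	/* A failed unmap stage may have left migration entries behind;
	 * restore working ptes before handing the folio to ->writepage. */
	remove_migration_ptes(folio, folio, 0);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);
	if (rc != AOP_WRITEPAGE_ACTIVATE)
		folio_lock(folio);		/* ->writepage unlocked the folio */

	return (rc < 0) ? -EIO : -EAGAIN;
}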
1001 struct folio *dst, struct folio *src, enum migrate_mode mode) in fallback_migrate_folio()
1035 static int move_to_new_folio(struct folio *dst, struct folio *src, in move_to_new_folio()
1124 static void __migrate_folio_record(struct folio *dst, in __migrate_folio_record()
1131 static void __migrate_folio_extract(struct folio *dst, in __migrate_folio_extract()
1143 static void migrate_folio_undo_src(struct folio *src, in migrate_folio_undo_src()
1161 static void migrate_folio_undo_dst(struct folio *dst, bool locked, in migrate_folio_undo_dst()
1173 static void migrate_folio_done(struct folio *src, in migrate_folio_done()
1193 struct folio *src, struct folio **dstp, enum migrate_mode mode, in migrate_folio_unmap()
1196 struct folio *dst; in migrate_folio_unmap()
1353 struct folio *src, struct folio *dst, in migrate_folio_move()
1450 struct folio *src, int force, enum migrate_mode mode, in unmap_and_move_huge_page()
1453 struct folio *dst; in unmap_and_move_huge_page()
1561 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios, in try_split_folio() argument
1567 if (!folio_trylock(folio)) in try_split_folio()
1570 folio_lock(folio); in try_split_folio()
1572 rc = split_folio_to_list(folio, split_folios); in try_split_folio()
1573 folio_unlock(folio); in try_split_folio()
1575 list_move_tail(&folio->lru, split_folios); in try_split_folio()
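
try_split_folio() is nearly complete in the fragments; the MIGRATE_ASYNC test that selects folio_trylock() over folio_lock(), and the rc check before list_move_tail(), are filled in as assumptions.

static inline int try_split_folio(struct folio *folio,
				  struct list_head *split_folios,
				  enum migrate_mode mode)
{
	int rc;

	if (mode == MIGRATE_ASYNC) {
		if (!folio_trylock(folio))	/* never block in async mode */
			return -EAGAIN;
	} else {
		folio_lock(folio);
	}
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}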
1618 struct folio *folio, *folio2; in migrate_hugetlbs() local
1625 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_hugetlbs()
1626 if (!folio_test_hugetlb(folio)) in migrate_hugetlbs()
1629 nr_pages = folio_nr_pages(folio); in migrate_hugetlbs()
1640 if (!hugepage_migration_supported(folio_hstate(folio))) { in migrate_hugetlbs()
1643 list_move_tail(&folio->lru, ret_folios); in migrate_hugetlbs()
1649 folio, pass > 2, mode, in migrate_hugetlbs()
1719 struct folio *folio, *folio2, *dst = NULL, *dst2; in migrate_pages_batch() local
1733 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_pages_batch()
1734 is_large = folio_test_large(folio); in migrate_pages_batch()
1735 is_thp = is_large && folio_test_pmd_mappable(folio); in migrate_pages_batch()
1736 nr_pages = folio_nr_pages(folio); in migrate_pages_batch()
1764 !list_empty(&folio->_deferred_list) && in migrate_pages_batch()
1765 folio_test_partially_mapped(folio)) { in migrate_pages_batch()
1766 if (!try_split_folio(folio, split_folios, mode)) { in migrate_pages_batch()
1788 if (!try_split_folio(folio, split_folios, mode)) { in migrate_pages_batch()
1794 list_move_tail(&folio->lru, ret_folios); in migrate_pages_batch()
1799 private, folio, &dst, mode, reason, in migrate_pages_batch()
1820 int ret = try_split_folio(folio, split_folios, mode); in migrate_pages_batch()
1860 list_move_tail(&folio->lru, &unmap_folios); in migrate_pages_batch()
1890 dst = list_first_entry(&dst_folios, struct folio, lru); in migrate_pages_batch()
1892 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { in migrate_pages_batch()
1893 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); in migrate_pages_batch()
1894 nr_pages = folio_nr_pages(folio); in migrate_pages_batch()
1899 folio, dst, mode, in migrate_pages_batch()
1934 dst = list_first_entry(&dst_folios, struct folio, lru); in migrate_pages_batch()
1936 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { in migrate_pages_batch()
1941 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED, in migrate_pages_batch()
2035 struct folio *folio, *folio2; in migrate_pages() local
2052 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_pages()
2054 if (folio_test_hugetlb(folio)) { in migrate_pages()
2055 list_move_tail(&folio->lru, &ret_folios); in migrate_pages()
2059 nr_pages += folio_nr_pages(folio); in migrate_pages()
2126 struct folio *alloc_migration_target(struct folio *src, unsigned long private) in alloc_migration_target()
2194 static int __add_folio_for_migration(struct folio *folio, int node, in __add_folio_for_migration() argument
2197 if (is_zero_folio(folio) || is_huge_zero_folio(folio)) in __add_folio_for_migration()
2200 if (folio_is_zone_device(folio)) in __add_folio_for_migration()
2203 if (folio_nid(folio) == node) in __add_folio_for_migration()
2206 if (folio_likely_mapped_shared(folio) && !migrate_all) in __add_folio_for_migration()
2209 if (folio_test_hugetlb(folio)) { in __add_folio_for_migration()
2210 if (isolate_hugetlb(folio, pagelist)) in __add_folio_for_migration()
2212 } else if (folio_isolate_lru(folio)) { in __add_folio_for_migration()
2213 list_add_tail(&folio->lru, pagelist); in __add_folio_for_migration()
2214 node_stat_mod_folio(folio, in __add_folio_for_migration()
2215 NR_ISOLATED_ANON + folio_is_file_lru(folio), in __add_folio_for_migration()
2216 folio_nr_pages(folio)); in __add_folio_for_migration()
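
A sketch of __add_folio_for_migration(), the per-folio policy check behind move_pages(2); the specific return values (-EFAULT, -EACCES, -EBUSY, 0 and 1) are reconstructed from context rather than quoted.

static int __add_folio_for_migration(struct folio *folio, int node,
		struct list_head *pagelist, bool migrate_all)
{
	if (is_zero_folio(folio) || is_huge_zero_folio(folio))
		return -EFAULT;		/* shared zero folios are never migrated */
	if (folio_is_zone_device(folio))
		return -EFAULT;
	if (folio_nid(folio) == node)
		return 0;		/* already on the target node */
	if (folio_likely_mapped_shared(folio) && !migrate_all)
		return -EACCES;		/* mapped elsewhere, MPOL_MF_MOVE_ALL not set */

	if (folio_test_hugetlb(folio)) {
		if (isolate_hugetlb(folio, pagelist))
			return 1;
	} else if (folio_isolate_lru(folio)) {
		list_add_tail(&folio->lru, pagelist);
		node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			folio_nr_pages(folio));
		return 1;
	}
	return -EBUSY;			/* isolation failed */
}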
2236 struct folio *folio; in add_folio_for_migration() local
2245 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE); in add_folio_for_migration()
2246 if (folio) { in add_folio_for_migration()
2247 err = __add_folio_for_migration(folio, node, pagelist, in add_folio_for_migration()
2405 struct folio *folio; in do_pages_stat_array() local
2412 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE); in do_pages_stat_array()
2413 if (folio) { in do_pages_stat_array()
2414 if (is_zero_folio(folio) || is_huge_zero_folio(folio)) in do_pages_stat_array()
2416 else if (folio_is_zone_device(folio)) in do_pages_stat_array()
2419 err = folio_nid(folio); in do_pages_stat_array()
2598 static struct folio *alloc_misplaced_dst_folio(struct folio *src, in alloc_misplaced_dst_folio()
2619 int migrate_misplaced_folio_prepare(struct folio *folio, in migrate_misplaced_folio_prepare() argument
2622 int nr_pages = folio_nr_pages(folio); in migrate_misplaced_folio_prepare()
2625 if (folio_is_file_lru(folio)) { in migrate_misplaced_folio_prepare()
2635 folio_likely_mapped_shared(folio)) in migrate_misplaced_folio_prepare()
2643 if (folio_test_dirty(folio)) in migrate_misplaced_folio_prepare()
2666 folio_order(folio), ZONE_MOVABLE); in migrate_misplaced_folio_prepare()
2670 if (!folio_isolate_lru(folio)) in migrate_misplaced_folio_prepare()
2673 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), in migrate_misplaced_folio_prepare()
2685 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, in migrate_misplaced_folio() argument
2692 struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio); in migrate_misplaced_folio()
2695 list_add(&folio->lru, &migratepages); in migrate_misplaced_folio()
2705 && !node_is_toptier(folio_nid(folio)) in migrate_misplaced_folio()
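
The listing ends inside migrate_misplaced_folio(), the NUMA-balancing promotion path. A condensed sketch of the presumed flow around the listed fragments; the migrate_pages() argument order and the event accounting are reconstructed from context and only indicative.

int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
			    int node)
{
	LIST_HEAD(migratepages);
	unsigned int nr_succeeded;
	int nr_remaining;
	struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);

	list_add(&folio->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining && !list_empty(&migratepages))
		putback_movable_pages(&migratepages);
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		/* Promotion statistics are bumped here when memory tiering is
		 * enabled and the source node is not a top tier, which is the
		 * node_is_toptier() test on the last listed line above. */
	}
	mem_cgroup_put(memcg);
	return nr_remaining ? -EAGAIN : 0;
}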