Lines Matching refs:folio
97 struct folio *folio = folio_get_nontail_page(page); in isolate_movable_ops_page() local
109 if (!folio) in isolate_movable_ops_page()
134 if (unlikely(!folio_trylock(folio))) in isolate_movable_ops_page()
151 folio_unlock(folio); in isolate_movable_ops_page()
156 folio_unlock(folio); in isolate_movable_ops_page()
158 folio_put(folio); in isolate_movable_ops_page()
177 struct folio *folio = page_folio(page); in putback_movable_ops_page() local
181 folio_lock(folio); in putback_movable_ops_page()
184 folio_unlock(folio); in putback_movable_ops_page()
185 folio_put(folio); in putback_movable_ops_page()
238 struct folio *folio; in putback_movable_pages() local
239 struct folio *folio2; in putback_movable_pages()
241 list_for_each_entry_safe(folio, folio2, l, lru) { in putback_movable_pages()
242 if (unlikely(folio_test_hugetlb(folio))) { in putback_movable_pages()
243 folio_putback_hugetlb(folio); in putback_movable_pages()
246 list_del(&folio->lru); in putback_movable_pages()
247 if (unlikely(page_has_movable_ops(&folio->page))) { in putback_movable_pages()
248 putback_movable_ops_page(&folio->page); in putback_movable_pages()
250 node_stat_mod_folio(folio, NR_ISOLATED_ANON + in putback_movable_pages()
251 folio_is_file_lru(folio), -folio_nr_pages(folio)); in putback_movable_pages()
252 folio_putback_lru(folio); in putback_movable_pages()
258 bool isolate_folio_to_list(struct folio *folio, struct list_head *list) in isolate_folio_to_list() argument
260 if (folio_test_hugetlb(folio)) in isolate_folio_to_list()
261 return folio_isolate_hugetlb(folio, list); in isolate_folio_to_list()
263 if (page_has_movable_ops(&folio->page)) { in isolate_folio_to_list()
264 if (!isolate_movable_ops_page(&folio->page, in isolate_folio_to_list()
268 if (!folio_isolate_lru(folio)) in isolate_folio_to_list()
270 node_stat_add_folio(folio, NR_ISOLATED_ANON + in isolate_folio_to_list()
271 folio_is_file_lru(folio)); in isolate_folio_to_list()
273 list_add(&folio->lru, list); in isolate_folio_to_list()
278 struct folio *folio, in try_to_map_unused_to_zeropage() argument
281 struct page *page = folio_page(folio, idx); in try_to_map_unused_to_zeropage()
292 if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) || in try_to_map_unused_to_zeropage()
312 dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio)); in try_to_map_unused_to_zeropage()
317 struct folio *folio; member
324 static bool remove_migration_pte(struct folio *folio, in remove_migration_pte() argument
328 DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION); in remove_migration_pte()
339 if (folio_test_large(folio) && !folio_test_hugetlb(folio)) in remove_migration_pte()
341 new = folio_page(folio, idx); in remove_migration_pte()
346 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || in remove_migration_pte()
347 !folio_test_pmd_mappable(folio), folio); in remove_migration_pte()
353 try_to_map_unused_to_zeropage(&pvmw, folio, idx)) in remove_migration_pte()
356 folio_get(folio); in remove_migration_pte()
363 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) in remove_migration_pte()
375 if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) in remove_migration_pte()
393 if (folio_test_hugetlb(folio)) { in remove_migration_pte()
399 if (folio_test_anon(folio)) in remove_migration_pte()
400 hugetlb_add_anon_rmap(folio, vma, pvmw.address, in remove_migration_pte()
403 hugetlb_add_file_rmap(folio); in remove_migration_pte()
409 if (folio_test_anon(folio)) in remove_migration_pte()
410 folio_add_anon_rmap_pte(folio, new, vma, in remove_migration_pte()
413 folio_add_file_rmap_pte(folio, new, vma); in remove_migration_pte()
433 void remove_migration_ptes(struct folio *src, struct folio *dst, int flags) in remove_migration_ptes()
436 .folio = src, in remove_migration_ptes()
542 struct folio *newfolio, struct folio *folio, int expected_count) in __folio_migrate_mapping() argument
544 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); in __folio_migrate_mapping()
547 long nr = folio_nr_pages(folio); in __folio_migrate_mapping()
552 if (folio_test_large(folio) && in __folio_migrate_mapping()
553 folio_test_large_rmappable(folio)) { in __folio_migrate_mapping()
554 if (!folio_ref_freeze(folio, expected_count)) in __folio_migrate_mapping()
556 folio_unqueue_deferred_split(folio); in __folio_migrate_mapping()
557 folio_ref_unfreeze(folio, expected_count); in __folio_migrate_mapping()
561 newfolio->index = folio->index; in __folio_migrate_mapping()
562 newfolio->mapping = folio->mapping; in __folio_migrate_mapping()
563 if (folio_test_anon(folio) && folio_test_large(folio)) in __folio_migrate_mapping()
564 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in __folio_migrate_mapping()
565 if (folio_test_swapbacked(folio)) in __folio_migrate_mapping()
571 oldzone = folio_zone(folio); in __folio_migrate_mapping()
575 if (!folio_ref_freeze(folio, expected_count)) { in __folio_migrate_mapping()
581 folio_unqueue_deferred_split(folio); in __folio_migrate_mapping()
587 newfolio->index = folio->index; in __folio_migrate_mapping()
588 newfolio->mapping = folio->mapping; in __folio_migrate_mapping()
589 if (folio_test_anon(folio) && folio_test_large(folio)) in __folio_migrate_mapping()
590 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in __folio_migrate_mapping()
592 if (folio_test_swapbacked(folio)) in __folio_migrate_mapping()
594 if (folio_test_swapcache(folio)) { in __folio_migrate_mapping()
596 newfolio->private = folio_get_private(folio); in __folio_migrate_mapping()
603 dirty = folio_test_dirty(folio); in __folio_migrate_mapping()
605 folio_clear_dirty(folio); in __folio_migrate_mapping()
620 folio_ref_unfreeze(folio, expected_count - nr); in __folio_migrate_mapping()
639 memcg = folio_memcg(folio); in __folio_migrate_mapping()
645 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { in __folio_migrate_mapping()
649 if (folio_test_pmd_mappable(folio)) { in __folio_migrate_mapping()
655 if (folio_test_swapcache(folio)) { in __folio_migrate_mapping()
673 struct folio *newfolio, struct folio *folio, int extra_count) in folio_migrate_mapping() argument
675 int expected_count = folio_expected_ref_count(folio) + extra_count + 1; in folio_migrate_mapping()
677 if (folio_ref_count(folio) != expected_count) in folio_migrate_mapping()
680 return __folio_migrate_mapping(mapping, newfolio, folio, expected_count); in folio_migrate_mapping()
689 struct folio *dst, struct folio *src) in migrate_huge_page_move_mapping()
724 void folio_migrate_flags(struct folio *newfolio, struct folio *folio) in folio_migrate_flags() argument
728 if (folio_test_referenced(folio)) in folio_migrate_flags()
730 if (folio_test_uptodate(folio)) in folio_migrate_flags()
732 if (folio_test_clear_active(folio)) { in folio_migrate_flags()
733 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); in folio_migrate_flags()
735 } else if (folio_test_clear_unevictable(folio)) in folio_migrate_flags()
737 if (folio_test_workingset(folio)) in folio_migrate_flags()
739 if (folio_test_checked(folio)) in folio_migrate_flags()
747 if (folio_test_mappedtodisk(folio)) in folio_migrate_flags()
751 if (folio_test_dirty(folio)) in folio_migrate_flags()
754 if (folio_test_young(folio)) in folio_migrate_flags()
756 if (folio_test_idle(folio)) in folio_migrate_flags()
759 folio_migrate_refs(newfolio, folio); in folio_migrate_flags()
764 cpupid = folio_xchg_last_cpupid(folio, -1); in folio_migrate_flags()
771 bool f_toptier = node_is_toptier(folio_nid(folio)); in folio_migrate_flags()
779 folio_migrate_ksm(newfolio, folio); in folio_migrate_flags()
785 if (folio_test_swapcache(folio)) in folio_migrate_flags()
786 folio_clear_swapcache(folio); in folio_migrate_flags()
787 folio_clear_private(folio); in folio_migrate_flags()
790 if (!folio_test_hugetlb(folio)) in folio_migrate_flags()
791 folio->private = NULL; in folio_migrate_flags()
805 if (folio_test_readahead(folio)) in folio_migrate_flags()
808 folio_copy_owner(newfolio, folio); in folio_migrate_flags()
809 pgalloc_tag_swap(newfolio, folio); in folio_migrate_flags()
811 mem_cgroup_migrate(folio, newfolio); in folio_migrate_flags()
819 static int __migrate_folio(struct address_space *mapping, struct folio *dst, in __migrate_folio()
820 struct folio *src, void *src_private, in __migrate_folio()
856 int migrate_folio(struct address_space *mapping, struct folio *dst, in migrate_folio()
857 struct folio *src, enum migrate_mode mode) in migrate_folio()
899 struct folio *dst, struct folio *src, enum migrate_mode mode, in __buffer_migrate_folio()
985 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio()
1006 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio_norefs()
1014 struct folio *dst, struct folio *src, enum migrate_mode mode) in filemap_migrate_folio()
1024 struct folio *dst, struct folio *src, enum migrate_mode mode) in fallback_migrate_folio()
1054 static int move_to_new_folio(struct folio *dst, struct folio *src, in move_to_new_folio()
1105 static void __migrate_folio_record(struct folio *dst, in __migrate_folio_record()
1112 static void __migrate_folio_extract(struct folio *dst, in __migrate_folio_extract()
1124 static void migrate_folio_undo_src(struct folio *src, in migrate_folio_undo_src()
1142 static void migrate_folio_undo_dst(struct folio *dst, bool locked, in migrate_folio_undo_dst()
1154 static void migrate_folio_done(struct folio *src, in migrate_folio_done()
1169 struct folio *src, struct folio **dstp, enum migrate_mode mode, in migrate_folio_unmap()
1172 struct folio *dst; in migrate_folio_unmap()
1328 struct folio *src, struct folio *dst, in migrate_folio_move()
1428 struct folio *src, int force, enum migrate_mode mode, in unmap_and_move_huge_page()
1431 struct folio *dst; in unmap_and_move_huge_page()
1539 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios, in try_split_folio() argument
1545 if (!folio_trylock(folio)) in try_split_folio()
1548 folio_lock(folio); in try_split_folio()
1550 rc = split_folio_to_list(folio, split_folios); in try_split_folio()
1551 folio_unlock(folio); in try_split_folio()
1553 list_move_tail(&folio->lru, split_folios); in try_split_folio()
1596 struct folio *folio, *folio2; in migrate_hugetlbs() local
1603 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_hugetlbs()
1604 if (!folio_test_hugetlb(folio)) in migrate_hugetlbs()
1607 nr_pages = folio_nr_pages(folio); in migrate_hugetlbs()
1618 if (!hugepage_migration_supported(folio_hstate(folio))) { in migrate_hugetlbs()
1621 list_move_tail(&folio->lru, ret_folios); in migrate_hugetlbs()
1627 folio, pass > 2, mode, in migrate_hugetlbs()
1684 struct folio *folio, *folio2, *dst, *dst2; in migrate_folios_move() local
1689 dst = list_first_entry(dst_folios, struct folio, lru); in migrate_folios_move()
1691 list_for_each_entry_safe(folio, folio2, src_folios, lru) { in migrate_folios_move()
1692 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); in migrate_folios_move()
1693 nr_pages = folio_nr_pages(folio); in migrate_folios_move()
1698 folio, dst, mode, in migrate_folios_move()
1732 struct folio *folio, *folio2, *dst, *dst2; in migrate_folios_undo() local
1734 dst = list_first_entry(dst_folios, struct folio, lru); in migrate_folios_undo()
1736 list_for_each_entry_safe(folio, folio2, src_folios, lru) { in migrate_folios_undo()
1741 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED, in migrate_folios_undo()
1772 struct folio *folio, *folio2, *dst = NULL; in migrate_pages_batch() local
1786 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_pages_batch()
1787 is_large = folio_test_large(folio); in migrate_pages_batch()
1788 is_thp = folio_test_pmd_mappable(folio); in migrate_pages_batch()
1789 nr_pages = folio_nr_pages(folio); in migrate_pages_batch()
1817 !list_empty(&folio->_deferred_list) && in migrate_pages_batch()
1818 folio_test_partially_mapped(folio)) { in migrate_pages_batch()
1819 if (!try_split_folio(folio, split_folios, mode)) { in migrate_pages_batch()
1841 if (!try_split_folio(folio, split_folios, mode)) { in migrate_pages_batch()
1847 list_move_tail(&folio->lru, ret_folios); in migrate_pages_batch()
1852 private, folio, &dst, mode, reason, in migrate_pages_batch()
1873 int ret = try_split_folio(folio, split_folios, mode); in migrate_pages_batch()
1913 list_move_tail(&folio->lru, &unmap_folios); in migrate_pages_batch()
2045 struct folio *folio, *folio2; in migrate_pages() local
2062 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_pages()
2064 if (folio_test_hugetlb(folio)) { in migrate_pages()
2065 list_move_tail(&folio->lru, &ret_folios); in migrate_pages()
2069 nr_pages += folio_nr_pages(folio); in migrate_pages()
2136 struct folio *alloc_migration_target(struct folio *src, unsigned long private) in alloc_migration_target()
2204 static int __add_folio_for_migration(struct folio *folio, int node, in __add_folio_for_migration() argument
2207 if (is_zero_folio(folio) || is_huge_zero_folio(folio)) in __add_folio_for_migration()
2210 if (folio_is_zone_device(folio)) in __add_folio_for_migration()
2213 if (folio_nid(folio) == node) in __add_folio_for_migration()
2216 if (folio_maybe_mapped_shared(folio) && !migrate_all) in __add_folio_for_migration()
2219 if (folio_test_hugetlb(folio)) { in __add_folio_for_migration()
2220 if (folio_isolate_hugetlb(folio, pagelist)) in __add_folio_for_migration()
2222 } else if (folio_isolate_lru(folio)) { in __add_folio_for_migration()
2223 list_add_tail(&folio->lru, pagelist); in __add_folio_for_migration()
2224 node_stat_mod_folio(folio, in __add_folio_for_migration()
2225 NR_ISOLATED_ANON + folio_is_file_lru(folio), in __add_folio_for_migration()
2226 folio_nr_pages(folio)); in __add_folio_for_migration()
2246 struct folio *folio; in add_folio_for_migration() local
2255 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE); in add_folio_for_migration()
2256 if (folio) { in add_folio_for_migration()
2257 err = __add_folio_for_migration(folio, node, pagelist, in add_folio_for_migration()
2408 struct folio *folio; in do_pages_stat_array() local
2415 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE); in do_pages_stat_array()
2416 if (folio) { in do_pages_stat_array()
2417 if (is_zero_folio(folio) || is_huge_zero_folio(folio)) in do_pages_stat_array()
2419 else if (folio_is_zone_device(folio)) in do_pages_stat_array()
2422 err = folio_nid(folio); in do_pages_stat_array()
2603 static struct folio *alloc_misplaced_dst_folio(struct folio *src, in alloc_misplaced_dst_folio()
2624 int migrate_misplaced_folio_prepare(struct folio *folio, in migrate_misplaced_folio_prepare() argument
2627 int nr_pages = folio_nr_pages(folio); in migrate_misplaced_folio_prepare()
2630 if (folio_is_file_lru(folio)) { in migrate_misplaced_folio_prepare()
2639 if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio)) in migrate_misplaced_folio_prepare()
2647 if (folio_test_dirty(folio)) in migrate_misplaced_folio_prepare()
2670 folio_order(folio), ZONE_MOVABLE); in migrate_misplaced_folio_prepare()
2674 if (!folio_isolate_lru(folio)) in migrate_misplaced_folio_prepare()
2677 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), in migrate_misplaced_folio_prepare()
2689 int migrate_misplaced_folio(struct folio *folio, int node) in migrate_misplaced_folio() argument
2695 struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio); in migrate_misplaced_folio()
2698 list_add(&folio->lru, &migratepages); in migrate_misplaced_folio()
2708 && !node_is_toptier(folio_nid(folio)) in migrate_misplaced_folio()
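
The listing above covers the isolation, mapping-migration, and putback entry points of the folio migration code. As a minimal, hypothetical sketch (not part of migrate.c itself, and assuming a recent kernel), the snippet below shows how a caller typically ties the listed functions together: folios are first isolated onto a private list (for example via isolate_folio_to_list()), the list is handed to migrate_pages() with alloc_migration_target() as the allocation callback, and anything that could not be migrated is returned via putback_movable_pages(). The helper name migrate_list_to_node(), and the choice of MIGRATE_SYNC / MR_MEMORY_HOTPLUG, are illustrative only.

/*
 * Hypothetical caller-side sketch; struct migration_target_control is
 * declared in mm/internal.h, so code like this would live under mm/.
 */
#include <linux/migrate.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include "internal.h"

static int migrate_list_to_node(struct list_head *folio_list, int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,
		.gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL,
	};
	unsigned int nr_succeeded = 0;
	int ret;

	if (list_empty(folio_list))
		return 0;

	/*
	 * alloc_migration_target() allocates each destination folio on the
	 * target node described by mtc; the put_new_folio callback may be
	 * NULL when the default free path is sufficient.
	 */
	ret = migrate_pages(folio_list, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG, &nr_succeeded);

	/* Return unmigrated folios to the LRU or their movable_ops owner. */
	if (ret)
		putback_movable_pages(folio_list);

	return ret;
}

This mirrors the pattern used by existing callers (memory offlining, mempolicy-driven migration): isolation and putback are handled with the helpers listed above, while migrate_pages() drives the unmap/copy/remap sequence implemented by migrate_folio_unmap(), migrate_folio_move(), and remove_migration_ptes().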