mm/migrate.c: lines referencing 'src' (cross-reference listing, grouped by function)

migrate_movable_ops_page():
  215  static int migrate_movable_ops_page(struct page *dst, struct page *src,
  220  VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
  221  VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
  222  rc = page_movable_ops(src)->migrate_page(dst, src, mode);
  224  ClearPageMovableOpsIsolated(src);
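
A minimal sketch of the driver side this path dispatches to. The callback
names (my_isolate_page() and friends) are hypothetical, but struct
movable_operations is the real interface whose ->migrate_page() hook
migrate_movable_ops_page() invokes at line 222 above:

  #include <linux/highmem.h>
  #include <linux/migrate.h>

  /* Hypothetical callbacks for a driver that owns movable_ops pages. */
  static bool my_isolate_page(struct page *page, isolate_mode_t mode)
  {
          /* Pin driver state so the page cannot go away mid-migration. */
          return true;
  }

  static int my_migrate_page(struct page *dst, struct page *src,
                             enum migrate_mode mode)
  {
          /* Copy contents and repoint driver metadata from src to dst. */
          copy_highpage(dst, src);
          return MIGRATEPAGE_SUCCESS;
  }

  static void my_putback_page(struct page *page)
  {
          /* Migration failed or was aborted: undo the isolation. */
  }

  static const struct movable_operations my_movable_ops = {
          .isolate_page = my_isolate_page,
          .migrate_page = my_migrate_page,
          .putback_page = my_putback_page,
  };

Note that ClearPageMovableOpsIsolated() at line 224 runs in the core, so
the callbacks themselves need not clear the isolated state.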

remove_migration_ptes():
  433  void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
  436  .folio = src,
  445  VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
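
The src/dst pairing follows a fixed convention, visible further down in
this listing: after a successful move the migration entries are rewritten
to point at dst; on failure (and in the undo path) they are restored to
src itself. For example, from unmap_and_move_huge_page() at lines
1503-1504:

  remove_migration_ptes(src, rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);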

migrate_huge_page_move_mapping():
  689  struct folio *dst, struct folio *src)
  691  XA_STATE(xas, &mapping->i_pages, folio_index(src));
  692  int rc, expected_count = folio_expected_ref_count(src) + 1;
  694  if (folio_ref_count(src) != expected_count)
  697  rc = folio_mc_copy(dst, src);
  702  if (!folio_ref_freeze(src, expected_count)) {
  707  dst->index = src->index;
  708  dst->mapping = src->mapping;
  714  folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
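
The refcount dance above, condensed: expected_count is every reference
the mm can account for plus the caller's own; freezing the count to zero
makes concurrent speculative lookups (folio_try_get()) fail while the
mapping is rewritten, and the unfreeze subtracts folio_nr_pages() because
the page cache references now belong to dst. A sketch of the same
pattern, with the intermediate steps elided:

  int expected = folio_expected_ref_count(src) + 1;   /* +1: our ref */

  if (folio_ref_count(src) != expected)
          return -EAGAIN;                /* unexpected extra reference */
  if (!folio_ref_freeze(src, expected))
          return -EAGAIN;                /* lost a race after the check */
  /* ... transfer index, mapping and page cache slots to dst ... */
  folio_ref_unfreeze(src, expected - folio_nr_pages(src));
  return MIGRATEPAGE_SUCCESS;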

__migrate_folio():
  820  struct folio *src, void *src_private,
  823  int rc, expected_count = folio_expected_ref_count(src) + 1;
  826  if (folio_ref_count(src) != expected_count)
  829  rc = folio_mc_copy(dst, src);
  833  rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
  838  folio_attach_private(dst, folio_detach_private(src));
  840  folio_migrate_flags(dst, src);

migrate_folio():
  857  struct folio *src, enum migrate_mode mode)
  859  BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
  860  return __migrate_folio(mapping, dst, src, NULL, mode);
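
For a mapping that keeps no buffer heads or private data on its folios,
this generic helper can be wired straight into the
address_space_operations; several in-tree filesystems do exactly that
(the aops name below is illustrative):

  static const struct address_space_operations example_aops = {
          .migrate_folio = migrate_folio,
          /* ... remaining ops elided ... */
  };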

__buffer_migrate_folio():
  899  struct folio *dst, struct folio *src, enum migrate_mode mode,
  906  head = folio_buffers(src);
  908  return migrate_folio(mapping, dst, src, mode);
  911  expected_count = folio_expected_ref_count(src) + 1;
  912  if (folio_ref_count(src) != expected_count)
  947  rc = filemap_migrate_folio(mapping, dst, src, mode);

buffer_migrate_folio():
  985  struct folio *dst, struct folio *src, enum migrate_mode mode)
  987  return __buffer_migrate_folio(mapping, dst, src, mode, false);

buffer_migrate_folio_norefs():
  1006  struct folio *dst, struct folio *src, enum migrate_mode mode)
  1008  return __buffer_migrate_folio(mapping, dst, src, mode, true);

filemap_migrate_folio():
  1014  struct folio *dst, struct folio *src, enum migrate_mode mode)
  1016  return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
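
The three exported variants above cover the common cases. Plain
buffer-head filesystems use buffer_migrate_folio(); mappings whose
buffers may be reached without a folio reference take the stricter
_norefs flavour (__buffer_migrate_folio() with check_refs == true), the
block device mapping being the usual example; and mappings that keep
non-buffer private data on the folio use filemap_migrate_folio(), which
carries folio_get_private(src) across. Illustrative wiring, with made-up
struct names:

  static const struct address_space_operations bh_fs_aops = {
          .migrate_folio = buffer_migrate_folio,
  };

  static const struct address_space_operations blkdev_like_aops = {
          .migrate_folio = buffer_migrate_folio_norefs,
  };

  static const struct address_space_operations private_data_aops = {
          .migrate_folio = filemap_migrate_folio,
  };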

fallback_migrate_folio():
  1024  struct folio *dst, struct folio *src, enum migrate_mode mode)
  1029  if (folio_test_dirty(src))
  1036  if (!filemap_release_folio(src, GFP_KERNEL))
  1039  return migrate_folio(mapping, dst, src, mode);

move_to_new_folio():
  1054  static int move_to_new_folio(struct folio *dst, struct folio *src,
  1057  struct address_space *mapping = folio_mapping(src);
  1060  VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
  1064  rc = migrate_folio(mapping, dst, src, mode);
  1075  rc = mapping->a_ops->migrate_folio(mapping, dst, src,
  1078  rc = fallback_migrate_folio(mapping, dst, src, mode);
  1085  if (!folio_test_anon(src))
  1086  src->mapping = NULL;

migrate_folio_undo_src():
  1124  static void migrate_folio_undo_src(struct folio *src,
  1131  remove_migration_ptes(src, src, 0);
  1136  folio_unlock(src);
  1138  list_move_tail(&src->lru, ret);

migrate_folio_done():
  1154  static void migrate_folio_done(struct folio *src,
  1157  if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION)
  1158  mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
  1159  folio_is_file_lru(src), -folio_nr_pages(src));
  1163  folio_put(src);

migrate_folio_unmap():
  1169  struct folio *src, struct folio **dstp, enum migrate_mode mode,
  1179  if (folio_ref_count(src) == 1) {
  1181  folio_clear_active(src);
  1182  folio_clear_unevictable(src);
  1184  list_del(&src->lru);
  1185  migrate_folio_done(src, reason);
  1189  dst = get_new_folio(src, private);
  1196  if (!folio_trylock(src)) {
  1221  if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
  1224  folio_lock(src);
  1227  if (folio_test_mlocked(src))
  1230  if (folio_test_writeback(src)) {
  1244  folio_wait_writeback(src);
  1261  if (folio_test_anon(src) && !folio_test_ksm(src))
  1262  anon_vma = folio_get_anon_vma(src);
  1276  if (unlikely(page_has_movable_ops(&src->page))) {
  1293  if (!src->mapping) {
  1294  if (folio_test_private(src)) {
  1295  try_to_free_buffers(src);
  1298  } else if (folio_mapped(src)) {
  1300  VM_BUG_ON_FOLIO(folio_test_anon(src) &&
  1301  !folio_test_ksm(src) && !anon_vma, src);
  1302  try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
  1306  if (!folio_mapped(src)) {
  1319  migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,

migrate_folio_move():
  1328  struct folio *src, struct folio *dst,
  1341  if (unlikely(page_has_movable_ops(&src->page))) {
  1342  rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
  1348  rc = move_to_new_folio(dst, src, mode);
  1366  remove_migration_ptes(src, dst, 0);
  1382  list_del(&src->lru);
  1386  folio_unlock(src);
  1387  migrate_folio_done(src, reason);
  1401  migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,

unmap_and_move_huge_page():
  1428  struct folio *src, int force, enum migrate_mode mode,
  1437  if (folio_ref_count(src) == 1) {
  1439  folio_putback_hugetlb(src);
  1443  dst = get_new_folio(src, private);
  1447  if (!folio_trylock(src)) {
  1456  folio_lock(src);
  1464  if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
  1469  if (folio_test_anon(src))
  1470  anon_vma = folio_get_anon_vma(src);
  1475  if (folio_mapped(src)) {
  1478  if (!folio_test_anon(src)) {
  1485  mapping = hugetlb_folio_mapping_lock_write(src);
  1492  try_to_migrate(src, ttu);
  1499  if (!folio_mapped(src))
  1500  rc = move_to_new_folio(dst, src, mode);
  1503  remove_migration_ptes(src,
  1504  rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
  1514  move_hugetlb_state(src, dst, reason);
  1519  folio_unlock(src);
  1522  folio_putback_hugetlb(src);
  1524  list_move_tail(&src->lru, ret);

alloc_migration_target():
  2136  struct folio *alloc_migration_target(struct folio *src, unsigned long private)
  2148  nid = folio_nid(src);
  2150  if (folio_test_hugetlb(src)) {
  2151  struct hstate *h = folio_hstate(src);
  2159  if (folio_test_large(src)) {
  2166  order = folio_order(src);
  2168  zidx = zone_idx(folio_zone(src));
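
How this allocator callback is typically driven: mm-internal callers
(memory hotplug is one) pack a struct migration_target_control into the
opaque private argument of migrate_pages(), and alloc_migration_target()
unpacks it; with .nid set to NUMA_NO_NODE it falls back to folio_nid(src)
at line 2148 above. The wrapper below is an illustrative sketch, not an
exported API:

  static int migrate_away(struct list_head *source)
  {
          struct migration_target_control mtc = {
                  .nid      = NUMA_NO_NODE, /* use folio_nid(src) per folio */
                  .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
          };

          return migrate_pages(source, alloc_migration_target, NULL,
                               (unsigned long)&mtc, MIGRATE_SYNC,
                               MR_MEMORY_HOTPLUG, NULL);
  }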

alloc_misplaced_dst_folio():
  2603  static struct folio *alloc_misplaced_dst_folio(struct folio *src,
  2607  int order = folio_order(src);