Lines matching refs:dst (references to dst in mm/migrate.c)
215 static int migrate_movable_ops_page(struct page *dst, struct page *src, in migrate_movable_ops_page() argument
222 rc = page_movable_ops(src)->migrate_page(dst, src, mode); in migrate_movable_ops_page()
433 void remove_migration_ptes(struct folio *src, struct folio *dst, int flags) in remove_migration_ptes() argument
445 VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src); in remove_migration_ptes()
448 rmap_walk_locked(dst, &rwc); in remove_migration_ptes()
450 rmap_walk(dst, &rwc); in remove_migration_ptes()
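The remove_migration_ptes() hits above only show the tail of the function: a sanity check that the shared-zeropage path is used solely when src == dst, and a choice between the locked and unlocked reverse-map walk, both run against dst. A minimal sketch of that shape follows (it would only build inside mm/migrate.c); the rmap_walk_control wiring -- the remove_migration_pte callback and src being handed through .arg -- is an assumption about how the surrounding code is structured, not something visible in the listing.

        /*
         * Sketch only: the walk-control wiring is assumed; the VM_BUG_ON and
         * the locked/unlocked dispatch are taken from the lines above.
         */
        static void remove_migration_ptes_sketch(struct folio *src, struct folio *dst,
                                                 int flags)
        {
                struct rmap_walk_control rwc = {
                        .rmap_one = remove_migration_pte,  /* assumed per-PTE fixup callback */
                        .arg      = src,                   /* assumed: src handed to the callback */
                };

                VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);

                if (flags & RMP_LOCKED)          /* caller already holds the rmap lock */
                        rmap_walk_locked(dst, &rwc);
                else
                        rmap_walk(dst, &rwc);
        }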
689 struct folio *dst, struct folio *src) in migrate_huge_page_move_mapping() argument
697 rc = folio_mc_copy(dst, src); in migrate_huge_page_move_mapping()
707 dst->index = src->index; in migrate_huge_page_move_mapping()
708 dst->mapping = src->mapping; in migrate_huge_page_move_mapping()
710 folio_ref_add(dst, folio_nr_pages(dst)); in migrate_huge_page_move_mapping()
712 xas_store(&xas, dst); in migrate_huge_page_move_mapping()
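Taken together, the migrate_huge_page_move_mapping() hits outline how dst takes over src's place in the page cache: copy the data, inherit index and mapping, take page-cache references on dst, then store dst into src's XArray slot. A simplified in-tree sketch of that sequence, with the refcount freeze/unfreeze on src, the i_pages locking and the error paths deliberately elided:

        static int migrate_huge_page_move_mapping_sketch(struct address_space *mapping,
                                                         struct folio *dst,
                                                         struct folio *src)
        {
                XA_STATE(xas, &mapping->i_pages, folio_index(src));
                int rc;

                rc = folio_mc_copy(dst, src);            /* machine-check-aware data copy */
                if (rc)
                        return rc;

                dst->index = src->index;                 /* dst takes over src's place ... */
                dst->mapping = src->mapping;             /* ... in the same address_space */

                folio_ref_add(dst, folio_nr_pages(dst)); /* refs now held by i_pages */
                xas_store(&xas, dst);                    /* swap dst into the XArray slot */

                return MIGRATEPAGE_SUCCESS;
        }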
819 static int __migrate_folio(struct address_space *mapping, struct folio *dst, in __migrate_folio() argument
829 rc = folio_mc_copy(dst, src); in __migrate_folio()
833 rc = __folio_migrate_mapping(mapping, dst, src, expected_count); in __migrate_folio()
838 folio_attach_private(dst, folio_detach_private(src)); in __migrate_folio()
840 folio_migrate_flags(dst, src); in __migrate_folio()
856 int migrate_folio(struct address_space *mapping, struct folio *dst, in migrate_folio() argument
860 return __migrate_folio(mapping, dst, src, NULL, mode); in migrate_folio()
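__migrate_folio() is the common engine behind migrate_folio() above and filemap_migrate_folio() further down: copy the contents, switch the mapping over to dst, hand over any attached private data, then copy the folio flags. A condensed sketch of that order, assuming the usual expected-refcount bookkeeping (the folio_expected_ref_count() helper and the exact error handling are assumptions, not shown in the listing):

        static int __migrate_folio_sketch(struct address_space *mapping,
                                          struct folio *dst, struct folio *src,
                                          void *src_private, enum migrate_mode mode)
        {
                int rc, expected_count = folio_expected_ref_count(src) + 1; /* assumed helper */

                rc = folio_mc_copy(dst, src);            /* copy data, bail on MC error */
                if (rc)
                        return rc;

                rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
                if (rc != MIGRATEPAGE_SUCCESS)           /* mapping still points at src */
                        return rc;

                if (src_private)                         /* hand over fs-private data */
                        folio_attach_private(dst, folio_detach_private(src));

                folio_migrate_flags(dst, src);           /* dirty, uptodate, mlock, ... */
                return MIGRATEPAGE_SUCCESS;
        }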
899 struct folio *dst, struct folio *src, enum migrate_mode mode, in __buffer_migrate_folio() argument
908 return migrate_folio(mapping, dst, src, mode); in __buffer_migrate_folio()
947 rc = filemap_migrate_folio(mapping, dst, src, mode); in __buffer_migrate_folio()
953 folio_set_bh(bh, dst, bh_offset(bh)); in __buffer_migrate_folio()
985 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio() argument
987 return __buffer_migrate_folio(mapping, dst, src, mode, false); in buffer_migrate_folio()
1006 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio_norefs() argument
1008 return __buffer_migrate_folio(mapping, dst, src, mode, true); in buffer_migrate_folio_norefs()
1014 struct folio *dst, struct folio *src, enum migrate_mode mode) in filemap_migrate_folio() argument
1016 return __migrate_folio(mapping, dst, src, folio_get_private(src), mode); in filemap_migrate_folio()
1024 struct folio *dst, struct folio *src, enum migrate_mode mode) in fallback_migrate_folio() argument
1039 return migrate_folio(mapping, dst, src, mode); in fallback_migrate_folio()
1054 static int move_to_new_folio(struct folio *dst, struct folio *src, in move_to_new_folio() argument
1061 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); in move_to_new_folio()
1064 rc = migrate_folio(mapping, dst, src, mode); in move_to_new_folio()
1075 rc = mapping->a_ops->migrate_folio(mapping, dst, src, in move_to_new_folio()
1078 rc = fallback_migrate_folio(mapping, dst, src, mode); in move_to_new_folio()
1088 if (likely(!folio_is_zone_device(dst))) in move_to_new_folio()
1089 flush_dcache_folio(dst); in move_to_new_folio()
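move_to_new_folio() is the dispatcher those callbacks hang off: folios with no mapping go through plain migrate_folio(), filesystems providing a migrate_folio address_space operation get called directly, and everything else takes fallback_migrate_folio(); on success the data now lives in dst, so its dcache is flushed unless it is a zone-device folio. A sketch of just that dispatch (movable_ops pages and the post-success cleanup of src are left out):

        static int move_to_new_folio_sketch(struct folio *dst, struct folio *src,
                                            enum migrate_mode mode)
        {
                struct address_space *mapping = folio_mapping(src);
                int rc;

                VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

                if (!mapping)
                        rc = migrate_folio(mapping, dst, src, mode);    /* no a_ops to consult */
                else if (mapping->a_ops->migrate_folio)
                        rc = mapping->a_ops->migrate_folio(mapping, dst, src, mode);
                else
                        rc = fallback_migrate_folio(mapping, dst, src, mode);

                /* dst now carries the data the CPU will see: make caches coherent */
                if (rc == MIGRATEPAGE_SUCCESS && likely(!folio_is_zone_device(dst)))
                        flush_dcache_folio(dst);

                return rc;
        }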
1105 static void __migrate_folio_record(struct folio *dst, in __migrate_folio_record() argument
1109 dst->private = (void *)anon_vma + old_page_state; in __migrate_folio_record()
1112 static void __migrate_folio_extract(struct folio *dst, in __migrate_folio_extract() argument
1116 unsigned long private = (unsigned long)dst->private; in __migrate_folio_extract()
1120 dst->private = NULL; in __migrate_folio_extract()
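__migrate_folio_record() and __migrate_folio_extract() stash two things in dst->private at once: the anon_vma pointer and a couple of low flag bits describing the old page state (line 1109 adds the bits with void-pointer arithmetic, which for an aligned pointer is the same as OR-ing them in). Below is a standalone userspace model of that pointer-tagging trick; the flag names are made up stand-ins for the kernel's old-page-state bits, and the struct is a toy, not struct anon_vma.

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        /*
         * An 8-byte-aligned pointer has free low bits, so small state flags
         * can be folded into it and masked back out later.
         */
        #define WAS_MAPPED      0x1UL
        #define WAS_MLOCKED     0x2UL
        #define STATE_MASK      (WAS_MAPPED | WAS_MLOCKED)

        struct anon_vma_model { int dummy; } __attribute__((aligned(8)));

        static void *record(struct anon_vma_model *av, unsigned long state)
        {
                /* same effect as "(void *)anon_vma + old_page_state" on line 1109 */
                return (void *)((uintptr_t)av | state);
        }

        static struct anon_vma_model *extract(void *priv, unsigned long *state)
        {
                uintptr_t v = (uintptr_t)priv;

                *state = v & STATE_MASK;                        /* pull the flag bits out */
                return (struct anon_vma_model *)(v & ~STATE_MASK); /* recover the pointer */
        }

        int main(void)
        {
                struct anon_vma_model av;
                unsigned long state;
                void *packed = record(&av, WAS_MAPPED);
                struct anon_vma_model *out = extract(packed, &state);

                assert(out == &av && state == WAS_MAPPED);
                printf("anon_vma recovered, state=%#lx\n", state);
                return 0;
        }

The extract step masks the flag bits off to recover the pointer and then the caller clears the field, which is what lines 1116-1120 do with dst->private.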
1142 static void migrate_folio_undo_dst(struct folio *dst, bool locked, in migrate_folio_undo_dst() argument
1146 folio_unlock(dst); in migrate_folio_undo_dst()
1148 put_new_folio(dst, private); in migrate_folio_undo_dst()
1150 folio_put(dst); in migrate_folio_undo_dst()
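migrate_folio_undo_dst() is the give-back path for a destination folio that will not be used after all. The listing shows its three actions; the sketch below spells out the contract they imply, assuming the obvious guards: unlock dst only if this path locked it, and return it through the caller-supplied put_new_folio() callback when one exists, otherwise just drop the reference.

        static void migrate_folio_undo_dst_sketch(struct folio *dst, bool locked,
                                                  free_folio_t put_new_folio,
                                                  unsigned long private)
        {
                if (locked)
                        folio_unlock(dst);
                if (put_new_folio)
                        put_new_folio(dst, private);    /* allocator wants it back */
                else
                        folio_put(dst);                 /* otherwise just drop the ref */
        }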
1172 struct folio *dst; in migrate_folio_unmap() local
1189 dst = get_new_folio(src, private); in migrate_folio_unmap()
1190 if (!dst) in migrate_folio_unmap()
1192 *dstp = dst; in migrate_folio_unmap()
1194 dst->private = NULL; in migrate_folio_unmap()
1272 if (unlikely(!folio_trylock(dst))) in migrate_folio_unmap()
1277 __migrate_folio_record(dst, old_page_state, anon_vma); in migrate_folio_unmap()
1307 __migrate_folio_record(dst, old_page_state, anon_vma); in migrate_folio_unmap()
1321 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private); in migrate_folio_unmap()
1328 struct folio *src, struct folio *dst, in migrate_folio_move() argument
1337 __migrate_folio_extract(dst, &old_page_state, &anon_vma); in migrate_folio_move()
1338 prev = dst->lru.prev; in migrate_folio_move()
1339 list_del(&dst->lru); in migrate_folio_move()
1342 rc = migrate_movable_ops_page(&dst->page, &src->page, mode); in migrate_folio_move()
1348 rc = move_to_new_folio(dst, src, mode); in migrate_folio_move()
1361 folio_add_lru(dst); in migrate_folio_move()
1366 remove_migration_ptes(src, dst, 0); in migrate_folio_move()
1369 folio_unlock(dst); in migrate_folio_move()
1370 folio_set_owner_migrate_reason(dst, reason); in migrate_folio_move()
1376 folio_put(dst); in migrate_folio_move()
1396 list_add(&dst->lru, prev); in migrate_folio_move()
1397 __migrate_folio_record(dst, old_page_state, anon_vma); in migrate_folio_move()
1403 migrate_folio_undo_dst(dst, true, put_new_folio, private); in migrate_folio_move()
1431 struct folio *dst; in unmap_and_move_huge_page() local
1443 dst = get_new_folio(src, private); in unmap_and_move_huge_page()
1444 if (!dst) in unmap_and_move_huge_page()
1472 if (unlikely(!folio_trylock(dst))) in unmap_and_move_huge_page()
1500 rc = move_to_new_folio(dst, src, mode); in unmap_and_move_huge_page()
1504 rc == MIGRATEPAGE_SUCCESS ? dst : src, 0); in unmap_and_move_huge_page()
1507 folio_unlock(dst); in unmap_and_move_huge_page()
1514 move_hugetlb_state(src, dst, reason); in unmap_and_move_huge_page()
1532 put_new_folio(dst, private); in unmap_and_move_huge_page()
1534 folio_put(dst); in unmap_and_move_huge_page()
1684 struct folio *folio, *folio2, *dst, *dst2; in migrate_folios_move() local
1689 dst = list_first_entry(dst_folios, struct folio, lru); in migrate_folios_move()
1690 dst2 = list_next_entry(dst, lru); in migrate_folios_move()
1698 folio, dst, mode, in migrate_folios_move()
1722 dst = dst2; in migrate_folios_move()
1723 dst2 = list_next_entry(dst, lru); in migrate_folios_move()
1732 struct folio *folio, *folio2, *dst, *dst2; in migrate_folios_undo() local
1734 dst = list_first_entry(dst_folios, struct folio, lru); in migrate_folios_undo()
1735 dst2 = list_next_entry(dst, lru); in migrate_folios_undo()
1740 __migrate_folio_extract(dst, &old_page_state, &anon_vma); in migrate_folios_undo()
1743 list_del(&dst->lru); in migrate_folios_undo()
1744 migrate_folio_undo_dst(dst, true, put_new_folio, private); in migrate_folios_undo()
1745 dst = dst2; in migrate_folios_undo()
1746 dst2 = list_next_entry(dst, lru); in migrate_folios_undo()
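Both migrate_folios_move() and migrate_folios_undo() above walk the source and destination lists in lockstep, and both fetch the next destination entry (dst2) before touching the current one, because handling a pair can unlink dst from its list. The standalone userspace model below (toy node type and hypothetical helper names, not kernel code) shows why that look-ahead is needed.

        #include <stdio.h>

        /* Toy doubly-linked node standing in for the folio ->lru linkage. */
        struct node {
                int id;
                struct node *prev, *next;
        };

        static void list_init(struct node *head)
        {
                head->prev = head->next = head;
        }

        static void list_add_tail(struct node *n, struct node *head)
        {
                n->prev = head->prev;
                n->next = head;
                head->prev->next = n;
                head->prev = n;
        }

        static void list_del(struct node *n)
        {
                n->prev->next = n->next;
                n->next->prev = n->prev;
        }

        /*
         * Walk the src and dst lists in lockstep.  The *next* dst node (dst2)
         * is fetched before the current pair is handled, mirroring the
         * dst/dst2 dance above: handling a pair may unlink the current dst,
         * so its ->next must not be read afterwards.
         */
        static void walk_pairs(struct node *src_head, struct node *dst_head)
        {
                struct node *src = src_head->next, *src2 = src->next;
                struct node *dst = dst_head->next, *dst2 = dst->next;

                while (src != src_head) {
                        printf("pair: src %d <-> dst %d\n", src->id, dst->id);
                        list_del(dst);          /* "handling" removes dst from its list */

                        src = src2;
                        src2 = src->next;
                        dst = dst2;
                        dst2 = dst->next;
                }
        }

        int main(void)
        {
                struct node src_head, dst_head;
                struct node src_nodes[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
                struct node dst_nodes[3] = { { .id = 10 }, { .id = 11 }, { .id = 12 } };
                int i;

                list_init(&src_head);
                list_init(&dst_head);
                for (i = 0; i < 3; i++) {
                        list_add_tail(&src_nodes[i], &src_head);
                        list_add_tail(&dst_nodes[i], &dst_head);
                }

                walk_pairs(&src_head, &dst_head);
                return 0;
        }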
1772 struct folio *folio, *folio2, *dst = NULL; in migrate_pages_batch() local
1852 private, folio, &dst, mode, reason, in migrate_pages_batch()
1914 list_add_tail(&dst->lru, &dst_folios); in migrate_pages_batch()