Lines matching refs: mapping
Cross-reference hits for the identifier "mapping" in the folio-migration paths of mm/migrate.c. Each hit shows the source line number, the matching code, and the enclosing function; the trailing "argument" or "local" notes how the identifier is bound at that site.
541 static int __folio_migrate_mapping(struct address_space *mapping, in __folio_migrate_mapping() argument
544 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); in __folio_migrate_mapping()
550 if (!mapping) { in __folio_migrate_mapping()
562 newfolio->mapping = folio->mapping; in __folio_migrate_mapping()
588 newfolio->mapping = folio->mapping; in __folio_migrate_mapping()
660 if (dirty && mapping_can_writeback(mapping)) { in __folio_migrate_mapping()
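
The hits above trace both halves of __folio_migrate_mapping(): an anonymous folio (NULL mapping) only needs its metadata carried over, while a pagecache folio is swapped into the i_pages xarray with its refcount frozen. A condensed sketch, paraphrased from recent kernel sources (roughly v6.11; elided steps are marked and details vary by version):

static int __folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int expected_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	long nr = folio_nr_pages(folio);
	int dirty;

	if (!mapping) {
		/* Anonymous: no i_pages slot to update, just carry metadata
		 * over (large-folio corner cases elided). */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;		/* hit at 562 */
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);
		return MIGRATEPAGE_SUCCESS;
	}

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;		/* someone still holds a reference */
	}

	/* Refcount frozen: nobody else can look the folio up anymore. */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;			/* hit at 588 */
	folio_ref_add(newfolio, nr);	/* cache reference for newfolio */

	/* Move the dirty bit while the folio is frozen. */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	/* (swapcache state transfer and multi-entry stores elided) */
	xas_store(&xas, newfolio);	/* replace folio in the xarray */

	/* Unfreeze, dropping the cache reference the old folio held. */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* irqs stay disabled while memcg/zone stats are moved: */
	if (dirty && mapping_can_writeback(mapping)) {	/* hit at 660 */
		/* shift NR_FILE_DIRTY / NR_ZONE_WRITE_PENDING from the old
		 * zone's counters to the new zone's (elided) */
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}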
672 int folio_migrate_mapping(struct address_space *mapping, in folio_migrate_mapping() argument
680 return __folio_migrate_mapping(mapping, newfolio, folio, expected_count); in folio_migrate_mapping()
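
folio_migrate_mapping() is a thin exported wrapper: it computes the reference count the folio should have if nobody else is using it and bails out early otherwise. A sketch under the same caveats; folio_expected_refs() is the file's internal helper, and extra_count lets a caller account for references it intentionally holds:

int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;

	if (folio_ref_count(folio) != expected_count)
		return -EAGAIN;	/* unexpected extra references, retry later */

	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
}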
688 int migrate_huge_page_move_mapping(struct address_space *mapping, in migrate_huge_page_move_mapping() argument
691 XA_STATE(xas, &mapping->i_pages, folio_index(src)); in migrate_huge_page_move_mapping()
708 dst->mapping = src->mapping; in migrate_huge_page_move_mapping()
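
The hugetlb variant performs the same frozen-refcount xarray swap for a hugetlb folio, minus the LRU, dirty, and zone-stat bookkeeping regular folios need. Condensed sketch, same caveats:

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count = folio_expected_refs(mapping, src);

	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	/* (machine-check-safe data copy via folio_mc_copy() elided) */

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;	/* hit at 708 */

	folio_ref_add(dst, 1);		/* cache reference */
	xas_store(&xas, dst);
	folio_ref_unfreeze(src, expected_count - 1);
	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}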
819 static int __migrate_folio(struct address_space *mapping, struct folio *dst, in __migrate_folio() argument
833 rc = __folio_migrate_mapping(mapping, dst, src, expected_count); in __migrate_folio()
856 int migrate_folio(struct address_space *mapping, struct folio *dst, in migrate_folio() argument
860 return __migrate_folio(mapping, dst, src, NULL, mode); in migrate_folio()
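
__migrate_folio() is the common engine behind migrate_folio() and filemap_migrate_folio(): verify the refcount, copy the data, move the xarray slot via __folio_migrate_mapping(), then transfer private data and folio flags. Sketch under the same caveats:

static int __migrate_folio(struct address_space *mapping, struct folio *dst,
			   struct folio *src, void *src_private,
			   enum migrate_mode mode)
{
	int rc, expected_count = folio_expected_refs(mapping, src);

	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;	/* extra references, cannot migrate now */

	rc = folio_mc_copy(dst, src);	/* machine-check-safe data copy */
	if (unlikely(rc))
		return rc;

	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);	/* hit at 833 */
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (src_private)
		folio_attach_private(dst, folio_detach_private(src));

	folio_migrate_flags(dst, src);	/* dirty, uptodate, mappedtodisk, ... */
	return MIGRATEPAGE_SUCCESS;
}

int migrate_folio(struct address_space *mapping, struct folio *dst,
		  struct folio *src, enum migrate_mode mode)
{
	BUG_ON(folio_test_writeback(src));	/* writeback must have finished */
	return __migrate_folio(mapping, dst, src, NULL, mode);
}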
898 static int __buffer_migrate_folio(struct address_space *mapping, in __buffer_migrate_folio() argument
908 return migrate_folio(mapping, dst, src, mode); in __buffer_migrate_folio()
926 spin_lock(&mapping->i_private_lock); in __buffer_migrate_folio()
935 spin_unlock(&mapping->i_private_lock); in __buffer_migrate_folio()
947 rc = filemap_migrate_folio(mapping, dst, src, mode); in __buffer_migrate_folio()
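
For block-device and similar mappings backed by buffer heads, __buffer_migrate_folio() must also keep bh state consistent. The i_private_lock hits belong to the check_refs path used by buffer_migrate_folio_norefs() (lines 1005-1008 below): with mapping->i_private_lock held, no buffer head may carry an elevated b_count while the folio moves. Condensed sketch, same caveats (buffer_migrate_lock_buffers() is the file's helper that locks every bh, waiting only in sync modes):

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head = folio_buffers(src);
	int rc;

	if (!head)	/* no buffers: plain pagecache migration */
		return migrate_folio(mapping, dst, src, mode);	/* hit at 908 */

	if (folio_ref_count(src) != folio_expected_refs(mapping, src))
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;	/* couldn't lock every bh without waiting */

	if (check_refs) {
		bool busy, invalidated = false;
recheck_buffers:
		busy = false;
		spin_lock(&mapping->i_private_lock);	/* hit at 926 */
		bh = head;
		do {	/* any elevated b_count forbids migration */
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->i_private_lock);	/* hit at 935 */
			invalidate_bh_lrus();	/* drop cached bh refs, retry once */
			invalidated = true;
			goto recheck_buffers;
		}
	}

	/* The lock stays held here so no new bh references can appear. */
	rc = filemap_migrate_folio(mapping, dst, src, mode);	/* hit at 947 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		bh = head;
		do {	/* re-point every buffer head at dst */
			folio_set_bh(bh, dst, bh_offset(bh));
			bh = bh->b_this_page;
		} while (bh != head);
	}
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->i_private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}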
984 int buffer_migrate_folio(struct address_space *mapping, in buffer_migrate_folio() argument
987 return __buffer_migrate_folio(mapping, dst, src, mode, false); in buffer_migrate_folio()
1005 int buffer_migrate_folio_norefs(struct address_space *mapping, in buffer_migrate_folio_norefs() argument
1008 return __buffer_migrate_folio(mapping, dst, src, mode, true); in buffer_migrate_folio_norefs()
1013 int filemap_migrate_folio(struct address_space *mapping, in filemap_migrate_folio() argument
1016 return __migrate_folio(mapping, dst, src, folio_get_private(src), mode); in filemap_migrate_folio()
1023 static int fallback_migrate_folio(struct address_space *mapping, in fallback_migrate_folio() argument
1026 WARN_ONCE(mapping->a_ops->writepages, in fallback_migrate_folio()
1028 mapping->a_ops); in fallback_migrate_folio()
1039 return migrate_folio(mapping, dst, src, mode); in fallback_migrate_folio()
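
When a filesystem supplies no migrate_folio method, fallback_migrate_folio() can only move clean folios whose private data can be dropped; the WARN_ONCE nudges filesystem authors to implement the callback. This one is short enough to reconstruct nearly verbatim (same version caveats):

static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	WARN_ONCE(mapping->a_ops->writepages,
		  "%ps does not implement migrate_folio\n",
		  mapping->a_ops);	/* hits at 1026-1028 */

	/* No way to write the folio back from here, so dirty means -EBUSY. */
	if (folio_test_dirty(src))
		return -EBUSY;

	/* Filesystem-private data (e.g. buffer heads) must be droppable. */
	if (!filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);	/* hit at 1039 */
}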
1057 struct address_space *mapping = folio_mapping(src); in move_to_new_folio() local
1063 if (!mapping) in move_to_new_folio()
1064 rc = migrate_folio(mapping, dst, src, mode); in move_to_new_folio()
1065 else if (mapping_inaccessible(mapping)) in move_to_new_folio()
1067 else if (mapping->a_ops->migrate_folio) in move_to_new_folio()
1075 rc = mapping->a_ops->migrate_folio(mapping, dst, src, in move_to_new_folio()
1078 rc = fallback_migrate_folio(mapping, dst, src, mode); in move_to_new_folio()
1086 src->mapping = NULL; in move_to_new_folio()
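
move_to_new_folio() is where the mapping decides the migration strategy: anonymous folios take plain migrate_folio(), inaccessible mappings (address spaces whose contents the CPU may not touch) refuse migration, filesystems with a migrate_folio address_space op get it called, and everything else hits the fallback. On success, a pagecache folio's mapping pointer is cleared before the old folio is freed. Sketch, same caveats:

static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	struct address_space *mapping = folio_mapping(src);	/* hit at 1057 */

	/* (non-LRU movable folios take a movable_ops path, elided) */
	if (!mapping)
		rc = migrate_folio(mapping, dst, src, mode);	/* anon, no cache */
	else if (mapping_inaccessible(mapping))
		rc = -EOPNOTSUPP;
	else if (mapping->a_ops->migrate_folio)
		/* The common case: the filesystem (or swap) provides the op. */
		rc = mapping->a_ops->migrate_folio(mapping, dst, src, mode);
	else
		rc = fallback_migrate_folio(mapping, dst, src, mode);

	if (rc == MIGRATEPAGE_SUCCESS) {
		/*
		 * Pagecache folios must not point at the mapping once src is
		 * freed; anonymous folios keep src->mapping (it encodes the
		 * anon_vma) until they are freed.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;	/* hit at 1086 */
	}
	return rc;
}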
1293 if (!src->mapping) { in migrate_folio_unmap()
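
The single hit in migrate_folio_unmap() handles an orphaned folio: if it lost its mapping (e.g. to truncation) before we locked it, there is nothing to migrate beyond freeing stale buffer heads. Roughly, with the surrounding context abridged:

	/* Inside migrate_folio_unmap(), with src locked: */
	if (!src->mapping) {			/* hit at 1293 */
		if (folio_test_private(src)) {
			/* Orphaned by truncation: just drop stale buffers. */
			try_to_free_buffers(src);
			goto out;	/* cleanup label in the full function */
		}
	} else if (folio_mapped(src)) {
		/* Replace the folio's PTEs with migration entries. */
		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
	}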
1435 struct address_space *mapping = NULL; in unmap_and_move_huge_page() local
1485 mapping = hugetlb_folio_mapping_lock_write(src); in unmap_and_move_huge_page()
1486 if (unlikely(!mapping)) in unmap_and_move_huge_page()
1496 i_mmap_unlock_write(mapping); in unmap_and_move_huge_page()
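
In the hugetlb path, mapping plays a locking role rather than a dispatch role: for shared (non-anonymous) hugetlb folios, i_mmap_rwsem must be held for write while migration entries are installed, because unmapping may unshare huge PMDs. hugetlb_folio_mapping_lock_write() returns NULL if the folio no longer has a mapping, which aborts the migration. Condensed sketch, same caveats:

	/* Inside unmap_and_move_huge_page(): */
	struct address_space *mapping = NULL;	/* hit at 1435 */

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * try_to_migrate() may huge_pmd_unshare(), so take
			 * i_mmap_rwsem for write and tell the rmap walk the
			 * lock is already held.
			 */
			mapping = hugetlb_folio_mapping_lock_write(src);	/* hit at 1485 */
			if (unlikely(!mapping))
				goto out;	/* cleanup label, abridged */
			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);	/* hit at 1496 */
	}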