Lines matching refs:mapping (references to the identifier mapping in mm/shmem.c)
278 bool shmem_mapping(struct address_space *mapping) in shmem_mapping() argument
280 return mapping->a_ops == &shmem_aops; in shmem_mapping()
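shmem_mapping() identifies a tmpfs/shmem address_space purely by its a_ops pointer. A minimal sketch of how a caller elsewhere in the kernel might use it (the helper name below is hypothetical; shmem_mapping() itself is declared in linux/shmem_fs.h):

#include <linux/fs.h>
#include <linux/shmem_fs.h>

/* Hypothetical helper: true when the file's page cache is shmem/tmpfs backed. */
static bool example_file_is_shmem(struct file *file)
{
        return shmem_mapping(file->f_mapping);
}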
473 struct address_space *mapping = inode->i_mapping; in shmem_charge() local
479 xa_lock_irq(&mapping->i_pages); in shmem_charge()
480 mapping->nrpages += pages; in shmem_charge()
481 xa_unlock_irq(&mapping->i_pages); in shmem_charge()
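shmem_charge() adjusts the mapping's page accounting under the i_pages xarray lock with interrupts disabled. A sketch of just that locking pattern, under a hypothetical name:

#include <linux/fs.h>
#include <linux/xarray.h>

/* Hypothetical helper mirroring the accounting pattern used by shmem_charge(). */
static void example_account_pages(struct address_space *mapping, long pages)
{
        xa_lock_irq(&mapping->i_pages);
        mapping->nrpages += pages;
        xa_unlock_irq(&mapping->i_pages);
}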
498 static int shmem_replace_entry(struct address_space *mapping, in shmem_replace_entry() argument
501 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
521 static int shmem_confirm_swap(struct address_space *mapping, pgoff_t index, in shmem_confirm_swap() argument
524 XA_STATE(xas, &mapping->i_pages, index); in shmem_confirm_swap()
590 shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end) in shmem_mapping_size_orders() argument
595 if (!mapping_large_folio_support(mapping) || !write_end) in shmem_mapping_size_orders()
902 struct address_space *mapping, in shmem_add_to_page_cache() argument
905 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); in shmem_add_to_page_cache()
915 folio->mapping = mapping; in shmem_add_to_page_cache()
946 mapping->nrpages += nr; in shmem_add_to_page_cache()
952 folio->mapping = NULL; in shmem_add_to_page_cache()
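shmem_add_to_page_cache() claims the folio for the mapping, stores it in the i_pages xarray, and bumps nrpages; on failure the claim is undone by clearing folio->mapping. A simplified sketch of that insert pattern, assuming no shadow entries, no memcg charging and no multi-order splitting (example_add_folio() is hypothetical):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hypothetical, simplified version of the insert done by shmem_add_to_page_cache(). */
static int example_add_folio(struct address_space *mapping,
                             struct folio *folio, pgoff_t index, gfp_t gfp)
{
        XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
        long nr = folio_nr_pages(folio);

        folio_get(folio);
        folio->mapping = mapping;
        folio->index = index;

        do {
                xas_lock_irq(&xas);
                xas_store(&xas, folio);
                if (!xas_error(&xas))
                        mapping->nrpages += nr;
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (xas_error(&xas)) {
                /* Undo the claim so the folio can be freed or retried. */
                folio->mapping = NULL;
                folio_put(folio);
                return xas_error(&xas);
        }
        return 0;
}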
965 struct address_space *mapping = folio->mapping; in shmem_delete_from_page_cache() local
969 xa_lock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
970 error = shmem_replace_entry(mapping, folio->index, folio, radswap); in shmem_delete_from_page_cache()
971 folio->mapping = NULL; in shmem_delete_from_page_cache()
972 mapping->nrpages -= nr; in shmem_delete_from_page_cache()
974 xa_unlock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
984 static long shmem_free_swap(struct address_space *mapping, in shmem_free_swap() argument
987 int order = xa_get_order(&mapping->i_pages, index); in shmem_free_swap()
990 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); in shmem_free_swap()
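shmem_free_swap() removes a cached swap entry only if it still matches the expected value, using a compare-and-exchange on the xarray; xa_get_order() reports how many slots a large entry covered. A sketch of that pattern under a hypothetical name (the real function additionally releases the swap entries themselves):

#include <linux/fs.h>
#include <linux/xarray.h>

/* Hypothetical helper: drop the entry at @index only if it is still
 * @expected, and return how many slots it covered. */
static long example_drop_entry(struct address_space *mapping,
                               pgoff_t index, void *expected)
{
        int order = xa_get_order(&mapping->i_pages, index);
        void *old;

        old = xa_cmpxchg_irq(&mapping->i_pages, index, expected, NULL, 0);
        if (old != expected)
                return 0;

        return 1 << order;
}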
1005 unsigned long shmem_partial_swap_usage(struct address_space *mapping, in shmem_partial_swap_usage() argument
1008 XA_STATE(xas, &mapping->i_pages, start); in shmem_partial_swap_usage()
1042 struct address_space *mapping = inode->i_mapping; in shmem_swap_usage() local
1060 return shmem_partial_swap_usage(mapping, vma->vm_pgoff, in shmem_swap_usage()
1067 void shmem_unlock_mapping(struct address_space *mapping) in shmem_unlock_mapping() argument
1076 while (!mapping_unevictable(mapping) && in shmem_unlock_mapping()
1077 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) { in shmem_unlock_mapping()
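shmem_unlock_mapping() rescans the whole mapping in folio batches until the mapping is no longer flagged unevictable. A sketch of that batched walk, with the per-folio work elided (example_rescan_mapping() is hypothetical):

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Hypothetical scan loop in the style of shmem_unlock_mapping(). */
static void example_rescan_mapping(struct address_space *mapping)
{
        struct folio_batch fbatch;
        pgoff_t index = 0;

        folio_batch_init(&fbatch);
        while (!mapping_unevictable(mapping) &&
               filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
                /* ... per-folio work; the real code moves folios back to
                 * the correct LRU lists ... */
                folio_batch_release(&fbatch);
                cond_resched();
        }
}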
1097 if (folio->mapping == inode->i_mapping) in shmem_get_partial_folio()
1119 struct address_space *mapping = inode->i_mapping; in shmem_undo_range() local
1139 while (index < end && find_lock_entries(mapping, &index, end - 1, in shmem_undo_range()
1147 nr_swaps_freed += shmem_free_swap(mapping, in shmem_undo_range()
1153 truncate_inode_folio(mapping, folio); in shmem_undo_range()
1201 if (!find_get_entries(mapping, &index, end - 1, &fbatch, in shmem_undo_range()
1218 swaps_freed = shmem_free_swap(mapping, indices[i], folio); in shmem_undo_range()
1231 if (folio_mapping(folio) != mapping) { in shmem_undo_range()
1241 truncate_inode_folio(mapping, folio); in shmem_undo_range()
1428 static unsigned int shmem_find_swap_entries(struct address_space *mapping, in shmem_find_swap_entries() argument
1432 XA_STATE(xas, &mapping->i_pages, start); in shmem_find_swap_entries()
1476 struct address_space *mapping = inode->i_mapping; in shmem_unuse_swap_entries() local
1482 mapping_gfp_mask(mapping), NULL, NULL); in shmem_unuse_swap_entries()
1500 struct address_space *mapping = inode->i_mapping; in shmem_unuse_inode() local
1508 if (!shmem_find_swap_entries(mapping, start, &fbatch, in shmem_unuse_inode()
1583 struct address_space *mapping = folio->mapping; in shmem_writeout() local
1584 struct inode *inode = mapping->host; in shmem_writeout()
1691 error = shmem_add_to_page_cache(folio, mapping, index, in shmem_writeout()
1856 struct address_space *mapping, pgoff_t index, in shmem_suitable_orders() argument
1883 if (!xa_find(&mapping->i_pages, &aligned_index, in shmem_suitable_orders()
1893 struct address_space *mapping, pgoff_t index, in shmem_suitable_orders() argument
1918 struct address_space *mapping = inode->i_mapping; in shmem_alloc_and_add_folio() local
1930 mapping, index, orders); in shmem_alloc_and_add_folio()
1959 if (xa_find(&mapping->i_pages, &index, in shmem_alloc_and_add_folio()
1973 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp); in shmem_alloc_and_add_folio()
1992 READ_ONCE(mapping->nrpages); in shmem_alloc_and_add_folio()
2208 struct address_space *mapping = inode->i_mapping; in shmem_set_folio_swapin_error() local
2214 old = xa_cmpxchg_irq(&mapping->i_pages, index, in shmem_set_folio_swapin_error()
2236 struct address_space *mapping = inode->i_mapping; in shmem_split_large_entry() local
2237 XA_STATE_ORDER(xas, &mapping->i_pages, index, 0); in shmem_split_large_entry()
2288 __xa_store(&mapping->i_pages, aligned_index + i, in shmem_split_large_entry()
2319 struct address_space *mapping = inode->i_mapping; in shmem_swapin_folio() local
2338 order = shmem_confirm_swap(mapping, index, index_entry); in shmem_swapin_folio()
2423 shmem_confirm_swap(mapping, index, swap) < 0 || in shmem_swapin_folio()
2447 error = shmem_add_to_page_cache(folio, mapping, index, in shmem_swapin_folio()
2470 if (shmem_confirm_swap(mapping, index, swap) < 0) in shmem_swapin_folio()
2543 if (unlikely(folio->mapping != inode->i_mapping)) { in shmem_get_folio_gfp()
3223 struct address_space *mapping = inode->i_mapping; in shmem_mfill_atomic_pte() local
3224 gfp_t gfp = mapping_gfp_mask(mapping); in shmem_mfill_atomic_pte()
3306 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp); in shmem_mfill_atomic_pte()
3334 shmem_write_begin(const struct kiocb *iocb, struct address_space *mapping, in shmem_write_begin() argument
3338 struct inode *inode = mapping->host; in shmem_write_begin()
3368 shmem_write_end(const struct kiocb *iocb, struct address_space *mapping, in shmem_write_end() argument
3372 struct inode *inode = mapping->host; in shmem_write_end()
3396 struct address_space *mapping = inode->i_mapping; in shmem_file_read_iter() local
3459 if (mapping_writably_mapped(mapping)) { in shmem_file_read_iter()
3584 struct address_space *mapping = inode->i_mapping; in shmem_file_splice_read() local
3654 if (mapping_writably_mapped(mapping)) { in shmem_file_splice_read()
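Both shmem_file_read_iter() and shmem_file_splice_read() test mapping_writably_mapped() before copying folio contents to the reader: if the file may also be mapped writably, the data cache has to be flushed so the read observes stores made through mmap. A sketch of that check (the helper name is hypothetical):

#include <linux/pagemap.h>
#include <linux/cacheflush.h>

/* Hypothetical helper: flush only when a writable mmap of the file may exist. */
static void example_prepare_copy_out(struct address_space *mapping,
                                     struct folio *folio)
{
        if (mapping_writably_mapped(mapping))
                flush_dcache_folio(folio);
}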
3693 struct address_space *mapping = file->f_mapping; in shmem_file_llseek() local
3694 struct inode *inode = mapping->host; in shmem_file_llseek()
3704 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); in shmem_file_llseek()
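shmem_file_llseek() hands SEEK_HOLE and SEEK_DATA to the generic mapping_seek_hole_data() helper, which walks the page cache. A sketch of an llseek method in that style (the fallback for other whence values and the omitted error checks are assumptions):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical llseek supporting SEEK_HOLE/SEEK_DATA via the page cache. */
static loff_t example_llseek(struct file *file, loff_t offset, int whence)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;

        if (whence != SEEK_DATA && whence != SEEK_HOLE)
                return generic_file_llseek_size(file, offset, whence,
                                                MAX_LFS_FILESIZE,
                                                i_size_read(inode));

        inode_lock(inode);
        offset = mapping_seek_hole_data(mapping, offset,
                                        i_size_read(inode), whence);
        inode_unlock(inode);
        return offset;
}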
3727 struct address_space *mapping = file->f_mapping; in shmem_fallocate() local
3746 unmap_mapping_range(mapping, unmap_start, in shmem_fallocate()
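When punching a hole, shmem_fallocate() first tears down any user mappings of the affected range with unmap_mapping_range(); only whole pages inside the hole are unmapped, since partial pages keep their mappings. A sketch of that preparation step with the rounding written out (the helper name is hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical hole-punch preparation in the style of shmem_fallocate(). */
static void example_punch_prepare(struct address_space *mapping,
                                  loff_t offset, loff_t len)
{
        loff_t unmap_start = round_up(offset, PAGE_SIZE);
        loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

        if (unmap_start < unmap_end)
                unmap_mapping_range(mapping, unmap_start,
                                    1 + unmap_end - unmap_start, 0);
}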
5251 static int shmem_error_remove_folio(struct address_space *mapping, in shmem_error_remove_folio() argument
5827 void shmem_unlock_mapping(struct address_space *mapping) in shmem_unlock_mapping() argument
5984 struct folio *shmem_read_folio_gfp(struct address_space *mapping, in shmem_read_folio_gfp() argument
5988 struct inode *inode = mapping->host; in shmem_read_folio_gfp()
6003 return mapping_read_folio_gfp(mapping, index, gfp); in shmem_read_folio_gfp()
6008 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, in shmem_read_mapping_page_gfp() argument
6011 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp); in shmem_read_mapping_page_gfp()
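shmem_read_folio_gfp() and shmem_read_mapping_page_gfp() are the exported entry points through which other subsystems (GPU drivers, for example) pull pages out of a shmem mapping with a caller-chosen GFP mask. A sketch of a hypothetical caller:

#include <linux/err.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

/* Hypothetical consumer: look up (allocating if needed) one backing page
 * of an existing shmem file. May return an ERR_PTR() on failure. */
static struct page *example_get_backing_page(struct file *shmem_file,
                                             pgoff_t index)
{
        struct address_space *mapping = shmem_file->f_mapping;
        gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_RETRY_MAYFAIL;

        return shmem_read_mapping_page_gfp(mapping, index, gfp);
}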