Lines matching refs: mapping (identifier cross-reference for 'mapping' in mm/filemap.c; each entry reads: source line number, matching code, enclosing function)
128 static void page_cache_delete(struct address_space *mapping, in page_cache_delete() argument
131 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
134 mapping_set_update(&xas, mapping); in page_cache_delete()
144 folio->mapping = NULL; in page_cache_delete()
146 mapping->nrpages -= nr; in page_cache_delete()
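Taken together, the page_cache_delete() hits trace the removal sequence: seed an XA_STATE at the folio's index, store a shadow entry in place of the folio, sever the folio's back-pointer, and shrink the mapping's page count. A hedged reconstruction of that flow (assertions and version-specific bookkeeping elided):

	static void page_cache_delete(struct address_space *mapping,
				      struct folio *folio, void *shadow)
	{
		XA_STATE(xas, &mapping->i_pages, folio->index);
		long nr;

		mapping_set_update(&xas, mapping);

		/* Cover the whole (possibly large) folio in one store. */
		xas_set_order(&xas, folio->index, folio_order(folio));
		nr = folio_nr_pages(folio);

		/* Replace the folio with a shadow entry (or NULL). */
		xas_store(&xas, shadow);
		xas_init_marks(&xas);

		folio->mapping = NULL;
		/* folio->index stays set: truncation lookups rely on it. */
		mapping->nrpages -= nr;
	}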
149 static void filemap_unaccount_folio(struct address_space *mapping, in filemap_unaccount_folio() argument
162 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
191 filemap_nr_thps_dec(mapping); in filemap_unaccount_folio()
209 mapping_can_writeback(mapping))) in filemap_unaccount_folio()
210 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
220 struct address_space *mapping = folio->mapping; in __filemap_remove_folio() local
223 filemap_unaccount_folio(mapping, folio); in __filemap_remove_folio()
224 page_cache_delete(mapping, folio, shadow); in __filemap_remove_folio()
227 void filemap_free_folio(struct address_space *mapping, struct folio *folio) in filemap_free_folio() argument
231 free_folio = mapping->a_ops->free_folio; in filemap_free_folio()
248 struct address_space *mapping = folio->mapping; in filemap_remove_folio() local
251 spin_lock(&mapping->host->i_lock); in filemap_remove_folio()
252 xa_lock_irq(&mapping->i_pages); in filemap_remove_folio()
254 xa_unlock_irq(&mapping->i_pages); in filemap_remove_folio()
255 if (mapping_shrinkable(mapping)) in filemap_remove_folio()
256 inode_add_lru(mapping->host); in filemap_remove_folio()
257 spin_unlock(&mapping->host->i_lock); in filemap_remove_folio()
259 filemap_free_folio(mapping, folio); in filemap_remove_folio()
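The filemap_remove_folio() matches expose the locking order on this path: the host inode's i_lock nests outside the IRQ-safe i_pages xa_lock, and the inode is put back on the LRU once its cache has become shrinkable. A hedged sketch of that shape:

	void filemap_remove_folio(struct folio *folio)
	{
		struct address_space *mapping = folio->mapping;

		/* i_lock is taken outside the IRQ-disabled xa_lock. */
		spin_lock(&mapping->host->i_lock);
		xa_lock_irq(&mapping->i_pages);
		__filemap_remove_folio(folio, NULL);	/* unaccount + xarray delete */
		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);

		filemap_free_folio(mapping, folio);	/* ->free_folio() + ref drop */
	}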
275 static void page_cache_delete_batch(struct address_space *mapping, in page_cache_delete_batch() argument
278 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
283 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
306 folio->mapping = NULL; in page_cache_delete_batch()
313 mapping->nrpages -= total_pages; in page_cache_delete_batch()
316 void delete_from_page_cache_batch(struct address_space *mapping, in delete_from_page_cache_batch() argument
324 spin_lock(&mapping->host->i_lock); in delete_from_page_cache_batch()
325 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
330 filemap_unaccount_folio(mapping, folio); in delete_from_page_cache_batch()
332 page_cache_delete_batch(mapping, fbatch); in delete_from_page_cache_batch()
333 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
334 if (mapping_shrinkable(mapping)) in delete_from_page_cache_batch()
335 inode_add_lru(mapping->host); in delete_from_page_cache_batch()
336 spin_unlock(&mapping->host->i_lock); in delete_from_page_cache_batch()
339 filemap_free_folio(mapping, fbatch->folios[i]); in delete_from_page_cache_batch()
342 int filemap_check_errors(struct address_space *mapping) in filemap_check_errors() argument
346 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
347 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
349 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
350 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
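filemap_check_errors() is small enough that the matches above nearly spell it out. The double test (plain test_bit() before test_and_clear_bit()) avoids an atomic read-modify-write, and the cacheline dirtying it implies, when no error bit is set. A reconstruction consistent with those lines:

	int filemap_check_errors(struct address_space *mapping)
	{
		int ret = 0;

		/* Check cheaply first; only do the atomic clear if set. */
		if (test_bit(AS_ENOSPC, &mapping->flags) &&
		    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
			ret = -ENOSPC;
		if (test_bit(AS_EIO, &mapping->flags) &&
		    test_and_clear_bit(AS_EIO, &mapping->flags))
			ret = -EIO;
		return ret;
	}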
356 static int filemap_check_and_keep_errors(struct address_space *mapping) in filemap_check_and_keep_errors() argument
359 if (test_bit(AS_EIO, &mapping->flags)) in filemap_check_and_keep_errors()
361 if (test_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_and_keep_errors()
376 int filemap_fdatawrite_wbc(struct address_space *mapping, in filemap_fdatawrite_wbc() argument
381 if (!mapping_can_writeback(mapping) || in filemap_fdatawrite_wbc()
382 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in filemap_fdatawrite_wbc()
385 wbc_attach_fdatawrite_inode(wbc, mapping->host); in filemap_fdatawrite_wbc()
386 ret = do_writepages(mapping, wbc); in filemap_fdatawrite_wbc()
409 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in __filemap_fdatawrite_range() argument
419 return filemap_fdatawrite_wbc(mapping, &wbc); in __filemap_fdatawrite_range()
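The single hit at source line 419 shows that __filemap_fdatawrite_range() is a thin wrapper: it packs its arguments into a writeback_control and defers to filemap_fdatawrite_wbc(). A hedged reconstruction:

	int __filemap_fdatawrite_range(struct address_space *mapping,
				       loff_t start, loff_t end, int sync_mode)
	{
		struct writeback_control wbc = {
			.sync_mode	= sync_mode,	/* WB_SYNC_ALL or WB_SYNC_NONE */
			.nr_to_write	= LONG_MAX,
			.range_start	= start,
			.range_end	= end,
		};

		return filemap_fdatawrite_wbc(mapping, &wbc);
	}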
422 static inline int __filemap_fdatawrite(struct address_space *mapping, in __filemap_fdatawrite() argument
425 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); in __filemap_fdatawrite()
428 int filemap_fdatawrite(struct address_space *mapping) in filemap_fdatawrite() argument
430 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); in filemap_fdatawrite()
434 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in filemap_fdatawrite_range() argument
437 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); in filemap_fdatawrite_range()
452 int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start, in filemap_fdatawrite_range_kick() argument
455 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_NONE); in filemap_fdatawrite_range_kick()
468 int filemap_flush(struct address_space *mapping) in filemap_flush() argument
470 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); in filemap_flush()
486 bool filemap_range_has_page(struct address_space *mapping, in filemap_range_has_page() argument
490 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
517 static void __filemap_fdatawait_range(struct address_space *mapping, in __filemap_fdatawait_range() argument
530 nr_folios = filemap_get_folios_tag(mapping, &index, end, in __filemap_fdatawait_range()
562 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, in filemap_fdatawait_range() argument
565 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range()
566 return filemap_check_errors(mapping); in filemap_fdatawait_range()
584 int filemap_fdatawait_range_keep_errors(struct address_space *mapping, in filemap_fdatawait_range_keep_errors() argument
587 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range_keep_errors()
588 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_range_keep_errors()
610 struct address_space *mapping = file->f_mapping; in file_fdatawait_range() local
612 __filemap_fdatawait_range(mapping, start_byte, end_byte); in file_fdatawait_range()
631 int filemap_fdatawait_keep_errors(struct address_space *mapping) in filemap_fdatawait_keep_errors() argument
633 __filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait_keep_errors()
634 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_keep_errors()
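The _keep_errors variants exist for callers like sync(2) that must not consume a mapping's AS_EIO/AS_ENOSPC bits, so that a later fsync() on the same file still observes the failure. A usage sketch (the helpers are real; the flow is illustrative):

	int err;

	(void)filemap_fdatawrite(mapping);		/* start integrity writeback */
	err = filemap_fdatawait_keep_errors(mapping);	/* report, but leave bits set */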
639 static bool mapping_needs_writeback(struct address_space *mapping) in mapping_needs_writeback() argument
641 return mapping->nrpages; in mapping_needs_writeback()
644 bool filemap_range_has_writeback(struct address_space *mapping, in filemap_range_has_writeback() argument
647 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_writeback()
682 int filemap_write_and_wait_range(struct address_space *mapping, in filemap_write_and_wait_range() argument
690 if (mapping_needs_writeback(mapping)) { in filemap_write_and_wait_range()
691 err = __filemap_fdatawrite_range(mapping, lstart, lend, in filemap_write_and_wait_range()
700 __filemap_fdatawait_range(mapping, lstart, lend); in filemap_write_and_wait_range()
702 err2 = filemap_check_errors(mapping); in filemap_write_and_wait_range()
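filemap_write_and_wait_range() combines the two halves above. One subtlety visible in the matches: the wait at source line 700 runs even when the write phase returned an error (unless it was -EIO), because folios may still be under writeback and callers must not race with them. A hedged reconstruction of the core:

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* Still wait unless the start itself died with -EIO. */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = filemap_check_errors(mapping);
	if (!err)
		err = err2;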
709 void __filemap_set_wb_err(struct address_space *mapping, int err) in __filemap_set_wb_err() argument
711 errseq_t eseq = errseq_set(&mapping->wb_err, err); in __filemap_set_wb_err()
713 trace_filemap_set_wb_err(mapping, eseq); in __filemap_set_wb_err()
745 struct address_space *mapping = file->f_mapping; in file_check_and_advance_wb_err() local
748 if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
752 err = errseq_check_and_advance(&mapping->wb_err, in file_check_and_advance_wb_err()
763 clear_bit(AS_EIO, &mapping->flags); in file_check_and_advance_wb_err()
764 clear_bit(AS_ENOSPC, &mapping->flags); in file_check_and_advance_wb_err()
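file_check_and_advance_wb_err() is the errseq_t side of error reporting: each struct file samples mapping->wb_err and only sees errors recorded after its sample point. A usage sketch of that protocol (filemap_sample_wb_err() and filemap_check_wb_err() are real pagemap.h helpers; the surrounding flow is illustrative):

	errseq_t since = filemap_sample_wb_err(mapping);

	/* ... start writeback and wait for it ... */

	err = filemap_check_wb_err(mapping, since);	/* new error since sample? */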
788 struct address_space *mapping = file->f_mapping; in file_write_and_wait_range() local
793 if (mapping_needs_writeback(mapping)) { in file_write_and_wait_range()
794 err = __filemap_fdatawrite_range(mapping, lstart, lend, in file_write_and_wait_range()
798 __filemap_fdatawait_range(mapping, lstart, lend); in file_write_and_wait_range()
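A typical consumer of file_write_and_wait_range() is a filesystem's ->fsync() front half. A minimal sketch, with myfs_fsync() purely hypothetical:

	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
			      int datasync)
	{
		int err = file_write_and_wait_range(file, start, end);

		if (err)
			return err;
		/* ... commit metadata / journal for the inode here ... */
		return 0;
	}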
822 struct address_space *mapping = old->mapping; in replace_page_cache_folio() local
823 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_folio()
825 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_folio()
829 VM_BUG_ON_FOLIO(new->mapping, new); in replace_page_cache_folio()
832 new->mapping = mapping; in replace_page_cache_folio()
840 old->mapping = NULL; in replace_page_cache_folio()
857 noinline int __filemap_add_folio(struct address_space *mapping, in __filemap_add_folio() argument
860 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); in __filemap_add_folio()
867 VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping), in __filemap_add_folio()
869 mapping_set_update(&xas, mapping); in __filemap_add_folio()
877 folio->mapping = mapping; in __filemap_add_folio()
905 BUG_ON(shmem_mapping(mapping)); in __filemap_add_folio()
928 mapping->nrpages += nr; in __filemap_add_folio()
951 folio->mapping = NULL; in __filemap_add_folio()
958 int filemap_add_folio(struct address_space *mapping, struct folio *folio, in filemap_add_folio() argument
969 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); in filemap_add_folio()
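filemap_add_folio() is the public insertion point: on success the folio is locked, referenced, charged, and visible in the page cache, with any shadow entry it displaced handed to the workingset code. A usage sketch, assuming the caller fills and unlocks the folio itself:

	struct folio *folio;
	int err;

	folio = filemap_alloc_folio(mapping_gfp_mask(mapping),
				    mapping_min_folio_order(mapping));
	if (!folio)
		return -ENOMEM;

	err = filemap_add_folio(mapping, folio, index, GFP_KERNEL);
	if (err) {
		folio_put(folio);	/* never made it into the tree */
		return err;
	}
	/* ... fill data, folio_mark_uptodate(folio), folio_unlock(folio) ... */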
1594 struct address_space *mapping = folio->mapping; in filemap_end_dropbehind() local
1602 if (mapping) in filemap_end_dropbehind()
1603 folio_unmap_invalidate(mapping, folio, 0); in filemap_end_dropbehind()
1777 pgoff_t page_cache_next_miss(struct address_space *mapping, in page_cache_next_miss() argument
1780 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1814 pgoff_t page_cache_prev_miss(struct address_space *mapping, in page_cache_prev_miss() argument
1817 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1863 void *filemap_get_entry(struct address_space *mapping, pgoff_t index) in filemap_get_entry() argument
1865 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_entry()
1910 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, in __filemap_get_folio() argument
1916 folio = filemap_get_entry(mapping, index); in __filemap_get_folio()
1933 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1953 unsigned int min_order = mapping_min_folio_order(mapping); in __filemap_get_folio()
1956 index = mapping_align_index(mapping, index); in __filemap_get_folio()
1958 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) in __filemap_get_folio()
1969 if (order > mapping_max_folio_order(mapping)) in __filemap_get_folio()
1970 order = mapping_max_folio_order(mapping); in __filemap_get_folio()
1991 err = filemap_add_folio(mapping, folio, index, gfp); in __filemap_get_folio()
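__filemap_get_folio() is the find-or-create front end over filemap_get_entry() and filemap_add_folio(), driven by FGP flags. A usage sketch; note that recent kernels return ERR_PTR() rather than NULL on failure:

	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* folio is locked and referenced here */
	folio_unlock(folio);
	folio_put(folio);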
2085 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, in find_get_entries() argument
2088 XA_STATE(xas, &mapping->i_pages, *start); in find_get_entries()
2106 nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]); in find_get_entries()
2134 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, in find_lock_entries() argument
2137 XA_STATE(xas, &mapping->i_pages, *start); in find_lock_entries()
2156 if (folio->mapping != mapping || in find_lock_entries()
2202 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, in filemap_get_folios() argument
2205 return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch); in filemap_get_folios()
2224 unsigned filemap_get_folios_contig(struct address_space *mapping, in filemap_get_folios_contig() argument
2227 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_contig()
2300 unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, in filemap_get_folios_tag() argument
2303 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_tag()
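filemap_get_folios_tag() (which filemap_get_folios() simply calls with XA_PRESENT, per source line 2205) fills a folio_batch and advances *start, so it composes into the standard batched-iteration loop used by writeback. A usage sketch over the dirty tag:

	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
					    PAGECACHE_TAG_DIRTY, &fbatch))) {
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];
			/* each folio carries a reference; process it here */
		}
		folio_batch_release(&fbatch);	/* drop the batch's references */
		cond_resched();
	}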
2367 static void filemap_get_read_batch(struct address_space *mapping, in filemap_get_read_batch() argument
2370 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2429 static bool filemap_range_uptodate(struct address_space *mapping, in filemap_range_uptodate() argument
2438 if (!mapping->a_ops->is_partially_uptodate) in filemap_range_uptodate()
2440 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2450 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2454 struct address_space *mapping, size_t count, in filemap_update_page() argument
2460 if (!filemap_invalidate_trylock_shared(mapping)) in filemap_update_page()
2463 filemap_invalidate_lock_shared(mapping); in filemap_update_page()
2471 filemap_invalidate_unlock_shared(mapping); in filemap_update_page()
2485 if (!folio->mapping) in filemap_update_page()
2489 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, in filemap_update_page()
2497 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, in filemap_update_page()
2503 filemap_invalidate_unlock_shared(mapping); in filemap_update_page()
2511 struct address_space *mapping = iocb->ki_filp->f_mapping; in filemap_create_folio() local
2514 unsigned int min_order = mapping_min_folio_order(mapping); in filemap_create_folio()
2520 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order); in filemap_create_folio()
2539 filemap_invalidate_lock_shared(mapping); in filemap_create_folio()
2541 error = filemap_add_folio(mapping, folio, index, in filemap_create_folio()
2542 mapping_gfp_constraint(mapping, GFP_KERNEL)); in filemap_create_folio()
2548 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, in filemap_create_folio()
2553 filemap_invalidate_unlock_shared(mapping); in filemap_create_folio()
2557 filemap_invalidate_unlock_shared(mapping); in filemap_create_folio()
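The filemap_create_folio() matches show the invalidate-lock protocol on the buffered-read miss path: the mapping's invalidate_lock is held shared across both the insertion and the read, so a racing truncate or hole punch (which takes it exclusive) cannot expose stale data. A hedged sketch of that shape (filemap_read_folio() is file-local to filemap.c):

	filemap_invalidate_lock_shared(mapping);
	error = filemap_add_folio(mapping, folio, index,
				  mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (!error)
		error = filemap_read_folio(iocb->ki_filp,
					   mapping->a_ops->read_folio, folio);
	filemap_invalidate_unlock_shared(mapping);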
2563 struct address_space *mapping, struct folio *folio, in filemap_readahead() argument
2566 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2580 struct address_space *mapping = filp->f_mapping; in filemap_get_pages() local
2593 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); in filemap_get_pages()
2595 DEFINE_READAHEAD(ractl, filp, &filp->f_ra, mapping, index); in filemap_get_pages()
2606 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); in filemap_get_pages()
2617 err = filemap_readahead(iocb, filp, mapping, folio, last_index); in filemap_get_pages()
2625 err = filemap_update_page(iocb, mapping, count, folio, in filemap_get_pages()
2631 trace_mm_filemap_get_pages(mapping, index, last_index - 1); in filemap_get_pages()
2680 struct address_space *mapping = filp->f_mapping; in filemap_read() local
2681 struct inode *inode = mapping->host; in filemap_read()
2733 writably_mapped = mapping_writably_mapped(mapping); in filemap_read()
2792 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_write_and_wait() local
2797 if (filemap_range_needs_writeback(mapping, pos, end)) in kiocb_write_and_wait()
2802 return filemap_write_and_wait_range(mapping, pos, end); in kiocb_write_and_wait()
2806 int filemap_invalidate_pages(struct address_space *mapping, in filemap_invalidate_pages() argument
2813 if (filemap_range_has_page(mapping, pos, end)) in filemap_invalidate_pages()
2816 ret = filemap_write_and_wait_range(mapping, pos, end); in filemap_invalidate_pages()
2827 return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, in filemap_invalidate_pages()
2833 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_invalidate_pages() local
2835 return filemap_invalidate_pages(mapping, iocb->ki_pos, in kiocb_invalidate_pages()
2873 struct address_space *mapping = file->f_mapping; in generic_file_read_iter() local
2874 struct inode *inode = mapping->host; in generic_file_read_iter()
2881 retval = mapping->a_ops->direct_IO(iocb, iter); in generic_file_read_iter()
3054 struct address_space *mapping, struct folio *folio, in folio_seek_hole_data() argument
3057 const struct address_space_operations *ops = mapping->a_ops; in folio_seek_hole_data()
3058 size_t offset, bsz = i_blocksize(mapping->host); in folio_seek_hole_data()
3068 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
3111 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, in mapping_seek_hole_data() argument
3114 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
3135 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
3216 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead() local
3217 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
3408 struct address_space *mapping = file->f_mapping; in filemap_fault() local
3409 struct inode *inode = mapping->host; in filemap_fault()
3419 trace_mm_filemap_fault(mapping, index); in filemap_fault()
3424 folio = filemap_get_folio(mapping, index); in filemap_fault()
3433 filemap_invalidate_lock_shared(mapping); in filemap_fault()
3452 filemap_invalidate_lock_shared(mapping); in filemap_fault()
3455 folio = __filemap_get_folio(mapping, index, in filemap_fault()
3461 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3470 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3513 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3537 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3544 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3557 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3593 struct address_space *mapping, pgoff_t end_pgoff) in next_uptodate_folio() argument
3616 if (folio->mapping != mapping) in next_uptodate_folio()
3620 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in next_uptodate_folio()
3737 struct address_space *mapping = file->f_mapping; in filemap_map_pages() local
3740 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3748 folio = next_uptodate_folio(&xas, mapping, end_pgoff); in filemap_map_pages()
3765 file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1; in filemap_map_pages()
3789 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); in filemap_map_pages()
3792 trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff); in filemap_map_pages()
3808 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite() local
3812 sb_start_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3815 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3828 sb_end_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3842 struct address_space *mapping = file->f_mapping; in generic_file_mmap() local
3844 if (!mapping->a_ops->read_folio) in generic_file_mmap()
3854 struct address_space *mapping = file->f_mapping; in generic_file_mmap_prepare() local
3856 if (!mapping->a_ops->read_folio) in generic_file_mmap_prepare()
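Both generic_file_mmap() and its newer generic_file_mmap_prepare() variant refuse mappings whose address_space lacks ->read_folio, since faults would have no way to populate pages. A wiring sketch, with myfs_file_operations hypothetical:

	const struct file_operations myfs_file_operations = {
		.llseek		= generic_file_llseek,
		.read_iter	= generic_file_read_iter,
		.write_iter	= generic_file_write_iter,
		.mmap		= generic_file_mmap,	/* needs a_ops->read_folio */
		.fsync		= generic_file_fsync,
	};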
3908 static struct folio *do_read_cache_folio(struct address_space *mapping, in do_read_cache_folio() argument
3915 filler = mapping->a_ops->read_folio; in do_read_cache_folio()
3917 folio = filemap_get_folio(mapping, index); in do_read_cache_folio()
3920 mapping_min_folio_order(mapping)); in do_read_cache_folio()
3923 index = mapping_align_index(mapping, index); in do_read_cache_folio()
3924 err = filemap_add_folio(mapping, folio, index, gfp); in do_read_cache_folio()
3944 if (!folio->mapping) { in do_read_cache_folio()
3986 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, in read_cache_folio() argument
3989 return do_read_cache_folio(mapping, index, filler, file, in read_cache_folio()
3990 mapping_gfp_mask(mapping)); in read_cache_folio()
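read_cache_folio() is the classic "read one folio through the page cache" helper; a NULL filler means "use mapping->a_ops->read_folio". A usage sketch:

	struct folio *folio;
	void *kaddr;

	folio = read_cache_folio(mapping, index, NULL, file);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	kaddr = kmap_local_folio(folio, 0);
	/* ... inspect the uptodate data ... */
	kunmap_local(kaddr);
	folio_put(folio);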
4011 struct folio *mapping_read_folio_gfp(struct address_space *mapping, in mapping_read_folio_gfp() argument
4014 return do_read_cache_folio(mapping, index, NULL, NULL, gfp); in mapping_read_folio_gfp()
4018 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page() argument
4023 folio = do_read_cache_folio(mapping, index, filler, file, gfp); in do_read_cache_page()
4029 struct page *read_cache_page(struct address_space *mapping, in read_cache_page() argument
4032 return do_read_cache_page(mapping, index, filler, file, in read_cache_page()
4033 mapping_gfp_mask(mapping)); in read_cache_page()
4052 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp() argument
4056 return do_read_cache_page(mapping, index, NULL, NULL, gfp); in read_cache_page_gfp()
4082 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_invalidate_post_direct_write() local
4084 if (mapping->nrpages && in kiocb_invalidate_post_direct_write()
4085 invalidate_inode_pages2_range(mapping, in kiocb_invalidate_post_direct_write()
4094 struct address_space *mapping = iocb->ki_filp->f_mapping; in generic_file_direct_write() local
4109 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
4129 struct inode *inode = mapping->host; in generic_file_direct_write()
4151 struct address_space *mapping = file->f_mapping; in generic_perform_write() local
4152 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
4153 size_t chunk = mapping_max_folio_size(mapping); in generic_perform_write()
4168 balance_dirty_pages_ratelimited(mapping); in generic_perform_write()
4175 status = a_ops->write_begin(iocb, mapping, pos, bytes, in generic_perform_write()
4184 if (mapping_writably_mapped(mapping)) in generic_perform_write()
4196 status = a_ops->write_end(iocb, mapping, pos, bytes, copied, in generic_perform_write()
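The generic_perform_write() hits outline the buffered-write loop: size a chunk, ->write_begin, copy from the iterator, ->write_end, then throttle dirtying. A hedged sketch of that shape only; the exact ->write_begin/->write_end signatures (the iocb-taking variants matched above) vary across kernel versions, and the context variables (iocb, i, pos, chunk, written, status) come from the surrounding function:

	do {
		struct folio *folio;
		void *fsdata;
		size_t offset = pos & (chunk - 1);	/* chunk is a power of two */
		size_t bytes = min(chunk - offset, iov_iter_count(i));
		size_t copied;

		status = a_ops->write_begin(iocb, mapping, pos, bytes,
					    &folio, &fsdata);
		if (status)
			break;

		/* Concurrent writable mmaps may require a cache flush. */
		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio,
				offset_in_folio(folio, pos), bytes, i);

		status = a_ops->write_end(iocb, mapping, pos, bytes, copied,
					  folio, fsdata);
		if (status < 0)
			break;
		pos += status;
		written += status;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));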
4265 struct address_space *mapping = file->f_mapping; in __generic_file_write_iter() local
4266 struct inode *inode = mapping->host; in __generic_file_write_iter()
4346 struct address_space * const mapping = folio->mapping; in filemap_release_folio() local
4354 if (mapping && mapping->a_ops->release_folio) in filemap_release_folio()
4355 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()
4376 struct address_space *mapping = inode->i_mapping; in filemap_invalidate_inode() local
4381 if (!mapping || !mapping->nrpages || end < start) in filemap_invalidate_inode()
4385 filemap_invalidate_lock(mapping); in filemap_invalidate_inode()
4387 if (!mapping->nrpages) in filemap_invalidate_inode()
4390 unmap_mapping_pages(mapping, first, nr, false); in filemap_invalidate_inode()
4401 filemap_fdatawrite_wbc(mapping, &wbc); in filemap_invalidate_inode()
4405 invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE); in filemap_invalidate_inode()
4408 filemap_invalidate_unlock(mapping); in filemap_invalidate_inode()
4410 return filemap_check_errors(mapping); in filemap_invalidate_inode()
4427 static void filemap_cachestat(struct address_space *mapping, in filemap_cachestat() argument
4430 XA_STATE(xas, &mapping->i_pages, first_index); in filemap_cachestat()
4476 if (shmem_mapping(mapping)) { in filemap_cachestat()
4576 struct address_space *mapping; in SYSCALL_DEFINE4() local
4602 mapping = fd_file(f)->f_mapping; in SYSCALL_DEFINE4()
4603 filemap_cachestat(mapping, first_index, last_index, &cs); in SYSCALL_DEFINE4()
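The listing ends at the cachestat(2) syscall, which exports these page-cache counters to userspace. A hedged userspace sketch (requires Linux 6.5+ and headers that define __NR_cachestat; a zero length queries from off to end of file):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/mman.h>		/* struct cachestat_range, struct cachestat */

	int main(int argc, char **argv)
	{
		struct cachestat_range range = { .off = 0, .len = 0 };
		struct cachestat cs;
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0) != 0) {
			perror("cachestat");
			return 1;
		}
		printf("cached %llu dirty %llu writeback %llu evicted %llu\n",
		       (unsigned long long)cs.nr_cache,
		       (unsigned long long)cs.nr_dirty,
		       (unsigned long long)cs.nr_writeback,
		       (unsigned long long)cs.nr_evicted);
		close(fd);
		return 0;
	}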