## /mm/

Cross-reference hits for `mapping` under `mm/`: each bullet gives the source line, the code at that line, and the enclosing function. `(local)` and `(argument)` mark the hit as a local-variable or parameter declaration; `[all …]` marks a truncated hit list.
### truncate.c
- 33: `if (shmem_mapping(mapping) || dax_mapping(mapping))` in `clear_shadow_entries()`
- 49: `inode_add_lru(mapping->host);` in `clear_shadow_entries()`
- 69: `if (shmem_mapping(mapping))` in `truncate_folio_batch_exceptionals()`
- 79: `if (dax_mapping(mapping)) {` in `truncate_folio_batch_exceptionals()`
- 172: `if (folio->mapping != mapping)` in `truncate_inode_folio()`
- 256: `folio2->mapping == folio->mapping)` in `truncate_inode_partial_folio()`
- 277: `if (!mapping)` in `generic_error_remove_folio()`
- 303: `if (!mapping)` in `mapping_evict_folio()`
- 585: `if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)` in `folio_launder()`
- 611: `if (folio->mapping != mapping)` in `folio_unmap_invalidate()`
- [all …]
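
Hits 172, 585 and 611 are instances of one idiom: after a folio is locked, its `->mapping` must be re-compared against the expected `address_space`, since truncation clears the field and the folio may have been freed and reused in the meantime. A minimal sketch of the check, with a hypothetical helper name:

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical helper; illustrates the re-check idiom, not mm/ code. */
static bool folio_still_in_mapping(struct folio *folio,
				   struct address_space *mapping)
{
	/* Only meaningful while the folio lock is held. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	/* Truncation NULLs folio->mapping; reuse can repoint it. */
	return folio->mapping == mapping;
}
```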
### filemap.c
- 220: `struct address_space *mapping = folio->mapping;` in `__filemap_remove_folio()` (local)
- 248: `struct address_space *mapping = folio->mapping;` in `filemap_remove_folio()` (local)
- 822: `struct address_space *mapping = old->mapping;` in `replace_page_cache_folio()` (local)
- 832: `new->mapping = mapping;` in `replace_page_cache_folio()`
- 877: `folio->mapping = mapping;` in `__filemap_add_folio()`
- 1933: `if (unlikely(folio->mapping != mapping)) {` in `__filemap_get_folio()`
- 2156: `if (folio->mapping != mapping ||` in `find_lock_entries()`
- 3068: `if (unlikely(folio->mapping != mapping))` in `folio_seek_hole_data()`
- 3470: `if (unlikely(folio->mapping != mapping)) {` in `filemap_fault()`
- 3616: `if (folio->mapping != mapping)` in `next_uptodate_folio()`
- [all …]
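
Hits 1933 and 3470 show the page-cache lookup contract: even `__filemap_get_folio()` and `filemap_fault()` must re-validate `folio->mapping` after sleeping on the folio lock. A hedged sketch of a find-or-create lookup built on that API; the helper name is invented, and the `ERR_PTR()` convention assumes a recent kernel:

```c
#include <linux/pagemap.h>

/* Find or create a locked folio at @index (illustrative helper). */
static struct folio *lookup_locked_folio(struct address_space *mapping,
					 pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return folio;	/* -ENOMEM; -ENOENT without FGP_CREAT */

	/*
	 * The folio comes back locked with folio->mapping == mapping;
	 * __filemap_get_folio() performs the re-check seen at hit 1933
	 * itself after it slept on the folio lock.
	 */
	return folio;
}
```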
### readahead.c
- 210: `struct address_space *mapping = ractl->mapping;` in `page_cache_ra_unbounded()` (local)
- 228: `filemap_invalidate_lock_shared(mapping);` in `page_cache_ra_unbounded()`
- 271: `mapping_min_folio_order(mapping));` in `page_cache_ra_unbounded()`
- 298: `filemap_invalidate_unlock_shared(mapping);` in `page_cache_ra_unbounded()`
- 337: `struct address_space *mapping = ractl->mapping;` in `force_page_cache_ra()` (local)
- 342: `if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))` in `force_page_cache_ra()`
- 462: `struct address_space *mapping = ractl->mapping;` in `page_cache_ra_order()` (local)
- 470: `gfp_t gfp = readahead_gfp_mask(mapping);` in `page_cache_ra_order()`
- 488: `filemap_invalidate_lock_shared(mapping);` in `page_cache_ra_order()`
- 513: `filemap_invalidate_unlock_shared(mapping);` in `page_cache_ra_order()`
- [all …]
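
Every function above operates on a `readahead_control`, the cursor that bundles the file, its readahead state and the target mapping. A minimal sketch of constructing one and kicking off readahead; `kick_readahead()` is a made-up name, while the macro and function are the real pagemap API:

```c
#include <linux/pagemap.h>

static void kick_readahead(struct file *file, pgoff_t index,
			   unsigned long nr_to_read)
{
	/* ractl carries file, file->f_ra, mapping and start index */
	DEFINE_READAHEAD(ractl, file, &file->f_ra, file->f_mapping, index);

	page_cache_sync_ra(&ractl, nr_to_read);
}
```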
### page-writeback.c
- 2415: `if (unlikely(folio->mapping != mapping))` in `folio_prepare_writeback()`
- 2634: `ret = mapping->a_ops->writepages(mapping, wbc);` in `do_writepages()`
- 2774: `if (mapping->host) {` in `filemap_dirty_folio()`
- 2797: `struct address_space *mapping = folio->mapping;` in `folio_redirty_for_writepage()` (local)
- 2803: `if (mapping && mapping_can_writeback(mapping)) {` in `folio_redirty_for_writepage()`
- 2835: `if (likely(mapping)) {` in `folio_mark_dirty()`
- 2849: `return mapping->a_ops->dirty_folio(mapping, folio);` in `folio_mark_dirty()`
- 2932: `if (mapping && mapping_can_writeback(mapping)) {` in `folio_clear_dirty_for_io()`
- 3015: `if (mapping && mapping_use_writeback_tags(mapping)) {` in `__folio_end_writeback()`
- 3033: `if (mapping->host && !mapping_tagged(mapping,` in `__folio_end_writeback()`
- [all …]
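
Hits 2634 and 2849 are the two dispatch points from the generic code into a filesystem's `address_space_operations`. A sketch of the filesystem side those calls land on; `myfs_*` is hypothetical, while `filemap_dirty_folio()` (hit 2774 above) is the stock `dirty_folio` implementation:

```c
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical: invoked from do_writepages() (hit 2634). */
static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	/* write back the dirty folios of @mapping here */
	return 0;
}

static const struct address_space_operations myfs_aops = {
	.dirty_folio	= filemap_dirty_folio,	/* folio_mark_dirty(), hit 2849 */
	.writepages	= myfs_writepages,
};
```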
### memory-failure.c
- 239: `if (mapping == NULL || mapping->host == NULL)` in `hwpoison_filter_dev()`
- 653: `struct address_space *mapping = folio->mapping;` in `collect_procs_file()` (local)
- 656: `i_mmap_lock_read(mapping);` in `collect_procs_file()`
- 681: `i_mmap_unlock_read(mapping);` in `collect_procs_file()`
- 703: `i_mmap_lock_read(mapping);` in `collect_procs_fsdax()`
- 733: `if (!folio->mapping)` in `collect_procs()`
- 992: `int err = mapping->a_ops->error_remove_folio(mapping, folio);` in `truncate_error_folio()`
- 1097: `if (!mapping) {` in `me_pagecache_clean()`
- 1135: `if (mapping) {` in `me_pagecache_dirty()`
- 1219: `if (mapping) {` in `me_huge_page()`
- [all …]
### migrate.c
- 550: `if (!mapping) {` in `__folio_migrate_mapping()`
- 562: `newfolio->mapping = folio->mapping;` in `__folio_migrate_mapping()`
- 588: `newfolio->mapping = folio->mapping;` in `__folio_migrate_mapping()`
- 708: `dst->mapping = src->mapping;` in `migrate_huge_page_move_mapping()`
- 1028: `mapping->a_ops);` in `fallback_migrate_folio()`
- 1063: `if (!mapping)` in `move_to_new_folio()`
- 1075: `rc = mapping->a_ops->migrate_folio(mapping, dst, src,` in `move_to_new_folio()`
- 1086: `src->mapping = NULL;` in `move_to_new_folio()`
- 1293: `if (!src->mapping) {` in `migrate_folio_unmap()`
- 1486: `if (unlikely(!mapping))` in `unmap_and_move_huge_page()`
- [all …]
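
Hit 1075 is where migration calls into the filesystem. Filesystems whose folios carry no private state can plug in the generic helper directly; a sketch (the `myfs_aops` name is hypothetical, `filemap_migrate_folio()` is the real generic implementation):

```c
#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations myfs_aops = {
	/* called from move_to_new_folio() (hit 1075) */
	.migrate_folio	= filemap_migrate_folio,
};
```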
### fadvise.c
- 34: `struct address_space *mapping;` in `generic_fadvise()` (local)
- 45: `mapping = file->f_mapping;` in `generic_fadvise()`
- 46: `if (!mapping || len < 0)` in `generic_fadvise()`
- 49: `bdi = inode_to_bdi(mapping->host);` in `generic_fadvise()`
- 106: `force_page_cache_readahead(mapping, file, start_index, nrpages);` in `generic_fadvise()`
- 114: `__filemap_fdatawrite_range(mapping, offset, endbyte,` in `generic_fadvise()`
- 158: `mapping_try_invalidate(mapping, start_index, end_index,` in `generic_fadvise()`
- 168: `invalidate_mapping_pages(mapping, start_index,` in `generic_fadvise()`
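
These hits are the kernel ends of the `posix_fadvise()` hints: WILLNEED forces readahead (hit 106), DONTNEED writes back and then invalidates (hits 114 and 168). For orientation, the matching userspace call, using only the standard POSIX API:

```c
#include <fcntl.h>

/* Hint that [offset, offset+len) will not be reused; on Linux this
 * reaches generic_fadvise() and ends in invalidate_mapping_pages(). */
static int drop_cached_range(int fd, off_t offset, off_t len)
{
	return posix_fadvise(fd, offset, len, POSIX_FADV_DONTNEED);
}
```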
### shmem.c
- 479: `xa_lock_irq(&mapping->i_pages);` in `shmem_charge()`
- 480: `mapping->nrpages += pages;` in `shmem_charge()`
- 915: `folio->mapping = mapping;` in `shmem_add_to_page_cache()`
- 946: `mapping->nrpages += nr;` in `shmem_add_to_page_cache()`
- 952: `folio->mapping = NULL;` in `shmem_add_to_page_cache()`
- 965: `struct address_space *mapping = folio->mapping;` in `shmem_delete_from_page_cache()` (local)
- 971: `folio->mapping = NULL;` in `shmem_delete_from_page_cache()`
- 972: `mapping->nrpages -= nr;` in `shmem_delete_from_page_cache()`
- 1583: `struct address_space *mapping = folio->mapping;` in `shmem_writeout()` (local)
- 1930: `mapping, index, orders);` in `shmem_alloc_and_add_folio()`
- [all …]
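
Hits 479/480 and 946/972 show the locking rule behind `mapping->nrpages`: it is only adjusted while the `i_pages` xarray lock is held. A one-function sketch of that rule (hypothetical helper name):

```c
#include <linux/fs.h>
#include <linux/xarray.h>

static void account_added_pages(struct address_space *mapping, long pages)
{
	/* nrpages changes only under the i_pages lock */
	xa_lock_irq(&mapping->i_pages);
	mapping->nrpages += pages;
	xa_unlock_irq(&mapping->i_pages);
}
```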
### secretmem.c
- 52: `struct address_space *mapping = vmf->vma->vm_file->f_mapping;` in `secretmem_fault()` (local)
- 64: `filemap_invalidate_lock_shared(mapping);` in `secretmem_fault()`
- 67: `folio = filemap_lock_folio(mapping, offset);` in `secretmem_fault()`
- 83: `err = filemap_add_folio(mapping, folio, offset, gfp);` in `secretmem_fault()`
- 107: `filemap_invalidate_unlock_shared(mapping);` in `secretmem_fault()`
- 147: `static int secretmem_migrate_folio(struct address_space *mapping,` in `secretmem_migrate_folio()` (argument)
- 169: `struct address_space *mapping = inode->i_mapping;` in `secretmem_setattr()` (local)
- 173: `filemap_invalidate_lock(mapping);` in `secretmem_setattr()`
- 180: `filemap_invalidate_unlock(mapping);` in `secretmem_setattr()`
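
Hits 52 through 107 trace a common `->fault` shape: look the folio up under `filemap_invalidate_lock_shared()` and insert a fresh one on a miss. A condensed, hedged sketch of that shape; the function name is invented and error handling is trimmed (secretmem itself, for instance, retries on `-EEXIST`):

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	vm_fault_t ret = VM_FAULT_LOCKED;
	struct folio *folio;
	int err;

	/* keep truncation and hole punching out while we work */
	filemap_invalidate_lock_shared(mapping);

	folio = filemap_lock_folio(mapping, index);
	if (IS_ERR(folio)) {
		folio = folio_alloc(vmf->gfp_mask, 0);
		if (!folio) {
			ret = VM_FAULT_OOM;
			goto out;
		}
		/* fill the folio contents here, then: */
		folio_mark_uptodate(folio);

		err = filemap_add_folio(mapping, folio, index,
					vmf->gfp_mask);
		if (err) {
			folio_put(folio);
			ret = vmf_error(err);
			goto out;
		}
		/* on success the folio is in the cache and locked */
	}
	vmf->page = folio_file_page(folio, index);
out:
	filemap_invalidate_unlock_shared(mapping);
	return ret;
}
```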
### vma.c
- 231: `flush_dcache_mmap_lock(mapping);` in `__vma_link_file()`
- 245: `flush_dcache_mmap_lock(mapping);` in `__remove_shared_vm_struct()`
- 295: `i_mmap_lock_write(vp->mapping);` in `vma_prepare()`
- 320: `&vp->mapping->i_mmap);` in `vma_prepare()`
- 339: `&vp->mapping->i_mmap);` in `vma_complete()`
- 1727: `i_mmap_lock_write(mapping);` in `unlink_file_vma_batch_process()`
- 1732: `i_mmap_unlock_write(mapping);` in `unlink_file_vma_batch_process()`
- 1768: `i_mmap_lock_write(mapping);` in `unlink_file_vma()`
- 1780: `mapping = file->f_mapping;` in `vma_link_file()`
- 1781: `i_mmap_lock_write(mapping);` in `vma_link_file()`
- [all …]
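
The pattern behind these hits: a file-backed VMA enters or leaves `mapping->i_mmap` only under `i_mmap_lock_write()`, with `flush_dcache_mmap_lock()` wrapped around the tree update on architectures that need it. A condensed sketch (hypothetical helper; compare the `setup_vma_to_mm()` hits under nommu.c below):

```c
#include <linux/cacheflush.h>
#include <linux/fs.h>
#include <linux/mm.h>

static void link_vma_to_file(struct vm_area_struct *vma,
			     struct address_space *mapping)
{
	i_mmap_lock_write(mapping);
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
	i_mmap_unlock_write(mapping);
}
```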
### mapping_dirty_helpers.c
- 263: `unsigned long wp_shared_mapping_range(struct address_space *mapping,` in `wp_shared_mapping_range()` (argument)
- 268: `i_mmap_lock_read(mapping);` in `wp_shared_mapping_range()`
- 269: `WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,` in `wp_shared_mapping_range()`
- 271: `i_mmap_unlock_read(mapping);` in `wp_shared_mapping_range()`
- 313: `unsigned long clean_record_shared_mapping_range(struct address_space *mapping,` in `clean_record_shared_mapping_range()` (argument)
- 329: `i_mmap_lock_read(mapping);` in `clean_record_shared_mapping_range()`
- 330: `WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,` in `clean_record_shared_mapping_range()`
- 332: `i_mmap_unlock_read(mapping);` in `clean_record_shared_mapping_range()`
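
Both helpers take the range under `i_mmap_lock_read()` and then page-walk every shared mapping of it. A short usage sketch for the first, as a driver might call it so that the next write to each mapped page refaults and can be tracked (the caller name is made up):

```c
#include <linux/mm.h>

static void start_write_tracking(struct address_space *mapping,
				 pgoff_t first_index, pgoff_t nr)
{
	unsigned long wp_count;

	/* write-protect the PTEs of all shared mappings of the range */
	wp_count = wp_shared_mapping_range(mapping, first_index, nr);

	pr_debug("write-protected %lu ptes\n", wp_count);
}
```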
### workingset.c
- 703: `struct address_space *mapping;` in `shadow_lru_isolate()` (local)
- 718: `mapping = container_of(node->array, struct address_space, i_pages);` in `shadow_lru_isolate()`
- 721: `if (!xa_trylock(&mapping->i_pages)) {` in `shadow_lru_isolate()`
- 728: `if (mapping->host != NULL) {` in `shadow_lru_isolate()`
- 729: `if (!spin_trylock(&mapping->host->i_lock)) {` in `shadow_lru_isolate()`
- 730: `xa_unlock(&mapping->i_pages);` in `shadow_lru_isolate()`
- 755: `xa_unlock_irq(&mapping->i_pages);` in `shadow_lru_isolate()`
- 756: `if (mapping->host != NULL) {` in `shadow_lru_isolate()`
- 757: `if (mapping_shrinkable(mapping))` in `shadow_lru_isolate()`
- 758: `inode_add_lru(mapping->host);` in `shadow_lru_isolate()`
- [all …]
### internal.h
- 150: `unsigned long mapping = (unsigned long)folio->mapping;` in `folio_raw_mapping()` (local)
- 152: `return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);` in `folio_raw_mapping()`
- 466: `DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);` in `force_page_cache_readahead()`
- 470: `unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,`
- 472: `unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,`
- 479: `unsigned long mapping_try_invalidate(struct address_space *mapping,`
- 521: `struct address_space *mapping = folio_mapping(folio);` in `folio_needs_release()` (local)
- 524: `(mapping && mapping_release_always(mapping));` in `folio_needs_release()`
- 816: `p->mapping = TAIL_MAPPING;` in `prep_compound_tail()`
- 1617: `#define mapping_set_update(xas, mapping) do { \` (argument)
- [all …]
### khugepaged.c
- 1729: `i_mmap_lock_read(mapping);` in `retract_page_tables()`
- 1819: `i_mmap_unlock_read(mapping);` in `retract_page_tables()`
- 1873: `new_folio->mapping = mapping;` in `collapse_file()`
- 1959: `filemap_flush(mapping);` in `collapse_file()`
- 2065: `filemap_nr_thps_inc(mapping);` in `collapse_file()`
- 2131: `i_mmap_lock_read(mapping);` in `collapse_file()`
- 2172: `i_mmap_unlock_read(mapping);` in `collapse_file()`
- 2230: `folio->mapping = NULL;` in `collapse_file()`
- 2243: `mapping->nrpages -= nr_none;` in `collapse_file()`
- 2260: `filemap_nr_thps_dec(mapping);` in `collapse_file()`
- [all …]
### folio-compat.c
- 68: `int add_to_page_cache_lru(struct page *page, struct address_space *mapping,` in `add_to_page_cache_lru()` (argument)
- 71: `return filemap_add_folio(mapping, page_folio(page), index, gfp);` in `add_to_page_cache_lru()`
- 76: `struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,` in `pagecache_get_page()` (argument)
- 81: `folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);` in `pagecache_get_page()`
### huge_memory.c
- 3333: `new_folio->mapping = folio->mapping;` in `__split_folio_to_order()`
- 3452: `if (mapping) {` in `__split_unmapped_folio()`
- 3649: `mapping = folio->mapping;` in `__folio_split()`
- 3715: `if (mapping) {` in `__folio_split()`
- 3752: `if (mapping) {` in `__folio_split()`
- 3816: `if (!mapping)` in `__folio_split()`
- 3853: `if (mapping)` in `__folio_split()`
- 3890: `if (mapping)` in `__folio_split()`
- 4362: `mapping = folio->mapping;` in `split_huge_pages_pid()`
- 4385: `if (!folio_test_anon(folio) && folio->mapping != mapping)` in `split_huge_pages_pid()`
- [all …]
### rmap.c
- 1116: `struct address_space *mapping;` in `folio_mkclean()` (local)
- 1128: `mapping = folio_mapping(folio);` in `folio_mkclean()`
- 1129: `if (!mapping)` in `folio_mkclean()`
- 1202: `if (!mapping)` in `mapping_wrprotect_range()`
- 1343: `WRITE_ONCE(folio->mapping, anon_vma);` in `folio_move_anon_rmap()`
- 2884: `if (i_mmap_trylock_read(mapping))` in `__rmap_walk_file()`
- 2892: `i_mmap_lock_read(mapping);` in `__rmap_walk_file()`
- 2895: `vma_interval_tree_foreach(vma, &mapping->i_mmap,` in `__rmap_walk_file()`
- 2912: `i_mmap_unlock_read(mapping);` in `__rmap_walk_file()`
- 2935: `if (!folio->mapping)` in `rmap_walk_file()`
- [all …]
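
The `__rmap_walk_file()` hits (2884 through 2912) show the canonical traversal of every VMA that may map a file folio: take `i_mmap_lock_read()` and walk the interval tree over the folio's page range. A hedged standalone sketch of the same traversal (the function name is invented):

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

static void for_each_mapping_vma(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);
	pgoff_t first = folio->index;
	pgoff_t last = first + folio_nr_pages(folio) - 1;
	struct vm_area_struct *vma;

	if (!mapping)
		return;

	i_mmap_lock_read(mapping);
	/* every VMA whose file range overlaps [first, last] */
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
		/* act on @vma here */
	}
	i_mmap_unlock_read(mapping);
}
```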
### hugetlb.c
- 1678: `folio->mapping = NULL;` in `free_hpage_workfn()`
- 1840: `folio->mapping = NULL;` in `free_huge_folio()`
- 1934: `if (!mapping)` in `hugetlb_folio_mapping_lock_write()`
- 1935: `return mapping;` in `hugetlb_folio_mapping_lock_write()`
- 1938: `return mapping;` in `hugetlb_folio_mapping_lock_write()`
- 4037: `new_folio->mapping = NULL;` in `demote_free_hugetlb_folios()`
- 5806: `i_mmap_lock_write(mapping);` in `move_hugetlb_page_tables()`
- 5836: `i_mmap_unlock_write(mapping);` in `move_hugetlb_page_tables()`
- 6099: `i_mmap_lock_write(mapping);` in `unmap_ref_private()`
- 7548: `i_mmap_lock_read(mapping);` in `huge_pmd_share()`
- [all …]
### util.c
- 672: `unsigned long mapping = (unsigned long)folio->mapping;` in `folio_anon_vma()` (local)
- 674: `if ((mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)` in `folio_anon_vma()`
- 676: `return (void *)(mapping - FOLIO_MAPPING_ANON);` in `folio_anon_vma()`
- 693: `struct address_space *mapping;` in `folio_mapping()` (local)
- 702: `mapping = folio->mapping;` in `folio_mapping()`
- 703: `if ((unsigned long)mapping & FOLIO_MAPPING_FLAGS)` in `folio_mapping()`
- 706: `return mapping;` in `folio_mapping()`
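
`folio_anon_vma()` and `folio_mapping()` above decode the same trick: the low bits of `folio->mapping` tag what the pointer really points to. A sketch of that decoding using the `FOLIO_MAPPING_*` constants visible in the hits; in real code, prefer the official accessors such as `folio_test_anon()`:

```c
#include <linux/mm.h>
#include <linux/page-flags.h>

static struct anon_vma *decode_folio_mapping(struct folio *folio)
{
	unsigned long m = (unsigned long)folio->mapping;

	/* no tag bits: a plain struct address_space pointer (or NULL) */
	if (!(m & FOLIO_MAPPING_FLAGS))
		return NULL;

	/* exactly FOLIO_MAPPING_ANON: the rest is the anon_vma */
	if ((m & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON)
		return (struct anon_vma *)(m - FOLIO_MAPPING_ANON);

	/* other tag combinations (e.g. movable ops) not handled here */
	return NULL;
}
```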
### nommu.c
- 569: `i_mmap_lock_write(mapping);` in `setup_vma_to_mm()`
- 570: `flush_dcache_mmap_lock(mapping);` in `setup_vma_to_mm()`
- 571: `vma_interval_tree_insert(vma, &mapping->i_mmap);` in `setup_vma_to_mm()`
- 572: `flush_dcache_mmap_unlock(mapping);` in `setup_vma_to_mm()`
- 573: `i_mmap_unlock_write(mapping);` in `setup_vma_to_mm()`
- 582: `struct address_space *mapping;` in `cleanup_vma_from_mm()` (local)
- 583: `mapping = vma->vm_file->f_mapping;` in `cleanup_vma_from_mm()`
- 585: `i_mmap_lock_write(mapping);` in `cleanup_vma_from_mm()`
- 586: `flush_dcache_mmap_lock(mapping);` in `cleanup_vma_from_mm()`
- 588: `flush_dcache_mmap_unlock(mapping);` in `cleanup_vma_from_mm()`
- [all …]
### vmscan.c
- 666: `if (shmem_mapping(mapping))` in `writeout()`
- 709: `if (!mapping) {` in `pageout()`
- 746: `xa_lock_irq(&mapping->i_pages);` in `__remove_mapping()`
- 811: `!mapping_exiting(mapping) && !dax_mapping(mapping))` in `__remove_mapping()`
- 816: `inode_add_lru(mapping->host);` in `__remove_mapping()`
- 979: `struct address_space *mapping;` in `folio_check_dirty_writeback()` (local)
- 1003: `mapping = folio_mapping(folio);` in `folio_check_dirty_writeback()`
- 1004: `if (mapping && mapping->a_ops->is_dirty_writeback)` in `folio_check_dirty_writeback()`
- 1241: `(mapping &&` in `shrink_folio_list()`
- 1552: `} else if (!mapping || !__remove_mapping(mapping, folio, true,` in `shrink_folio_list()`
- [all …]
### debug.c
- 73: `struct address_space *mapping = folio_mapping(folio);` in `__dump_folio()` (local)
- 81: `folio_ref_count(folio), mapcount, mapping,` in `__dump_folio()`
- 105: `else if (mapping)` in `__dump_folio()`
- 106: `dump_mapping(mapping);` in `__dump_folio()`
### page_io.c
- 82: `struct address_space *mapping = swap_file->f_mapping;` in `generic_swapfile_activate()` (local)
- 83: `struct inode *inode = mapping->host;` in `generic_swapfile_activate()`
- 473: `struct address_space *mapping = sio->iocb.ki_filp->f_mapping;` in `swap_write_unplug()` (local)
- 477: `ret = mapping->a_ops->swap_rw(&sio->iocb, &from);` in `swap_write_unplug()`
- 662: `struct address_space *mapping = sio->iocb.ki_filp->f_mapping;` in `__swap_read_unplug()` (local)
- 666: `ret = mapping->a_ops->swap_rw(&sio->iocb, &from);` in `__swap_read_unplug()`
### swap.h
- 67: `struct folio *filemap_get_incore_folio(struct address_space *mapping,`
- 182: `struct folio *filemap_get_incore_folio(struct address_space *mapping,` in `filemap_get_incore_folio()` (argument)
- 185: `return filemap_get_folio(mapping, index);` in `filemap_get_incore_folio()`
### migrate_device.c
- 179: `if (!page || !page->mapping || PageTransCompound(page)) {` in `migrate_vma_collect_pmd()`
- 713: `struct address_space *mapping;` in `__migrate_device_pages()` (local)
- 751: `mapping = folio_mapping(folio);` in `__migrate_device_pages()`
- 755: `if (mapping) {` in `__migrate_device_pages()`
- 780: `r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);` in `__migrate_device_pages()`