/linux/mm/
swap.c
    97    void __folio_put(struct folio *folio)    in __folio_put() (argument)
    125   struct folio *folio, *next;    in put_pages_list() (local)
    154   VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);    in lru_add()
    196   struct folio *folio = fbatch->folios[i];    in folio_batch_move_lru() (local)
    210   struct folio *folio, move_fn_t move_fn,    in __folio_batch_add_and_move() (argument)
    355   void folio_activate(struct folio *folio)    in folio_activate() (argument)
    368   void folio_activate(struct folio *folio)    in folio_activate() (argument)
    502   void folio_add_lru(struct folio *folio)    in folio_add_lru() (argument)
    505   folio_test_unevictable(folio), folio);    in folio_add_lru()
    702   void folio_deactivate(struct folio *folio)    in folio_deactivate() (argument)
    [all …]
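
The swap.c hits above are the core LRU entry points (folio_add_lru(), folio_activate(), folio_deactivate()) plus the final release path __folio_put(). As a hedged illustration only (the helper name is invented, and the caller is assumed to already hold a reference), this is how a reference pinned with folio_get() normally flows back into that release path:

#include <linux/mm.h>

/* Illustrative sketch, not taken from swap.c. */
static void example_use_folio(struct folio *folio)
{
        /* assumption: the caller already owns a reference to @folio */
        folio_get(folio);       /* pin it for the duration of our work */

        /* ... inspect or modify the folio here ... */

        folio_put(folio);       /* if this was the last reference,
                                 * __folio_put() (line 97 above) frees it */
}
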
truncate.c
    55    struct folio *folio = fbatch->folios[i];    in clear_shadow_entries() (local)
    96    struct folio *folio = fbatch->folios[i];    in truncate_folio_batch_exceptionals() (local)
    210   truncate_inode_folio(folio->mapping, folio);    in truncate_inode_partial_folio()
    230   truncate_inode_folio(folio->mapping, folio);    in truncate_inode_partial_folio()
    238   struct folio *folio)    in generic_error_remove_folio() (argument)
    313   struct folio *folio;    in truncate_inode_pages_range() (local)
    389   struct folio *folio = fbatch.folios[i];    in truncate_inode_pages_range() (local)
    486   struct folio *folio = fbatch.folios[i];    in mapping_try_invalidate() (local)
    550   struct folio *folio)    in invalidate_complete_folio2() (argument)
    617   struct folio *folio = fbatch.folios[i];    in invalidate_inode_pages2_range() (local)
    [all …]
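
Most of the truncate.c lines above pull folios out of a folio_batch (the fbatch.folios[i] accesses). A minimal sketch of that batched-lookup loop, assuming a current mainline API (filemap_get_folios(), folio_batch_release()); the function name is made up:

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void example_walk_mapping(struct address_space *mapping)
{
        struct folio_batch fbatch;
        pgoff_t index = 0;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &index, (pgoff_t)-1, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        /* inspect, truncate or invalidate @folio here */
                }
                folio_batch_release(&fbatch);   /* drop the lookup references */
                cond_resched();
        }
}
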
filemap.c
    159   struct folio *folio)    in filemap_unaccount_folio() (argument)
    293   struct folio *folio;    in page_cache_delete_batch() (local)
    483   struct folio *folio;    in filemap_range_has_page() (local)
    643   struct folio *folio;    in filemap_range_has_writeback() (local)
    998   struct folio *folio;    in filemap_alloc_folio_noprof() (local)
    1173  key.folio = folio;    in folio_wake_bit()
    1664  wait->folio = folio;    in __folio_lock_async()
    1837  struct folio *folio;    in filemap_get_entry() (local)
    1884  struct folio *folio;    in __filemap_get_folio() (local)
    1988  struct folio *folio;    in find_get_entry() (local)
    [all …]
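
Several hits sit inside __filemap_get_folio(), the page-cache lookup-or-create primitive. A hedged sketch of a typical caller follows; in recent kernels the function returns an ERR_PTR() on failure rather than NULL, and the flag combination shown is only one common choice:

#include <linux/err.h>
#include <linux/pagemap.h>

static struct folio *example_get_locked_folio(struct address_space *mapping,
                                              pgoff_t index)
{
        struct folio *folio;

        folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
                                    mapping_gfp_mask(mapping));
        if (IS_ERR(folio))
                return folio;   /* e.g. ERR_PTR(-ENOMEM) */

        /* on success the folio is locked and referenced; the caller must
         * eventually folio_unlock() and folio_put() it */
        return folio;
}
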
page_io.c
    32    struct folio *folio = bio_first_folio_all(bio);    in __end_swap_bio_write() (local)
    60    struct folio *folio = bio_first_folio_all(bio);    in __end_swap_bio_read() (local)
    175   static bool is_folio_zero_filled(struct folio *folio)    in is_folio_zero_filled() (argument)
    205   static void swap_zeromap_folio_set(struct folio *folio)    in swap_zeromap_folio_set() (argument)
    225   static void swap_zeromap_folio_clear(struct folio *folio)    in swap_zeromap_folio_clear() (argument)
    243   struct folio *folio = page_folio(page);    in swap_writepage() (local)
    443   bio_add_folio_nofail(bio, folio, folio_size(folio), 0);    in swap_writepage_bdev_async()
    456   VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);    in __swap_writepage()
    536   folio_zero_range(folio, 0, folio_size(folio));    in swap_read_folio_zeromap()
    617   VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);    in swap_read_folio()
    [all …]
rmap.c
    777   struct folio *folio = page_folio(page);    in page_address_in_vma() (local)
    1095  int folio_mkclean(struct folio *folio)    in folio_mkclean() (argument)
    1279  folio);    in __page_check_anon_rmap()
    1476  VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);    in __folio_add_file_rmap()
    1963  static int folio_not_mapped(struct folio *folio)    in folio_not_mapped() (argument)
    2060  !folio_test_pmd_mappable(folio), folio);    in try_to_migrate_one()
    2534  struct folio *folio = page_folio(pages[i]);    in make_device_exclusive_range() (local)
    2604  static void rmap_walk_anon(struct folio *folio,    in rmap_walk_anon() (argument)
    2654  static void rmap_walk_file(struct folio *folio,    in rmap_walk_file() (argument)
    2722  VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);    in rmap_walk_locked()
    [all …]
swap_state.c
    180   bool add_to_swap(struct folio *folio)    in add_to_swap() (argument)
    237   void delete_from_swap_cache(struct folio *folio)    in delete_from_swap_cache() (argument)
    247   folio_ref_sub(folio, folio_nr_pages(folio));    in delete_from_swap_cache()
    289   void free_swap_cache(struct folio *folio)    in free_swap_cache() (argument)
    304   struct folio *folio = page_folio(page);    in free_page_and_swap_cache() (local)
    354   struct folio *folio;    in swap_cache_get_folio() (local)
    437   struct folio *folio;    in __read_swap_cache_async() (local)
    565   struct folio *folio;    in read_swap_cache_async() (local)
    659   struct folio *folio;    in swap_cluster_readahead() (local)
    804   struct folio *folio;    in swap_vma_readahead() (local)
    [all …]
mlock.c
    95    folio->mlock_count = !!folio_test_mlocked(folio);    in __mlock_folio()
    105   VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);    in __mlock_new_folio()
    169   static inline struct folio *mlock_lru(struct folio *folio)    in mlock_lru() (argument)
    174   static inline struct folio *mlock_new(struct folio *folio)    in mlock_new() (argument)
    190   struct folio *folio;    in mlock_folio_batch() (local)
    196   folio = (struct folio *)((unsigned long)folio - mlock);    in mlock_folio_batch()
    242   void mlock_folio(struct folio *folio)    in mlock_folio() (argument)
    267   void mlock_new_folio(struct folio *folio)    in mlock_new_folio() (argument)
    290   void munlock_folio(struct folio *folio)    in munlock_folio() (argument)
    360   struct folio *folio;    in mlock_pte_range() (local)
    [all …]
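
The odd-looking arithmetic at mlock.c:196 is a pointer-tagging trick: because struct folio is word aligned, the low bits of a folio pointer stored in a folio_batch are free to carry per-entry flags (mlock_lru()/mlock_new() set them, mlock_folio_batch() strips them). A generic sketch of the idea; the flag names and values here are invented for illustration, not the ones mlock.c uses:

#include <linux/mm.h>

#define EX_FLAG_LRU     0x1UL
#define EX_FLAG_NEW     0x2UL
#define EX_FLAG_MASK    (EX_FLAG_LRU | EX_FLAG_NEW)

static inline struct folio *ex_tag_folio(struct folio *folio, unsigned long flags)
{
        /* stash @flags in the (always zero) low bits of the pointer */
        return (struct folio *)((unsigned long)folio | flags);
}

static inline struct folio *ex_untag_folio(struct folio *tagged, unsigned long *flags)
{
        *flags = (unsigned long)tagged & EX_FLAG_MASK;
        return (struct folio *)((unsigned long)tagged - *flags);
}
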
page_idle.c
    37    struct folio *folio;    in page_idle_get_folio() (local)
    43    if (!folio_test_lru(folio) || !folio_try_get(folio))    in page_idle_get_folio()
    47    folio = NULL;    in page_idle_get_folio()
    49    return folio;    in page_idle_get_folio()
    52    static bool page_idle_clear_pte_refs_one(struct folio *folio,    in page_idle_clear_pte_refs_one() (argument)
    89    static void page_idle_clear_pte_refs(struct folio *folio)    in page_idle_clear_pte_refs() (argument)
    101   if (!folio_mapped(folio) || !folio_raw_mapping(folio))    in page_idle_clear_pte_refs()
    119   struct folio *folio;    in page_idle_bitmap_read() (local)
    139   if (folio) {    in page_idle_bitmap_read()
    164   struct folio *folio;    in page_idle_bitmap_write() (local)
    [all …]
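
Line 43 above shows the usual way to pin a folio found by PFN: only take a reference if it is still on an LRU list, then re-check that nothing changed underneath. A sketch of that pattern with an invented function name; error handling is minimal:

#include <linux/memory_hotplug.h>
#include <linux/mm.h>

static struct folio *example_pin_lru_folio(unsigned long pfn)
{
        struct page *page = pfn_to_online_page(pfn);
        struct folio *folio;

        if (!page)
                return NULL;

        folio = page_folio(page);
        if (!folio_test_lru(folio) || !folio_try_get(folio))
                return NULL;

        /* the folio may have been freed and reused since the lookup */
        if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
                folio_put(folio);
                return NULL;
        }
        return folio;
}
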
migrate.c
    143   struct folio *folio;    in putback_movable_pages() (local)
    201   struct folio *folio,    in try_to_map_unused_to_zeropage() (argument)
    240   struct folio *folio;    (member)
    457   struct folio *folio)    in folio_expected_refs() (argument)
    479   struct folio *newfolio, struct folio *folio, int expected_count)    in __folio_migrate_mapping() (argument)
    612   struct folio *newfolio, struct folio *folio, int extra_count)    in folio_migrate_mapping() (argument)
    663   void folio_migrate_flags(struct folio *newfolio, struct folio *folio)    in folio_migrate_flags() (argument)
    1618  struct folio *folio, *folio2;    in migrate_hugetlbs() (local)
    2035  struct folio *folio, *folio2;    in migrate_pages() (local)
    2236  struct folio *folio;    in add_folio_for_migration() (local)
    [all …]
/linux/include/linux/
rmap.h
    201   VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);    in __folio_rmap_sanity_checks()
    204   VM_WARN_ON_FOLIO(is_zero_folio(folio), folio);    in __folio_rmap_sanity_checks()
    216   VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);    in __folio_rmap_sanity_checks()
    270   VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);    in hugetlb_try_dup_anon_rmap()
    271   VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);    in hugetlb_try_dup_anon_rmap()
    286   VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);    in hugetlb_try_share_anon_rmap()
    287   VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);    in hugetlb_try_share_anon_rmap()
    310   VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);    in hugetlb_add_file_rmap()
    399   VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);    in __folio_try_dup_anon_rmap()
    526   VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);    in __folio_try_share_anon_rmap()
    [all …]
hugetlb_cgroup.h
    63    VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);    in __hugetlb_cgroup_from_folio()
    76    hugetlb_cgroup_from_folio_rsvd(struct folio *folio)    in hugetlb_cgroup_from_folio_rsvd() (argument)
    84    VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);    in __set_hugetlb_cgroup()
    133   struct folio *folio);
    136   struct folio *folio);
    138   struct folio *folio);
    140   struct folio *folio);
    222   struct folio *folio)    in hugetlb_cgroup_commit_charge() (argument)
    229   struct folio *folio)    in hugetlb_cgroup_commit_charge_rsvd() (argument)
    234   struct folio *folio)    in hugetlb_cgroup_uncharge_folio() (argument)
    [all …]
pagemap.h
    928   return folio->index + folio_nr_pages(folio);    in folio_next_index()
    1067  struct folio *folio;    (member)
    1073  struct folio *folio;    (member)
    1081  if (wait_page->folio != key->folio)    in wake_page_match()
    1091  void __folio_lock(struct folio *folio);
    1095  void folio_unlock(struct folio *folio);
    1164  struct folio *folio;    in lock_page() (local)
    1243  void folio_end_writeback(struct folio *folio);
    1245  void folio_wait_stable(struct folio *folio);
    1266  void folio_end_private_2(struct folio *folio);
    [all …]
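
The prototypes above (__folio_lock(), folio_unlock(), folio_end_writeback(), folio_wait_stable(), ...) make up the folio lock and writeback-wait API. A small sketch of the usual discipline, assuming the caller already holds a reference; the helper name is invented:

#include <linux/pagemap.h>

static void example_lock_and_touch(struct folio *folio)
{
        folio_lock(folio);                      /* may sleep */

        if (folio_test_writeback(folio))
                folio_wait_writeback(folio);    /* wait for in-flight I/O */

        /* ... examine or dirty the folio under the lock ... */

        folio_unlock(folio);
}
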
mm_inline.h
    28    static inline int folio_is_file_lru(struct folio *folio)    in folio_is_file_lru() (argument)
    68    VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);    in __folio_clear_lru_flags()
    73    if (folio_test_active(folio) && folio_test_unevictable(folio))    in __folio_clear_lru_flags()
    91    VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);    in folio_lru_list()
    144   static inline int folio_lru_refs(struct folio *folio)    in folio_lru_refs() (argument)
    158   static inline int folio_lru_gen(struct folio *folio)    in folio_lru_gen() (argument)
    250   (folio_test_dirty(folio) || folio_test_writeback(folio))))    in lru_gen_add_folio()
    280   VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);    in lru_gen_del_folio()
    281   VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);    in lru_gen_del_folio()
    319   void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)    in lruvec_add_folio() (argument)
    [all …]
migrate.h
    10    typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
    11    typedef void free_folio_t(struct folio *folio, unsigned long private);
    73    bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
    76    struct folio *dst, struct folio *src);
    79    void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
    81    struct folio *newfolio, struct folio *folio, int extra_count);
    90    static inline struct folio *alloc_migration_target(struct folio *src,    in alloc_migration_target()
    99    struct folio *dst, struct folio *src)    in migrate_huge_page_move_mapping()
    121   static inline bool folio_test_movable(struct folio *folio)    in folio_test_movable() (argument)
    145   int migrate_misplaced_folio_prepare(struct folio *folio,
    [all …]
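
Lines 10-11 are the allocation/free callbacks a migrate_pages() caller supplies. A hedged sketch of what such callbacks might look like; the allocation policy is deliberately naive and the names are invented:

#include <linux/gfp.h>
#include <linux/migrate.h>

static struct folio *example_alloc_dst(struct folio *src, unsigned long private)
{
        /* same-order destination folio; real callers pick a target node/zone */
        return folio_alloc(GFP_KERNEL | __GFP_NOWARN, folio_order(src));
}

static void example_free_dst(struct folio *dst, unsigned long private)
{
        folio_put(dst);         /* give back an unused destination folio */
}

/*
 * These would then be passed along the lines of:
 *   migrate_pages(&folio_list, example_alloc_dst, example_free_dst,
 *                 0, MIGRATE_SYNC, MR_SYSCALL, NULL);
 * (signature as of recent mainline; check migrate.h in your tree)
 */
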
page_ref.h
    87    static inline int folio_ref_count(const struct folio *folio)    in folio_ref_count() (argument)
    104   static inline void folio_set_count(struct folio *folio, int v)    in folio_set_count() (argument)
    125   static inline void folio_ref_add(struct folio *folio, int nr)    in folio_ref_add() (argument)
    137   static inline void folio_ref_sub(struct folio *folio, int nr)    in folio_ref_sub() (argument)
    142   static inline int folio_ref_sub_return(struct folio *folio, int nr)    in folio_ref_sub_return() (argument)
    158   static inline void folio_ref_inc(struct folio *folio)    in folio_ref_inc() (argument)
    170   static inline void folio_ref_dec(struct folio *folio)    in folio_ref_dec() (argument)
    198   static inline int folio_ref_inc_return(struct folio *folio)    in folio_ref_inc_return() (argument)
    212   static inline int folio_ref_dec_and_test(struct folio *folio)    in folio_ref_dec_and_test() (argument)
    226   static inline int folio_ref_dec_return(struct folio *folio)    in folio_ref_dec_return() (argument)
    [all …]
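
These are the raw refcount helpers underneath folio_get()/folio_put(). A sketch of the batched pattern they exist for (essentially what mm.h's folio_put_refs() does), assuming the references were taken earlier with folio_ref_add(); the function name is invented:

#include <linux/mm.h>
#include <linux/page_ref.h>

/* Drop @refs references in one atomic operation. */
static void example_put_refs(struct folio *folio, int refs)
{
        if (folio_ref_sub_and_test(folio, refs))
                __folio_put(folio);     /* that was the last one: free the folio */
}
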
/linux/fs/btrfs/
subpage.c
    196   struct folio *folio, u64 start, u32 len)    in btrfs_subpage_assert() (argument)
    225   struct folio *folio, u64 start, u32 len)    in btrfs_subpage_start_reader() (argument)
    365   struct folio *folio, u64 start, u32 len)    in btrfs_folio_start_writer_lock() (argument)
    398   struct folio *folio, u64 start, u32 len)    in btrfs_folio_end_writer_lock() (argument)
    471   struct folio *folio, u64 start, u32 len)    in btrfs_subpage_set_uptodate() (argument)
    486   struct folio *folio, u64 start, u32 len)    in btrfs_subpage_clear_uptodate() (argument)
    524   struct folio *folio, u64 start, u32 len)    in btrfs_subpage_clear_and_test_dirty() (argument)
    551   struct folio *folio, u64 start, u32 len)    in btrfs_subpage_set_writeback() (argument)
    597   struct folio *folio, u64 start, u32 len)    in btrfs_subpage_clear_ordered() (argument)
    627   struct folio *folio, u64 start, u32 len)    in btrfs_subpage_clear_checked() (argument)
    [all …]
subpage.h
    11    struct folio;
    99    struct folio *folio, u64 start, u32 len);
    104   struct folio *folio, u64 start, u32 len);
    106   struct folio *folio, u64 start, u32 len);
    108   struct folio *folio, u64 start, u32 len);
    110   struct folio *folio, unsigned long bitmap);
    131   struct folio *folio, u64 start, u32 len); \
    133   struct folio *folio, u64 start, u32 len); \
    147   struct folio *folio, u64 start, u32 len);
    156   struct folio *folio, u64 start, u32 len);
    [all …]
/linux/fs/jfs/
jfs_metapage.c
    135   static inline void inc_io(struct folio *folio)    in inc_io() (argument)
    176   #define dec_io(folio, status, handler) handler(folio, status)    (argument)
    277   struct folio *folio = bio->bi_private;    in metapage_read_end_io() (local)
    333   struct folio *folio = bio->bi_private;    in metapage_write_end_io() (local)
    341   static int metapage_write_folio(struct folio *folio,    in metapage_write_folio() (argument)
    583   struct folio *folio;    in __get_metapage() (local)
    655   mp->folio = folio;    in __get_metapage()
    695   static int metapage_write_one(struct folio *folio)    in metapage_write_one() (argument)
    725   struct folio *folio = mp->folio;    in force_metapage() (local)
    759   struct folio *folio = mp->folio;    in release_metapage() (local)
    [all …]
/linux/mm/damon/
paddr.c
    22    static bool damon_folio_mkold_one(struct folio *folio,    in damon_folio_mkold_one() (argument)
    37    static void damon_folio_mkold(struct folio *folio)    in damon_folio_mkold() (argument)
    45    if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {    in damon_folio_mkold()
    63    struct folio *folio = damon_get_folio(PHYS_PFN(paddr));    in damon_pa_mkold() (local)
    65    if (!folio)    in damon_pa_mkold()
    90    static bool damon_folio_young_one(struct folio *folio,    in damon_folio_young_one() (argument)
    122   static bool damon_folio_young(struct folio *folio)    in damon_folio_young() (argument)
    202   struct folio *folio)    in __damos_pa_filter_out() (argument)
    368   struct folio *folio;    in damon_pa_migrate_folio_list() (local)
    373   struct folio *folio;    in damon_pa_migrate_folio_list() (local)
    [all …]
/linux/fs/bcachefs/
fs-io-pagecache.h
    19    static inline u64 folio_end_pos(struct folio *folio)    in folio_end_pos() (argument)
    21    return folio_pos(folio) + folio_size(folio);    in folio_end_pos()
    24    static inline size_t folio_sectors(struct folio *folio)    in folio_sectors() (argument)
    29    static inline loff_t folio_sector(struct folio *folio)    in folio_sector() (argument)
    34    static inline u64 folio_end_sector(struct folio *folio)    in folio_end_sector() (argument)
    72    static inline void bch2_folio_sector_set(struct folio *folio,    in bch2_folio_sector_set() (argument)
    80    static inline int folio_pos_to_s(struct folio *folio, loff_t pos)    in folio_pos_to_s() (argument)
    84    BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));    in folio_pos_to_s()
    89    static inline void __bch2_folio_release(struct folio *folio)    in __bch2_folio_release() (argument)
    94    static inline void bch2_folio_release(struct folio *folio)    in bch2_folio_release() (argument)
    [all …]
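
folio_end_pos() above is just interval arithmetic: a folio covers the byte range [folio_pos(folio), folio_pos(folio) + folio_size(folio)). A trivial sketch of that relation, with an invented helper name:

#include <linux/pagemap.h>

static bool example_folio_contains(struct folio *folio, loff_t pos)
{
        return pos >= folio_pos(folio) &&
               pos < folio_pos(folio) + folio_size(folio);
}
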
/linux/fs/iomap/
buffered-io.c
    340   struct folio *folio)    in iomap_read_inline_data() (argument)
    685   size_t len, struct folio *folio)    in __iomap_write_begin() (argument)
    758   struct folio *folio)    in __iomap_put_folio() (argument)
    771   struct folio *folio)    in iomap_write_begin_inline() (argument)
    784   struct folio *folio;    in iomap_write_begin() (local)
    921   struct folio *folio;    in iomap_write_iter() (local)
    1126  struct folio *folio;    in iomap_write_delalloc_scan() (local)
    1284  struct folio *folio;    in iomap_unshare_iter() (local)
    1388  struct folio *folio;    in iomap_zero_iter() (local)
    1473  struct folio *folio)    in iomap_folio_mkwrite_iter() (argument)
    [all …]
/linux/fs/nilfs2/
page.c
    53    struct folio *folio;    in nilfs_grab_buffer() (local)
    76    struct folio *folio = bh->b_folio;    in nilfs_forget_buffer() (local)
    141   bool nilfs_folio_buffers_clean(struct folio *folio)    in nilfs_folio_buffers_clean() (argument)
    154   void nilfs_folio_bug(struct folio *folio)    in nilfs_folio_bug() (argument)
    170   folio, folio_ref_count(folio),    in nilfs_folio_bug()
    259   struct folio *folio = fbatch.folios[i], *dfolio;    in nilfs_copy_dirty_pages() (local)
    313   struct folio *folio = fbatch.folios[i], *dfolio;    in nilfs_copy_back_pages() (local)
    373   struct folio *folio = fbatch.folios[i];    in nilfs_clear_dirty_pages() (local)
    396   void nilfs_clear_folio_dirty(struct folio *folio)    in nilfs_clear_folio_dirty() (argument)
    452   void __nilfs_clear_folio_dirty(struct folio *folio)    in __nilfs_clear_folio_dirty() (argument)
    [all …]
/linux/fs/sysv/
dir.c
    64    struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);    in dir_get_folio() (local)
    66    if (IS_ERR(folio))    in dir_get_folio()
    68    *foliop = folio;    in dir_get_folio()
    91    struct folio *folio;    in sysv_readdir() (local)
    185   struct folio *folio = NULL;    in sysv_add_link() (local)
    214   pos = folio_pos(folio) + offset_in_folio(folio, de);    in sysv_add_link()
    237   loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);    in sysv_delete_entry()
    255   struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);    in sysv_make_empty() (local)
    291   struct folio *folio = NULL;    in sysv_empty_dir() (local)
    334   loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);    in sysv_set_link()
    [all …]
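
The repeated expression at lines 214/237/334 converts a pointer into a folio's data back into a file offset. A one-line sketch of that computation, with an invented wrapper name; @de is assumed to point somewhere inside the folio's contents:

#include <linux/mm.h>
#include <linux/pagemap.h>

static loff_t example_entry_file_pos(struct folio *folio, const void *de)
{
        /* byte offset of the directory entry within the whole file */
        return folio_pos(folio) + offset_in_folio(folio, de);
}
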
/linux/fs/ubifs/
file.c
    99    static int do_readpage(struct folio *folio)    in do_readpage() (argument)
    110   inode->i_ino, folio->index, i_size, folio->flags);    in do_readpage()
    221   struct folio *folio;    in write_begin_slow() (local)
    425   struct folio *folio;    in ubifs_write_begin() (local)
    527   struct folio *folio, void *fsdata)    in ubifs_write_end() (argument)
    778   struct folio *folio;    in ubifs_do_bulk_read() (local)
    1014  inode->i_ino, folio->index, folio->flags);    in ubifs_writepage()
    1156  struct folio *folio;    in do_truncation() (local)
    1466  struct folio *folio)    in ubifs_dirty_folio() (argument)
    1513  struct folio *folio = page_folio(vmf->page);    in ubifs_vm_page_mkwrite() (local)
    [all …]
/linux/include/trace/events/
pagemap.h
    30    TP_PROTO(struct folio *folio),
    32    TP_ARGS(folio),
    35    __field(struct folio *, folio )
    42    __entry->folio = folio;
    43    __entry->pfn = folio_pfn(folio);
    50    __entry->folio,
    63    TP_PROTO(struct folio *folio),
    65    TP_ARGS(folio),
    68    __field(struct folio *, folio )
    73    __entry->folio = folio;
    [all …]
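
These hits are inside the LRU tracepoint definitions in this header; the __entry assignments record the folio pointer and its PFN via folio_pfn(). A hedged sketch of how such an event is fired from mm code, assuming the mm_lru_insertion event name used by current mainline:

#include <linux/mm.h>
#include <trace/events/pagemap.h>

static void example_trace_lru_insert(struct folio *folio)
{
        trace_mm_lru_insertion(folio);  /* static-key no-op unless the event is enabled */
}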