
Searched refs:folio (Results 1 – 25 of 256) sorted by relevance

/linux-6.3-rc2/mm/
swap.c
122 void __folio_put(struct folio *folio) in __folio_put() argument
141 struct folio *folio, *next; in put_pages_list() local
168 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in lru_add_fn()
210 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru() local
229 struct folio *folio, move_fn_t move_fn) in folio_batch_add_and_move() argument
352 void folio_activate(struct folio *folio) in folio_activate() argument
371 void folio_activate(struct folio *folio) in folio_activate() argument
501 void folio_add_lru(struct folio *folio) in folio_add_lru() argument
506 folio_test_unevictable(folio), folio); in folio_add_lru()
713 void folio_deactivate(struct folio *folio) in folio_deactivate() argument
[all …]
filemap.c
151 struct folio *folio) in filemap_unaccount_folio() argument
285 struct folio *folio; in page_cache_delete_batch() local
475 struct folio *folio; in filemap_range_has_page() local
636 struct folio *folio; in filemap_range_has_writeback() local
964 struct folio *folio; in filemap_alloc_folio() local
1140 key.folio = folio; in folio_wake_bit()
1681 wait->folio = folio; in __folio_lock_async()
1853 struct folio *folio; in mapping_get_entry() local
1917 struct folio *folio; in __filemap_get_folio() local
2004 struct folio *folio; in find_get_entry() local
[all …]
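
The filemap.c hits above are the page-cache lookup and allocation paths (__filemap_get_folio(), filemap_alloc_folio(), mapping_get_entry()). As a rough illustration of how that API is consumed, here is a minimal sketch of a cached-folio lookup; it assumes the v6.3 behaviour where filemap_get_folio() returns NULL on a miss, and the function name is invented.

        /* Sketch only: peek at the page cache and drop the reference again. */
        #include <linux/pagemap.h>

        static void example_peek_at_index(struct address_space *mapping, pgoff_t index)
        {
                struct folio *folio;

                folio = filemap_get_folio(mapping, index);      /* takes a reference */
                if (!folio)
                        return;                                 /* nothing cached here */

                folio_lock(folio);
                if (folio_test_uptodate(folio))
                        pr_debug("index %lu lives in a %ld-page folio\n",
                                 index, folio_nr_pages(folio));
                folio_unlock(folio);
                folio_put(folio);                               /* drop the reference */
        }
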
truncate.c
84 struct folio *folio = fbatch->folios[i]; in truncate_folio_batch_exceptionals() local
179 folio_invalidate(folio, 0, folio_size(folio)); in truncate_cleanup_folio()
228 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
247 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
271 struct folio *folio) in mapping_evict_folio() argument
297 struct folio *folio = page_folio(page); in invalidate_inode_page() local
339 struct folio *folio; in truncate_inode_pages_range() local
414 struct folio *folio = fbatch.folios[i]; in truncate_inode_pages_range() local
511 struct folio *folio = fbatch.folios[i]; in invalidate_mapping_pagevec() local
571 struct folio *folio) in invalidate_complete_folio2() argument
[all …]
rmap.c
750 struct folio *folio = page_folio(page); in page_address_in_vma() local
1025 int folio_mkclean(struct folio *folio) in folio_mkclean() argument
1082 int folio_total_mapcount(struct folio *folio) in folio_total_mapcount() argument
1118 struct folio *folio = page_folio(page); in page_move_anon_rmap() local
1182 struct folio *folio = page_folio(page); in __page_check_anon_rmap() local
1215 struct folio *folio = page_folio(page); in page_add_anon_rmap() local
1316 struct folio *folio = page_folio(page); in page_add_file_rmap() local
1370 struct folio *folio = page_folio(page); in page_remove_rmap() local
1864 !folio_test_pmd_mappable(folio), folio); in try_to_migrate_one()
2334 struct folio *folio = page_folio(pages[i]); in make_device_exclusive_range() local
[all …]
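
Most of the rmap.c hits open with struct folio *folio = page_folio(page); — the standard conversion from a struct page to the folio that contains it. A hedged sketch of that conversion and a few folio-level queries follows; the helper name is made up, and folio_mapcount() is assumed to be available in this tree.

        /* Sketch only: go from a page to its folio and read folio-level state. */
        #include <linux/mm.h>

        static void example_inspect_page(struct page *page)
        {
                struct folio *folio = page_folio(page);         /* head of the compound unit */

                pr_debug("pfn %lu: %ld page(s), %s-backed, mapcount %d\n",
                         folio_pfn(folio), folio_nr_pages(folio),
                         folio_test_anon(folio) ? "anon" : "file",
                         folio_mapcount(folio));
        }
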
page_idle.c
37 struct folio *folio; in page_idle_get_folio() local
43 if (!folio_test_lru(folio) || !folio_try_get(folio)) in page_idle_get_folio()
47 folio = NULL; in page_idle_get_folio()
49 return folio; in page_idle_get_folio()
52 static bool page_idle_clear_pte_refs_one(struct folio *folio, in page_idle_clear_pte_refs_one() argument
89 static void page_idle_clear_pte_refs(struct folio *folio) in page_idle_clear_pte_refs() argument
101 if (!folio_mapped(folio) || !folio_raw_mapping(folio)) in page_idle_clear_pte_refs()
119 struct folio *folio; in page_idle_bitmap_read() local
139 if (folio) { in page_idle_bitmap_read()
164 struct folio *folio; in page_idle_bitmap_write() local
[all …]
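
page_idle_get_folio() above (and damon_get_folio() further down) both use the speculative-reference pattern: take a reference only if the folio still looks like it is on the LRU, then re-check that the page has not been freed or split underneath. A sketch of that pattern, with an invented helper name and a pfn the caller is assumed to have validated already:

        /* Sketch of the "get it only while it is still on the LRU" pattern. */
        #include <linux/mm.h>

        static struct folio *example_get_lru_folio(unsigned long pfn)
        {
                struct page *page = pfn_to_page(pfn);
                struct folio *folio = page_folio(page);

                if (!folio_test_lru(folio) || !folio_try_get(folio))
                        return NULL;
                /* Re-check: the folio may have been freed or split meanwhile. */
                if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
                        folio_put(folio);
                        folio = NULL;
                }
                return folio;
        }
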
mlock.c
95 folio->mlock_count = !!folio_test_mlocked(folio); in __mlock_folio()
105 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in __mlock_new_folio()
169 static inline struct folio *mlock_lru(struct folio *folio) in mlock_lru() argument
174 static inline struct folio *mlock_new(struct folio *folio) in mlock_new() argument
190 struct folio *folio; in mlock_folio_batch() local
196 folio = (struct folio *)((unsigned long)folio - mlock); in mlock_folio_batch()
243 void mlock_folio(struct folio *folio) in mlock_folio() argument
268 void mlock_new_folio(struct folio *folio) in mlock_new_folio() argument
291 void munlock_folio(struct folio *folio) in munlock_folio() argument
315 struct folio *folio; in mlock_pte_range() local
[all …]
swap_state.c
99 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in add_to_swap_cache()
100 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); in add_to_swap_cache()
141 void __delete_from_swap_cache(struct folio *folio, in __delete_from_swap_cache() argument
152 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in __delete_from_swap_cache()
178 bool add_to_swap(struct folio *folio) in add_to_swap() argument
183 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in add_to_swap()
235 void delete_from_swap_cache(struct folio *folio) in delete_from_swap_cache() argument
245 folio_ref_sub(folio, folio_nr_pages(folio)); in delete_from_swap_cache()
288 struct folio *folio = page_folio(page); in free_swap_cache() local
336 struct folio *folio; in swap_cache_get_folio() local
[all …]
migrate.c
63 struct folio *folio = folio_get_nontail_page(page); in isolate_movable_page() local
150 struct folio *folio; in putback_movable_pages() local
205 !folio_test_pmd_mappable(folio), folio); in remove_migration_pte()
389 struct folio *folio) in folio_expected_refs() argument
411 struct folio *newfolio, struct folio *folio, int extra_count) in folio_migrate_mapping() argument
526 struct folio *dst, struct folio *src) in migrate_huge_page_move_mapping()
555 void folio_migrate_flags(struct folio *newfolio, struct folio *folio) in folio_migrate_flags() argument
646 void folio_migrate_copy(struct folio *newfolio, struct folio *folio) in folio_migrate_copy() argument
1539 struct folio *folio, *folio2; in migrate_hugetlbs() local
1949 struct folio *folio, *folio2; in migrate_pages() local
[all …]
swap.h
32 bool add_to_swap(struct folio *folio);
34 int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
36 void __delete_from_swap_cache(struct folio *folio,
38 void delete_from_swap_cache(struct folio *folio);
60 static inline unsigned int folio_swap_flags(struct folio *folio) in folio_swap_flags() argument
62 return page_swap_info(&folio->page)->flags; in folio_swap_flags()
113 static inline bool add_to_swap(struct folio *folio) in add_to_swap() argument
123 static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry, in add_to_swap_cache() argument
129 static inline void __delete_from_swap_cache(struct folio *folio, in __delete_from_swap_cache() argument
134 static inline void delete_from_swap_cache(struct folio *folio) in delete_from_swap_cache() argument
[all …]
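
mm/swap.h pairs the real declarations with static inline stubs so callers compile the same way with and without CONFIG_SWAP; the hits from line 113 onwards are those stubs. The shape of the pattern, with the stub body assumed (a failed add_to_swap() presumably reports false):

        #ifdef CONFIG_SWAP
        bool add_to_swap(struct folio *folio);
        #else
        static inline bool add_to_swap(struct folio *folio)
        {
                return false;           /* no swap configured, nothing was added */
        }
        #endif
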
/linux-6.3-rc2/include/linux/
pagemap.h
382 struct folio *folio = page_folio(page); in page_mapping_file() local
686 return folio->index + folio_nr_pages(folio); in folio_next_index()
870 struct folio *folio; member
876 struct folio *folio; member
884 if (wait_page->folio != key->folio) in wake_page_match()
894 void __folio_lock(struct folio *folio);
899 void folio_unlock(struct folio *folio);
968 struct folio *folio; in lock_page() local
1049 void folio_end_writeback(struct folio *folio);
1051 void folio_wait_stable(struct folio *folio);
[all …]
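
The pagemap.h hits cover the folio locking and writeback primitives (__folio_lock(), folio_unlock(), folio_end_writeback(), folio_wait_stable()). A minimal sketch of the usual lock/modify/dirty/unlock sequence, assuming the caller already holds a reference; the function name is invented:

        /* Sketch only: the common lock -> modify -> dirty -> unlock sequence. */
        #include <linux/pagemap.h>

        static void example_dirty_folio(struct folio *folio)
        {
                folio_lock(folio);                      /* may sleep */
                /* ... update the folio contents here ... */
                folio_mark_dirty(folio);                /* queue it for writeback */
                folio_unlock(folio);
        }
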
hugetlb_cgroup.h
70 VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); in __hugetlb_cgroup_from_folio()
85 hugetlb_cgroup_from_folio_rsvd(struct folio *folio) in hugetlb_cgroup_from_folio_rsvd() argument
93 VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); in __set_hugetlb_cgroup()
144 struct folio *folio);
147 struct folio *folio);
149 struct folio *folio);
151 struct folio *folio);
233 struct folio *folio) in hugetlb_cgroup_commit_charge() argument
240 struct folio *folio) in hugetlb_cgroup_commit_charge_rsvd() argument
245 struct folio *folio) in hugetlb_cgroup_uncharge_folio() argument
[all …]
mm_inline.h
27 static inline int folio_is_file_lru(struct folio *folio) in folio_is_file_lru() argument
67 VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio); in __folio_clear_lru_flags()
72 if (folio_test_active(folio) && folio_test_unevictable(folio)) in __folio_clear_lru_flags()
90 VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio); in folio_lru_list()
143 static inline int folio_lru_refs(struct folio *folio) in folio_lru_refs() argument
157 static inline int folio_lru_gen(struct folio *folio) in folio_lru_gen() argument
246 (folio_test_dirty(folio) || folio_test_writeback(folio)))) in lru_gen_add_folio()
274 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); in lru_gen_del_folio()
275 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in lru_gen_del_folio()
313 void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio) in lruvec_add_folio() argument
[all …]
page_ref.h
87 static inline int folio_ref_count(const struct folio *folio) in folio_ref_count() argument
104 static inline void folio_set_count(struct folio *folio, int v) in folio_set_count() argument
125 static inline void folio_ref_add(struct folio *folio, int nr) in folio_ref_add() argument
137 static inline void folio_ref_sub(struct folio *folio, int nr) in folio_ref_sub() argument
163 static inline void folio_ref_inc(struct folio *folio) in folio_ref_inc() argument
175 static inline void folio_ref_dec(struct folio *folio) in folio_ref_dec() argument
203 static inline int folio_ref_inc_return(struct folio *folio) in folio_ref_inc_return() argument
231 static inline int folio_ref_dec_return(struct folio *folio) in folio_ref_dec_return() argument
261 static inline bool folio_try_get(struct folio *folio) in folio_try_get() argument
277 VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio); in folio_ref_try_add_rcu()
[all …]
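
page_ref.h is the folio reference-counting layer: folio_ref_count(), folio_ref_add()/folio_ref_sub(), and folio_try_get() for speculative references. A hedged sketch of the usual calling conventions (the caller is assumed to hold a reference of its own throughout):

        /* Sketch only: taking and dropping folio references. */
        #include <linux/mm.h>

        static void example_refcounting(struct folio *folio)
        {
                folio_get(folio);                       /* legal only while a ref is already held */
                pr_debug("refcount is now %d\n", folio_ref_count(folio));
                folio_put(folio);                       /* the final put frees via __folio_put() */

                if (folio_try_get(folio)) {             /* fails once the count has hit zero */
                        /* ... the folio cannot be freed under us here ... */
                        folio_put(folio);
                }
        }
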
page_idle.h
16 static inline bool folio_test_young(struct folio *folio) in folio_test_young() argument
30 static inline void folio_set_young(struct folio *folio) in folio_set_young() argument
41 static inline bool folio_test_clear_young(struct folio *folio) in folio_test_clear_young() argument
55 static inline bool folio_test_idle(struct folio *folio) in folio_test_idle() argument
69 static inline void folio_set_idle(struct folio *folio) in folio_set_idle() argument
80 static inline void folio_clear_idle(struct folio *folio) in folio_clear_idle() argument
94 static inline bool folio_test_young(struct folio *folio) in folio_test_young() argument
99 static inline void folio_set_young(struct folio *folio) in folio_set_young() argument
108 static inline bool folio_test_idle(struct folio *folio) in folio_test_idle() argument
113 static inline void folio_set_idle(struct folio *folio) in folio_set_idle() argument
[all …]
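
These helpers back idle-page tracking: a scanner sets the idle bit, and any access observed later shows up as a set young bit or a cleared idle bit. The real versions are assumed to depend on CONFIG_PAGE_IDLE_FLAG, with the no-op stubs from line 94 onwards used otherwise. A small sketch with invented helper names:

        /* Sketch only: mark a folio idle now, check for access later. */
        #include <linux/page_idle.h>

        static void example_mark_idle(struct folio *folio)
        {
                folio_set_idle(folio);
        }

        static bool example_was_accessed(struct folio *folio)
        {
                return folio_test_young(folio) || !folio_test_idle(folio);
        }
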
memcontrol.h
351 static inline bool folio_memcg_kmem(struct folio *folio);
379 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); in __folio_memcg()
400 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); in __folio_objcg()
455 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); in folio_memcg_rcu()
541 static inline bool folio_memcg_kmem(struct folio *folio) in folio_memcg_kmem() argument
543 VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page); in folio_memcg_kmem()
550 static inline bool folio_memcg_kmem(struct folio *folio) in folio_memcg_kmem() argument
685 void __mem_cgroup_uncharge(struct folio *folio);
764 struct lruvec *folio_lruvec_lock(struct folio *folio);
949 void folio_memcg_lock(struct folio *folio);
[all …]
swap.h
344 static inline swp_entry_t folio_swap_entry(struct folio *folio) in folio_swap_entry() argument
358 void workingset_refault(struct folio *folio, void *shadow);
359 void workingset_activation(struct folio *folio);
404 void folio_deactivate(struct folio *folio);
405 void folio_mark_lazyfree(struct folio *folio);
484 swp_entry_t folio_alloc_swap(struct folio *folio);
485 bool folio_free_swap(struct folio *folio);
486 void put_swap_folio(struct folio *folio, swp_entry_t entry);
596 static inline bool folio_free_swap(struct folio *folio) in folio_free_swap() argument
681 extern bool mem_cgroup_swap_full(struct folio *folio);
[all …]
vmstat.h
412 static inline void __zone_stat_mod_folio(struct folio *folio, in __zone_stat_mod_folio() argument
418 static inline void __zone_stat_add_folio(struct folio *folio, in __zone_stat_add_folio() argument
430 static inline void zone_stat_mod_folio(struct folio *folio, in zone_stat_mod_folio() argument
436 static inline void zone_stat_add_folio(struct folio *folio, in zone_stat_add_folio() argument
442 static inline void zone_stat_sub_folio(struct folio *folio, in zone_stat_sub_folio() argument
466 static inline void node_stat_mod_folio(struct folio *folio, in node_stat_mod_folio() argument
472 static inline void node_stat_add_folio(struct folio *folio, in node_stat_add_folio() argument
478 static inline void node_stat_sub_folio(struct folio *folio, in node_stat_sub_folio() argument
615 __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio)); in __lruvec_stat_add_folio()
645 lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio)); in lruvec_stat_add_folio()
[all …]
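
The vmstat.h wrappers scale a zone, node, or lruvec counter by the size of the folio; lruvec_stat_add_folio() at line 645, for instance, is just lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio)). A sketch of folio-sized accounting, with NR_FILE_DIRTY used purely as an example counter and an invented function name:

        /* Sketch only: charge or uncharge a statistic for a whole folio. */
        #include <linux/vmstat.h>

        static void example_account_dirty(struct folio *folio, bool set)
        {
                long nr = folio_nr_pages(folio);

                lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, set ? nr : -nr);
        }
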
/linux-6.3-rc2/mm/damon/
paddr.c
36 struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); in damon_pa_mkold() local
43 if (!folio) in damon_pa_mkold()
46 if (!folio_mapped(folio) || !folio_raw_mapping(folio)) { in damon_pa_mkold()
51 need_lock = !folio_test_anon(folio) || folio_test_ksm(folio); in damon_pa_mkold()
116 struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); in damon_pa_young() local
125 if (!folio) in damon_pa_young()
128 if (!folio_mapped(folio) || !folio_raw_mapping(folio)) { in damon_pa_young()
192 struct folio *folio) in __damos_pa_filter_out() argument
237 struct folio *folio = damon_get_folio(PHYS_PFN(addr)); in damon_pa_pageout() local
239 if (!folio) in damon_pa_pageout()
[all …]
ops-common.c
25 struct folio *folio; in damon_get_folio() local
31 if (!folio_test_lru(folio) || !folio_try_get(folio)) in damon_get_folio()
33 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) { in damon_get_folio()
34 folio_put(folio); in damon_get_folio()
35 folio = NULL; in damon_get_folio()
37 return folio; in damon_get_folio()
43 struct folio *folio = damon_get_folio(pte_pfn(*pte)); in damon_ptep_mkold() local
45 if (!folio) in damon_ptep_mkold()
62 folio_put(folio); in damon_ptep_mkold()
69 struct folio *folio = damon_get_folio(pmd_pfn(*pmd)); in damon_pmdp_mkold() local
[all …]
/linux-6.3-rc2/fs/afs/
write.c
51 struct folio *folio) in afs_flush_conflicting_write() argument
73 struct folio *folio; in afs_write_begin() local
157 struct folio *folio = page_folio(subpage); in afs_write_end() local
224 struct folio *folio; in afs_kill_pages() local
262 struct folio *folio; in afs_redirty_pages() local
293 struct folio *folio; in afs_pages_written_back() local
469 struct folio *folio; in afs_extend_writeback() local
581 struct folio *folio, in afs_write_back_from_locked_folio() argument
706 struct folio *folio; in afs_writepages_region() local
907 struct folio *folio = page_folio(vmf->page); in afs_page_mkwrite() local
[all …]
/linux-6.3-rc2/include/trace/events/
pagemap.h
30 TP_PROTO(struct folio *folio),
32 TP_ARGS(folio),
35 __field(struct folio *, folio )
42 __entry->folio = folio;
43 __entry->pfn = folio_pfn(folio);
50 __entry->folio,
63 TP_PROTO(struct folio *folio),
65 TP_ARGS(folio),
68 __field(struct folio *, folio )
73 __entry->folio = folio;
[all …]
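
The fragments above belong to a TRACE_EVENT definition that records the folio pointer and its pfn. Reassembled as a hedged, stand-alone sketch (the event name is invented, and the TRACE_SYSTEM/define_trace.h boilerplate a real trace header needs is omitted):

        TRACE_EVENT(example_folio_event,

                TP_PROTO(struct folio *folio),

                TP_ARGS(folio),

                TP_STRUCT__entry(
                        __field(struct folio *, folio)
                        __field(unsigned long, pfn)
                ),

                TP_fast_assign(
                        __entry->folio = folio;
                        __entry->pfn = folio_pfn(folio);
                ),

                TP_printk("folio=%p pfn=0x%lx", __entry->folio, __entry->pfn)
        );
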
/linux-6.3-rc2/fs/iomap/
buffered-io.c
214 struct folio *folio) in iomap_read_inline_data() argument
471 struct folio *folio; in iomap_get_folio() local
554 size_t len, struct folio *folio) in __iomap_write_begin() argument
617 struct folio *folio) in __iomap_put_folio() argument
630 struct folio *folio) in iomap_write_begin_inline() argument
643 struct folio *folio; in iomap_write_begin() local
790 struct folio *folio; in iomap_write_iter() local
909 struct folio *folio; in iomap_write_delalloc_scan() local
1131 struct folio *folio; in iomap_unshare_iter() local
1185 struct folio *folio; in iomap_zero_iter() local
[all …]
/linux-6.3-rc2/fs/9p/
vfs_addr.c
121 static bool v9fs_release_folio(struct folio *folio, gfp_t gfp) in v9fs_release_folio() argument
138 static void v9fs_invalidate_folio(struct folio *folio, size_t offset, in v9fs_invalidate_folio() argument
158 static int v9fs_vfs_write_folio_locked(struct folio *folio) in v9fs_vfs_write_folio_locked() argument
200 struct folio *folio = page_folio(page); in v9fs_vfs_writepage() local
216 folio_unlock(folio); in v9fs_vfs_writepage()
220 static int v9fs_launder_folio(struct folio *folio) in v9fs_launder_folio() argument
277 struct folio *folio; in v9fs_write_begin() local
299 struct folio *folio = page_folio(subpage); in v9fs_write_end() local
323 folio_mark_dirty(folio); in v9fs_write_end()
325 folio_unlock(folio); in v9fs_write_end()
[all …]
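
The 9p hits implement the folio-based address_space_operations hooks (release_folio, invalidate_folio, launder_folio, plus write_begin/write_end). A sketch of how such hooks are wired up follows; the signatures are believed to match v6.3, and everything prefixed example_ is invented:

        /* Sketch only: folio-based address_space_operations in the v9fs style. */
        #include <linux/fs.h>
        #include <linux/pagemap.h>

        static bool example_release_folio(struct folio *folio, gfp_t gfp)
        {
                /* Say yes only if no private state is still attached. */
                return !folio_test_private(folio);
        }

        static void example_invalidate_folio(struct folio *folio, size_t offset,
                                             size_t length)
        {
                /* Drop per-folio private state covering [offset, offset + length). */
        }

        static const struct address_space_operations example_aops = {
                .release_folio          = example_release_folio,
                .invalidate_folio       = example_invalidate_folio,
                .dirty_folio            = filemap_dirty_folio,
        };
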
/linux-6.3-rc2/fs/netfs/
buffered_read.c
19 struct folio *folio; in netfs_rreq_unlock_folios() local
54 pg_end = folio_pos(folio) + folio_size(folio) - 1; in netfs_rreq_unlock_folios()
220 int netfs_read_folio(struct file *file, struct folio *folio) in netfs_read_folio() argument
230 folio_file_pos(folio), folio_size(folio), in netfs_read_folio()
343 struct folio *folio; in netfs_write_begin() local
353 if (!folio) in netfs_write_begin()
363 if (!folio) in netfs_write_begin()
381 folio_file_pos(folio), folio_size(folio), in netfs_write_begin()
406 folio_get(folio); in netfs_write_begin()
419 *_folio = folio; in netfs_write_begin()
[all …]
/linux-6.3-rc2/fs/
mpage.c
78 static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh, in map_buffer_to_folio() argument
115 struct folio *folio; member
135 struct folio *folio = args->folio; in do_mpage_readpage() local
156 VM_BUG_ON_FOLIO(folio_test_large(folio), folio); in do_mpage_readpage()
301 folio_unlock(folio); in do_mpage_readpage()
346 struct folio *folio; in mpage_readahead() local
354 args.folio = folio; in mpage_readahead()
366 int mpage_read_folio(struct folio *folio, get_block_t get_block) in mpage_read_folio() argument
369 .folio = folio, in mpage_read_folio()
573 folio_zero_segment(folio, length, folio_size(folio)); in __mpage_writepage()
[all …]
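
mpage_read_folio() at line 366 is the helper block-based filesystems plug into their .read_folio method, paired with a get_block_t callback that maps file blocks to disk blocks. A hedged sketch of that wiring; example_get_block() stands in for a real mapping routine and its 1:1 mapping is purely illustrative:

        /* Sketch only: wiring .read_folio to mpage_read_folio(). */
        #include <linux/mpage.h>
        #include <linux/buffer_head.h>

        static int example_get_block(struct inode *inode, sector_t block,
                                     struct buffer_head *bh, int create)
        {
                map_bh(bh, inode->i_sb, block);         /* illustrative 1:1 mapping */
                return 0;
        }

        static int example_read_folio(struct file *file, struct folio *folio)
        {
                /* Typically installed as the filesystem's .read_folio method. */
                return mpage_read_folio(folio, example_get_block);
        }
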

Completed in 82 milliseconds
