Lines matching refs:folio. Each entry shows the source line number, the matching line and the enclosing function; "argument" or "local" notes whether folio is a parameter or a local variable at that site.

138 				   struct folio *folio, void *shadow)  in page_cache_delete()  argument
140 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
145 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
146 nr = folio_nr_pages(folio); in page_cache_delete()
148 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in page_cache_delete()
153 folio->mapping = NULL; in page_cache_delete()
159 struct folio *folio) in filemap_unaccount_folio() argument
163 VM_BUG_ON_FOLIO(folio_mapped(folio), folio); in filemap_unaccount_folio()
164 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) { in filemap_unaccount_folio()
166 current->comm, folio_pfn(folio)); in filemap_unaccount_folio()
167 dump_page(&folio->page, "still mapped when deleted"); in filemap_unaccount_folio()
171 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
172 int mapcount = folio_mapcount(folio); in filemap_unaccount_folio()
174 if (folio_ref_count(folio) >= mapcount + 2) { in filemap_unaccount_folio()
181 atomic_set(&folio->_mapcount, -1); in filemap_unaccount_folio()
182 folio_ref_sub(folio, mapcount); in filemap_unaccount_folio()
188 if (folio_test_hugetlb(folio)) in filemap_unaccount_folio()
191 nr = folio_nr_pages(folio); in filemap_unaccount_folio()
193 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); in filemap_unaccount_folio()
194 if (folio_test_swapbacked(folio)) { in filemap_unaccount_folio()
195 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); in filemap_unaccount_folio()
196 if (folio_test_pmd_mappable(folio)) in filemap_unaccount_folio()
197 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); in filemap_unaccount_folio()
198 } else if (folio_test_pmd_mappable(folio)) { in filemap_unaccount_folio()
199 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); in filemap_unaccount_folio()
217 if (WARN_ON_ONCE(folio_test_dirty(folio) && in filemap_unaccount_folio()
219 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
227 void __filemap_remove_folio(struct folio *folio, void *shadow) in __filemap_remove_folio() argument
229 struct address_space *mapping = folio->mapping; in __filemap_remove_folio()
231 trace_mm_filemap_delete_from_page_cache(folio); in __filemap_remove_folio()
232 filemap_unaccount_folio(mapping, folio); in __filemap_remove_folio()
233 page_cache_delete(mapping, folio, shadow); in __filemap_remove_folio()
236 void filemap_free_folio(struct address_space *mapping, struct folio *folio) in filemap_free_folio() argument
238 void (*free_folio)(struct folio *); in filemap_free_folio()
243 free_folio(folio); in filemap_free_folio()
245 if (folio_test_large(folio)) in filemap_free_folio()
246 refs = folio_nr_pages(folio); in filemap_free_folio()
247 folio_put_refs(folio, refs); in filemap_free_folio()
258 void filemap_remove_folio(struct folio *folio) in filemap_remove_folio() argument
260 struct address_space *mapping = folio->mapping; in filemap_remove_folio()
262 BUG_ON(!folio_test_locked(folio)); in filemap_remove_folio()
265 __filemap_remove_folio(folio, NULL); in filemap_remove_folio()
271 filemap_free_folio(mapping, folio); in filemap_remove_folio()
293 struct folio *folio; in page_cache_delete_batch() local
296 xas_for_each(&xas, folio, ULONG_MAX) { in page_cache_delete_batch()
301 if (xa_is_value(folio)) in page_cache_delete_batch()
310 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
311 VM_BUG_ON_FOLIO(folio->index > in page_cache_delete_batch()
312 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
316 WARN_ON_ONCE(!folio_test_locked(folio)); in page_cache_delete_batch()
318 folio->mapping = NULL; in page_cache_delete_batch()
323 total_pages += folio_nr_pages(folio); in page_cache_delete_batch()
339 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch() local
341 trace_mm_filemap_delete_from_page_cache(folio); in delete_from_page_cache_batch()
342 filemap_unaccount_folio(mapping, folio); in delete_from_page_cache_batch()
483 struct folio *folio; in filemap_range_has_page() local
492 folio = xas_find(&xas, max); in filemap_range_has_page()
493 if (xas_retry(&xas, folio)) in filemap_range_has_page()
496 if (xa_is_value(folio)) in filemap_range_has_page()
507 return folio != NULL; in filemap_range_has_page()
531 struct folio *folio = fbatch.folios[i]; in __filemap_fdatawait_range() local
533 folio_wait_writeback(folio); in __filemap_fdatawait_range()
643 struct folio *folio; in filemap_range_has_writeback() local
649 xas_for_each(&xas, folio, max) { in filemap_range_has_writeback()
650 if (xas_retry(&xas, folio)) in filemap_range_has_writeback()
652 if (xa_is_value(folio)) in filemap_range_has_writeback()
654 if (folio_test_dirty(folio) || folio_test_locked(folio) || in filemap_range_has_writeback()
655 folio_test_writeback(folio)) in filemap_range_has_writeback()
659 return folio != NULL; in filemap_range_has_writeback()
814 void replace_page_cache_folio(struct folio *old, struct folio *new) in replace_page_cache_folio()
817 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_folio()
852 struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) in __filemap_add_folio() argument
860 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in __filemap_add_folio()
861 VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); in __filemap_add_folio()
862 VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping), in __filemap_add_folio()
863 folio); in __filemap_add_folio()
866 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); in __filemap_add_folio()
867 xas_set_order(&xas, index, folio_order(folio)); in __filemap_add_folio()
868 huge = folio_test_hugetlb(folio); in __filemap_add_folio()
869 nr = folio_nr_pages(folio); in __filemap_add_folio()
872 folio_ref_add(folio, nr); in __filemap_add_folio()
873 folio->mapping = mapping; in __filemap_add_folio()
874 folio->index = xas.xa_index; in __filemap_add_folio()
902 if (order > 0 && order > folio_order(folio)) { in __filemap_add_folio()
916 xas_store(&xas, folio); in __filemap_add_folio()
924 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); in __filemap_add_folio()
925 if (folio_test_pmd_mappable(folio)) in __filemap_add_folio()
926 __lruvec_stat_mod_folio(folio, in __filemap_add_folio()
951 trace_mm_filemap_add_to_page_cache(folio); in __filemap_add_folio()
954 folio->mapping = NULL; in __filemap_add_folio()
956 folio_put_refs(folio, nr); in __filemap_add_folio()
961 int filemap_add_folio(struct address_space *mapping, struct folio *folio, in filemap_add_folio() argument
967 ret = mem_cgroup_charge(folio, NULL, gfp); in filemap_add_folio()
971 __folio_set_locked(folio); in filemap_add_folio()
972 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); in filemap_add_folio()
974 mem_cgroup_uncharge(folio); in filemap_add_folio()
975 __folio_clear_locked(folio); in filemap_add_folio()
985 WARN_ON_ONCE(folio_test_active(folio)); in filemap_add_folio()
987 workingset_refault(folio, shadow); in filemap_add_folio()
988 folio_add_lru(folio); in filemap_add_folio()
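
The hits above cover filemap_add_folio(), which charges the memcg, stores the folio in the mapping's xarray and adds it to the LRU. A minimal sketch of the usual allocate-and-insert pattern, assuming a hypothetical caller my_fs_create_folio() (only the filemap_* and folio_put() calls come from the listing):

#include <linux/err.h>
#include <linux/pagemap.h>

static struct folio *my_fs_create_folio(struct address_space *mapping,
					pgoff_t index, gfp_t gfp)
{
	struct folio *folio = filemap_alloc_folio(gfp, 0);	/* order-0 folio */
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);

	/* Charges the memcg, inserts at @index and puts the folio on the
	 * LRU; on success the folio is returned locked and referenced. */
	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return ERR_PTR(err);	/* e.g. -EEXIST if another thread won the race */
	}
	return folio;
}
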
995 struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) in filemap_alloc_folio_noprof()
998 struct folio *folio; in filemap_alloc_folio_noprof() local
1005 folio = __folio_alloc_node_noprof(gfp, order, n); in filemap_alloc_folio_noprof()
1006 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); in filemap_alloc_folio_noprof()
1008 return folio; in filemap_alloc_folio_noprof()
1067 static wait_queue_head_t *folio_waitqueue(struct folio *folio) in folio_waitqueue() argument
1069 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)]; in folio_waitqueue()
1132 if (test_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1135 if (test_and_set_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1167 static void folio_wake_bit(struct folio *folio, int bit_nr) in folio_wake_bit() argument
1169 wait_queue_head_t *q = folio_waitqueue(folio); in folio_wake_bit()
1173 key.folio = folio; in folio_wake_bit()
1190 folio_clear_waiters(folio); in folio_wake_bit()
1214 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr, in folio_trylock_flag() argument
1218 if (test_and_set_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1220 } else if (test_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1230 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr, in folio_wait_bit_common() argument
1233 wait_queue_head_t *q = folio_waitqueue(folio); in folio_wait_bit_common()
1242 !folio_test_uptodate(folio) && folio_test_workingset(folio)) { in folio_wait_bit_common()
1250 wait_page.folio = folio; in folio_wait_bit_common()
1276 folio_set_waiters(folio); in folio_wait_bit_common()
1277 if (!folio_trylock_flag(folio, bit_nr, wait)) in folio_wait_bit_common()
1290 folio_put(folio); in folio_wait_bit_common()
1327 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0)))) in folio_wait_bit_common()
1392 struct folio *folio = pfn_swap_entry_folio(entry); in migration_entry_wait_on_locked() local
1394 q = folio_waitqueue(folio); in migration_entry_wait_on_locked()
1395 if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) { in migration_entry_wait_on_locked()
1403 wait_page.folio = folio; in migration_entry_wait_on_locked()
1408 folio_set_waiters(folio); in migration_entry_wait_on_locked()
1409 if (!folio_trylock_flag(folio, PG_locked, wait)) in migration_entry_wait_on_locked()
1446 void folio_wait_bit(struct folio *folio, int bit_nr) in folio_wait_bit() argument
1448 folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in folio_wait_bit()
1452 int folio_wait_bit_killable(struct folio *folio, int bit_nr) in folio_wait_bit_killable() argument
1454 return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED); in folio_wait_bit_killable()
1471 static int folio_put_wait_locked(struct folio *folio, int state) in folio_put_wait_locked() argument
1473 return folio_wait_bit_common(folio, PG_locked, state, DROP); in folio_put_wait_locked()
1483 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) in folio_add_wait_queue() argument
1485 wait_queue_head_t *q = folio_waitqueue(folio); in folio_add_wait_queue()
1490 folio_set_waiters(folio); in folio_add_wait_queue()
1504 void folio_unlock(struct folio *folio) in folio_unlock() argument
1509 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in folio_unlock()
1510 if (folio_xor_flags_has_waiters(folio, 1 << PG_locked)) in folio_unlock()
1511 folio_wake_bit(folio, PG_locked); in folio_unlock()
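
folio_unlock() above is the release half of the folio lock; folio_lock() (a pagemap.h inline over __folio_lock()) is the acquire half. A small sketch of the idiom, with my_fs_touch_folio() being an assumed caller rather than kernel code:

#include <linux/pagemap.h>

static void my_fs_touch_folio(struct folio *folio)
{
	folio_lock(folio);		/* may sleep in folio_wait_bit_common() if contended */
	folio_mark_dirty(folio);	/* stand-in for work that needs PG_locked held */
	folio_unlock(folio);		/* clears PG_locked and wakes waiters via folio_wake_bit() */
}
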
1529 void folio_end_read(struct folio *folio, bool success) in folio_end_read() argument
1535 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in folio_end_read()
1536 VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio); in folio_end_read()
1540 if (folio_xor_flags_has_waiters(folio, mask)) in folio_end_read()
1541 folio_wake_bit(folio, PG_locked); in folio_end_read()
1556 void folio_end_private_2(struct folio *folio) in folio_end_private_2() argument
1558 VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio); in folio_end_private_2()
1559 clear_bit_unlock(PG_private_2, folio_flags(folio, 0)); in folio_end_private_2()
1560 folio_wake_bit(folio, PG_private_2); in folio_end_private_2()
1561 folio_put(folio); in folio_end_private_2()
1571 void folio_wait_private_2(struct folio *folio) in folio_wait_private_2() argument
1573 while (folio_test_private_2(folio)) in folio_wait_private_2()
1574 folio_wait_bit(folio, PG_private_2); in folio_wait_private_2()
1589 int folio_wait_private_2_killable(struct folio *folio) in folio_wait_private_2_killable() argument
1593 while (folio_test_private_2(folio)) { in folio_wait_private_2_killable()
1594 ret = folio_wait_bit_killable(folio, PG_private_2); in folio_wait_private_2_killable()
1611 void folio_end_writeback(struct folio *folio) in folio_end_writeback() argument
1613 VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio); in folio_end_writeback()
1622 if (folio_test_reclaim(folio)) { in folio_end_writeback()
1623 folio_clear_reclaim(folio); in folio_end_writeback()
1624 folio_rotate_reclaimable(folio); in folio_end_writeback()
1633 folio_get(folio); in folio_end_writeback()
1634 if (__folio_end_writeback(folio)) in folio_end_writeback()
1635 folio_wake_bit(folio, PG_writeback); in folio_end_writeback()
1636 acct_reclaim_writeback(folio); in folio_end_writeback()
1637 folio_put(folio); in folio_end_writeback()
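
folio_end_writeback() is what a filesystem's write-back completion ultimately calls. A hedged sketch of such a completion helper (my_fs_writeback_done() and its error handling are assumptions, not mm/filemap.c code):

#include <linux/pagemap.h>

static void my_fs_writeback_done(struct folio *folio, int error)
{
	if (unlikely(error))
		mapping_set_error(folio->mapping, error);	/* record the error on the mapping */
	/* Clears PG_writeback, rotates PG_reclaim folios to the LRU tail
	 * and wakes anyone sleeping in folio_wait_writeback(). */
	folio_end_writeback(folio);
}
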
1645 void __folio_lock(struct folio *folio) in __folio_lock() argument
1647 folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, in __folio_lock()
1652 int __folio_lock_killable(struct folio *folio) in __folio_lock_killable() argument
1654 return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE, in __folio_lock_killable()
1659 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) in __folio_lock_async() argument
1661 struct wait_queue_head *q = folio_waitqueue(folio); in __folio_lock_async()
1664 wait->folio = folio; in __folio_lock_async()
1669 folio_set_waiters(folio); in __folio_lock_async()
1670 ret = !folio_trylock(folio); in __folio_lock_async()
1696 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) in __folio_lock_or_retry() argument
1710 folio_wait_locked_killable(folio); in __folio_lock_or_retry()
1712 folio_wait_locked(folio); in __folio_lock_or_retry()
1718 ret = __folio_lock_killable(folio); in __folio_lock_or_retry()
1724 __folio_lock(folio); in __folio_lock_or_retry()
1837 struct folio *folio; in filemap_get_entry() local
1842 folio = xas_load(&xas); in filemap_get_entry()
1843 if (xas_retry(&xas, folio)) in filemap_get_entry()
1849 if (!folio || xa_is_value(folio)) in filemap_get_entry()
1852 if (!folio_try_get(folio)) in filemap_get_entry()
1855 if (unlikely(folio != xas_reload(&xas))) { in filemap_get_entry()
1856 folio_put(folio); in filemap_get_entry()
1862 return folio; in filemap_get_entry()
1881 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, in __filemap_get_folio()
1884 struct folio *folio; in __filemap_get_folio() local
1887 folio = filemap_get_entry(mapping, index); in __filemap_get_folio()
1888 if (xa_is_value(folio)) in __filemap_get_folio()
1889 folio = NULL; in __filemap_get_folio()
1890 if (!folio) in __filemap_get_folio()
1895 if (!folio_trylock(folio)) { in __filemap_get_folio()
1896 folio_put(folio); in __filemap_get_folio()
1900 folio_lock(folio); in __filemap_get_folio()
1904 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1905 folio_unlock(folio); in __filemap_get_folio()
1906 folio_put(folio); in __filemap_get_folio()
1909 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); in __filemap_get_folio()
1913 folio_mark_accessed(folio); in __filemap_get_folio()
1916 if (folio_test_idle(folio)) in __filemap_get_folio()
1917 folio_clear_idle(folio); in __filemap_get_folio()
1921 folio_wait_stable(folio); in __filemap_get_folio()
1923 if (!folio && (fgp_flags & FGP_CREAT)) { in __filemap_get_folio()
1952 folio = filemap_alloc_folio(alloc_gfp, order); in __filemap_get_folio()
1953 if (!folio) in __filemap_get_folio()
1958 __folio_set_referenced(folio); in __filemap_get_folio()
1960 err = filemap_add_folio(mapping, folio, index, gfp); in __filemap_get_folio()
1963 folio_put(folio); in __filemap_get_folio()
1964 folio = NULL; in __filemap_get_folio()
1975 if (folio && (fgp_flags & FGP_FOR_MMAP)) in __filemap_get_folio()
1976 folio_unlock(folio); in __filemap_get_folio()
1979 if (!folio) in __filemap_get_folio()
1981 return folio; in __filemap_get_folio()
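
__filemap_get_folio() above is the find-or-create entry point driven by FGP_* flags. A minimal sketch of a lookup that creates and locks the folio on a miss, assuming a hypothetical my_fs_get_locked_folio():

#include <linux/err.h>
#include <linux/pagemap.h>

static struct folio *my_fs_get_locked_folio(struct address_space *mapping,
					    pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return folio;		/* e.g. ERR_PTR(-ENOMEM) on allocation failure */

	/* With FGP_LOCK the folio comes back locked and referenced;
	 * the caller is responsible for folio_unlock()/folio_put(). */
	return folio;
}
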
1985 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry()
1988 struct folio *folio; in find_get_entry() local
1992 folio = xas_find(xas, max); in find_get_entry()
1994 folio = xas_find_marked(xas, max, mark); in find_get_entry()
1996 if (xas_retry(xas, folio)) in find_get_entry()
2003 if (!folio || xa_is_value(folio)) in find_get_entry()
2004 return folio; in find_get_entry()
2006 if (!folio_try_get(folio)) in find_get_entry()
2009 if (unlikely(folio != xas_reload(xas))) { in find_get_entry()
2010 folio_put(folio); in find_get_entry()
2014 return folio; in find_get_entry()
2044 struct folio *folio; in find_get_entries() local
2047 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in find_get_entries()
2049 if (!folio_batch_add(fbatch, folio)) in find_get_entries()
2057 folio = fbatch->folios[idx]; in find_get_entries()
2058 if (!xa_is_value(folio)) in find_get_entries()
2059 nr = folio_nr_pages(folio); in find_get_entries()
2093 struct folio *folio; in find_lock_entries() local
2096 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2100 if (!xa_is_value(folio)) { in find_lock_entries()
2101 nr = folio_nr_pages(folio); in find_lock_entries()
2102 base = folio->index; in find_lock_entries()
2109 if (!folio_trylock(folio)) in find_lock_entries()
2111 if (folio->mapping != mapping || in find_lock_entries()
2112 folio_test_writeback(folio)) in find_lock_entries()
2114 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), in find_lock_entries()
2115 folio); in find_lock_entries()
2130 if (!folio_batch_add(fbatch, folio)) in find_lock_entries()
2134 folio_unlock(folio); in find_lock_entries()
2136 folio_put(folio); in find_lock_entries()
2184 struct folio *folio; in filemap_get_folios_contig() local
2188 for (folio = xas_load(&xas); folio && xas.xa_index <= end; in filemap_get_folios_contig()
2189 folio = xas_next(&xas)) { in filemap_get_folios_contig()
2190 if (xas_retry(&xas, folio)) in filemap_get_folios_contig()
2196 if (xa_is_value(folio)) in filemap_get_folios_contig()
2200 if (xa_is_sibling(folio)) in filemap_get_folios_contig()
2203 if (!folio_try_get(folio)) in filemap_get_folios_contig()
2206 if (unlikely(folio != xas_reload(&xas))) in filemap_get_folios_contig()
2209 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios_contig()
2210 nr = folio_nr_pages(folio); in filemap_get_folios_contig()
2211 *start = folio->index + nr; in filemap_get_folios_contig()
2216 folio_put(folio); in filemap_get_folios_contig()
2226 folio = fbatch->folios[nr - 1]; in filemap_get_folios_contig()
2227 *start = folio_next_index(folio); in filemap_get_folios_contig()
2258 struct folio *folio; in filemap_get_folios_tag() local
2261 while ((folio = find_get_entry(&xas, end, tag)) != NULL) { in filemap_get_folios_tag()
2267 if (xa_is_value(folio)) in filemap_get_folios_tag()
2269 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios_tag()
2270 unsigned long nr = folio_nr_pages(folio); in filemap_get_folios_tag()
2271 *start = folio->index + nr; in filemap_get_folios_tag()
2325 struct folio *folio; in filemap_get_read_batch() local
2328 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { in filemap_get_read_batch()
2329 if (xas_retry(&xas, folio)) in filemap_get_read_batch()
2331 if (xas.xa_index > max || xa_is_value(folio)) in filemap_get_read_batch()
2333 if (xa_is_sibling(folio)) in filemap_get_read_batch()
2335 if (!folio_try_get(folio)) in filemap_get_read_batch()
2338 if (unlikely(folio != xas_reload(&xas))) in filemap_get_read_batch()
2341 if (!folio_batch_add(fbatch, folio)) in filemap_get_read_batch()
2343 if (!folio_test_uptodate(folio)) in filemap_get_read_batch()
2345 if (folio_test_readahead(folio)) in filemap_get_read_batch()
2347 xas_advance(&xas, folio_next_index(folio) - 1); in filemap_get_read_batch()
2350 folio_put(folio); in filemap_get_read_batch()
2358 struct folio *folio) in filemap_read_folio() argument
2360 bool workingset = folio_test_workingset(folio); in filemap_read_folio()
2367 error = filler(file, folio); in filemap_read_folio()
2373 error = folio_wait_locked_killable(folio); in filemap_read_folio()
2376 if (folio_test_uptodate(folio)) in filemap_read_folio()
2384 loff_t pos, size_t count, struct folio *folio, in filemap_range_uptodate() argument
2387 if (folio_test_uptodate(folio)) in filemap_range_uptodate()
2394 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2397 if (folio_pos(folio) > pos) { in filemap_range_uptodate()
2398 count -= folio_pos(folio) - pos; in filemap_range_uptodate()
2401 pos -= folio_pos(folio); in filemap_range_uptodate()
2404 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2409 struct folio *folio, bool need_uptodate) in filemap_update_page() argument
2420 if (!folio_trylock(folio)) { in filemap_update_page()
2430 folio_put_wait_locked(folio, TASK_KILLABLE); in filemap_update_page()
2433 error = __folio_lock_async(folio, iocb->ki_waitq); in filemap_update_page()
2439 if (!folio->mapping) in filemap_update_page()
2443 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, in filemap_update_page()
2452 folio); in filemap_update_page()
2455 folio_unlock(folio); in filemap_update_page()
2459 folio_put(folio); in filemap_update_page()
2467 struct folio *folio; in filemap_create_folio() local
2472 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order); in filemap_create_folio()
2473 if (!folio) in filemap_create_folio()
2491 error = filemap_add_folio(mapping, folio, index, in filemap_create_folio()
2498 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_create_folio()
2503 folio_batch_add(fbatch, folio); in filemap_create_folio()
2507 folio_put(folio); in filemap_create_folio()
2512 struct address_space *mapping, struct folio *folio, in filemap_readahead() argument
2515 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2519 page_cache_async_ra(&ractl, folio, last_index - folio->index); in filemap_readahead()
2531 struct folio *folio; in filemap_get_pages() local
2562 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
2563 if (folio_test_readahead(folio)) { in filemap_get_pages()
2564 err = filemap_readahead(iocb, filp, mapping, folio, last_index); in filemap_get_pages()
2568 if (!folio_test_uptodate(folio)) { in filemap_get_pages()
2572 err = filemap_update_page(iocb, mapping, count, folio, in filemap_get_pages()
2582 folio_put(folio); in filemap_get_pages()
2590 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) in pos_same_folio() argument
2592 unsigned int shift = folio_shift(folio); in pos_same_folio()
2677 struct folio *folio = fbatch.folios[i]; in filemap_read() local
2678 size_t fsize = folio_size(folio); in filemap_read()
2684 if (end_offset < folio_pos(folio)) in filemap_read()
2687 folio_mark_accessed(folio); in filemap_read()
2694 flush_dcache_folio(folio); in filemap_read()
2696 copied = copy_folio_to_iter(folio, offset, bytes, iter); in filemap_read()
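
filemap_read() is the buffered-read engine that walks the page cache folio by folio; filesystems normally reach it through generic_file_read_iter(). A trivial sketch of that glue (my_fs_read_iter() is an assumed file_operations method):

#include <linux/fs.h>

static ssize_t my_fs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* For non-O_DIRECT I/O this ends up in filemap_read() above. */
	return generic_file_read_iter(iocb, to);
}
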
2841 struct folio *folio, loff_t fpos, size_t size) in splice_folio_into_pipe() argument
2844 size_t spliced = 0, offset = offset_in_folio(folio, fpos); in splice_folio_into_pipe()
2846 page = folio_page(folio, offset / PAGE_SIZE); in splice_folio_into_pipe()
2847 size = min(size, folio_size(folio) - offset); in splice_folio_into_pipe()
2861 folio_get(folio); in splice_folio_into_pipe()
2945 struct folio *folio = fbatch.folios[i]; in filemap_splice_read() local
2948 if (folio_pos(folio) >= end_offset) in filemap_splice_read()
2950 folio_mark_accessed(folio); in filemap_splice_read()
2958 flush_dcache_folio(folio); in filemap_splice_read()
2961 n = splice_folio_into_pipe(pipe, folio, *ppos, n); in filemap_splice_read()
2984 struct address_space *mapping, struct folio *folio, in folio_seek_hole_data() argument
2990 if (xa_is_value(folio) || folio_test_uptodate(folio)) in folio_seek_hole_data()
2997 folio_lock(folio); in folio_seek_hole_data()
2998 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
3001 offset = offset_in_folio(folio, start) & ~(bsz - 1); in folio_seek_hole_data()
3004 if (ops->is_partially_uptodate(folio, offset, bsz) == in folio_seek_hole_data()
3009 } while (offset < folio_size(folio)); in folio_seek_hole_data()
3011 folio_unlock(folio); in folio_seek_hole_data()
3016 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) in seek_folio_size() argument
3018 if (xa_is_value(folio)) in seek_folio_size()
3020 return folio_size(folio); in seek_folio_size()
3047 struct folio *folio; in mapping_seek_hole_data() local
3053 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
3063 seek_size = seek_folio_size(&xas, folio); in mapping_seek_hole_data()
3065 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
3073 if (!xa_is_value(folio)) in mapping_seek_hole_data()
3074 folio_put(folio); in mapping_seek_hole_data()
3080 if (folio && !xa_is_value(folio)) in mapping_seek_hole_data()
3081 folio_put(folio); in mapping_seek_hole_data()
3101 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, in lock_folio_maybe_drop_mmap() argument
3104 if (folio_trylock(folio)) in lock_folio_maybe_drop_mmap()
3117 if (__folio_lock_killable(folio)) { in lock_folio_maybe_drop_mmap()
3130 __folio_lock(folio); in lock_folio_maybe_drop_mmap()
3212 struct folio *folio) in do_async_mmap_readahead() argument
3228 if (folio_test_readahead(folio)) { in do_async_mmap_readahead()
3230 page_cache_async_ra(&ractl, folio, ra->ra_pages); in do_async_mmap_readahead()
3309 struct folio *folio; in filemap_fault() local
3322 folio = filemap_get_folio(mapping, index); in filemap_fault()
3323 if (likely(!IS_ERR(folio))) { in filemap_fault()
3329 fpin = do_async_mmap_readahead(vmf, folio); in filemap_fault()
3330 if (unlikely(!folio_test_uptodate(folio))) { in filemap_fault()
3353 folio = __filemap_get_folio(mapping, index, in filemap_fault()
3356 if (IS_ERR(folio)) { in filemap_fault()
3364 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) in filemap_fault()
3368 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3369 folio_unlock(folio); in filemap_fault()
3370 folio_put(folio); in filemap_fault()
3373 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); in filemap_fault()
3380 if (unlikely(!folio_test_uptodate(folio))) { in filemap_fault()
3388 folio_unlock(folio); in filemap_fault()
3389 folio_put(folio); in filemap_fault()
3407 folio_unlock(folio); in filemap_fault()
3419 folio_unlock(folio); in filemap_fault()
3420 folio_put(folio); in filemap_fault()
3424 vmf->page = folio_file_page(folio, index); in filemap_fault()
3435 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3438 folio_put(folio); in filemap_fault()
3452 if (!IS_ERR(folio)) in filemap_fault()
3453 folio_put(folio); in filemap_fault()
3462 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, in filemap_map_pmd() argument
3469 folio_unlock(folio); in filemap_map_pmd()
3470 folio_put(folio); in filemap_map_pmd()
3474 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { in filemap_map_pmd()
3475 struct page *page = folio_file_page(folio, start); in filemap_map_pmd()
3479 folio_unlock(folio); in filemap_map_pmd()
3490 static struct folio *next_uptodate_folio(struct xa_state *xas, in next_uptodate_folio()
3493 struct folio *folio = xas_next_entry(xas, end_pgoff); in next_uptodate_folio() local
3497 if (!folio) in next_uptodate_folio()
3499 if (xas_retry(xas, folio)) in next_uptodate_folio()
3501 if (xa_is_value(folio)) in next_uptodate_folio()
3503 if (folio_test_locked(folio)) in next_uptodate_folio()
3505 if (!folio_try_get(folio)) in next_uptodate_folio()
3508 if (unlikely(folio != xas_reload(xas))) in next_uptodate_folio()
3510 if (!folio_test_uptodate(folio) || folio_test_readahead(folio)) in next_uptodate_folio()
3512 if (!folio_trylock(folio)) in next_uptodate_folio()
3514 if (folio->mapping != mapping) in next_uptodate_folio()
3516 if (!folio_test_uptodate(folio)) in next_uptodate_folio()
3521 return folio; in next_uptodate_folio()
3523 folio_unlock(folio); in next_uptodate_folio()
3525 folio_put(folio); in next_uptodate_folio()
3526 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_folio()
3536 struct folio *folio, unsigned long start, in filemap_map_folio_range() argument
3541 struct page *page = folio_page(folio, start); in filemap_map_folio_range()
3556 if (!folio_test_workingset(folio)) in filemap_map_folio_range()
3571 set_pte_range(vmf, folio, page, count, addr); in filemap_map_folio_range()
3573 folio_ref_add(folio, count); in filemap_map_folio_range()
3586 set_pte_range(vmf, folio, page, count, addr); in filemap_map_folio_range()
3588 folio_ref_add(folio, count); in filemap_map_folio_range()
3599 struct folio *folio, unsigned long addr, in filemap_map_order0_folio() argument
3603 struct page *page = &folio->page; in filemap_map_order0_folio()
3609 if (!folio_test_workingset(folio)) in filemap_map_order0_folio()
3623 set_pte_range(vmf, folio, page, 1, addr); in filemap_map_order0_folio()
3625 folio_ref_inc(folio); in filemap_map_order0_folio()
3639 struct folio *folio; in filemap_map_pages() local
3645 folio = next_uptodate_folio(&xas, mapping, end_pgoff); in filemap_map_pages()
3646 if (!folio) in filemap_map_pages()
3649 if (filemap_map_pmd(vmf, folio, start_pgoff)) { in filemap_map_pages()
3657 folio_unlock(folio); in filemap_map_pages()
3658 folio_put(folio); in filemap_map_pages()
3666 folio_type = mm_counter_file(folio); in filemap_map_pages()
3673 end = folio_next_index(folio) - 1; in filemap_map_pages()
3676 if (!folio_test_large(folio)) in filemap_map_pages()
3678 folio, addr, &rss, &mmap_miss); in filemap_map_pages()
3680 ret |= filemap_map_folio_range(vmf, folio, in filemap_map_pages()
3681 xas.xa_index - folio->index, addr, in filemap_map_pages()
3684 folio_unlock(folio); in filemap_map_pages()
3685 folio_put(folio); in filemap_map_pages()
3686 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); in filemap_map_pages()
3706 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite() local
3711 folio_lock(folio); in filemap_page_mkwrite()
3712 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3713 folio_unlock(folio); in filemap_page_mkwrite()
3722 folio_mark_dirty(folio); in filemap_page_mkwrite()
3723 folio_wait_stable(folio); in filemap_page_mkwrite()
3776 static struct folio *do_read_cache_folio(struct address_space *mapping, in do_read_cache_folio()
3779 struct folio *folio; in do_read_cache_folio() local
3785 folio = filemap_get_folio(mapping, index); in do_read_cache_folio()
3786 if (IS_ERR(folio)) { in do_read_cache_folio()
3787 folio = filemap_alloc_folio(gfp, in do_read_cache_folio()
3789 if (!folio) in do_read_cache_folio()
3792 err = filemap_add_folio(mapping, folio, index, gfp); in do_read_cache_folio()
3794 folio_put(folio); in do_read_cache_folio()
3803 if (folio_test_uptodate(folio)) in do_read_cache_folio()
3806 if (!folio_trylock(folio)) { in do_read_cache_folio()
3807 folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE); in do_read_cache_folio()
3812 if (!folio->mapping) { in do_read_cache_folio()
3813 folio_unlock(folio); in do_read_cache_folio()
3814 folio_put(folio); in do_read_cache_folio()
3819 if (folio_test_uptodate(folio)) { in do_read_cache_folio()
3820 folio_unlock(folio); in do_read_cache_folio()
3825 err = filemap_read_folio(file, filler, folio); in do_read_cache_folio()
3827 folio_put(folio); in do_read_cache_folio()
3834 folio_mark_accessed(folio); in do_read_cache_folio()
3835 return folio; in do_read_cache_folio()
3854 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, in read_cache_folio()
3879 struct folio *mapping_read_folio_gfp(struct address_space *mapping, in mapping_read_folio_gfp()
3889 struct folio *folio; in do_read_cache_page() local
3891 folio = do_read_cache_folio(mapping, index, filler, file, gfp); in do_read_cache_page()
3892 if (IS_ERR(folio)) in do_read_cache_page()
3893 return &folio->page; in do_read_cache_page()
3894 return folio_file_page(folio, index); in do_read_cache_page()
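
do_read_cache_folio() above backs read_cache_folio()/mapping_read_folio_gfp(): it finds or allocates the folio, reads it in if needed and returns it uptodate. A short sketch of a synchronous one-folio read, assuming a hypothetical my_fs_read_one(); passing a NULL filler falls back to the mapping's ->read_folio():

#include <linux/err.h>
#include <linux/pagemap.h>

static struct folio *my_fs_read_one(struct address_space *mapping,
				    pgoff_t index)
{
	struct folio *folio = read_cache_folio(mapping, index, NULL, NULL);

	if (IS_ERR(folio))
		return folio;		/* allocation or read error */

	/* The folio is uptodate and referenced but not locked;
	 * drop the reference with folio_put() when done. */
	return folio;
}
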
4026 struct folio *folio; in generic_perform_write() local
4055 &folio, &fsdata); in generic_perform_write()
4059 offset = offset_in_folio(folio, pos); in generic_perform_write()
4060 if (bytes > folio_size(folio) - offset) in generic_perform_write()
4061 bytes = folio_size(folio) - offset; in generic_perform_write()
4064 flush_dcache_folio(folio); in generic_perform_write()
4066 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i); in generic_perform_write()
4067 flush_dcache_folio(folio); in generic_perform_write()
4070 folio, fsdata); in generic_perform_write()
4207 bool filemap_release_folio(struct folio *folio, gfp_t gfp) in filemap_release_folio() argument
4209 struct address_space * const mapping = folio->mapping; in filemap_release_folio()
4211 BUG_ON(!folio_test_locked(folio)); in filemap_release_folio()
4212 if (!folio_needs_release(folio)) in filemap_release_folio()
4214 if (folio_test_writeback(folio)) in filemap_release_folio()
4218 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()
4219 return try_to_free_buffers(folio); in filemap_release_folio()
4294 struct folio *folio; in filemap_cachestat() local
4300 xas_for_each(&xas, folio, last_index) { in filemap_cachestat()
4316 if (xas_retry(&xas, folio)) in filemap_cachestat()
4331 if (xa_is_value(folio)) { in filemap_cachestat()
4333 void *shadow = (void *)folio; in filemap_cachestat()
4341 swp_entry_t swp = radix_to_swp_entry(folio); in filemap_cachestat()