Lines matching refs:folio in mm/filemap.c
127 struct folio *folio, void *shadow) in page_cache_delete() argument
129 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
135 if (!folio_test_hugetlb(folio)) { in page_cache_delete()
136 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
137 nr = folio_nr_pages(folio); in page_cache_delete()
140 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in page_cache_delete()
145 folio->mapping = NULL; in page_cache_delete()
151 struct folio *folio) in filemap_unaccount_folio() argument
155 VM_BUG_ON_FOLIO(folio_mapped(folio), folio); in filemap_unaccount_folio()
156 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) { in filemap_unaccount_folio()
158 current->comm, folio_pfn(folio)); in filemap_unaccount_folio()
159 dump_page(&folio->page, "still mapped when deleted"); in filemap_unaccount_folio()
163 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
164 int mapcount = page_mapcount(&folio->page); in filemap_unaccount_folio()
166 if (folio_ref_count(folio) >= mapcount + 2) { in filemap_unaccount_folio()
173 page_mapcount_reset(&folio->page); in filemap_unaccount_folio()
174 folio_ref_sub(folio, mapcount); in filemap_unaccount_folio()
180 if (folio_test_hugetlb(folio)) in filemap_unaccount_folio()
183 nr = folio_nr_pages(folio); in filemap_unaccount_folio()
185 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); in filemap_unaccount_folio()
186 if (folio_test_swapbacked(folio)) { in filemap_unaccount_folio()
187 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); in filemap_unaccount_folio()
188 if (folio_test_pmd_mappable(folio)) in filemap_unaccount_folio()
189 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); in filemap_unaccount_folio()
190 } else if (folio_test_pmd_mappable(folio)) { in filemap_unaccount_folio()
191 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); in filemap_unaccount_folio()
209 if (WARN_ON_ONCE(folio_test_dirty(folio) && in filemap_unaccount_folio()
211 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
219 void __filemap_remove_folio(struct folio *folio, void *shadow) in __filemap_remove_folio() argument
221 struct address_space *mapping = folio->mapping; in __filemap_remove_folio()
223 trace_mm_filemap_delete_from_page_cache(folio); in __filemap_remove_folio()
224 filemap_unaccount_folio(mapping, folio); in __filemap_remove_folio()
225 page_cache_delete(mapping, folio, shadow); in __filemap_remove_folio()
228 void filemap_free_folio(struct address_space *mapping, struct folio *folio) in filemap_free_folio() argument
230 void (*free_folio)(struct folio *); in filemap_free_folio()
235 free_folio(folio); in filemap_free_folio()
237 if (folio_test_large(folio) && !folio_test_hugetlb(folio)) in filemap_free_folio()
238 refs = folio_nr_pages(folio); in filemap_free_folio()
239 folio_put_refs(folio, refs); in filemap_free_folio()
250 void filemap_remove_folio(struct folio *folio) in filemap_remove_folio() argument
252 struct address_space *mapping = folio->mapping; in filemap_remove_folio()
254 BUG_ON(!folio_test_locked(folio)); in filemap_remove_folio()
257 __filemap_remove_folio(folio, NULL); in filemap_remove_folio()
263 filemap_free_folio(mapping, folio); in filemap_remove_folio()
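
A minimal caller sketch for the removal path above (illustrative, not from filemap.c; assumes a clean folio and the NULL-on-miss filemap_get_folio() of this kernel version):

#include <linux/pagemap.h>

/* Sketch: evict one clean, idle folio at @index from @mapping. */
static void evict_folio_at(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);

	if (!folio)
		return;
	folio_lock(folio);
	/* Recheck under the lock: truncation may have raced with us. */
	if (folio->mapping == mapping && !folio_test_dirty(folio) &&
	    !folio_test_writeback(folio))
		filemap_remove_folio(folio);	/* unaccounts + erases */
	folio_unlock(folio);
	folio_put(folio);	/* drop our lookup reference */
}
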
285 struct folio *folio; in page_cache_delete_batch() local
288 xas_for_each(&xas, folio, ULONG_MAX) { in page_cache_delete_batch()
293 if (xa_is_value(folio)) in page_cache_delete_batch()
302 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
303 VM_BUG_ON_FOLIO(folio->index > in page_cache_delete_batch()
304 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
308 WARN_ON_ONCE(!folio_test_locked(folio)); in page_cache_delete_batch()
310 folio->mapping = NULL; in page_cache_delete_batch()
315 total_pages += folio_nr_pages(folio); in page_cache_delete_batch()
331 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch() local
333 trace_mm_filemap_delete_from_page_cache(folio); in delete_from_page_cache_batch()
334 filemap_unaccount_folio(mapping, folio); in delete_from_page_cache_batch()
475 struct folio *folio; in filemap_range_has_page() local
484 folio = xas_find(&xas, max); in filemap_range_has_page()
485 if (xas_retry(&xas, folio)) in filemap_range_has_page()
488 if (xa_is_value(folio)) in filemap_range_has_page()
499 return folio != NULL; in filemap_range_has_page()
523 struct folio *folio = fbatch.folios[i]; in __filemap_fdatawait_range() local
525 folio_wait_writeback(folio); in __filemap_fdatawait_range()
526 folio_clear_error(folio); in __filemap_fdatawait_range()
636 struct folio *folio; in filemap_range_has_writeback() local
642 xas_for_each(&xas, folio, max) { in filemap_range_has_writeback()
643 if (xas_retry(&xas, folio)) in filemap_range_has_writeback()
645 if (xa_is_value(folio)) in filemap_range_has_writeback()
647 if (folio_test_dirty(folio) || folio_test_locked(folio) || in filemap_range_has_writeback()
648 folio_test_writeback(folio)) in filemap_range_has_writeback()
652 return folio != NULL; in filemap_range_has_writeback()
807 void replace_page_cache_folio(struct folio *old, struct folio *new) in replace_page_cache_folio()
810 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_folio()
845 struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) in __filemap_add_folio() argument
848 int huge = folio_test_hugetlb(folio); in __filemap_add_folio()
852 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in __filemap_add_folio()
853 VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); in __filemap_add_folio()
857 int error = mem_cgroup_charge(folio, NULL, gfp); in __filemap_add_folio()
858 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); in __filemap_add_folio()
862 xas_set_order(&xas, index, folio_order(folio)); in __filemap_add_folio()
863 nr = folio_nr_pages(folio); in __filemap_add_folio()
867 folio_ref_add(folio, nr); in __filemap_add_folio()
868 folio->mapping = mapping; in __filemap_add_folio()
869 folio->index = xas.xa_index; in __filemap_add_folio()
875 if (order > folio_order(folio)) in __filemap_add_folio()
892 if (order > folio_order(folio)) { in __filemap_add_folio()
900 xas_store(&xas, folio); in __filemap_add_folio()
908 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); in __filemap_add_folio()
909 if (folio_test_pmd_mappable(folio)) in __filemap_add_folio()
910 __lruvec_stat_mod_folio(folio, in __filemap_add_folio()
920 trace_mm_filemap_add_to_page_cache(folio); in __filemap_add_folio()
924 mem_cgroup_uncharge(folio); in __filemap_add_folio()
925 folio->mapping = NULL; in __filemap_add_folio()
927 folio_put_refs(folio, nr); in __filemap_add_folio()
932 int filemap_add_folio(struct address_space *mapping, struct folio *folio, in filemap_add_folio() argument
938 __folio_set_locked(folio); in filemap_add_folio()
939 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); in filemap_add_folio()
941 __folio_clear_locked(folio); in filemap_add_folio()
951 WARN_ON_ONCE(folio_test_active(folio)); in filemap_add_folio()
953 workingset_refault(folio, shadow); in filemap_add_folio()
954 folio_add_lru(folio); in filemap_add_folio()
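
Sketch of the usual allocate-and-insert pairing with filemap_alloc_folio() (illustrative helper name; on success the folio comes back locked, as filemap_add_folio() leaves it):

#include <linux/err.h>
#include <linux/pagemap.h>

/* Sketch: allocate an order-0 folio and insert it at @index. */
static struct folio *add_new_folio(struct address_space *mapping,
				   pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct folio *folio = filemap_alloc_folio(gfp, 0);
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);
	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {		/* e.g. -EEXIST if another task won the race */
		folio_put(folio);
		return ERR_PTR(err);
	}
	return folio;		/* locked, with the cache's references held */
}
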
961 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) in filemap_alloc_folio()
964 struct folio *folio; in filemap_alloc_folio() local
971 folio = __folio_alloc_node(gfp, order, n); in filemap_alloc_folio()
972 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); in filemap_alloc_folio()
974 return folio; in filemap_alloc_folio()
1033 static wait_queue_head_t *folio_waitqueue(struct folio *folio) in folio_waitqueue() argument
1035 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)]; in folio_waitqueue()
1098 if (test_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1101 if (test_and_set_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1133 static void folio_wake_bit(struct folio *folio, int bit_nr) in folio_wake_bit() argument
1135 wait_queue_head_t *q = folio_waitqueue(folio); in folio_wake_bit()
1140 key.folio = folio; in folio_wake_bit()
1175 folio_clear_waiters(folio); in folio_wake_bit()
1180 static void folio_wake(struct folio *folio, int bit) in folio_wake() argument
1182 if (!folio_test_waiters(folio)) in folio_wake()
1184 folio_wake_bit(folio, bit); in folio_wake()
1206 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr, in folio_trylock_flag() argument
1210 if (test_and_set_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1212 } else if (test_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1222 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr, in folio_wait_bit_common() argument
1225 wait_queue_head_t *q = folio_waitqueue(folio); in folio_wait_bit_common()
1234 !folio_test_uptodate(folio) && folio_test_workingset(folio)) { in folio_wait_bit_common()
1242 wait_page.folio = folio; in folio_wait_bit_common()
1268 folio_set_waiters(folio); in folio_wait_bit_common()
1269 if (!folio_trylock_flag(folio, bit_nr, wait)) in folio_wait_bit_common()
1282 folio_put(folio); in folio_wait_bit_common()
1319 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0)))) in folio_wait_bit_common()
1386 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry)); in migration_entry_wait_on_locked() local
1388 q = folio_waitqueue(folio); in migration_entry_wait_on_locked()
1389 if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) { in migration_entry_wait_on_locked()
1397 wait_page.folio = folio; in migration_entry_wait_on_locked()
1402 folio_set_waiters(folio); in migration_entry_wait_on_locked()
1403 if (!folio_trylock_flag(folio, PG_locked, wait)) in migration_entry_wait_on_locked()
1443 void folio_wait_bit(struct folio *folio, int bit_nr) in folio_wait_bit() argument
1445 folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in folio_wait_bit()
1449 int folio_wait_bit_killable(struct folio *folio, int bit_nr) in folio_wait_bit_killable() argument
1451 return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED); in folio_wait_bit_killable()
1468 static int folio_put_wait_locked(struct folio *folio, int state) in folio_put_wait_locked() argument
1470 return folio_wait_bit_common(folio, PG_locked, state, DROP); in folio_put_wait_locked()
1480 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) in folio_add_wait_queue() argument
1482 wait_queue_head_t *q = folio_waitqueue(folio); in folio_add_wait_queue()
1487 folio_set_waiters(folio); in folio_add_wait_queue()
1524 void folio_unlock(struct folio *folio) in folio_unlock() argument
1529 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in folio_unlock()
1530 if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0))) in folio_unlock()
1531 folio_wake_bit(folio, PG_locked); in folio_unlock()
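
The lock/unlock pair above is almost always used with a revalidation step; a generic sketch of that pattern (illustrative, mirrors what __filemap_get_folio() and filemap_fault() do):

/* Sketch: take the folio lock, then confirm it is still in @mapping. */
static struct folio *lock_and_revalidate(struct address_space *mapping,
					 struct folio *folio)
{
	folio_lock(folio);	/* may sleep in folio_wait_bit_common() */
	if (likely(folio->mapping == mapping))
		return folio;	/* caller releases with folio_unlock() */
	/* Truncated or reclaimed while we slept: drop it and retry. */
	folio_unlock(folio);
	folio_put(folio);
	return NULL;
}
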
1546 void folio_end_private_2(struct folio *folio) in folio_end_private_2() argument
1548 VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio); in folio_end_private_2()
1549 clear_bit_unlock(PG_private_2, folio_flags(folio, 0)); in folio_end_private_2()
1550 folio_wake_bit(folio, PG_private_2); in folio_end_private_2()
1551 folio_put(folio); in folio_end_private_2()
1561 void folio_wait_private_2(struct folio *folio) in folio_wait_private_2() argument
1563 while (folio_test_private_2(folio)) in folio_wait_private_2()
1564 folio_wait_bit(folio, PG_private_2); in folio_wait_private_2()
1579 int folio_wait_private_2_killable(struct folio *folio) in folio_wait_private_2_killable() argument
1583 while (folio_test_private_2(folio)) { in folio_wait_private_2_killable()
1584 ret = folio_wait_bit_killable(folio, PG_private_2); in folio_wait_private_2_killable()
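
Sketch of the netfs-style PG_private_2 lifecycle the three helpers above serve (illustrative; folio_end_private_2() clears the bit, wakes waiters, and drops the reference taken here):

/* Sketch: hold a folio under PG_private_2 across an async cache write. */
static void start_private_2(struct folio *folio)
{
	folio_get(folio);	/* paired with the put in folio_end_private_2() */
	folio_set_private_2(folio);
}
/* On completion: folio_end_private_2(folio);
 * to throttle:   folio_wait_private_2(folio); */
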
1597 void folio_end_writeback(struct folio *folio) in folio_end_writeback() argument
1606 if (folio_test_reclaim(folio)) { in folio_end_writeback()
1607 folio_clear_reclaim(folio); in folio_end_writeback()
1608 folio_rotate_reclaimable(folio); in folio_end_writeback()
1617 folio_get(folio); in folio_end_writeback()
1618 if (!__folio_end_writeback(folio)) in folio_end_writeback()
1622 folio_wake(folio, PG_writeback); in folio_end_writeback()
1623 acct_reclaim_writeback(folio); in folio_end_writeback()
1624 folio_put(folio); in folio_end_writeback()
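
Sketch of the completion side that ends up here, roughly as an I/O end handler might write it (illustrative function name):

#include <linux/pagemap.h>

/* Sketch: finish writeback on @folio once the backing I/O completes. */
static void my_writeback_done(struct folio *folio, int err)
{
	if (err) {
		folio_set_error(folio);
		mapping_set_error(folio->mapping, err);
	}
	folio_end_writeback(folio);	/* wakes PG_writeback waiters */
}
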
1634 struct folio *folio = page_folio(page); in page_endio() local
1638 folio_mark_uptodate(folio); in page_endio()
1640 folio_clear_uptodate(folio); in page_endio()
1641 folio_set_error(folio); in page_endio()
1643 folio_unlock(folio); in page_endio()
1648 folio_set_error(folio); in page_endio()
1649 mapping = folio_mapping(folio); in page_endio()
1653 folio_end_writeback(folio); in page_endio()
1662 void __folio_lock(struct folio *folio) in __folio_lock() argument
1664 folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, in __folio_lock()
1669 int __folio_lock_killable(struct folio *folio) in __folio_lock_killable() argument
1671 return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE, in __folio_lock_killable()
1676 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) in __folio_lock_async() argument
1678 struct wait_queue_head *q = folio_waitqueue(folio); in __folio_lock_async()
1681 wait->folio = folio; in __folio_lock_async()
1686 folio_set_waiters(folio); in __folio_lock_async()
1687 ret = !folio_trylock(folio); in __folio_lock_async()
1713 bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, in __folio_lock_or_retry() argument
1726 folio_wait_locked_killable(folio); in __folio_lock_or_retry()
1728 folio_wait_locked(folio); in __folio_lock_or_retry()
1734 ret = __folio_lock_killable(folio); in __folio_lock_or_retry()
1740 __folio_lock(folio); in __folio_lock_or_retry()
1853 struct folio *folio; in mapping_get_entry() local
1858 folio = xas_load(&xas); in mapping_get_entry()
1859 if (xas_retry(&xas, folio)) in mapping_get_entry()
1865 if (!folio || xa_is_value(folio)) in mapping_get_entry()
1868 if (!folio_try_get_rcu(folio)) in mapping_get_entry()
1871 if (unlikely(folio != xas_reload(&xas))) { in mapping_get_entry()
1872 folio_put(folio); in mapping_get_entry()
1878 return folio; in mapping_get_entry()
1914 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, in __filemap_get_folio()
1917 struct folio *folio; in __filemap_get_folio() local
1920 folio = mapping_get_entry(mapping, index); in __filemap_get_folio()
1921 if (xa_is_value(folio)) { in __filemap_get_folio()
1923 return folio; in __filemap_get_folio()
1924 folio = NULL; in __filemap_get_folio()
1926 if (!folio) in __filemap_get_folio()
1931 if (!folio_trylock(folio)) { in __filemap_get_folio()
1932 folio_put(folio); in __filemap_get_folio()
1936 folio_lock(folio); in __filemap_get_folio()
1940 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1941 folio_unlock(folio); in __filemap_get_folio()
1942 folio_put(folio); in __filemap_get_folio()
1945 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); in __filemap_get_folio()
1949 folio_mark_accessed(folio); in __filemap_get_folio()
1952 if (folio_test_idle(folio)) in __filemap_get_folio()
1953 folio_clear_idle(folio); in __filemap_get_folio()
1957 folio_wait_stable(folio); in __filemap_get_folio()
1959 if (!folio && (fgp_flags & FGP_CREAT)) { in __filemap_get_folio()
1970 folio = filemap_alloc_folio(gfp, 0); in __filemap_get_folio()
1971 if (!folio) in __filemap_get_folio()
1979 __folio_set_referenced(folio); in __filemap_get_folio()
1981 err = filemap_add_folio(mapping, folio, index, gfp); in __filemap_get_folio()
1983 folio_put(folio); in __filemap_get_folio()
1984 folio = NULL; in __filemap_get_folio()
1993 if (folio && (fgp_flags & FGP_FOR_MMAP)) in __filemap_get_folio()
1994 folio_unlock(folio); in __filemap_get_folio()
1997 return folio; in __filemap_get_folio()
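
A find-or-create sketch using the FGP flags handled above (illustrative; in this kernel version __filemap_get_folio() returns NULL rather than an ERR_PTR on failure):

#include <linux/pagemap.h>

/* Sketch: get a locked folio at @index, creating it if absent. */
static int fill_one_folio(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_CREAT | FGP_WRITE,
				    mapping_gfp_mask(mapping));
	if (!folio)
		return -ENOMEM;
	/* ... copy new contents into the folio here ... */
	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}
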
2001 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry()
2004 struct folio *folio; in find_get_entry() local
2008 folio = xas_find(xas, max); in find_get_entry()
2010 folio = xas_find_marked(xas, max, mark); in find_get_entry()
2012 if (xas_retry(xas, folio)) in find_get_entry()
2019 if (!folio || xa_is_value(folio)) in find_get_entry()
2020 return folio; in find_get_entry()
2022 if (!folio_try_get_rcu(folio)) in find_get_entry()
2025 if (unlikely(folio != xas_reload(xas))) { in find_get_entry()
2026 folio_put(folio); in find_get_entry()
2030 return folio; in find_get_entry()
2060 struct folio *folio; in find_get_entries() local
2063 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in find_get_entries()
2065 if (!folio_batch_add(fbatch, folio)) in find_get_entries()
2074 folio = fbatch->folios[idx]; in find_get_entries()
2075 if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) in find_get_entries()
2076 nr = folio_nr_pages(folio); in find_get_entries()
2106 struct folio *folio; in find_lock_entries() local
2109 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2110 if (!xa_is_value(folio)) { in find_lock_entries()
2111 if (folio->index < *start) in find_lock_entries()
2113 if (folio->index + folio_nr_pages(folio) - 1 > end) in find_lock_entries()
2115 if (!folio_trylock(folio)) in find_lock_entries()
2117 if (folio->mapping != mapping || in find_lock_entries()
2118 folio_test_writeback(folio)) in find_lock_entries()
2120 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), in find_lock_entries()
2121 folio); in find_lock_entries()
2124 if (!folio_batch_add(fbatch, folio)) in find_lock_entries()
2128 folio_unlock(folio); in find_lock_entries()
2130 folio_put(folio); in find_lock_entries()
2138 folio = fbatch->folios[idx]; in find_lock_entries()
2139 if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) in find_lock_entries()
2140 nr = folio_nr_pages(folio); in find_lock_entries()
2171 struct folio *folio; in filemap_get_folios() local
2174 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in filemap_get_folios()
2176 if (xa_is_value(folio)) in filemap_get_folios()
2178 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios()
2179 unsigned long nr = folio_nr_pages(folio); in filemap_get_folios()
2181 if (folio_test_hugetlb(folio)) in filemap_get_folios()
2183 *start = folio->index + nr; in filemap_get_folios()
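
Batch iteration sketch for the lookup above (illustrative helper; filemap_get_folios() advances *start past the last folio it returned, so the loop makes forward progress):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Sketch: visit every cached folio in [start, end]. */
static void touch_range(struct address_space *mapping,
			pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_mark_accessed(fbatch.folios[i]);
		folio_batch_release(&fbatch);	/* drops the batch's refs */
		cond_resched();
	}
}
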
2206 bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max) in folio_more_pages() argument
2208 if (!folio_test_large(folio) || folio_test_hugetlb(folio)) in folio_more_pages()
2212 return index < folio->index + folio_nr_pages(folio) - 1; in folio_more_pages()
2235 struct folio *folio; in filemap_get_folios_contig() local
2239 for (folio = xas_load(&xas); folio && xas.xa_index <= end; in filemap_get_folios_contig()
2240 folio = xas_next(&xas)) { in filemap_get_folios_contig()
2241 if (xas_retry(&xas, folio)) in filemap_get_folios_contig()
2247 if (xa_is_value(folio)) in filemap_get_folios_contig()
2250 if (!folio_try_get_rcu(folio)) in filemap_get_folios_contig()
2253 if (unlikely(folio != xas_reload(&xas))) in filemap_get_folios_contig()
2256 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios_contig()
2257 nr = folio_nr_pages(folio); in filemap_get_folios_contig()
2259 if (folio_test_hugetlb(folio)) in filemap_get_folios_contig()
2261 *start = folio->index + nr; in filemap_get_folios_contig()
2266 folio_put(folio); in filemap_get_folios_contig()
2276 folio = fbatch->folios[nr - 1]; in filemap_get_folios_contig()
2277 if (folio_test_hugetlb(folio)) in filemap_get_folios_contig()
2278 *start = folio->index + 1; in filemap_get_folios_contig()
2280 *start = folio->index + folio_nr_pages(folio); in filemap_get_folios_contig()
2305 struct folio *folio; in filemap_get_folios_tag() local
2308 while ((folio = find_get_entry(&xas, end, tag)) != NULL) { in filemap_get_folios_tag()
2314 if (xa_is_value(folio)) in filemap_get_folios_tag()
2316 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios_tag()
2317 unsigned long nr = folio_nr_pages(folio); in filemap_get_folios_tag()
2319 if (folio_test_hugetlb(folio)) in filemap_get_folios_tag()
2321 *start = folio->index + nr; in filemap_get_folios_tag()
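
The same batch pattern drives tagged walks; a sketch that mirrors what __filemap_fdatawait_range() does above (illustrative helper name):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Sketch: wait out writeback on every tagged folio in [start, end]. */
static void wait_range_writeback(struct address_space *mapping,
				 pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios_tag(mapping, &start, end,
				      PAGECACHE_TAG_WRITEBACK, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_wait_writeback(fbatch.folios[i]);
		folio_batch_release(&fbatch);
	}
}
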
2375 struct folio *folio; in filemap_get_read_batch() local
2378 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { in filemap_get_read_batch()
2379 if (xas_retry(&xas, folio)) in filemap_get_read_batch()
2381 if (xas.xa_index > max || xa_is_value(folio)) in filemap_get_read_batch()
2383 if (xa_is_sibling(folio)) in filemap_get_read_batch()
2385 if (!folio_try_get_rcu(folio)) in filemap_get_read_batch()
2388 if (unlikely(folio != xas_reload(&xas))) in filemap_get_read_batch()
2391 if (!folio_batch_add(fbatch, folio)) in filemap_get_read_batch()
2393 if (!folio_test_uptodate(folio)) in filemap_get_read_batch()
2395 if (folio_test_readahead(folio)) in filemap_get_read_batch()
2397 xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1); in filemap_get_read_batch()
2400 folio_put(folio); in filemap_get_read_batch()
2408 struct folio *folio) in filemap_read_folio() argument
2410 bool workingset = folio_test_workingset(folio); in filemap_read_folio()
2419 folio_clear_error(folio); in filemap_read_folio()
2424 error = filler(file, folio); in filemap_read_folio()
2430 error = folio_wait_locked_killable(folio); in filemap_read_folio()
2433 if (folio_test_uptodate(folio)) in filemap_read_folio()
2441 loff_t pos, size_t count, struct folio *folio, in filemap_range_uptodate() argument
2444 if (folio_test_uptodate(folio)) in filemap_range_uptodate()
2451 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2454 if (folio_pos(folio) > pos) { in filemap_range_uptodate()
2455 count -= folio_pos(folio) - pos; in filemap_range_uptodate()
2458 pos -= folio_pos(folio); in filemap_range_uptodate()
2461 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2466 struct folio *folio, bool need_uptodate) in filemap_update_page() argument
2477 if (!folio_trylock(folio)) { in filemap_update_page()
2487 folio_put_wait_locked(folio, TASK_KILLABLE); in filemap_update_page()
2490 error = __folio_lock_async(folio, iocb->ki_waitq); in filemap_update_page()
2496 if (!folio->mapping) in filemap_update_page()
2500 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, in filemap_update_page()
2509 folio); in filemap_update_page()
2512 folio_unlock(folio); in filemap_update_page()
2516 folio_put(folio); in filemap_update_page()
2524 struct folio *folio; in filemap_create_folio() local
2527 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); in filemap_create_folio()
2528 if (!folio) in filemap_create_folio()
2545 error = filemap_add_folio(mapping, folio, index, in filemap_create_folio()
2552 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_create_folio()
2557 folio_batch_add(fbatch, folio); in filemap_create_folio()
2561 folio_put(folio); in filemap_create_folio()
2566 struct address_space *mapping, struct folio *folio, in filemap_readahead() argument
2569 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2573 page_cache_async_ra(&ractl, folio, last_index - folio->index); in filemap_readahead()
2585 struct folio *folio; in filemap_get_pages() local
2612 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
2613 if (folio_test_readahead(folio)) { in filemap_get_pages()
2614 err = filemap_readahead(iocb, filp, mapping, folio, last_index); in filemap_get_pages()
2618 if (!folio_test_uptodate(folio)) { in filemap_get_pages()
2622 err = filemap_update_page(iocb, mapping, count, folio, in filemap_get_pages()
2631 folio_put(folio); in filemap_get_pages()
2639 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) in pos_same_folio() argument
2641 unsigned int shift = folio_shift(folio); in pos_same_folio()
2726 struct folio *folio = fbatch.folios[i]; in filemap_read() local
2727 size_t fsize = folio_size(folio); in filemap_read()
2733 if (end_offset < folio_pos(folio)) in filemap_read()
2736 folio_mark_accessed(folio); in filemap_read()
2743 flush_dcache_folio(folio); in filemap_read()
2745 copied = copy_folio_to_iter(folio, offset, bytes, iter); in filemap_read()
2848 struct folio *folio, loff_t fpos, size_t size) in splice_folio_into_pipe() argument
2851 size_t spliced = 0, offset = offset_in_folio(folio, fpos); in splice_folio_into_pipe()
2853 page = folio_page(folio, offset / PAGE_SIZE); in splice_folio_into_pipe()
2854 size = min(size, folio_size(folio) - offset); in splice_folio_into_pipe()
2868 folio_get(folio); in splice_folio_into_pipe()
2934 struct folio *folio = fbatch.folios[i]; in filemap_splice_read() local
2937 if (folio_pos(folio) >= end_offset) in filemap_splice_read()
2939 folio_mark_accessed(folio); in filemap_splice_read()
2947 flush_dcache_folio(folio); in filemap_splice_read()
2950 n = splice_folio_into_pipe(pipe, folio, *ppos, n); in filemap_splice_read()
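
filemap_read() and filemap_splice_read() are normally reached through file_operations; a wiring sketch (illustrative struct name; assumes filemap_splice_read() is exported for direct use as the ->splice_read method):

#include <linux/fs.h>

/* Sketch: the buffered-read entry points as a filesystem wires them. */
static const struct file_operations myfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,	/* -> filemap_read() */
	.splice_read	= filemap_splice_read,
	.mmap		= generic_file_mmap,		/* -> filemap_fault() */
};
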
2973 struct address_space *mapping, struct folio *folio, in folio_seek_hole_data() argument
2979 if (xa_is_value(folio) || folio_test_uptodate(folio)) in folio_seek_hole_data()
2986 folio_lock(folio); in folio_seek_hole_data()
2987 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
2990 offset = offset_in_folio(folio, start) & ~(bsz - 1); in folio_seek_hole_data()
2993 if (ops->is_partially_uptodate(folio, offset, bsz) == in folio_seek_hole_data()
2998 } while (offset < folio_size(folio)); in folio_seek_hole_data()
3000 folio_unlock(folio); in folio_seek_hole_data()
3005 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) in seek_folio_size() argument
3007 if (xa_is_value(folio)) in seek_folio_size()
3009 return folio_size(folio); in seek_folio_size()
3036 struct folio *folio; in mapping_seek_hole_data() local
3042 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
3052 seek_size = seek_folio_size(&xas, folio); in mapping_seek_hole_data()
3054 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
3062 if (!xa_is_value(folio)) in mapping_seek_hole_data()
3063 folio_put(folio); in mapping_seek_hole_data()
3069 if (folio && !xa_is_value(folio)) in mapping_seek_hole_data()
3070 folio_put(folio); in mapping_seek_hole_data()
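
Sketch of an llseek method built on mapping_seek_hole_data(), roughly as shmem does it (illustrative function name):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Sketch: SEEK_HOLE/SEEK_DATA resolved against the page cache. */
static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);
	inode_lock(inode);
	offset = mapping_seek_hole_data(file->f_mapping, offset,
					i_size_read(inode), whence);
	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	inode_unlock(inode);
	return offset;
}
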
3090 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, in lock_folio_maybe_drop_mmap() argument
3093 if (folio_trylock(folio)) in lock_folio_maybe_drop_mmap()
3106 if (__folio_lock_killable(folio)) { in lock_folio_maybe_drop_mmap()
3118 __folio_lock(folio); in lock_folio_maybe_drop_mmap()
3200 struct folio *folio) in do_async_mmap_readahead() argument
3216 if (folio_test_readahead(folio)) { in do_async_mmap_readahead()
3218 page_cache_async_ra(&ractl, folio, ra->ra_pages); in do_async_mmap_readahead()
3254 struct folio *folio; in filemap_fault() local
3265 folio = filemap_get_folio(mapping, index); in filemap_fault()
3266 if (likely(folio)) { in filemap_fault()
3272 fpin = do_async_mmap_readahead(vmf, folio); in filemap_fault()
3273 if (unlikely(!folio_test_uptodate(folio))) { in filemap_fault()
3292 folio = __filemap_get_folio(mapping, index, in filemap_fault()
3295 if (!folio) { in filemap_fault()
3303 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) in filemap_fault()
3307 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3308 folio_unlock(folio); in filemap_fault()
3309 folio_put(folio); in filemap_fault()
3312 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); in filemap_fault()
3318 if (unlikely(!folio_test_uptodate(folio))) { in filemap_fault()
3326 folio_unlock(folio); in filemap_fault()
3327 folio_put(folio); in filemap_fault()
3339 folio_unlock(folio); in filemap_fault()
3351 folio_unlock(folio); in filemap_fault()
3352 folio_put(folio); in filemap_fault()
3356 vmf->page = folio_file_page(folio, index); in filemap_fault()
3367 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3370 folio_put(folio); in filemap_fault()
3384 if (folio) in filemap_fault()
3385 folio_put(folio); in filemap_fault()
3394 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, in filemap_map_pmd() argument
3401 folio_unlock(folio); in filemap_map_pmd()
3402 folio_put(folio); in filemap_map_pmd()
3406 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { in filemap_map_pmd()
3407 struct page *page = folio_file_page(folio, start); in filemap_map_pmd()
3411 folio_unlock(folio); in filemap_map_pmd()
3421 folio_unlock(folio); in filemap_map_pmd()
3422 folio_put(folio); in filemap_map_pmd()
3429 static struct folio *next_uptodate_page(struct folio *folio, in next_uptodate_page() argument
3436 if (!folio) in next_uptodate_page()
3438 if (xas_retry(xas, folio)) in next_uptodate_page()
3440 if (xa_is_value(folio)) in next_uptodate_page()
3442 if (folio_test_locked(folio)) in next_uptodate_page()
3444 if (!folio_try_get_rcu(folio)) in next_uptodate_page()
3447 if (unlikely(folio != xas_reload(xas))) in next_uptodate_page()
3449 if (!folio_test_uptodate(folio) || folio_test_readahead(folio)) in next_uptodate_page()
3451 if (!folio_trylock(folio)) in next_uptodate_page()
3453 if (folio->mapping != mapping) in next_uptodate_page()
3455 if (!folio_test_uptodate(folio)) in next_uptodate_page()
3460 return folio; in next_uptodate_page()
3462 folio_unlock(folio); in next_uptodate_page()
3464 folio_put(folio); in next_uptodate_page()
3465 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_page()
3470 static inline struct folio *first_map_page(struct address_space *mapping, in first_map_page()
3478 static inline struct folio *next_map_page(struct address_space *mapping, in next_map_page()
3495 struct folio *folio; in filemap_map_pages() local
3501 folio = first_map_page(mapping, &xas, end_pgoff); in filemap_map_pages()
3502 if (!folio) in filemap_map_pages()
3505 if (filemap_map_pmd(vmf, folio, start_pgoff)) { in filemap_map_pages()
3514 page = folio_file_page(folio, xas.xa_index); in filemap_map_pages()
3540 if (folio_more_pages(folio, xas.xa_index, end_pgoff)) { in filemap_map_pages()
3542 folio_ref_inc(folio); in filemap_map_pages()
3545 folio_unlock(folio); in filemap_map_pages()
3548 if (folio_more_pages(folio, xas.xa_index, end_pgoff)) { in filemap_map_pages()
3552 folio_unlock(folio); in filemap_map_pages()
3553 folio_put(folio); in filemap_map_pages()
3554 } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL); in filemap_map_pages()
3566 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite() local
3571 folio_lock(folio); in filemap_page_mkwrite()
3572 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3573 folio_unlock(folio); in filemap_page_mkwrite()
3582 folio_mark_dirty(folio); in filemap_page_mkwrite()
3583 folio_wait_stable(folio); in filemap_page_mkwrite()
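
filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() form the generic mmap trio; a wiring sketch (illustrative struct name, matching generic_file_vm_ops):

#include <linux/mm.h>

/* Sketch: generic page-cache-backed vm_operations for a file mapping. */
static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};
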
3636 static struct folio *do_read_cache_folio(struct address_space *mapping, in do_read_cache_folio()
3639 struct folio *folio; in do_read_cache_folio() local
3645 folio = filemap_get_folio(mapping, index); in do_read_cache_folio()
3646 if (!folio) { in do_read_cache_folio()
3647 folio = filemap_alloc_folio(gfp, 0); in do_read_cache_folio()
3648 if (!folio) in do_read_cache_folio()
3650 err = filemap_add_folio(mapping, folio, index, gfp); in do_read_cache_folio()
3652 folio_put(folio); in do_read_cache_folio()
3661 if (folio_test_uptodate(folio)) in do_read_cache_folio()
3664 if (!folio_trylock(folio)) { in do_read_cache_folio()
3665 folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE); in do_read_cache_folio()
3670 if (!folio->mapping) { in do_read_cache_folio()
3671 folio_unlock(folio); in do_read_cache_folio()
3672 folio_put(folio); in do_read_cache_folio()
3677 if (folio_test_uptodate(folio)) { in do_read_cache_folio()
3678 folio_unlock(folio); in do_read_cache_folio()
3683 err = filemap_read_folio(file, filler, folio); in do_read_cache_folio()
3685 folio_put(folio); in do_read_cache_folio()
3692 folio_mark_accessed(folio); in do_read_cache_folio()
3693 return folio; in do_read_cache_folio()
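
Caller sketch for the read path above (illustrative helper; a NULL filler makes do_read_cache_folio() fall back to a_ops->read_folio, and a NULL file is acceptable when the filler does not need one):

#include <linux/err.h>
#include <linux/pagemap.h>

/* Sketch: fetch one uptodate folio, reading it in if necessary. */
static int inspect_block(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = read_cache_folio(mapping, index, NULL, NULL);

	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* typically -EIO or -ENOMEM */
	/* folio is uptodate, referenced, and unlocked here. */
	/* ... examine its contents, e.g. via kmap_local_folio() ... */
	folio_put(folio);
	return 0;
}
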
3712 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, in read_cache_folio()
3737 struct folio *mapping_read_folio_gfp(struct address_space *mapping, in mapping_read_folio_gfp()
3747 struct folio *folio; in do_read_cache_page() local
3749 folio = do_read_cache_folio(mapping, index, filler, file, gfp); in do_read_cache_page()
3750 if (IS_ERR(folio)) in do_read_cache_page()
3751 return &folio->page; in do_read_cache_page()
3752 return folio_file_page(folio, index); in do_read_cache_page()
4112 bool filemap_release_folio(struct folio *folio, gfp_t gfp) in filemap_release_folio() argument
4114 struct address_space * const mapping = folio->mapping; in filemap_release_folio()
4116 BUG_ON(!folio_test_locked(folio)); in filemap_release_folio()
4117 if (folio_test_writeback(folio)) in filemap_release_folio()
4121 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()
4122 return try_to_free_buffers(folio); in filemap_release_folio()