Lines matching refs: folio (fs/f2fs/data.c)

50 bool f2fs_is_cp_guaranteed(const struct folio *folio)  in f2fs_is_cp_guaranteed()  argument
52 struct address_space *mapping = folio->mapping; in f2fs_is_cp_guaranteed()
56 if (fscrypt_is_bounce_folio(folio)) in f2fs_is_cp_guaranteed()
57 return folio_test_f2fs_gcing(fscrypt_pagecache_folio(folio)); in f2fs_is_cp_guaranteed()
68 folio_test_f2fs_gcing(folio)) in f2fs_is_cp_guaranteed()
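
The f2fs_is_cp_guaranteed() hits above (lines 50-68) show the fscrypt bounce-folio indirection: for encrypted writeback the bio carries a bounce folio, and per-folio f2fs state such as the gcing flag must be read from the pagecache folio behind it. A minimal sketch of that unwrap step, built only from the helpers visible in the listing (the wrapper name is hypothetical):

/* Hypothetical helper: test the gcing flag on the real pagecache
 * folio, unwrapping an fscrypt bounce folio first if present. */
static bool f2fs_folio_is_gcing(struct folio *folio)
{
	if (fscrypt_is_bounce_folio(folio))
		folio = fscrypt_pagecache_folio(folio);
	return folio_test_f2fs_gcing(folio);
}
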
73 static enum count_type __read_io_type(struct folio *folio) in __read_io_type() argument
75 struct address_space *mapping = folio->mapping; in __read_io_type()
143 struct folio *folio = fi.folio; in f2fs_finish_read_bio() local
145 if (f2fs_is_compressed_page(folio)) { in f2fs_finish_read_bio()
147 f2fs_end_read_compressed_page(folio, true, 0, in f2fs_finish_read_bio()
149 f2fs_put_folio_dic(folio, in_task); in f2fs_finish_read_bio()
153 dec_page_count(F2FS_F_SB(folio), __read_io_type(folio)); in f2fs_finish_read_bio()
154 folio_end_read(folio, bio->bi_status == BLK_STS_OK); in f2fs_finish_read_bio()
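
f2fs_finish_read_bio() (lines 143-154) walks the completed bio folio by folio. A condensed sketch of that loop, assuming the block layer's bio_for_each_folio_all() iterator and eliding the compressed-page branch shown at lines 145-149:

struct folio_iter fi;

bio_for_each_folio_all(fi, bio) {
	struct folio *folio = fi.folio;

	/* Drop the per-type in-flight counter, then mark the folio
	 * uptodate (on success) and unlock it in a single call. */
	dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
	folio_end_read(folio, bio->bi_status == BLK_STS_OK);
}
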
187 struct folio *folio = fi.folio; in f2fs_verify_bio() local
189 if (!f2fs_is_compressed_page(folio) && in f2fs_verify_bio()
190 !fsverity_verify_page(&folio->page)) { in f2fs_verify_bio()
240 struct folio *folio = fi.folio; in f2fs_handle_step_decompress() local
242 if (f2fs_is_compressed_page(folio)) in f2fs_handle_step_decompress()
243 f2fs_end_read_compressed_page(folio, false, blkaddr, in f2fs_handle_step_decompress()
329 struct folio *folio = fi.folio; in f2fs_write_end_io() local
332 if (fscrypt_is_bounce_folio(folio)) { in f2fs_write_end_io()
333 struct folio *io_folio = folio; in f2fs_write_end_io()
335 folio = fscrypt_pagecache_folio(io_folio); in f2fs_write_end_io()
340 if (f2fs_is_compressed_page(folio)) { in f2fs_write_end_io()
341 f2fs_compress_write_end_io(bio, folio); in f2fs_write_end_io()
346 type = WB_DATA_TYPE(folio, false); in f2fs_write_end_io()
349 mapping_set_error(folio->mapping, -EIO); in f2fs_write_end_io()
355 f2fs_bug_on(sbi, is_node_folio(folio) && in f2fs_write_end_io()
356 folio->index != nid_of_node(folio)); in f2fs_write_end_io()
359 if (f2fs_in_warm_node_list(sbi, folio)) in f2fs_write_end_io()
360 f2fs_del_fsync_node_entry(sbi, folio); in f2fs_write_end_io()
361 folio_clear_f2fs_gcing(folio); in f2fs_write_end_io()
362 folio_end_writeback(folio); in f2fs_write_end_io()
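
f2fs_write_end_io() (lines 329-362) mirrors that on the write side: unwrap a possible bounce folio, divert compressed pages to their own completion, record I/O errors on the mapping, then clear the gcing flag and end writeback. A condensed sketch; fscrypt_free_bounce_page() does not appear in the listing and is an assumption here:

bio_for_each_folio_all(fi, bio) {
	struct folio *folio = fi.folio;

	if (fscrypt_is_bounce_folio(folio)) {
		struct folio *io_folio = folio;

		folio = fscrypt_pagecache_folio(io_folio);
		fscrypt_free_bounce_page(&io_folio->page);	/* assumed */
	}

	if (unlikely(bio->bi_status))
		mapping_set_error(folio->mapping, -EIO);

	folio_clear_f2fs_gcing(folio);
	folio_end_writeback(folio);
}
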
447 F2FS_I(fio->folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE) in f2fs_io_flags()
546 struct folio *folio, nid_t ino) in __has_merged_page() argument
553 if (!inode && !folio && !ino) in __has_merged_page()
557 struct folio *target = fi.folio; in __has_merged_page()
572 if (folio && folio == target) in __has_merged_page()
641 struct inode *inode, struct folio *folio, in __submit_merged_write_cond() argument
653 ret = __has_merged_page(io->bio, inode, folio, ino); in __submit_merged_write_cond()
671 struct inode *inode, struct folio *folio, in f2fs_submit_merged_write_cond() argument
674 __submit_merged_write_cond(sbi, inode, folio, ino, type, false); in f2fs_submit_merged_write_cond()
691 struct folio *fio_folio = fio->folio; in f2fs_submit_page_bio()
692 struct folio *data_folio = fio->encrypted_page ? in f2fs_submit_page_bio()
713 __read_io_type(data_folio) : WB_DATA_TYPE(fio->folio, false)); in f2fs_submit_page_bio()
779 struct folio *fio_folio = fio->folio; in add_ipu_page()
826 struct bio **bio, struct folio *folio) in f2fs_submit_merged_ipu_write() argument
832 f2fs_bug_on(sbi, !target && !folio); in f2fs_submit_merged_ipu_write()
848 folio, 0); in f2fs_submit_merged_ipu_write()
865 folio, 0); in f2fs_submit_merged_ipu_write()
886 struct folio *data_folio = fio->encrypted_page ? in f2fs_merge_page_bio()
887 page_folio(fio->encrypted_page) : fio->folio; in f2fs_merge_page_bio()
888 struct folio *folio = fio->folio; in f2fs_merge_page_bio() local
902 f2fs_set_bio_crypt_ctx(bio, folio->mapping->host, in f2fs_merge_page_bio()
903 folio->index, fio, GFP_NOIO); in f2fs_merge_page_bio()
912 wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio)); in f2fs_merge_page_bio()
949 struct folio *bio_folio; in f2fs_submit_page_write()
984 bio_folio = fio->folio; in f2fs_submit_page_write()
1012 wbc_account_cgroup_owner(fio->io_wbc, fio->folio, in f2fs_submit_page_write()
1013 folio_size(fio->folio)); in f2fs_submit_page_write()
1017 trace_f2fs_submit_folio_write(fio->folio, fio); in f2fs_submit_page_write()
1086 static int f2fs_submit_page_read(struct inode *inode, struct folio *folio, in f2fs_submit_page_read() argument
1094 folio->index, for_write); in f2fs_submit_page_read()
1101 if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) { in f2fs_submit_page_read()
1203 struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index, in f2fs_get_read_data_folio()
1208 struct folio *folio; in f2fs_get_read_data_folio() local
1211 folio = f2fs_grab_cache_folio(mapping, index, for_write); in f2fs_get_read_data_folio()
1212 if (IS_ERR(folio)) in f2fs_get_read_data_folio()
1213 return folio; in f2fs_get_read_data_folio()
1248 if (folio_test_uptodate(folio)) { in f2fs_get_read_data_folio()
1249 folio_unlock(folio); in f2fs_get_read_data_folio()
1250 return folio; in f2fs_get_read_data_folio()
1261 folio_zero_segment(folio, 0, folio_size(folio)); in f2fs_get_read_data_folio()
1262 if (!folio_test_uptodate(folio)) in f2fs_get_read_data_folio()
1263 folio_mark_uptodate(folio); in f2fs_get_read_data_folio()
1264 folio_unlock(folio); in f2fs_get_read_data_folio()
1265 return folio; in f2fs_get_read_data_folio()
1268 err = f2fs_submit_page_read(inode, folio, dn.data_blkaddr, in f2fs_get_read_data_folio()
1272 return folio; in f2fs_get_read_data_folio()
1275 f2fs_folio_put(folio, true); in f2fs_get_read_data_folio()
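
f2fs_get_read_data_folio() (lines 1203-1275) is the core grab-and-fill path: take a locked folio from the page cache, return it as-is when already uptodate, zero-fill holes without touching the device, and otherwise submit a read. Reduced to the calls shown above (NULL_ADDR as the hole marker is an assumption):

folio = f2fs_grab_cache_folio(mapping, index, for_write);
if (IS_ERR(folio))
	return folio;

if (folio_test_uptodate(folio)) {
	folio_unlock(folio);
	return folio;
}

if (dn.data_blkaddr == NULL_ADDR) {	/* assumed hole marker */
	/* Hole: no block mapped, serve zeroes directly. */
	folio_zero_segment(folio, 0, folio_size(folio));
	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return folio;
}

err = f2fs_submit_page_read(inode, folio, dn.data_blkaddr, 0, for_write);
if (err) {
	f2fs_folio_put(folio, true);
	return ERR_PTR(err);
}
return folio;
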
1279 struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index, in f2fs_find_data_folio()
1283 struct folio *folio; in f2fs_find_data_folio() local
1285 folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0); in f2fs_find_data_folio()
1286 if (IS_ERR(folio)) in f2fs_find_data_folio()
1288 if (folio_test_uptodate(folio)) in f2fs_find_data_folio()
1289 return folio; in f2fs_find_data_folio()
1290 f2fs_folio_put(folio, false); in f2fs_find_data_folio()
1293 folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs); in f2fs_find_data_folio()
1294 if (IS_ERR(folio)) in f2fs_find_data_folio()
1295 return folio; in f2fs_find_data_folio()
1297 if (folio_test_uptodate(folio)) in f2fs_find_data_folio()
1298 return folio; in f2fs_find_data_folio()
1300 folio_wait_locked(folio); in f2fs_find_data_folio()
1301 if (unlikely(!folio_test_uptodate(folio))) { in f2fs_find_data_folio()
1302 f2fs_folio_put(folio, false); in f2fs_find_data_folio()
1305 return folio; in f2fs_find_data_folio()
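
f2fs_find_data_folio() (lines 1279-1305) is the opportunistic variant: probe the cache first, and fall back to the read path only on a miss. The folio_wait_locked() plus re-test at the end lets a read already started elsewhere finish rather than issuing a duplicate. Condensed; the -EIO on the failure branch is an assumption:

folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
if (!IS_ERR(folio)) {
	if (folio_test_uptodate(folio))
		return folio;		/* fast path: cached and valid */
	f2fs_folio_put(folio, false);
}

folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs);
if (IS_ERR(folio) || folio_test_uptodate(folio))
	return folio;

folio_wait_locked(folio);		/* let the in-flight read finish */
if (unlikely(!folio_test_uptodate(folio))) {
	f2fs_folio_put(folio, false);
	return ERR_PTR(-EIO);		/* assumed */
}
return folio;
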
1313 struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index, in f2fs_get_lock_data_folio()
1317 struct folio *folio; in f2fs_get_lock_data_folio() local
1319 folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL); in f2fs_get_lock_data_folio()
1320 if (IS_ERR(folio)) in f2fs_get_lock_data_folio()
1321 return folio; in f2fs_get_lock_data_folio()
1324 folio_lock(folio); in f2fs_get_lock_data_folio()
1325 if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) { in f2fs_get_lock_data_folio()
1326 f2fs_folio_put(folio, true); in f2fs_get_lock_data_folio()
1329 return folio; in f2fs_get_lock_data_folio()
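
f2fs_get_lock_data_folio() (lines 1313-1329) demonstrates the classic lock-and-revalidate idiom: after folio_lock(), both the mapping and the uptodate flag must be rechecked, since truncation or a failed read can invalidate the folio while it is unlocked. What happens on the failure branch is not visible in the listing; a retry is assumed below:

repeat:
	folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL);
	if (IS_ERR(folio))
		return folio;

	folio_lock(folio);
	if (unlikely(folio->mapping != mapping ||
		     !folio_test_uptodate(folio))) {
		f2fs_folio_put(folio, true);	/* unlock + put */
		goto repeat;			/* assumed retry */
	}
	return folio;
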
1341 struct folio *f2fs_get_new_data_folio(struct inode *inode, in f2fs_get_new_data_folio()
1342 struct folio *ifolio, pgoff_t index, bool new_i_size) in f2fs_get_new_data_folio()
1345 struct folio *folio; in f2fs_get_new_data_folio() local
1349 folio = f2fs_grab_cache_folio(mapping, index, true); in f2fs_get_new_data_folio()
1350 if (IS_ERR(folio)) { in f2fs_get_new_data_folio()
1362 f2fs_folio_put(folio, true); in f2fs_get_new_data_folio()
1368 if (folio_test_uptodate(folio)) in f2fs_get_new_data_folio()
1372 folio_zero_segment(folio, 0, folio_size(folio)); in f2fs_get_new_data_folio()
1373 if (!folio_test_uptodate(folio)) in f2fs_get_new_data_folio()
1374 folio_mark_uptodate(folio); in f2fs_get_new_data_folio()
1376 f2fs_folio_put(folio, true); in f2fs_get_new_data_folio()
1380 folio = f2fs_get_lock_data_folio(inode, index, true); in f2fs_get_new_data_folio()
1381 if (IS_ERR(folio)) in f2fs_get_new_data_folio()
1382 return folio; in f2fs_get_new_data_folio()
1388 return folio; in f2fs_get_new_data_folio()
1837 struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), in f2fs_xattr_fiemap() local
1840 if (IS_ERR(folio)) in f2fs_xattr_fiemap()
1841 return PTR_ERR(folio); in f2fs_xattr_fiemap()
1845 f2fs_folio_put(folio, true); in f2fs_xattr_fiemap()
1857 f2fs_folio_put(folio, true); in f2fs_xattr_fiemap()
1871 struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), in f2fs_xattr_fiemap() local
1874 if (IS_ERR(folio)) in f2fs_xattr_fiemap()
1875 return PTR_ERR(folio); in f2fs_xattr_fiemap()
1879 f2fs_folio_put(folio, true); in f2fs_xattr_fiemap()
1886 f2fs_folio_put(folio, true); in f2fs_xattr_fiemap()
2069 static int f2fs_read_single_page(struct inode *inode, struct folio *folio, in f2fs_read_single_page() argument
2082 pgoff_t index = folio->index; in f2fs_read_single_page()
2116 folio_set_mappedtodisk(folio); in f2fs_read_single_page()
2125 folio_zero_segment(folio, 0, folio_size(folio)); in f2fs_read_single_page()
2127 !fsverity_verify_folio(folio)) { in f2fs_read_single_page()
2131 if (!folio_test_uptodate(folio)) in f2fs_read_single_page()
2132 folio_mark_uptodate(folio); in f2fs_read_single_page()
2133 folio_unlock(folio); in f2fs_read_single_page()
2165 if (!bio_add_folio(bio, folio, blocksize, 0)) in f2fs_read_single_page()
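
In f2fs_read_single_page() (lines 2069-2165), an unmapped hole never reaches the device: the folio is zero-filled, optionally checked by fsverity, marked uptodate, and unlocked; only mapped blocks are added to the bio. The hole branch suggested by lines 2125-2133 (the fsverity_active() gate is an assumption):

/* Hole: synthesize zeroes instead of reading. */
folio_zero_segment(folio, 0, folio_size(folio));
if (fsverity_active(inode) &&		/* assumed gate */
    !fsverity_verify_folio(folio)) {
	ret = -EIO;
	goto out;
}
if (!folio_test_uptodate(folio))
	folio_mark_uptodate(folio);
folio_unlock(folio);
goto out;
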
2209 struct folio *folio; in f2fs_read_multi_pages() local
2214 folio = page_folio(page); in f2fs_read_multi_pages()
2215 if ((sector_t)folio->index >= last_block_in_file) { in f2fs_read_multi_pages()
2216 folio_zero_segment(folio, 0, folio_size(folio)); in f2fs_read_multi_pages()
2217 if (!folio_test_uptodate(folio)) in f2fs_read_multi_pages()
2218 folio_mark_uptodate(folio); in f2fs_read_multi_pages()
2219 } else if (!folio_test_uptodate(folio)) { in f2fs_read_multi_pages()
2222 folio_unlock(folio); in f2fs_read_multi_pages()
2224 folio_put(folio); in f2fs_read_multi_pages()
2280 struct folio *folio = page_folio(dic->cpages[i]); in f2fs_read_multi_pages() local
2290 if (f2fs_load_compressed_folio(sbi, folio, blkaddr)) { in f2fs_read_multi_pages()
2300 !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) { in f2fs_read_multi_pages()
2309 folio->index, for_write); in f2fs_read_multi_pages()
2319 if (!bio_add_folio(bio, folio, blocksize, 0)) in f2fs_read_multi_pages()
2357 struct readahead_control *rac, struct folio *folio) in f2fs_mpage_readpages() argument
2382 index = rac ? readahead_index(rac) : folio->index; in f2fs_mpage_readpages()
2399 folio = readahead_folio(rac); in f2fs_mpage_readpages()
2400 prefetchw(&folio->flags); in f2fs_mpage_readpages()
2404 index = folio->index; in f2fs_mpage_readpages()
2438 f2fs_compress_ctx_add_page(&cc, folio); in f2fs_mpage_readpages()
2444 ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map, in f2fs_mpage_readpages()
2450 folio_zero_segment(folio, 0, folio_size(folio)); in f2fs_mpage_readpages()
2451 folio_unlock(folio); in f2fs_mpage_readpages()
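
f2fs_mpage_readpages() (lines 2357-2451) backs both ->read_folio and ->readahead: with a readahead_control it consumes folios via readahead_folio(), otherwise it works on the single folio passed in. A sketch of the iteration skeleton, assuming readahead_folio()'s take-and-drop-reference model; the trailing arguments of the per-folio call are assumed from context:

for (; nr_pages; nr_pages--) {
	if (rac) {
		folio = readahead_folio(rac);	/* consumes the batch ref */
		prefetchw(&folio->flags);
	}

	index = folio->index;
	ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map,
				    &bio, &last_block_in_bio, rac);
	if (ret) {
		folio_zero_segment(folio, 0, folio_size(folio));
		folio_unlock(folio);
	}
}
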
2475 static int f2fs_read_data_folio(struct file *file, struct folio *folio) in f2fs_read_data_folio() argument
2477 struct inode *inode = folio->mapping->host; in f2fs_read_data_folio()
2480 trace_f2fs_readpage(folio, DATA); in f2fs_read_data_folio()
2483 folio_unlock(folio); in f2fs_read_data_folio()
2489 ret = f2fs_read_inline_data(inode, folio); in f2fs_read_data_folio()
2491 ret = f2fs_mpage_readpages(inode, NULL, folio); in f2fs_read_data_folio()
2514 struct folio *mfolio; in f2fs_encrypt_one_page()
2654 struct folio *folio = fio->folio; in f2fs_do_write_data_page() local
2655 struct inode *inode = folio->mapping->host; in f2fs_do_write_data_page()
2664 folio_test_f2fs_atomic(folio); in f2fs_do_write_data_page()
2671 f2fs_lookup_read_extent_cache_block(inode, folio->index, in f2fs_do_write_data_page()
2686 err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE); in f2fs_do_write_data_page()
2694 folio_clear_uptodate(folio); in f2fs_do_write_data_page()
2695 folio_clear_f2fs_gcing(folio); in f2fs_do_write_data_page()
2721 folio_start_writeback(folio); in f2fs_do_write_data_page()
2729 folio_end_writeback(folio); in f2fs_do_write_data_page()
2733 trace_f2fs_do_write_data_page(folio, IPU); in f2fs_do_write_data_page()
2755 folio_start_writeback(folio); in f2fs_do_write_data_page()
2762 trace_f2fs_do_write_data_page(folio, OPU); in f2fs_do_write_data_page()
2765 folio_clear_f2fs_atomic(folio); in f2fs_do_write_data_page()
2774 int f2fs_write_single_data_page(struct folio *folio, int *submitted, in f2fs_write_single_data_page() argument
2782 struct inode *inode = folio->mapping->host; in f2fs_write_single_data_page()
2787 loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT; in f2fs_write_single_data_page()
2799 .folio = folio, in f2fs_write_single_data_page()
2811 trace_f2fs_writepage(folio, DATA); in f2fs_write_single_data_page()
2815 mapping_set_error(folio->mapping, -EIO); in f2fs_write_single_data_page()
2833 if (folio->index < end_index || in f2fs_write_single_data_page()
2843 if ((folio->index >= end_index + 1) || !offset) in f2fs_write_single_data_page()
2846 folio_zero_segment(folio, offset, folio_size(folio)); in f2fs_write_single_data_page()
2870 err = f2fs_write_inline_data(inode, folio); in f2fs_write_single_data_page()
2900 folio_clear_uptodate(folio); in f2fs_write_single_data_page()
2901 folio_clear_f2fs_gcing(folio); in f2fs_write_single_data_page()
2903 folio_unlock(folio); in f2fs_write_single_data_page()
2921 folio_redirty_for_writepage(wbc, folio); in f2fs_write_single_data_page()
2928 folio_unlock(folio); in f2fs_write_single_data_page()
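
f2fs_write_single_data_page() (lines 2774-2928) trims the folio that straddles i_size before writeback so stale bytes past EOF never hit disk, and skips folios wholly beyond EOF. The trimming logic implied by lines 2833-2846:

loff_t i_size = i_size_read(inode);
pgoff_t end_index = i_size >> PAGE_SHIFT;
unsigned int offset = i_size & (PAGE_SIZE - 1);

if (folio->index < end_index)
	goto write;			/* fully inside i_size */

if (folio->index >= end_index + 1 || !offset)
	goto out;			/* wholly past EOF: skip */

/* Straddles EOF: zero the tail beyond i_size, then write. */
folio_zero_segment(folio, offset, folio_size(folio));
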
3027 struct folio *folio = fbatch.folios[i]; in f2fs_write_cache_pages() local
3030 p = folio_nr_pages(folio); in f2fs_write_cache_pages()
3032 pages[nr_pages] = folio_page(folio, idx); in f2fs_write_cache_pages()
3033 folio_get(folio); in f2fs_write_cache_pages()
3035 index = folio->index + idx + 1; in f2fs_write_cache_pages()
3047 struct folio *folio = page_folio(page); in f2fs_write_cache_pages() local
3064 folio->index)) { in f2fs_write_cache_pages()
3084 folio->index, &fsdata); in f2fs_write_cache_pages()
3091 fsdata, folio->index, 1) || in f2fs_write_cache_pages()
3109 done_index = folio->index; in f2fs_write_cache_pages()
3111 folio_lock(folio); in f2fs_write_cache_pages()
3113 if (unlikely(folio->mapping != mapping)) { in f2fs_write_cache_pages()
3115 folio_unlock(folio); in f2fs_write_cache_pages()
3119 if (!folio_test_dirty(folio)) { in f2fs_write_cache_pages()
3124 if (folio_test_writeback(folio)) { in f2fs_write_cache_pages()
3127 f2fs_folio_wait_writeback(folio, DATA, true, true); in f2fs_write_cache_pages()
3130 if (!folio_clear_dirty_for_io(folio)) in f2fs_write_cache_pages()
3135 folio_get(folio); in f2fs_write_cache_pages()
3136 f2fs_compress_ctx_add_page(&cc, folio); in f2fs_write_cache_pages()
3141 ret = f2fs_write_single_data_page(folio, in f2fs_write_cache_pages()
3167 done_index = folio_next_index(folio); in f2fs_write_cache_pages()
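
f2fs_write_cache_pages() (lines 3027-3167) gathers dirty folios in a folio_batch and then runs the standard writeback dance per folio: lock, recheck the mapping, skip if no longer dirty, wait out writeback in flight, and only proceed when folio_clear_dirty_for_io() actually claims the folio. The per-folio core, matching lines 3111-3136:

folio_lock(folio);
if (unlikely(folio->mapping != mapping)) {
	folio_unlock(folio);		/* truncated or migrated away */
	continue;
}
if (!folio_test_dirty(folio)) {
	folio_unlock(folio);		/* already written by someone else */
	continue;
}
if (folio_test_writeback(folio))
	f2fs_folio_wait_writeback(folio, DATA, true, true);

if (!folio_clear_dirty_for_io(folio)) {
	folio_unlock(folio);
	continue;
}
/* ...then hand the folio to f2fs_write_single_data_page(). */
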
3343 struct folio *folio, loff_t pos, unsigned int len, in prepare_write_begin() argument
3346 struct inode *inode = folio->mapping->host; in prepare_write_begin()
3347 pgoff_t index = folio->index; in prepare_write_begin()
3349 struct folio *ifolio; in prepare_write_begin()
3384 f2fs_do_read_inline_data(folio, ifolio); in prepare_write_begin()
3390 err = f2fs_convert_inline_folio(&dn, folio); in prepare_write_begin()
3434 struct folio *ifolio; in __find_data_block()
3462 struct folio *ifolio; in __reserve_data_block()
3488 struct folio *folio, loff_t pos, unsigned int len, in prepare_atomic_write_begin() argument
3491 struct inode *inode = folio->mapping->host; in prepare_atomic_write_begin()
3493 pgoff_t index = folio->index; in prepare_atomic_write_begin()
3532 loff_t pos, unsigned len, struct folio **foliop, in f2fs_write_begin()
3537 struct folio *folio; in f2fs_write_begin() local
3589 folio = __filemap_get_folio(mapping, index, in f2fs_write_begin()
3591 if (IS_ERR(folio)) { in f2fs_write_begin()
3592 err = PTR_ERR(folio); in f2fs_write_begin()
3598 *foliop = folio; in f2fs_write_begin()
3601 err = prepare_atomic_write_begin(sbi, folio, pos, len, in f2fs_write_begin()
3604 err = prepare_write_begin(sbi, folio, pos, len, in f2fs_write_begin()
3611 folio_unlock(folio); in f2fs_write_begin()
3613 folio_lock(folio); in f2fs_write_begin()
3614 if (folio->mapping != mapping) { in f2fs_write_begin()
3616 folio_unlock(folio); in f2fs_write_begin()
3617 folio_put(folio); in f2fs_write_begin()
3622 f2fs_folio_wait_writeback(folio, DATA, false, true); in f2fs_write_begin()
3624 if (len == folio_size(folio) || folio_test_uptodate(folio)) in f2fs_write_begin()
3629 folio_zero_segment(folio, len, folio_size(folio)); in f2fs_write_begin()
3634 folio_zero_segment(folio, 0, folio_size(folio)); in f2fs_write_begin()
3635 folio_mark_uptodate(folio); in f2fs_write_begin()
3644 folio, blkaddr, 0, true); in f2fs_write_begin()
3648 folio_lock(folio); in f2fs_write_begin()
3649 if (unlikely(folio->mapping != mapping)) { in f2fs_write_begin()
3650 folio_unlock(folio); in f2fs_write_begin()
3651 folio_put(folio); in f2fs_write_begin()
3654 if (unlikely(!folio_test_uptodate(folio))) { in f2fs_write_begin()
3662 folio_unlock(folio); in f2fs_write_begin()
3663 folio_put(folio); in f2fs_write_begin()
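
f2fs_write_begin() (lines 3532-3662) grabs the target folio, prepares the block mapping, and for a partial write of a non-uptodate folio reads the old block in before the copy, relocking and revalidating afterwards (lines 3648-3651), the same truncation guard as in f2fs_get_lock_data_folio(). The read-before-write tail, condensed; the NEW_ADDR test for a freshly allocated block is an assumption:

if (len == folio_size(folio) || folio_test_uptodate(folio))
	return 0;		/* full overwrite or already valid */

if (blkaddr == NEW_ADDR) {	/* assumed: new block, nothing on disk */
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return 0;
}

err = f2fs_submit_page_read(inode, folio, blkaddr, 0, true);
if (err)
	goto fail;

folio_lock(folio);		/* read completion unlocked the folio */
if (unlikely(folio->mapping != mapping)) {
	folio_unlock(folio);
	folio_put(folio);
	goto repeat;		/* raced with truncate */
}
if (unlikely(!folio_test_uptodate(folio))) {
	err = -EIO;
	goto fail;
}
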
3672 struct folio *folio, void *fsdata) in f2fs_write_end() argument
3674 struct inode *inode = folio->mapping->host; in f2fs_write_end()
3683 if (!folio_test_uptodate(folio)) { in f2fs_write_end()
3687 folio_mark_uptodate(folio); in f2fs_write_end()
3693 f2fs_compress_write_end(inode, fsdata, folio->index, copied); in f2fs_write_end()
3706 folio_mark_dirty(folio); in f2fs_write_end()
3709 folio_set_f2fs_atomic(folio); in f2fs_write_end()
3719 folio_unlock(folio); in f2fs_write_end()
3720 folio_put(folio); in f2fs_write_end()
3725 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length) in f2fs_invalidate_folio() argument
3727 struct inode *inode = folio->mapping->host; in f2fs_invalidate_folio()
3731 (offset || length != folio_size(folio))) in f2fs_invalidate_folio()
3734 if (folio_test_dirty(folio)) { in f2fs_invalidate_folio()
3744 folio_detach_private(folio); in f2fs_invalidate_folio()
3747 bool f2fs_release_folio(struct folio *folio, gfp_t wait) in f2fs_release_folio() argument
3750 if (folio_test_dirty(folio)) in f2fs_release_folio()
3753 folio_detach_private(folio); in f2fs_release_folio()
3758 struct folio *folio) in f2fs_dirty_data_folio() argument
3762 trace_f2fs_set_page_dirty(folio, DATA); in f2fs_dirty_data_folio()
3764 if (!folio_test_uptodate(folio)) in f2fs_dirty_data_folio()
3765 folio_mark_uptodate(folio); in f2fs_dirty_data_folio()
3766 BUG_ON(folio_test_swapcache(folio)); in f2fs_dirty_data_folio()
3768 if (filemap_dirty_folio(mapping, folio)) { in f2fs_dirty_data_folio()
3769 f2fs_update_dirty_folio(inode, folio); in f2fs_dirty_data_folio()
3877 struct folio *folio; in f2fs_migrate_blocks() local
3880 folio = f2fs_get_lock_data_folio(inode, blkidx, true); in f2fs_migrate_blocks()
3881 if (IS_ERR(folio)) { in f2fs_migrate_blocks()
3883 ret = PTR_ERR(folio); in f2fs_migrate_blocks()
3887 folio_mark_dirty(folio); in f2fs_migrate_blocks()
3888 f2fs_folio_put(folio, true); in f2fs_migrate_blocks()
4099 void f2fs_clear_page_cache_dirty_tag(struct folio *folio) in f2fs_clear_page_cache_dirty_tag() argument
4101 struct address_space *mapping = folio->mapping; in f2fs_clear_page_cache_dirty_tag()
4105 __xa_clear_mark(&mapping->i_pages, folio->index, in f2fs_clear_page_cache_dirty_tag()
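
f2fs_clear_page_cache_dirty_tag() (lines 4099-4105) clears only the xarray dirty tag, not the folio's own dirty flag, so tag-based writeback lookups stop returning the folio. Its body is essentially the following (the locking shown is the usual i_pages protocol and is assumed here):

void f2fs_clear_page_cache_dirty_tag(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, folio->index,
			PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
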