Lines matching refs:folio — each hit shows the source line number in fs/f2fs/node.c, the matching code, and the enclosing function; "argument" and "local" flag lines where folio is declared.

123 static void clear_node_folio_dirty(struct folio *folio)  in clear_node_folio_dirty()  argument
125 if (folio_test_dirty(folio)) { in clear_node_folio_dirty()
126 f2fs_clear_page_cache_dirty_tag(folio); in clear_node_folio_dirty()
127 folio_clear_dirty_for_io(folio); in clear_node_folio_dirty()
128 dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES); in clear_node_folio_dirty()
130 folio_clear_uptodate(folio); in clear_node_folio_dirty()
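
Taken together, the hits at 123-130 cover the whole helper; reconstructed as a sketch (only the braces and if-scope are inferred from the indentation, the statements are verbatim from the hits):

static void clear_node_folio_dirty(struct folio *folio)
{
	if (folio_test_dirty(folio)) {
		/* drop the page-cache dirty tag before clearing the flag */
		f2fs_clear_page_cache_dirty_tag(folio);
		folio_clear_dirty_for_io(folio);
		/* keep the per-sb dirty-node counter balanced */
		dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES);
	}
	folio_clear_uptodate(folio);
}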
133 static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_folio()
138 static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid) in get_next_nat_folio()
140 struct folio *src_folio; in get_next_nat_folio()
141 struct folio *dst_folio; in get_next_nat_folio()
323 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio) in f2fs_in_warm_node_list() argument
325 return is_node_folio(folio) && IS_DNODE(folio) && is_cold_node(folio); in f2fs_in_warm_node_list()
337 struct folio *folio) in f2fs_add_fsync_node_entry() argument
346 folio_get(folio); in f2fs_add_fsync_node_entry()
347 fn->folio = folio; in f2fs_add_fsync_node_entry()
360 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio) in f2fs_del_fsync_node_entry() argument
367 if (fn->folio == folio) { in f2fs_del_fsync_node_entry()
372 folio_put(folio); in f2fs_del_fsync_node_entry()
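
The add/del hits at 337-372 show the reference-counting discipline for the fsync-node list: an entry pins its folio with folio_get() when queued, and only the matching del releases that pin. A sketch of the pairing (the spinlock, seq-id handling, and entry allocation in the real functions are elided; the list name is an assumption):

	/* add: the list entry owns one reference to the folio */
	folio_get(folio);
	fn->folio = folio;

	/* del: walk the list; only the matching entry drops the pin */
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {	/* list name assumed */
		if (fn->folio == folio) {
			list_del(&fn->list);
			folio_put(folio);
			break;
		}
	}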
565 struct folio *folio = NULL; in f2fs_get_node_info() local
619 folio = f2fs_get_meta_folio(sbi, index); in f2fs_get_node_info()
620 if (IS_ERR(folio)) in f2fs_get_node_info()
621 return PTR_ERR(folio); in f2fs_get_node_info()
623 nat_blk = folio_address(folio); in f2fs_get_node_info()
626 f2fs_folio_put(folio, true); in f2fs_get_node_info()
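
The hits at 619-626 are the slow path of f2fs_get_node_info, reading a raw entry straight from the on-disk NAT block in the meta area. A condensed sketch (index, start_nid and ne come from the enclosing function; the entries[] indexing follows f2fs's published f2fs_nat_block layout):

	struct f2fs_nat_block *nat_blk;

	folio = f2fs_get_meta_folio(sbi, index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	nat_blk = folio_address(folio);
	ne = nat_blk->entries[nid - start_nid];	/* copy out the raw entry */
	/* second argument true: unlock the folio as well as drop the ref */
	f2fs_folio_put(folio, true);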
650 static void f2fs_ra_node_pages(struct folio *parent, int start, int n) in f2fs_ra_node_pages()
781 static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start);
791 struct folio *nfolio[4]; in f2fs_get_dnode_of_data()
792 struct folio *parent = NULL; in f2fs_get_dnode_of_data()
985 struct folio *folio; in truncate_dnode() local
992 folio = f2fs_get_node_folio(sbi, dn->nid); in truncate_dnode()
993 if (PTR_ERR(folio) == -ENOENT) in truncate_dnode()
995 else if (IS_ERR(folio)) in truncate_dnode()
996 return PTR_ERR(folio); in truncate_dnode()
998 if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) { in truncate_dnode()
1000 dn->inode->i_ino, dn->nid, ino_of_node(folio)); in truncate_dnode()
1003 f2fs_folio_put(folio, true); in truncate_dnode()
1008 dn->node_folio = folio; in truncate_dnode()
1013 f2fs_folio_put(folio, true); in truncate_dnode()
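
truncate_dnode (hits at 985-1013) treats a missing node as already truncated and a node with the wrong owner as corruption. A sketch of that error handling (the return values and the SBI_NEED_FSCK flagging are inferred from common f2fs practice, not shown in the listing):

	folio = f2fs_get_node_folio(sbi, dn->nid);
	if (PTR_ERR(folio) == -ENOENT)
		return 1;			/* assumed: nothing left to truncate */
	else if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
			 dn->inode->i_ino, dn->nid, ino_of_node(folio));
		set_sbi_flag(sbi, SBI_NEED_FSCK);	/* assumed */
		f2fs_folio_put(folio, true);
		return -EFSCORRUPTED;			/* assumed */
	}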
1024 struct folio *folio; in truncate_nodes() local
1036 folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid); in truncate_nodes()
1037 if (IS_ERR(folio)) { in truncate_nodes()
1038 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(folio)); in truncate_nodes()
1039 return PTR_ERR(folio); in truncate_nodes()
1042 f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK); in truncate_nodes()
1044 rn = F2FS_NODE(folio); in truncate_nodes()
1054 if (set_nid(folio, i, 0, false)) in truncate_nodes()
1068 if (set_nid(folio, i, 0, false)) in truncate_nodes()
1080 dn->node_folio = folio; in truncate_nodes()
1086 f2fs_folio_put(folio, true); in truncate_nodes()
1092 f2fs_folio_put(folio, true); in truncate_nodes()
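
truncate_nodes (hits at 1024-1092) recurses over an indirect block's children after issuing readahead for them, clearing each freed slot with set_nid(). A sketch of the loop (the child recursion and error unwinding are elided; the node_changed bookkeeping is an assumption):

	f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(folio);
	for (i = ofs; i < NIDS_PER_BLOCK; i++) {
		nid_t child_nid = le32_to_cpu(rn->in.nid[i]);

		if (child_nid == 0)
			continue;
		/* ... recurse into the child dnode here ... */
		if (set_nid(folio, i, 0, false))	/* false: not an inode slot */
			dn->node_changed = true;	/* assumed */
	}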
1100 struct folio *folios[2]; in truncate_partial_nodes()
1170 struct folio *folio; in f2fs_truncate_inode_blocks() local
1187 folio = f2fs_get_inode_folio(sbi, inode->i_ino); in f2fs_truncate_inode_blocks()
1188 if (IS_ERR(folio)) { in f2fs_truncate_inode_blocks()
1189 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio)); in f2fs_truncate_inode_blocks()
1190 return PTR_ERR(folio); in f2fs_truncate_inode_blocks()
1193 set_new_dnode(&dn, inode, folio, NULL, 0); in f2fs_truncate_inode_blocks()
1194 folio_unlock(folio); in f2fs_truncate_inode_blocks()
1196 ri = F2FS_INODE(folio); in f2fs_truncate_inode_blocks()
1225 dn.nid = get_nid(folio, offset[0], true); in f2fs_truncate_inode_blocks()
1246 set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK); in f2fs_truncate_inode_blocks()
1257 if (offset[1] == 0 && get_nid(folio, offset[0], true)) { in f2fs_truncate_inode_blocks()
1258 folio_lock(folio); in f2fs_truncate_inode_blocks()
1259 BUG_ON(!is_node_folio(folio)); in f2fs_truncate_inode_blocks()
1260 set_nid(folio, offset[0], 0, true); in f2fs_truncate_inode_blocks()
1261 folio_unlock(folio); in f2fs_truncate_inode_blocks()
1268 f2fs_folio_put(folio, false); in f2fs_truncate_inode_blocks()
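
In f2fs_truncate_inode_blocks (hits at 1170-1268) the inode folio is unlocked at 1194 and stays unlocked across the long truncation loop; it is re-locked only to clear a fully-truncated subtree's nid slot, with its identity re-asserted under the lock. The window at 1257-1261, annotated:

	if (offset[1] == 0 && get_nid(folio, offset[0], true)) {
		folio_lock(folio);
		/* the folio sat unlocked since 1194; it must still be a
		 * node-mapping folio, anything else is a bug */
		BUG_ON(!is_node_folio(folio));
		set_nid(folio, offset[0], 0, true);	/* true: inode nid slot */
		folio_unlock(folio);
	}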
1279 struct folio *nfolio; in f2fs_truncate_xattr_node()
1349 struct folio *f2fs_new_inode_folio(struct inode *inode) in f2fs_new_inode_folio()
1360 struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs) in f2fs_new_node_folio()
1364 struct folio *folio; in f2fs_new_node_folio() local
1370 folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false); in f2fs_new_node_folio()
1371 if (IS_ERR(folio)) in f2fs_new_node_folio()
1372 return folio; in f2fs_new_node_folio()
1403 f2fs_folio_wait_writeback(folio, NODE, true, true); in f2fs_new_node_folio()
1404 fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true); in f2fs_new_node_folio()
1405 set_cold_node(folio, S_ISDIR(dn->inode->i_mode)); in f2fs_new_node_folio()
1406 if (!folio_test_uptodate(folio)) in f2fs_new_node_folio()
1407 folio_mark_uptodate(folio); in f2fs_new_node_folio()
1408 if (folio_mark_dirty(folio)) in f2fs_new_node_folio()
1416 return folio; in f2fs_new_node_folio()
1418 clear_node_folio_dirty(folio); in f2fs_new_node_folio()
1419 f2fs_folio_put(folio, true); in f2fs_new_node_folio()
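
f2fs_new_node_folio (hits at 1360-1419) initializes a freshly grabbed node folio in a fixed order: wait out any writeback, stamp the footer, set hot/cold, then mark uptodate and dirty. A sketch of that tail plus the error path (the node_changed bookkeeping and the fail label are assumptions):

	f2fs_folio_wait_writeback(folio, NODE, true, true);
	fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(folio, S_ISDIR(dn->inode->i_mode));
	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	if (folio_mark_dirty(folio))
		dn->node_changed = true;	/* assumed */
	/* ... */
	return folio;

fail:						/* label and err assumed */
	clear_node_folio_dirty(folio);
	f2fs_folio_put(folio, true);
	return ERR_PTR(err);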
1428 static int read_node_folio(struct folio *folio, blk_opf_t op_flags) in read_node_folio() argument
1430 struct f2fs_sb_info *sbi = F2FS_F_SB(folio); in read_node_folio()
1437 .folio = folio, in read_node_folio()
1442 if (folio_test_uptodate(folio)) { in read_node_folio()
1443 if (!f2fs_inode_chksum_verify(sbi, folio)) { in read_node_folio()
1444 folio_clear_uptodate(folio); in read_node_folio()
1450 err = f2fs_get_node_info(sbi, folio->index, &ni, false); in read_node_folio()
1456 folio_clear_uptodate(folio); in read_node_folio()
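
read_node_folio (hits at 1428-1456) short-circuits when the folio is already uptodate, but still verifies the inode checksum before trusting it. A sketch (the f2fs_io_info fields besides .folio, the errno, and the LOCKED_PAGE return are filled in from f2fs convention, so treat them as assumptions):

	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.folio = folio,
	};
	struct node_info ni;
	int err;

	if (folio_test_uptodate(folio)) {
		if (!f2fs_inode_chksum_verify(sbi, folio)) {
			folio_clear_uptodate(folio);
			return -EFSBADCRC;	/* assumed errno */
		}
		return LOCKED_PAGE;		/* assumed cached-hit return */
	}

	err = f2fs_get_node_info(sbi, folio->index, &ni, false);
	if (err)
		return err;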
1475 struct folio *afolio; in f2fs_ra_node_page()
1496 struct folio *folio, pgoff_t nid, in sanity_check_node_footer() argument
1499 if (unlikely(nid != nid_of_node(folio) || in sanity_check_node_footer()
1500 (ntype == NODE_TYPE_INODE && !IS_INODE(folio)) || in sanity_check_node_footer()
1502 !f2fs_has_xattr_block(ofs_of_node(folio))) || in sanity_check_node_footer()
1506 ntype, nid, nid_of_node(folio), ino_of_node(folio), in sanity_check_node_footer()
1507 ofs_of_node(folio), cpver_of_node(folio), in sanity_check_node_footer()
1508 next_blkaddr_of_node(folio)); in sanity_check_node_footer()
1516 static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid, in __get_node_folio()
1517 struct folio *parent, int start, enum node_type ntype) in __get_node_folio()
1519 struct folio *folio; in __get_node_folio() local
1527 folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false); in __get_node_folio()
1528 if (IS_ERR(folio)) in __get_node_folio()
1529 return folio; in __get_node_folio()
1531 err = read_node_folio(folio, 0); in __get_node_folio()
1540 folio_lock(folio); in __get_node_folio()
1542 if (unlikely(!is_node_folio(folio))) { in __get_node_folio()
1543 f2fs_folio_put(folio, true); in __get_node_folio()
1547 if (unlikely(!folio_test_uptodate(folio))) { in __get_node_folio()
1552 if (!f2fs_inode_chksum_verify(sbi, folio)) { in __get_node_folio()
1557 err = sanity_check_node_footer(sbi, folio, nid, ntype); in __get_node_folio()
1559 return folio; in __get_node_folio()
1561 folio_clear_uptodate(folio); in __get_node_folio()
1565 f2fs_handle_page_eio(sbi, folio, NODE); in __get_node_folio()
1566 f2fs_folio_put(folio, true); in __get_node_folio()
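
__get_node_folio (hits at 1516-1566) is the common lookup behind the three wrappers that follow it: grab the cache folio, read it, then re-validate everything under the folio lock, retrying if the folio was truncated out of the node mapping in the meantime. A skeleton (label names, the retry target, and the error split are inferred):

repeat:						/* label name assumed */
	folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
	if (IS_ERR(folio))
		return folio;

	err = read_node_folio(folio, 0);
	/* ... wait for the read to complete if one was issued ... */
	folio_lock(folio);

	if (unlikely(!is_node_folio(folio))) {
		/* truncated while unlocked: restart the lookup */
		f2fs_folio_put(folio, true);
		goto repeat;
	}
	if (unlikely(!folio_test_uptodate(folio)))
		goto out_err;			/* label assumed */
	if (!f2fs_inode_chksum_verify(sbi, folio))
		goto out_err;

	err = sanity_check_node_footer(sbi, folio, nid, ntype);
	if (!err)
		return folio;
out_err:
	folio_clear_uptodate(folio);
	f2fs_handle_page_eio(sbi, folio, NODE);
	f2fs_folio_put(folio, true);
	return ERR_PTR(err);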
1570 struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid) in f2fs_get_node_folio()
1575 struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino) in f2fs_get_inode_folio()
1580 struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid) in f2fs_get_xnode_folio()
1585 static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start) in f2fs_get_node_folio_ra()
1596 struct folio *folio; in flush_inline_data() local
1604 folio = f2fs_filemap_get_folio(inode->i_mapping, 0, in flush_inline_data()
1606 if (IS_ERR(folio)) in flush_inline_data()
1609 if (!folio_test_uptodate(folio)) in flush_inline_data()
1612 if (!folio_test_dirty(folio)) in flush_inline_data()
1615 if (!folio_clear_dirty_for_io(folio)) in flush_inline_data()
1618 ret = f2fs_write_inline_data(inode, folio); in flush_inline_data()
1622 folio_mark_dirty(folio); in flush_inline_data()
1624 f2fs_folio_put(folio, true); in flush_inline_data()
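
flush_inline_data (hits at 1596-1624) uses a non-blocking page-cache lookup and bails at the first sign the inline-data folio is not flushable; on a failed write it restores the dirty bit. A sketch (the FGP flags and the labels are assumptions):

	folio = f2fs_filemap_get_folio(inode->i_mapping, 0,
					FGP_LOCK | FGP_NOWAIT, 0);	/* flags assumed */
	if (IS_ERR(folio))
		goto iput_out;					/* label assumed */

	if (!folio_test_uptodate(folio))
		goto folio_out;
	if (!folio_test_dirty(folio))
		goto folio_out;
	if (!folio_clear_dirty_for_io(folio))
		goto folio_out;

	ret = f2fs_write_inline_data(inode, folio);
	if (ret)
		folio_mark_dirty(folio);	/* write failed: keep it dirty */
folio_out:
	f2fs_folio_put(folio, true);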
1629 static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) in last_fsync_dnode()
1633 struct folio *last_folio = NULL; in last_fsync_dnode()
1645 struct folio *folio = fbatch.folios[i]; in last_fsync_dnode() local
1653 if (!IS_DNODE(folio) || !is_cold_node(folio)) in last_fsync_dnode()
1655 if (ino_of_node(folio) != ino) in last_fsync_dnode()
1658 folio_lock(folio); in last_fsync_dnode()
1660 if (unlikely(!is_node_folio(folio))) { in last_fsync_dnode()
1662 folio_unlock(folio); in last_fsync_dnode()
1665 if (ino_of_node(folio) != ino) in last_fsync_dnode()
1668 if (!folio_test_dirty(folio)) { in last_fsync_dnode()
1676 folio_get(folio); in last_fsync_dnode()
1677 last_folio = folio; in last_fsync_dnode()
1678 folio_unlock(folio); in last_fsync_dnode()
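
last_fsync_dnode (hits at 1629-1678) batch-scans the dirty node folios, filtering cheaply before taking the folio lock and repeating the ownership test under it; the winning candidate is pinned with folio_get() before being remembered. A sketch of the per-folio loop body (the batch iteration and previous-winner release are elided; the label is assumed):

	struct folio *folio = fbatch.folios[i];

	/* cheap, lockless filters first */
	if (!IS_DNODE(folio) || !is_cold_node(folio))
		continue;
	if (ino_of_node(folio) != ino)
		continue;

	folio_lock(folio);
	if (unlikely(!is_node_folio(folio))) {	/* truncated meanwhile */
		folio_unlock(folio);
		continue;
	}
	if (ino_of_node(folio) != ino)		/* re-check under lock */
		goto continue_unlock;		/* label assumed */
	if (!folio_test_dirty(folio))
		goto continue_unlock;

	folio_get(folio);			/* pin the candidate */
	last_folio = folio;
	folio_unlock(folio);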
1686 static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted, in __write_node_folio() argument
1690 struct f2fs_sb_info *sbi = F2FS_F_SB(folio); in __write_node_folio()
1695 .ino = ino_of_node(folio), in __write_node_folio()
1699 .folio = folio, in __write_node_folio()
1707 trace_f2fs_writepage(folio, NODE); in __write_node_folio()
1713 folio_clear_uptodate(folio); in __write_node_folio()
1715 folio_unlock(folio); in __write_node_folio()
1724 IS_DNODE(folio) && is_cold_node(folio)) in __write_node_folio()
1728 nid = nid_of_node(folio); in __write_node_folio()
1729 f2fs_bug_on(sbi, folio->index != nid); in __write_node_folio()
1738 folio_clear_uptodate(folio); in __write_node_folio()
1741 folio_unlock(folio); in __write_node_folio()
1756 if (f2fs_in_warm_node_list(sbi, folio)) { in __write_node_folio()
1757 seq = f2fs_add_fsync_node_entry(sbi, folio); in __write_node_folio()
1762 folio_start_writeback(folio); in __write_node_folio()
1766 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio)); in __write_node_folio()
1770 folio_unlock(folio); in __write_node_folio()
1784 folio_redirty_for_writepage(wbc, folio); in __write_node_folio()
1785 folio_unlock(folio); in __write_node_folio()
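
__write_node_folio (hits at 1686-1785) orders the fsync bookkeeping carefully: a warm dnode is added to the fsync-node list before writeback starts, so a waiter can always find it, and failure paths end in redirty + unlock. A sketch of the success path's core and the redirty tail (the seq-id plumbing and label are assumptions):

	if (f2fs_in_warm_node_list(sbi, folio)) {
		seq = f2fs_add_fsync_node_entry(sbi, folio);
		if (seq_id)
			*seq_id = seq;		/* assumed out-parameter */
	}

	folio_start_writeback(folio);
	/* ... submit the write described by fio ... */
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio));
	folio_unlock(folio);

redirty_out:					/* label assumed */
	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);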
1789 int f2fs_move_node_folio(struct folio *node_folio, int gc_type) in f2fs_move_node_folio()
1831 struct folio *last_folio = NULL; in f2fs_fsync_node_pages()
1852 struct folio *folio = fbatch.folios[i]; in f2fs_fsync_node_pages() local
1862 if (!IS_DNODE(folio) || !is_cold_node(folio)) in f2fs_fsync_node_pages()
1864 if (ino_of_node(folio) != ino) in f2fs_fsync_node_pages()
1867 folio_lock(folio); in f2fs_fsync_node_pages()
1869 if (unlikely(!is_node_folio(folio))) { in f2fs_fsync_node_pages()
1871 folio_unlock(folio); in f2fs_fsync_node_pages()
1874 if (ino_of_node(folio) != ino) in f2fs_fsync_node_pages()
1877 if (!folio_test_dirty(folio) && folio != last_folio) { in f2fs_fsync_node_pages()
1882 f2fs_folio_wait_writeback(folio, NODE, true, true); in f2fs_fsync_node_pages()
1884 set_fsync_mark(folio, 0); in f2fs_fsync_node_pages()
1885 set_dentry_mark(folio, 0); in f2fs_fsync_node_pages()
1887 if (!atomic || folio == last_folio) { in f2fs_fsync_node_pages()
1888 set_fsync_mark(folio, 1); in f2fs_fsync_node_pages()
1890 if (IS_INODE(folio)) { in f2fs_fsync_node_pages()
1893 f2fs_update_inode(inode, folio); in f2fs_fsync_node_pages()
1894 set_dentry_mark(folio, in f2fs_fsync_node_pages()
1898 if (!folio_test_dirty(folio)) in f2fs_fsync_node_pages()
1899 folio_mark_dirty(folio); in f2fs_fsync_node_pages()
1902 if (!folio_clear_dirty_for_io(folio)) in f2fs_fsync_node_pages()
1905 if (!__write_node_folio(folio, atomic && in f2fs_fsync_node_pages()
1906 folio == last_folio, in f2fs_fsync_node_pages()
1917 if (folio == last_folio) { in f2fs_fsync_node_pages()
1918 f2fs_folio_put(folio, false); in f2fs_fsync_node_pages()
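
f2fs_fsync_node_pages (hits at 1831-1918) clears the fsync/dentry marks on every node it writes and sets them only on the final dnode of the chain (or on every node, when not atomic); a dirty inode is folded into its node folio on the way out. A sketch of the marking block (the FI_DIRTY_INODE test and f2fs_need_dentry_mark() are taken from f2fs convention, so treat them as assumptions):

	set_fsync_mark(folio, 0);
	set_dentry_mark(folio, 0);

	if (!atomic || folio == last_folio) {
		set_fsync_mark(folio, 1);
		if (IS_INODE(folio)) {
			if (is_inode_flag_set(inode, FI_DIRTY_INODE))	/* assumed */
				f2fs_update_inode(inode, folio);
			set_dentry_mark(folio,
				f2fs_need_dentry_mark(sbi, ino));	/* assumed */
		}
		/* the mark lives in the node block itself: force a rewrite */
		if (!folio_test_dirty(folio))
			folio_mark_dirty(folio);
	}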
1966 static bool flush_dirty_inode(struct folio *folio) in flush_dirty_inode() argument
1968 struct f2fs_sb_info *sbi = F2FS_F_SB(folio); in flush_dirty_inode()
1970 nid_t ino = ino_of_node(folio); in flush_dirty_inode()
1976 f2fs_update_inode(inode, folio); in flush_dirty_inode()
1977 folio_unlock(folio); in flush_dirty_inode()
1997 struct folio *folio = fbatch.folios[i]; in f2fs_flush_inline_data() local
1999 if (!IS_INODE(folio)) in f2fs_flush_inline_data()
2002 folio_lock(folio); in f2fs_flush_inline_data()
2004 if (unlikely(!is_node_folio(folio))) in f2fs_flush_inline_data()
2006 if (!folio_test_dirty(folio)) in f2fs_flush_inline_data()
2010 if (folio_test_f2fs_inline(folio)) { in f2fs_flush_inline_data()
2011 folio_clear_f2fs_inline(folio); in f2fs_flush_inline_data()
2012 folio_unlock(folio); in f2fs_flush_inline_data()
2013 flush_inline_data(sbi, ino_of_node(folio)); in f2fs_flush_inline_data()
2017 folio_unlock(folio); in f2fs_flush_inline_data()
2046 struct folio *folio = fbatch.folios[i]; in f2fs_sync_node_pages() local
2062 if (step == 0 && IS_DNODE(folio)) in f2fs_sync_node_pages()
2064 if (step == 1 && (!IS_DNODE(folio) || in f2fs_sync_node_pages()
2065 is_cold_node(folio))) in f2fs_sync_node_pages()
2067 if (step == 2 && (!IS_DNODE(folio) || in f2fs_sync_node_pages()
2068 !is_cold_node(folio))) in f2fs_sync_node_pages()
2072 folio_lock(folio); in f2fs_sync_node_pages()
2073 else if (!folio_trylock(folio)) in f2fs_sync_node_pages()
2076 if (unlikely(!is_node_folio(folio))) { in f2fs_sync_node_pages()
2078 folio_unlock(folio); in f2fs_sync_node_pages()
2082 if (!folio_test_dirty(folio)) { in f2fs_sync_node_pages()
2092 if (folio_test_f2fs_inline(folio)) { in f2fs_sync_node_pages()
2093 folio_clear_f2fs_inline(folio); in f2fs_sync_node_pages()
2094 folio_unlock(folio); in f2fs_sync_node_pages()
2095 flush_inline_data(sbi, ino_of_node(folio)); in f2fs_sync_node_pages()
2100 if (IS_INODE(folio) && flush_dirty_inode(folio)) in f2fs_sync_node_pages()
2103 f2fs_folio_wait_writeback(folio, NODE, true, true); in f2fs_sync_node_pages()
2105 if (!folio_clear_dirty_for_io(folio)) in f2fs_sync_node_pages()
2108 set_fsync_mark(folio, 0); in f2fs_sync_node_pages()
2109 set_dentry_mark(folio, 0); in f2fs_sync_node_pages()
2111 if (!__write_node_folio(folio, false, &submitted, in f2fs_sync_node_pages()
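
The step checks at 2062-2068 encode f2fs_sync_node_pages' three-pass flush order. Reading the predicates back: pass 0 writes only non-dnodes (indirect node blocks), pass 1 writes the non-cold dnodes, pass 2 the cold ones; given set_cold_node() at 1405 marks non-directory nodes cold, directory nodes flush before regular-file nodes. The filters, restated with comments:

	if (step == 0 && IS_DNODE(folio))
		continue;	/* pass 0: indirect node blocks only */
	if (step == 1 && (!IS_DNODE(folio) || is_cold_node(folio)))
		continue;	/* pass 1: hot dnodes (directories) */
	if (step == 2 && (!IS_DNODE(folio) || !is_cold_node(folio)))
		continue;	/* pass 2: cold dnodes (regular files) */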
2157 struct folio *folio; in f2fs_wait_on_node_pages_writeback() local
2170 folio = fn->folio; in f2fs_wait_on_node_pages_writeback()
2171 folio_get(folio); in f2fs_wait_on_node_pages_writeback()
2174 f2fs_folio_wait_writeback(folio, NODE, true, false); in f2fs_wait_on_node_pages_writeback()
2176 folio_put(folio); in f2fs_wait_on_node_pages_writeback()
2229 struct folio *folio) in f2fs_dirty_node_folio() argument
2231 trace_f2fs_set_page_dirty(folio, NODE); in f2fs_dirty_node_folio()
2233 if (!folio_test_uptodate(folio)) in f2fs_dirty_node_folio()
2234 folio_mark_uptodate(folio); in f2fs_dirty_node_folio()
2236 if (IS_INODE(folio)) in f2fs_dirty_node_folio()
2237 f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio); in f2fs_dirty_node_folio()
2239 if (filemap_dirty_folio(mapping, folio)) { in f2fs_dirty_node_folio()
2241 folio_set_f2fs_reference(folio); in f2fs_dirty_node_folio()
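
f2fs_dirty_node_folio (hits at 2229-2241) refreshes the inode checksum before the folio is redirtied, and only bumps the dirty-node count when filemap_dirty_folio() reports a real clean-to-dirty transition. A sketch of the tail (the inc_page_count call is an assumption, mirrored from the symmetric dec at line 128):

	if (IS_INODE(folio))
		f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);

	if (filemap_dirty_folio(mapping, folio)) {
		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);	/* assumed */
		folio_set_f2fs_reference(folio);
		return true;
	}
	return false;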
2555 struct folio *folio = get_current_nat_folio(sbi, nid); in __f2fs_build_free_nids() local
2557 if (IS_ERR(folio)) { in __f2fs_build_free_nids()
2558 ret = PTR_ERR(folio); in __f2fs_build_free_nids()
2560 ret = scan_nat_page(sbi, folio_address(folio), in __f2fs_build_free_nids()
2562 f2fs_folio_put(folio, true); in __f2fs_build_free_nids()
2738 int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio) in f2fs_recover_inline_xattr() argument
2742 struct folio *ifolio; in f2fs_recover_inline_xattr()
2749 ri = F2FS_INODE(folio); in f2fs_recover_inline_xattr()
2764 src_addr = inline_xattr_addr(inode, folio); in f2fs_recover_inline_xattr()
2775 int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio) in f2fs_recover_xattr_data() argument
2782 struct folio *xfolio; in f2fs_recover_xattr_data()
2813 if (folio) { in f2fs_recover_xattr_data()
2814 memcpy(F2FS_NODE(xfolio), F2FS_NODE(folio), in f2fs_recover_xattr_data()
2823 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio) in f2fs_recover_inode_page() argument
2826 nid_t ino = ino_of_node(folio); in f2fs_recover_inode_page()
2828 struct folio *ifolio; in f2fs_recover_inode_page()
2852 src = F2FS_INODE(folio); in f2fs_recover_inode_page()
2914 struct folio *folio = f2fs_get_tmp_folio(sbi, idx); in f2fs_restore_node_summary() local
2916 if (IS_ERR(folio)) in f2fs_restore_node_summary()
2917 return PTR_ERR(folio); in f2fs_restore_node_summary()
2919 rn = F2FS_NODE(folio); in f2fs_restore_node_summary()
2924 f2fs_folio_put(folio, true); in f2fs_restore_node_summary()
3038 struct folio *folio = NULL; in __flush_nat_entry_set() local
3052 folio = get_next_nat_folio(sbi, start_nid); in __flush_nat_entry_set()
3053 if (IS_ERR(folio)) in __flush_nat_entry_set()
3054 return PTR_ERR(folio); in __flush_nat_entry_set()
3056 nat_blk = folio_address(folio); in __flush_nat_entry_set()
3093 f2fs_folio_put(folio, true); in __flush_nat_entry_set()
3188 struct folio *folio; in __get_nat_bitmaps() local
3190 folio = f2fs_get_meta_folio(sbi, nat_bits_addr++); in __get_nat_bitmaps()
3191 if (IS_ERR(folio)) in __get_nat_bitmaps()
3192 return PTR_ERR(folio); in __get_nat_bitmaps()
3195 folio_address(folio), F2FS_BLKSIZE); in __get_nat_bitmaps()
3196 f2fs_folio_put(folio, true); in __get_nat_bitmaps()
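
__get_nat_bitmaps (hits at 3188-3196) copies the packed nat-bits area out of consecutive meta folios, one block per iteration. A sketch of the loop (the loop bound and the destination pointer arithmetic into nm_i->nat_bits are assumptions; the listing only shows the source side of the memcpy):

	for (i = 0; i < nat_bits_blocks; i++) {		/* bound assumed */
		struct folio *folio;

		folio = f2fs_get_meta_folio(sbi, nat_bits_addr++);
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		memcpy(nm_i->nat_bits + F2FS_BLKSIZE * i,	/* dest assumed */
			folio_address(folio), F2FS_BLKSIZE);
		f2fs_folio_put(folio, true);
	}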