Lines matching refs:nid
33 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_check_nid_range() argument
35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) { in f2fs_check_nid_range()
38 __func__, nid); in f2fs_check_nid_range()
133 static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_folio() argument
135 return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid)); in get_current_nat_folio()
138 static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid) in get_next_nat_folio() argument
147 dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid)); in get_next_nat_folio()
150 src_folio = get_current_nat_folio(sbi, nid); in get_next_nat_folio()
162 set_to_next_nat(nm_i, nid); in get_next_nat_folio()
168 nid_t nid, bool no_fail) in __alloc_nat_entry() argument
175 nat_set_nid(new, nid); in __alloc_nat_entry()
250 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); in __grab_nat_entry_set()
389 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_need_dentry_mark() argument
396 e = __lookup_nat_cache(nm_i, nid, false); in f2fs_need_dentry_mark()
406 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_is_checkpointed_node() argument
413 e = __lookup_nat_cache(nm_i, nid, false); in f2fs_is_checkpointed_node()
437 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, in cache_nat_entry() argument
447 new = __alloc_nat_entry(sbi, nid, false); in cache_nat_entry()
452 e = __lookup_nat_cache(nm_i, nid, false); in cache_nat_entry()
470 struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true); in set_node_addr()
474 e = __lookup_nat_cache(nm_i, ni->nid, true); in set_node_addr()
516 if (ni->nid != ni->ino) in set_node_addr()
519 if (fsync_done && ni->nid == ni->ino) in set_node_addr()
557 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, in f2fs_get_node_info() argument
563 nid_t start_nid = START_NID(nid); in f2fs_get_node_info()
573 ni->nid = nid; in f2fs_get_node_info()
577 e = __lookup_nat_cache(nm_i, nid, false); in f2fs_get_node_info()
604 i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0); in f2fs_get_node_info()
616 index = current_nat_addr(sbi, nid); in f2fs_get_node_info()
624 ne = nat_blk->entries[nid - start_nid]; in f2fs_get_node_info()
636 ni->ino, ni->nid, ni->blk_addr, ni->version, ni->flag); in f2fs_get_node_info()
643 cache_nat_entry(sbi, nid, &ne); in f2fs_get_node_info()
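
The matches inside f2fs_get_node_info() above trace a four-step lookup order: the in-memory NAT cache, the NAT journal kept in the current summary block, the on-disk NAT block, and finally caching whatever was found. Below is a minimal sketch of that order (these lines appear to come from fs/f2fs/node.c). It drops locking, readahead and error handling, leaves the *ni fill steps as comments, never releases the meta folio, and apart from helpers shown in this listing it only assumes CURSEG_I()/CURSEG_HOT_DATA and folio_address(); it illustrates the flow, it is not the function itself.

	static void node_info_lookup_sketch(struct f2fs_sb_info *sbi, nid_t nid,
					    struct node_info *ni)
	{
		struct f2fs_nm_info *nm_i = NM_I(sbi);
		struct f2fs_journal *journal = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
		nid_t start_nid = START_NID(nid);
		struct f2fs_nat_block *nat_blk;
		struct f2fs_nat_entry ne;
		struct nat_entry *e;
		struct folio *folio;
		int i;

		ni->nid = nid;

		/* 1) in-memory NAT cache */
		e = __lookup_nat_cache(nm_i, nid, false);
		if (e) {
			/* fill *ni from the cached nat_entry and return */
			return;
		}

		/* 2) NAT journal kept in the current hot-data summary */
		i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
		if (i >= 0) {
			/* fill *ni from nat_in_journal(journal, i), cache it, return */
			return;
		}

		/* 3) on-disk NAT block covering this nid */
		folio = f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));
		nat_blk = folio_address(folio);
		ne = nat_blk->entries[nid - start_nid];
		/* fill *ni from the raw entry, then release the folio (omitted) */

		/* 4) remember the answer so the next lookup hits the cache */
		cache_nat_entry(sbi, nid, &ne);
	}
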
655 nid_t nid; in f2fs_ra_node_pages() local
663 nid = get_nid(parent, i, false); in f2fs_ra_node_pages()
664 f2fs_ra_node_page(sbi, nid); in f2fs_ra_node_pages()
847 dn->nid = nids[i]; in f2fs_get_dnode_of_data()
886 dn->nid = nids[level]; in f2fs_get_dnode_of_data()
942 err = f2fs_get_node_info(sbi, dn->nid, &ni, false); in truncate_node()
950 "nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr); in truncate_node()
958 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino); in truncate_node()
961 if (dn->nid == dn->inode->i_ino) { in truncate_node()
962 f2fs_remove_orphan_inode(sbi, dn->nid); in truncate_node()
977 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); in truncate_node()
988 if (dn->nid == 0) in truncate_dnode()
992 folio = f2fs_get_node_folio(sbi, dn->nid); in truncate_dnode()
1000 dn->inode->i_ino, dn->nid, ino_of_node(folio)); in truncate_dnode()
1031 if (dn->nid == 0) in truncate_nodes()
1034 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); in truncate_nodes()
1036 folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid); in truncate_nodes()
1047 child_nid = le32_to_cpu(rn->in.nid[i]); in truncate_nodes()
1050 rdn.nid = child_nid; in truncate_nodes()
1060 child_nid = le32_to_cpu(rn->in.nid[i]); in truncate_nodes()
1065 rdn.nid = child_nid; in truncate_nodes()
1101 nid_t nid[3]; in truncate_partial_nodes() local
1107 nid[0] = get_nid(dn->inode_folio, offset[0], true); in truncate_partial_nodes()
1108 if (!nid[0]) in truncate_partial_nodes()
1114 folios[i] = f2fs_get_node_folio(F2FS_I_SB(dn->inode), nid[i]); in truncate_partial_nodes()
1120 nid[i + 1] = get_nid(folios[i], offset[i + 1], false); in truncate_partial_nodes()
1130 dn->nid = child_nid; in truncate_partial_nodes()
1140 dn->nid = nid[idx]; in truncate_partial_nodes()
1154 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); in truncate_partial_nodes()
1225 dn.nid = get_nid(folio, offset[0], true); in f2fs_truncate_inode_blocks()
1251 inode->i_ino, dn.nid, offset[0], in f2fs_truncate_inode_blocks()
1277 nid_t nid = F2FS_I(inode)->i_xattr_nid; in f2fs_truncate_xattr_node() local
1282 if (!nid) in f2fs_truncate_xattr_node()
1285 nfolio = f2fs_get_xnode_folio(sbi, nid); in f2fs_truncate_xattr_node()
1289 set_new_dnode(&dn, inode, NULL, nfolio, nid); in f2fs_truncate_xattr_node()
1370 folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false); in f2fs_new_node_folio()
1378 err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false); in f2fs_new_node_folio()
1390 new_ni.ino, new_ni.nid, new_ni.blk_addr, in f2fs_new_node_folio()
1396 new_ni.nid = dn->nid; in f2fs_new_node_folio()
1404 fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true); in f2fs_new_node_folio()
1412 f2fs_i_xnid_write(dn->inode, dn->nid); in f2fs_new_node_folio()
1473 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_ra_node_page() argument
1478 if (!nid) in f2fs_ra_node_page()
1480 if (f2fs_check_nid_range(sbi, nid)) in f2fs_ra_node_page()
1483 afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid); in f2fs_ra_node_page()
1487 afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false); in f2fs_ra_node_page()
1496 struct folio *folio, pgoff_t nid, in sanity_check_node_footer() argument
1499 if (unlikely(nid != nid_of_node(folio) || in sanity_check_node_footer()
1506 ntype, nid, nid_of_node(folio), ino_of_node(folio), in sanity_check_node_footer()
1516 static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid, in __get_node_folio() argument
1522 if (!nid) in __get_node_folio()
1524 if (f2fs_check_nid_range(sbi, nid)) in __get_node_folio()
1527 folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false); in __get_node_folio()
1557 err = sanity_check_node_footer(sbi, folio, nid, ntype); in __get_node_folio()
1570 struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid) in f2fs_get_node_folio() argument
1572 return __get_node_folio(sbi, nid, NULL, 0, NODE_TYPE_REGULAR); in f2fs_get_node_folio()
1588 nid_t nid = get_nid(parent, start, false); in f2fs_get_node_folio_ra() local
1590 return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR); in f2fs_get_node_folio_ra()
1691 nid_t nid; in __write_node_folio() local
1728 nid = nid_of_node(folio); in __write_node_folio()
1729 f2fs_bug_on(sbi, folio->index != nid); in __write_node_folio()
1731 if (f2fs_get_node_info(sbi, nid, &ni, !do_balance)) in __write_node_folio()
1765 f2fs_do_write_node_page(nid, &fio); in __write_node_folio()
2268 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); in __insert_free_nid()
2287 radix_tree_delete(&nm_i->free_nid_root, i->nid); in __remove_free_nid()
2312 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, in update_free_nid_bitmap() argument
2316 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); in update_free_nid_bitmap()
2317 unsigned int nid_ofs = nid - START_NID(nid); in update_free_nid_bitmap()
2338 nid_t nid, bool build, bool update) in add_free_nid() argument
2347 if (unlikely(nid == 0)) in add_free_nid()
2350 if (unlikely(f2fs_check_nid_range(sbi, nid))) in add_free_nid()
2354 i->nid = nid; in add_free_nid()
2386 ne = __lookup_nat_cache(nm_i, nid, false); in add_free_nid()
2391 e = __lookup_free_nid_list(nm_i, nid); in add_free_nid()
2402 update_free_nid_bitmap(sbi, nid, ret, build); in add_free_nid()
2414 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) in remove_free_nid() argument
2421 i = __lookup_free_nid_list(nm_i, nid); in remove_free_nid()
2474 nid_t nid; in scan_curseg_cache() local
2477 nid = le32_to_cpu(nid_in_journal(journal, i)); in scan_curseg_cache()
2479 add_free_nid(sbi, nid, true, false); in scan_curseg_cache()
2481 remove_free_nid(sbi, nid); in scan_curseg_cache()
2490 nid_t nid; in scan_free_nid_bits() local
2505 nid = i * NAT_ENTRY_PER_BLOCK + idx; in scan_free_nid_bits()
2506 add_free_nid(sbi, nid, true, false); in scan_free_nid_bits()
2523 nid_t nid = nm_i->next_scan_nid; in __f2fs_build_free_nids() local
2525 if (unlikely(nid >= nm_i->max_nid)) in __f2fs_build_free_nids()
2526 nid = 0; in __f2fs_build_free_nids()
2528 if (unlikely(nid % NAT_ENTRY_PER_BLOCK)) in __f2fs_build_free_nids()
2529 nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK; in __f2fs_build_free_nids()
2547 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, in __f2fs_build_free_nids()
2553 if (!test_bit_le(NAT_BLOCK_OFFSET(nid), in __f2fs_build_free_nids()
2555 struct folio *folio = get_current_nat_folio(sbi, nid); in __f2fs_build_free_nids()
2561 nid); in __f2fs_build_free_nids()
2579 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); in __f2fs_build_free_nids()
2580 if (unlikely(nid >= nm_i->max_nid)) in __f2fs_build_free_nids()
2581 nid = 0; in __f2fs_build_free_nids()
2588 nm_i->next_scan_nid = nid; in __f2fs_build_free_nids()
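
The __f2fs_build_free_nids() matches show the scan cursor being snapped to NAT-block boundaries: aligned down before a block is scanned, bumped to the next boundary afterwards, and wrapped back to 0 once it reaches max_nid. A rough worked example, assuming NAT_ENTRY_PER_BLOCK is 455 (4 KiB blocks with 9-byte raw NAT entries; this value is an assumption, not stated in the listing):

	starting cursor            nid = 1000 (mid-block)
	align down                 NAT_BLOCK_OFFSET(1000) * 455 = (1000 / 455) * 455 = 2 * 455 = 910
	round up after the block   910 + (455 - 910 % 455) = 910 + 455 = 1365, i.e. the start of block 3
	wrap-around                once the cursor reaches max_nid it is reset to 0
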
2617 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) in f2fs_alloc_nid() argument
2637 *nid = i->nid; in f2fs_alloc_nid()
2642 update_free_nid_bitmap(sbi, *nid, false, false); in f2fs_alloc_nid()
2658 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_alloc_nid_done() argument
2664 i = __lookup_free_nid_list(nm_i, nid); in f2fs_alloc_nid_done()
2675 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_alloc_nid_failed() argument
2681 if (!nid) in f2fs_alloc_nid_failed()
2685 i = __lookup_free_nid_list(nm_i, nid); in f2fs_alloc_nid_failed()
2697 update_free_nid_bitmap(sbi, nid, true, false); in f2fs_alloc_nid_failed()
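
Taken together, f2fs_alloc_nid(), f2fs_alloc_nid_done() and f2fs_alloc_nid_failed() above form a reserve/commit/roll-back triple for a free nid. A minimal caller sketch follows; do_something_with() is a hypothetical placeholder and the error values are illustrative, so this mirrors the usage pattern rather than any specific in-tree caller.

	static int reserve_one_nid_sketch(struct f2fs_sb_info *sbi)
	{
		nid_t nid;

		/* take a nid off the free-nid list; false means none is available */
		if (!f2fs_alloc_nid(sbi, &nid))
			return -ENOSPC;

		if (do_something_with(sbi, nid)) {	/* hypothetical consumer */
			/* roll back: hand the nid back to the free-nid list */
			f2fs_alloc_nid_failed(sbi, nid);
			return -EIO;
		}

		/* commit: the nid is now in use, drop it from the free-nid structures */
		f2fs_alloc_nid_done(sbi, nid);
		return 0;
	}
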
2920 sum_entry->nid = rn->footer.nid; in f2fs_restore_node_summary()
2945 nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); in remove_nats_in_journal() local
2947 if (f2fs_check_nid_range(sbi, nid)) in remove_nats_in_journal()
2954 ne = __lookup_nat_cache(nm_i, nid, true); in remove_nats_in_journal()
2957 ne = __alloc_nat_entry(sbi, nid, true); in remove_nats_in_journal()
3063 nid_t nid = nat_get_nid(ne); in __flush_nat_entry_set() local
3070 NAT_JOURNAL, nid, 1); in __flush_nat_entry_set()
3073 nid_in_journal(journal, offset) = cpu_to_le32(nid); in __flush_nat_entry_set()
3075 raw_ne = &nat_blk->entries[nid - start_nid]; in __flush_nat_entry_set()
3081 add_free_nid(sbi, nid, false, true); in __flush_nat_entry_set()
3084 update_free_nid_bitmap(sbi, nid, false, false); in __flush_nat_entry_set()
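
The __flush_nat_entry_set() matches show each dirty NAT entry going to one of two destinations, the curseg NAT journal or the raw NAT block, and a nid whose block address was cleared being handed back to the free-nid pool. A condensed per-entry sketch follows; to_journal is an assumed flag meaning "the journal has room", the entry-copy steps are left as comments, and nat_get_blkaddr()/NULL_ADDR are taken from f2fs conventions rather than from this listing.

	static void flush_one_nat_entry_sketch(struct f2fs_sb_info *sbi,
					       struct f2fs_journal *journal,
					       struct f2fs_nat_block *nat_blk,
					       struct nat_entry *ne, bool to_journal)
	{
		nid_t nid = nat_get_nid(ne);
		struct f2fs_nat_entry *raw_ne;
		int offset;

		if (to_journal) {
			/* append nid + entry to the NAT journal (alloc = 1 reserves a slot) */
			offset = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 1);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
			/* copy the cached entry into nat_in_journal(journal, offset) */
		} else {
			/* otherwise rewrite the slot in the NAT block */
			raw_ne = &nat_blk->entries[nid - START_NID(nid)];
			/* copy the cached entry into *raw_ne */
		}

		/* a nid whose block address became NULL_ADDR is free to reuse */
		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false, true);
		else
			update_free_nid_bitmap(sbi, nid, false, false);
	}
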
3216 nid_t nid, last_nid; in load_free_nid_bitmap() local
3228 nid = i * NAT_ENTRY_PER_BLOCK; in load_free_nid_bitmap()
3229 last_nid = nid + NAT_ENTRY_PER_BLOCK; in load_free_nid_bitmap()
3232 for (; nid < last_nid; nid++) in load_free_nid_bitmap()
3233 update_free_nid_bitmap(sbi, nid, true, true); in load_free_nid_bitmap()
3370 nid_t nid = 0; in f2fs_destroy_node_manager() local
3392 nid, NAT_VEC_SIZE, natvec))) { in f2fs_destroy_node_manager()
3395 nid = nat_get_nid(natvec[found - 1]) + 1; in f2fs_destroy_node_manager()
3407 nid = 0; in f2fs_destroy_node_manager()
3410 nid, NAT_VEC_SIZE, setvec))) { in f2fs_destroy_node_manager()
3413 nid = setvec[found - 1]->set + 1; in f2fs_destroy_node_manager()