| /fs/xfs/scrub/ |
| A D | bmap_repair.c |
    99 struct xrep_bmap *rb, in xrep_bmap_discover_shared() argument
    131 struct xrep_bmap *rb, in xrep_bmap_from_rmap() argument
    576 error = xfarray_load(rb->bmap_records, rb->array_cur++, in xrep_bmap_get_records()
    679 return xrep_ino_ensure_extent_count(rb->sc, rb->whichfork, in xrep_bmap_extents_load()
    697 &rb->new_bmapbt.bload, rb->real_mappings); in xrep_bmap_btree_load()
    724 error = xfs_btree_bload(bmap_cur, &rb->new_bmapbt.bload, rb); in xrep_bmap_btree_load()
    764 error = xrep_newbt_init_inode(&rb->new_bmapbt, sc, rb->whichfork, in xrep_bmap_build_new_fork()
    785 if (rb->real_mappings <= XFS_IFORK_MAXEXT(sc->ip, rb->whichfork)) { in xrep_bmap_build_new_fork()
    938 if (!rb) in xrep_bmap()
    940 rb->sc = sc; in xrep_bmap()
    [all …]
|
| /fs/jffs2/ |
| A D | nodelist.h |
    230 struct rb_node rb; member
    271 struct rb_node rb; member
    347 #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
    348 #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
    350 #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
    351 #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
    354 #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
    355 #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
    356 #define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
    357 #define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb)
    [all …]
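
The frag_*/tn_* accessors above are all one pattern: rb_entry() is container_of(), so a struct rb_node embedded in an object can be mapped back to the object that contains it. A minimal sketch of the idea follows; struct my_frag and my_frag_next() are hypothetical stand-ins, not the actual jffs2 types.

    #include <linux/rbtree.h>
    #include <linux/types.h>

    /* Hypothetical stand-in for struct jffs2_node_frag. */
    struct my_frag {
            u32 ofs;
            struct rb_node rb;      /* links the object into an rb_root */
    };

    /*
     * rb_entry() expands to container_of(): from the embedded rb_node,
     * recover the containing struct my_frag.  If rb_next() may return
     * NULL, rb_entry_safe() (seen in the btrfs entry below) propagates
     * the NULL instead of computing a bogus pointer.
     */
    #define my_frag_next(f) \
            rb_entry(rb_next(&(f)->rb), struct my_frag, rb)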
|
| A D | nodelist.c |
    124 struct rb_node *parent = &base->rb; in jffs2_fragtree_insert()
    134 link = &base->rb.rb_right; in jffs2_fragtree_insert()
    136 link = &base->rb.rb_left; in jffs2_fragtree_insert()
    143 rb_link_node(&newfrag->rb, &base->rb, link); in jffs2_fragtree_insert()
    189 rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); in no_overlapping_node()
    204 rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); in no_overlapping_node()
    317 rb_replace_node(&this->rb, &newfrag->rb, root); in jffs2_add_frag_to_fragtree()
    327 rb_insert_color(&this->rb, root); in jffs2_add_frag_to_fragtree()
    338 rb_erase(&this->rb, root); in jffs2_add_frag_to_fragtree()
    543 next = frag->rb.rb_right; in jffs2_lookup_node_frag()
    [all …]
|
| A D | readinode.c |
    188 next = tn->rb.rb_right; in jffs2_lookup_tn()
    190 next = tn->rb.rb_left; in jffs2_lookup_tn()
    286 rb_replace_node(&this->rb, &tn->rb, &rii->tn_root); in jffs2_add_tn_to_tree()
    346 link = &insert_point->rb.rb_right; in jffs2_add_tn_to_tree()
    349 link = &insert_point->rb.rb_left; in jffs2_add_tn_to_tree()
    351 link = &insert_point->rb.rb_right; in jffs2_add_tn_to_tree()
    353 rb_link_node(&tn->rb, &insert_point->rb, link); in jffs2_add_tn_to_tree()
    440 rb_link_node(&tn->rb, parent, link); in ver_insert()
    441 rb_insert_color(&tn->rb, ver_root); in ver_insert()
    476 eat_last(&rii->tn_root, &last->rb); in jffs2_build_inode_fragtree()
    [all …]
|
| /fs/ocfs2/ |
| A D | refcounttree.c |
    100 rb->rf_signature); in ocfs2_validate_refcount_block()
    631 rb->rf_records.rl_count = in ocfs2_create_refcount_tree()
    790 if (rb->rf_suballoc_loc) in ocfs2_remove_refcount_tree()
    845 if (!rb->rf_count) { in ocfs2_remove_refcount_tree()
    1082 el = &rb->rf_list; in ocfs2_get_refcount_rec()
    1202 memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1], in ocfs2_rotate_refcount_rec_left()
    2126 BUG_ON(rb->rf_clusters); in ocfs2_remove_refcount_extent()
    2131 rb->rf_flags = 0; in ocfs2_remove_refcount_extent()
    2132 rb->rf_parent = 0; in ocfs2_remove_refcount_extent()
    2133 rb->rf_cpos = 0; in ocfs2_remove_refcount_extent()
    [all …]
|
| A D | xattr.c |
    6282 struct ocfs2_refcount_block *rb = in ocfs2_reflink_lock_xattr_allocators() local
    6306 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) in ocfs2_reflink_lock_xattr_allocators()
    6307 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) * in ocfs2_reflink_lock_xattr_allocators()
    6308 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1; in ocfs2_reflink_lock_xattr_allocators()
    6741 struct ocfs2_refcount_block *rb; in ocfs2_lock_reflink_xattr_rec_allocators() local
    6765 rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data; in ocfs2_lock_reflink_xattr_rec_allocators()
    6772 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) in ocfs2_lock_reflink_xattr_rec_allocators()
    6773 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) * in ocfs2_lock_reflink_xattr_rec_allocators()
    6774 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1; in ocfs2_lock_reflink_xattr_rec_allocators()
|
| A D | alloc.c |
    384 struct ocfs2_refcount_block *rb = et->et_object; in ocfs2_refcount_tree_fill_root_el() local
    386 et->et_root_el = &rb->rf_list; in ocfs2_refcount_tree_fill_root_el()
    392 struct ocfs2_refcount_block *rb = et->et_object; in ocfs2_refcount_tree_set_last_eb_blk() local
    394 rb->rf_last_eb_blk = cpu_to_le64(blkno); in ocfs2_refcount_tree_set_last_eb_blk()
    399 struct ocfs2_refcount_block *rb = et->et_object; in ocfs2_refcount_tree_get_last_eb_blk() local
    401 return le64_to_cpu(rb->rf_last_eb_blk); in ocfs2_refcount_tree_get_last_eb_blk()
    407 struct ocfs2_refcount_block *rb = et->et_object; in ocfs2_refcount_tree_update_clusters() local
    409 le32_add_cpu(&rb->rf_clusters, clusters); in ocfs2_refcount_tree_update_clusters()
|
| /fs/nilfs2/ |
| A D | recovery.c |
    373 rb = kmalloc(sizeof(*rb), GFP_NOFS); in nilfs_scan_dsync_log()
    374 if (unlikely(!rb)) { in nilfs_scan_dsync_log()
    378 rb->ino = ino; in nilfs_scan_dsync_log()
    379 rb->blocknr = blocknr++; in nilfs_scan_dsync_log()
    383 list_add_tail(&rb->list, head); in nilfs_scan_dsync_log()
    402 struct nilfs_recovery_block *rb; in dispose_recovery_list() local
    405 list_del(&rb->list); in dispose_recovery_list()
    406 kfree(rb); in dispose_recovery_list()
    578 err, (unsigned long)rb->ino, in nilfs_recover_dsync_blocks()
    584 list_del_init(&rb->list); in nilfs_recover_dsync_blocks()
    [all …]
|
| /fs/bcachefs/ |
| A D | btree_io.c |
    1483 bio_put(&rb->bio); in btree_node_read_work()
    1753 rb->c = c; in btree_node_read_all_replicas()
    1754 rb->b = b; in btree_node_read_all_replicas()
    1755 rb->ra = ra; in btree_node_read_all_replicas()
    1758 rb->idx = i; in btree_node_read_all_replicas()
    1759 rb->pick = pick; in btree_node_read_all_replicas()
    1844 rb->c = c; in bch2_btree_node_read()
    1845 rb->b = b; in bch2_btree_node_read()
    1846 rb->ra = NULL; in bch2_btree_node_read()
    1849 rb->pick = pick; in bch2_btree_node_read()
    [all …]
|
| /fs/ubifs/ |
| A D | log.c |
    38 bud = rb_entry(p, struct ubifs_bud, rb); in ubifs_search_bud()
    71 bud = rb_entry(p, struct ubifs_bud, rb); in ubifs_get_wbuf()
    122 b = rb_entry(parent, struct ubifs_bud, rb); in ubifs_add_bud()
    130 rb_link_node(&bud->rb, parent, p); in ubifs_add_bud()
    131 rb_insert_color(&bud->rb, &c->buds); in ubifs_add_bud()
    311 bud = rb_entry(p1, struct ubifs_bud, rb); in remove_buds()
    549 struct rb_node rb; member
    568 dr = rb_entry(parent, struct done_ref, rb); in done_already()
    583 rb_link_node(&dr->rb, parent, p); in done_already()
    584 rb_insert_color(&dr->rb, done_tree); in done_already()
    [all …]
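
ubifs_add_bud() and done_already() above follow the canonical kernel rbtree insertion sequence: walk down from the root while remembering the parent and the link slot, then rb_link_node() followed by rb_insert_color(). Below is a generic sketch of that sequence; struct my_elem, my_insert() and the u64 key are hypothetical, not the actual ubifs code.

    #include <linux/rbtree.h>
    #include <linux/types.h>

    /* Hypothetical element type; ubifs_bud and ubifs_orphan embed
     * struct rb_node the same way. */
    struct my_elem {
            struct rb_node rb;
            u64 key;
    };

    static void my_insert(struct rb_root *root, struct my_elem *new)
    {
            struct rb_node **p = &root->rb_node, *parent = NULL;
            struct my_elem *e;

            /* Walk down to the leaf slot where the new node belongs,
             * remembering its parent as we go. */
            while (*p) {
                    parent = *p;
                    e = rb_entry(parent, struct my_elem, rb);
                    if (new->key < e->key)
                            p = &parent->rb_left;
                    else
                            p = &parent->rb_right;
            }

            /* Link the node into the empty slot, then let the rbtree
             * core recolour and rebalance. */
            rb_link_node(&new->rb, parent, p);
            rb_insert_color(&new->rb, root);
    }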
|
| A D | orphan.c |
    73 o = rb_entry(parent, struct ubifs_orphan, rb); in ubifs_add_orphan()
    87 rb_link_node(&orphan->rb, parent, p); in ubifs_add_orphan()
    88 rb_insert_color(&orphan->rb, &c->orph_tree); in ubifs_add_orphan()
    104 o = rb_entry(p, struct ubifs_orphan, rb); in lookup_orphan()
    118 rb_erase(&o->rb, &c->orph_tree); in __orphan_drop()
    139 rb_erase(&orph->rb, &c->orph_tree); in orphan_delete()
    732 struct rb_node rb; member
    769 o = rb_entry(parent, struct check_orphan, rb); in dbg_ins_check_orphan()
    779 rb_link_node(&orphan->rb, parent, p); in dbg_ins_check_orphan()
    780 rb_insert_color(&orphan->rb, root); in dbg_ins_check_orphan()
    [all …]
|
| A D | recovery.c |
    1230 struct rb_node rb; member
    1254 e = rb_entry(parent, struct size_entry, rb); in add_ino()
    1270 rb_link_node(&e->rb, parent, p); in add_ino()
    1271 rb_insert_color(&e->rb, &c->size_tree); in add_ino()
    1287 e = rb_entry(p, struct size_entry, rb); in find_ino()
    1309 rb_erase(&e->rb, &c->size_tree); in remove_ino()
    1321 rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) { in ubifs_destroy_size_tree()
    1512 rb_erase(&e->rb, &c->size_tree); in inode_fix_size()
    1536 e = rb_entry(this, struct size_entry, rb); in ubifs_recover_size()
    1583 rb_erase(&e->rb, &c->size_tree); in ubifs_recover_size()
|
| A D | debug.c |
    615 struct rb_node *rb; in ubifs_dump_budg() local
    657 for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) { in ubifs_dump_budg()
    658 bud = rb_entry(rb, struct ubifs_bud, rb); in ubifs_dump_budg()
    683 struct rb_node *rb; in ubifs_dump_lprop() local
    745 for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) { in ubifs_dump_lprop()
    746 bud = rb_entry(rb, struct ubifs_bud, rb); in ubifs_dump_lprop()
    1787 struct rb_node rb; member
    1890 rb_link_node(&fscki->rb, parent, p); in add_inode()
    1891 rb_insert_color(&fscki->rb, &fsckd->inodes); in add_inode()
    1912 fscki = rb_entry(p, struct fsck_inode, rb); in search_inode()
    [all …]
|
| A D | replay.c |
    306 struct replay_entry *ra, *rb; in replay_entries_cmp() local
    313 rb = list_entry(b, struct replay_entry, list); in replay_entries_cmp()
    314 ubifs_assert(c, ra->sqnum != rb->sqnum); in replay_entries_cmp()
    315 if (ra->sqnum > rb->sqnum) in replay_entries_cmp()
|
| A D | ubifs.h |
    275 struct rb_node rb; member
    713 struct rb_node rb; member
    918 struct rb_node rb; member
|
| A D | tnc.c |
    56 o = rb_entry(parent, struct ubifs_old_idx, rb); in do_insert_old_idx()
    71 rb_link_node(&old_idx->rb, parent, p); in do_insert_old_idx()
    72 rb_insert_color(&old_idx->rb, &c->old_idx); in do_insert_old_idx()
    184 rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb) in destroy_old_idx()
|
| A D | tnc_commit.c |
    170 o = rb_entry(p, struct ubifs_old_idx, rb); in find_old_idx()
|
| /fs/kernfs/ |
| A D | dir.c |
    70 if (ra != rb) in kernfs_common_ancestor()
    74 db = kernfs_depth(rb->kn, b); in kernfs_common_ancestor()
    382 node = &pos->rb.rb_left; in kernfs_link_sibling()
    384 node = &pos->rb.rb_right; in kernfs_link_sibling()
    390 rb_link_node(&kn->rb, parent, node); in kernfs_link_sibling()
    420 if (RB_EMPTY_NODE(&kn->rb)) in kernfs_unlink_sibling()
    430 rb_erase(&kn->rb, &kn_parent->dir.children); in kernfs_unlink_sibling()
    431 RB_CLEAR_NODE(&kn->rb); in kernfs_unlink_sibling()
    657 RB_CLEAR_NODE(&kn->rb); in __kernfs_new_node()
    1384 rbn = rb_next(&pos->rb); in kernfs_next_descendant_post()
    [all …]
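
kernfs_unlink_sibling() above pairs rb_erase() with RB_CLEAR_NODE() and RB_EMPTY_NODE() so a node carries its own "am I linked into a tree?" state. A hedged sketch of that idiom follows, again using the hypothetical struct my_elem and my_unlink() rather than the kernfs code itself.

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct my_elem {                /* hypothetical, as in the sketch above */
            struct rb_node rb;
            u64 key;
    };

    static void my_unlink(struct rb_root *root, struct my_elem *e)
    {
            /* RB_EMPTY_NODE() is true when the node is not on any tree
             * (e.g. right after RB_CLEAR_NODE()), which makes unlinking
             * idempotent. */
            if (RB_EMPTY_NODE(&e->rb))
                    return;

            rb_erase(&e->rb, root);
            /* Mark the node as unlinked so later checks see it as empty;
             * rb_erase() alone leaves stale pointers in the node. */
            RB_CLEAR_NODE(&e->rb);
    }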
|
| /fs/afs/ |
| A D | server.c |
    514 struct rb_node *rb; in afs_purge_servers() local
    517 for (rb = rb_first(&cell->fs_servers); rb; rb = rb_next(rb)) { in afs_purge_servers()
    518 server = rb_entry(rb, struct afs_server, uuid_rb); in afs_purge_servers()
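
afs_purge_servers() above, like the ubifs dump helpers, walks a tree in sorted order with rb_first()/rb_next() and converts each rb_node back to its container with rb_entry(). A minimal sketch of the walk, reusing the hypothetical struct my_elem from the earlier sketches:

    #include <linux/printk.h>
    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct my_elem {                /* hypothetical stand-in type */
            struct rb_node rb;
            u64 key;
    };

    static void my_walk(struct rb_root *root)
    {
            struct rb_node *rb;

            /* In-order walk: rb_first() is the leftmost node, rb_next()
             * the in-order successor, NULL once the tree is exhausted. */
            for (rb = rb_first(root); rb; rb = rb_next(rb)) {
                    struct my_elem *e = rb_entry(rb, struct my_elem, rb);

                    pr_debug("key %llu\n", (unsigned long long)e->key);
            }
    }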
|
| /fs/btrfs/ |
| A D | extent_map.c |
    346 struct rb_node *rb; in try_merge_map() local
    363 rb = rb_prev(&em->rb_node); in try_merge_map()
    364 merge = rb_entry_safe(rb, struct extent_map, rb_node); in try_merge_map()
    366 if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) { in try_merge_map()
    381 rb = rb_next(&em->rb_node); in try_merge_map()
    382 merge = rb_entry_safe(rb, struct extent_map, rb_node); in try_merge_map()
    384 if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) { in try_merge_map()
|
| /fs/xfs/ |
| A D | xfs_refcount_item.c |
    269 struct xfs_refcount_intent *rb = ci_entry(b); in xfs_refcount_update_diff_items() local
    271 return ra->ri_group->xg_gno - rb->ri_group->xg_gno; in xfs_refcount_update_diff_items()
|
| A D | xfs_rmap_item.c |
    270 struct xfs_rmap_intent *rb = ri_entry(b); in xfs_rmap_update_diff_items() local
    272 return ra->ri_group->xg_gno - rb->ri_group->xg_gno; in xfs_rmap_update_diff_items()
|
| A D | xfs_extfree_item.c |
    390 struct xfs_extent_free_item *rb = xefi_entry(b); in xfs_extent_free_diff_items() local
    392 return ra->xefi_group->xg_gno - rb->xefi_group->xg_gno; in xfs_extent_free_diff_items()
|
| /fs/erofs/ |
| A D | erofs_fs.h |
    61 } __packed rb; member
|
| A D | super.c |
    327 ((u64)le16_to_cpu(dsb->rb.blocks_hi) << 32); in erofs_read_superblock()
    329 sbi->root_nid = le16_to_cpu(dsb->rb.rootnid_2b); in erofs_read_superblock()
|