/linux/fs/hpfs/

anode.c
    30  btree = &anode->btree;   in hpfs_bplus_lookup()
    74  btree = &fnode->btree;   in hpfs_add_sector_to_btree()
    77  btree = &anode->btree;   in hpfs_add_sector_to_btree()
    94  btree = &anode->btree;   in hpfs_add_sector_to_btree()
    154  btree = &anode->btree;   in hpfs_add_sector_to_btree()
    171  btree = &anode->btree;   in hpfs_add_sector_to_btree()
    174  btree = &fnode->btree;   in hpfs_add_sector_to_btree()
    232  btree = &anode->btree;   in hpfs_add_sector_to_btree()
    238  btree = &fnode->btree;   in hpfs_add_sector_to_btree()
    410  btree = &fnode->btree;   in hpfs_truncate_btree()
    [all …]
|
map.c
    180  if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=   in hpfs_map_fnode()
    181  (bp_internal(&fnode->btree) ? 12 : 8)) {   in hpfs_map_fnode()
    187  if (le16_to_cpu(fnode->btree.first_free) !=   in hpfs_map_fnode()
    188  8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {   in hpfs_map_fnode()
    235  if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=   in hpfs_map_anode()
    236  (bp_internal(&anode->btree) ? 60 : 40)) {   in hpfs_map_anode()
    240  if (le16_to_cpu(anode->btree.first_free) !=   in hpfs_map_anode()
    241  8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {   in hpfs_map_anode()
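The hpfs_map_fnode()/hpfs_map_anode() hits show a sanity check on the on-disk B+ tree header: used plus free slots must add up to the node's capacity (12 or 8 entries for an fnode, 60 or 40 for an anode, depending on whether the node is internal), and first_free must point just past the used entries (an 8-byte header followed by 8-byte internal or 12-byte leaf entries). A minimal standalone sketch of that check, with the struct layout and helper names simplified from the originals:

```c
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the HPFS bplus header; field names follow the hits
 * above, but sizes and endianness handling are reduced for illustration. */
struct bplus_header_sketch {
	bool	 internal;	/* stands in for bp_internal() */
	uint8_t	 n_free_nodes;
	uint8_t	 n_used_nodes;
	uint16_t first_free;	/* assumed already converted from little-endian */
};

/*
 * Consistency check mirroring the hpfs_map_fnode()/hpfs_map_anode() hits:
 * - used + free slots must equal the node's capacity,
 * - first_free must sit right after the used entries: an 8-byte header
 *   followed by 8-byte entries (internal) or 12-byte entries (leaf).
 * capacity_internal/capacity_leaf are 12/8 for fnodes and 60/40 for anodes.
 */
static bool btree_header_sane(const struct bplus_header_sketch *h,
			      unsigned capacity_internal, unsigned capacity_leaf)
{
	unsigned capacity   = h->internal ? capacity_internal : capacity_leaf;
	unsigned entry_size = h->internal ? 8 : 12;

	if ((unsigned)h->n_used_nodes + (unsigned)h->n_free_nodes != capacity)
		return false;
	if (h->first_free != 8 + h->n_used_nodes * entry_size)
		return false;
	return true;
}
```

Usage would look like btree_header_sane(&fnode_hdr, 12, 8) for an fnode and btree_header_sane(&anode_hdr, 60, 40) for an anode; both helper and struct names here are hypothetical.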
|
/linux/fs/bcachefs/

bset.h
    241  static inline void btree_node_set_format(struct btree *b,   in btree_node_set_format()
    267  void bch2_btree_keys_init(struct btree *);
    335  struct btree *);
    337  struct btree *,
    368  static inline int bkey_iter_cmp(const struct btree *b,   in bkey_iter_cmp()
    406  struct btree *b)   in __bch2_btree_node_iter_peek_all()
    443  struct btree *);
    445  struct btree *);
    448  struct btree *,
    522  void __bch2_verify_btree_nr_keys(struct btree *);
    [all …]
|
btree_update_interior.h
    73  struct btree *b;
    86  struct btree *b[BTREE_UPDATE_NODES_MAX];
    101  struct btree *new_nodes[BTREE_UPDATE_NODES_MAX];
    104  struct btree *old_nodes[BTREE_UPDATE_NODES_MAX];
    127  struct btree *,
    143  struct btree *b;   in bch2_foreground_maybe_merge_sibling()
    171  struct btree *, unsigned);
    174  struct btree *, struct bkey_i *,
    185  struct btree *b)   in btree_update_reserve_required()
    206  static inline void *btree_data_end(struct btree *b)   in btree_data_end()
    [all …]
|
btree_io.h
    14  struct btree;
    39  struct btree *b;
    58  void bch2_btree_node_io_unlock(struct btree *);
    59  void bch2_btree_node_io_lock(struct btree *);
    60  void __bch2_btree_node_wait_on_read(struct btree *);
    61  void __bch2_btree_node_wait_on_write(struct btree *);
    62  void bch2_btree_node_wait_on_read(struct btree *);
    63  void bch2_btree_node_wait_on_write(struct btree *);
    122  void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
    126  void bch2_btree_build_aux_trees(struct btree *);
    [all …]
|
btree_cache.h
    15  void bch2_btree_node_to_freelist(struct bch_fs *, struct btree *);
    17  void __bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
    18  void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
    21  int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
    24  void bch2_node_pin(struct bch_fs *, struct btree *);
    33  struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
    34  struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
    76  static inline bool btree_node_hashed(struct btree *b)   in btree_node_hashed()
    87  static inline size_t btree_buf_bytes(const struct btree *b)   in btree_buf_bytes()
    92  static inline size_t btree_buf_max_u64s(const struct btree *b)   in btree_buf_max_u64s()
    [all …]
|
btree_write_buffer.h
    42  return cmp_int(l->btree, r->btree) ?: bpos_cmp(l->k.k.p, r->k.k.p);   in wb_key_cmp()
    49  enum btree_id btree, struct bkey_i_accounting *k)   in bch2_accounting_key_to_wb() argument
    53  search.btree = btree;   in bch2_accounting_key_to_wb()
    61  return bch2_accounting_key_to_wb_slowpath(c, btree, k);   in bch2_accounting_key_to_wb()
    74  enum btree_id btree, struct bkey_i *k)   in __bch2_journal_key_to_wb() argument
    77  return bch2_journal_key_to_wb_slowpath(c, dst, btree, k);   in __bch2_journal_key_to_wb()
    81  wb_k->btree = btree;   in __bch2_journal_key_to_wb()
    90  enum btree_id btree, struct bkey_i *k)   in bch2_journal_key_to_wb() argument
    95  ? bch2_accounting_key_to_wb(c, btree, bkey_i_to_accounting(k))   in bch2_journal_key_to_wb()
    96  : __bch2_journal_key_to_wb(c, dst, btree, k);   in bch2_journal_key_to_wb()
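The wb_key_cmp() hit above uses a common kernel comparator idiom: cmp_int() on the primary key chained with the GNU `?:` operator, so the position comparison only breaks ties when the btree ids are equal. A minimal sketch of that idiom with simplified stand-in types (the real bcachefs key and bpos types are much richer, and cmp_int() here is a local stand-in, not the kernel macro):

```c
#include <stdio.h>

/* Stand-in for the kernel's cmp_int(): -1, 0 or +1 ordering of two scalars. */
#define cmp_int(a, b)  (((a) > (b)) - ((a) < (b)))

/* Greatly simplified stand-ins for enum btree_id / struct bpos. */
struct wb_key_sketch {
	int                btree;	/* which btree the key belongs to */
	unsigned long long pos;		/* flattened position instead of struct bpos */
};

/*
 * Chained comparison in the style of wb_key_cmp(): the GNU "a ?: b" extension
 * (used throughout the kernel) returns the first comparison result unless it
 * is 0 (equal), in which case the next comparison breaks the tie.
 */
static int wb_key_cmp_sketch(const struct wb_key_sketch *l,
			     const struct wb_key_sketch *r)
{
	return cmp_int(l->btree, r->btree) ?: cmp_int(l->pos, r->pos);
}

int main(void)
{
	struct wb_key_sketch a = { .btree = 1, .pos = 100 };
	struct wb_key_sketch b = { .btree = 1, .pos = 200 };

	printf("%d\n", wb_key_cmp_sketch(&a, &b));	/* -1: same btree, lower pos */
	return 0;
}
```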
|
btree_gc.h
    39  static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,   in gc_pos_btree() argument
    44  .btree = btree,   in gc_pos_btree()
    50  static inline int gc_btree_order(enum btree_id btree)   in gc_btree_order() argument
    52  if (btree == BTREE_ID_alloc)   in gc_btree_order()
    54  if (btree == BTREE_ID_stripes)   in gc_btree_order()
    56  return btree;   in gc_btree_order()
    62  cmp_int(gc_btree_order(l.btree),   in gc_pos_cmp()
    63  gc_btree_order(r.btree)) ?:   in gc_pos_cmp()
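gc_btree_order() above remaps btree ids into the order GC visits them, special-casing BTREE_ID_alloc and BTREE_ID_stripes, and gc_pos_cmp() then compares positions using that remapped order. The values returned for the special cases are truncated in the hits, so the sketch below simply assumes they sort ahead of everything else; the enum values and return constants are placeholders:

```c
/* Placeholder btree ids; the real enum btree_id has many more entries. */
enum btree_id_sketch {
	SKETCH_ID_extents,
	SKETCH_ID_inodes,
	SKETCH_ID_alloc,
	SKETCH_ID_stripes,
	SKETCH_ID_NR,
};

#define cmp_int(a, b)  (((a) > (b)) - ((a) < (b)))

/*
 * Remap btree ids into a GC visiting order.  The hits show that alloc and
 * stripes are special-cased; the exact values they map to are not visible in
 * the search results, so this sketch just sorts them before every other btree.
 */
static int gc_btree_order_sketch(enum btree_id_sketch btree)
{
	if (btree == SKETCH_ID_alloc)
		return -2;		/* assumed: visited first */
	if (btree == SKETCH_ID_stripes)
		return -1;		/* assumed: visited second */
	return btree;			/* everything else in id order */
}

/* Compare two GC positions by remapped btree order, as gc_pos_cmp() does
 * before falling back to level/position comparisons (omitted here). */
static int gc_pos_cmp_sketch(enum btree_id_sketch l, enum btree_id_sketch r)
{
	return cmp_int(gc_btree_order_sketch(l), gc_btree_order_sketch(r));
}
```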
|
bset.c
    22  struct btree *);
    157  struct btree *b)   in bch2_btree_node_iter_next_check()
    194  struct btree *b)   in bch2_btree_node_iter_verify()
    1212  struct btree *b,   in __bch2_btree_node_iter_push()
    1347  struct btree *b)   in bch2_btree_node_iter_init_from_start()
    1359  struct btree *b,   in bch2_btree_node_iter_bset_pos()
    1372  struct btree *b,   in btree_node_iter_sort_two()
    1409  struct btree *b)   in __bch2_btree_node_iter_advance()
    1436  struct btree *b)   in bch2_btree_node_iter_advance()
    1450  struct btree *b)   in bch2_btree_node_iter_prev_all()
    [all …]
|
btree_types.h
    72  struct btree {   struct
    336  struct btree *b;
    420  ? container_of(b, struct btree, c)->key.k.p   in btree_node_pos()
    613  static inline bool btree_node_ ## flag(struct btree *b) \
    656  static inline struct bset *bset(const struct btree *b,   in bset()
    675  static inline struct bset *btree_bset_first(struct btree *b)   in btree_bset_first()
    680  static inline struct bset *btree_bset_last(struct btree *b)   in btree_bset_last()
    692  __btree_node_offset_to_key(const struct btree *b, u16 k)   in __btree_node_offset_to_key()
    801  static inline bool btree_id_is_extents(enum btree_id btree)   in btree_id_is_extents() argument
    840  struct btree *b;
    [all …]
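Line 613 of btree_types.h (and the matching hit in bcache's btree.h further down) shows the token-pasting pattern both btree implementations use to generate node-flag accessors: a macro pastes the flag name into a btree_node_<flag>() inline helper. A small self-contained sketch of the pattern, with made-up flag names and a plain flags word standing in for the real node state:

```c
#include <stdbool.h>
#include <stdio.h>

/* Minimal node with a flags word; the real struct btree carries much more. */
struct btree_sketch {
	unsigned long flags;
};

/* Hypothetical flag bits, just to drive the macro. */
enum {
	BTREE_NODE_dirty,
	BTREE_NODE_read_error,
};

/*
 * Token-pasting accessor generator in the style of the hit at line 613:
 * BTREE_FLAG(dirty) expands to btree_node_dirty() and set_btree_node_dirty().
 */
#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree_sketch *b)		\
{									\
	return b->flags & (1UL << BTREE_NODE_ ## flag);			\
}									\
									\
static inline void set_btree_node_ ## flag(struct btree_sketch *b)	\
{									\
	b->flags |= 1UL << BTREE_NODE_ ## flag;				\
}

BTREE_FLAG(dirty)
BTREE_FLAG(read_error)

int main(void)
{
	struct btree_sketch b = { 0 };

	set_btree_node_dirty(&b);
	printf("dirty=%d read_error=%d\n",
	       btree_node_dirty(&b), btree_node_read_error(&b));
	return 0;
}
```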
|
btree_cache.c
    176  struct btree *b;   in __btree_node_mem_alloc()
    192  struct btree *b;   in __bch2_btree_node_mem_alloc()
    238  struct btree *b, *n;   in bch2_btree_cache_unpin()
    318  struct btree *b;   in bch2_btree_node_update_key_early()
    751  struct btree *b;   in btree_node_cannibalize()
    904  struct btree *b;   in bch2_btree_node_fill()
    1045  struct btree *b;   in __bch2_btree_node_get()
    1168  struct btree *b;   in bch2_btree_node_get()
    1244  struct btree *b;   in bch2_btree_node_get_noiter()
    1349  struct btree *b;   in bch2_btree_node_evict()
    [all …]
|
bbpos.h
    11  return cmp_int(l.btree, r.btree) ?: bpos_cmp(l.pos, r.pos);   in bbpos_cmp()
    21  if (pos.btree != BTREE_ID_NR) {   in bbpos_successor()
    22  pos.btree++;   in bbpos_successor()
    32  prt_str(out, bch2_btree_id_str(pos.btree));   in bch2_bbpos_to_text()
|
bkey.h
    55  struct btree;
    58  unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
    67  const struct btree *);
    75  int bch2_bkey_cmp_packed(const struct btree *,
    80  int __bch2_bkey_cmp_left_packed(const struct btree *,
    85  int bkey_cmp_left_packed(const struct btree *b,   in bkey_cmp_left_packed()
    383  const struct btree *);
    386  const struct btree *b)   in bkey_pack_pos()
    399  __bkey_unpack_key_format_checked(const struct btree *b,   in __bkey_unpack_key_format_checked()
    419  bkey_unpack_key_format_checked(const struct btree *b,   in bkey_unpack_key_format_checked()
    [all …]
|
debug.h
    8  struct btree;
    11  void __bch2_btree_verify(struct bch_fs *, struct btree *);
    13  const struct btree *);
    15  static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b)   in bch2_btree_verify()
|
btree_update.h
    9  struct btree;
    12  struct btree_path *, struct btree *);
    14  struct btree *, struct btree_node_iter *,
    19  void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
    73  enum btree_id btree, struct bpos pos)   in bch2_btree_delete_at_buffered() argument
    75  return bch2_btree_bit_mod_buffered(trans, btree, pos, false);   in bch2_btree_delete_at_buffered()
    89  enum btree_id btree,   in bch2_insert_snapshot_whiteouts() argument
    93  if (!btree_type_has_snapshots(btree) ||   in bch2_insert_snapshot_whiteouts()
    97  return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos);   in bch2_insert_snapshot_whiteouts()
    132  enum btree_id btree,   in bch2_trans_update_buffered() argument
    [all …]
|
bkey_sort.h
    6  struct btree *b;
    15  static inline void sort_iter_init(struct sort_iter *iter, struct btree *b, unsigned size)   in sort_iter_init()
    27  static inline void sort_iter_stack_init(struct sort_iter_stack *iter, struct btree *b)   in sort_iter_stack_init()
    47  bch2_sort_repack(struct bset *, struct btree *,
|
bbpos_types.h
    6  enum btree_id btree;   member
    10  static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos)   in BBPOS() argument
    12  return (struct bbpos) { btree, pos };   in BBPOS()
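Taken together, the bbpos_types.h and bbpos.h hits describe a "btree + position" pair: struct bbpos bundles a btree id with a struct bpos, BBPOS() builds one, bbpos_cmp() orders them lexicographically, and bbpos_successor() steps to the next position, rolling over into the next btree until BTREE_ID_NR. A simplified sketch with a scalar position standing in for struct bpos; only the rollover branch of the successor is visible in the hits, so its end-of-space handling here is an assumption:

```c
#include <assert.h>

/* Placeholder btree id range; SKETCH_ID_NR marks one past the last btree. */
enum btree_id_sketch { SKETCH_ID_first, SKETCH_ID_last, SKETCH_ID_NR };

#define POS_MIN_SKETCH 0ULL
#define POS_MAX_SKETCH (~0ULL)

#define cmp_int(a, b)  (((a) > (b)) - ((a) < (b)))

/* "btree + position" pair, mirroring struct bbpos but with a scalar pos. */
struct bbpos_sketch {
	enum btree_id_sketch btree;
	unsigned long long   pos;
};

static inline struct bbpos_sketch BBPOS_SKETCH(enum btree_id_sketch btree,
					       unsigned long long pos)
{
	return (struct bbpos_sketch) { btree, pos };
}

/* Lexicographic order: btree id first, then position (as in bbpos_cmp()). */
static inline int bbpos_cmp_sketch(struct bbpos_sketch l, struct bbpos_sketch r)
{
	return cmp_int(l.btree, r.btree) ?: cmp_int(l.pos, r.pos);
}

/*
 * Successor: advance within the current btree, and roll over into the next
 * btree at the end of the key space.
 */
static inline struct bbpos_sketch bbpos_successor_sketch(struct bbpos_sketch pos)
{
	if (pos.pos != POS_MAX_SKETCH) {
		pos.pos++;
		return pos;
	}
	if (pos.btree != SKETCH_ID_NR) {
		pos.btree++;
		pos.pos = POS_MIN_SKETCH;
		return pos;
	}
	assert(0);	/* walked past the last btree */
	return pos;
}
```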
|
backpointers.c
    662  enum btree_id btree, unsigned level,   in check_extent_to_backpointers() argument
    703  struct btree *b;   in check_btree_root_to_backpointers()
    731  .btree = bp.btree_id,   in bp_to_bbpos()
    768  for (enum btree_id btree = start.btree;   in bch2_get_btree_in_memory_pos() local
    769  btree < BTREE_ID_NR && !ret;   in bch2_get_btree_in_memory_pos()
    770  btree++) {   in bch2_get_btree_in_memory_pos()
    773  if (!(BIT_ULL(btree) & btree_leaf_mask) &&   in bch2_get_btree_in_memory_pos()
    778  btree == start.btree ? start.pos : POS_MIN,   in bch2_get_btree_in_memory_pos()
    783  BBPOS(btree, b->key.k.p);   in bch2_get_btree_in_memory_pos()
    798  struct btree *last_node;
    [all …]
|
btree_update_interior.c
    311  struct btree *b;   in __bch2_btree_node_alloc()
    386  struct btree *b;   in bch2_btree_node_alloc()
    531  struct btree *b;   in bch2_btree_reserve_get()
    672  struct btree *b;   in btree_update_nodes_written()
    1354  struct btree *b,   in bch2_insert_fixup_btree_ptr()
    1408  struct btree *b,   in bch2_btree_insert_keys_interior()
    1605  struct btree *n[2];   in btree_split()
    2213  struct btree *b;   in async_btree_node_rewrite_trans()
    2329  struct btree *b, struct btree *new_hash,   in __bch2_btree_node_update_key()
    2336  struct btree *parent;   in __bch2_btree_node_update_key()
    [all …]
|
/linux/fs/nilfs2/

btree.c
    457  btree->b_inode->i_ino,   in nilfs_btree_bad_node()
    1231  if (!nilfs_bmap_dirty(btree))   in nilfs_btree_commit_insert()
    1232  nilfs_bmap_set_dirty(btree);   in nilfs_btree_commit_insert()
    1581  if (!nilfs_bmap_dirty(btree))   in nilfs_btree_commit_delete()
    1582  nilfs_bmap_set_dirty(btree);   in nilfs_btree_commit_delete()
    1603  dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;   in nilfs_btree_delete()
    1815  btree->b_ops->bop_clear(btree);   in nilfs_btree_commit_convert_and_insert()
    1821  dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;   in nilfs_btree_commit_convert_and_insert()
    1822  __nilfs_btree_init(btree);   in nilfs_btree_commit_convert_and_insert()
    2109  btree->b_inode->i_ino,   in nilfs_btree_propagate()
    [all …]
|
/linux/drivers/md/bcache/

btree.h
    117  struct btree {   struct
    127  struct btree *parent;   argument
    152  static inline bool btree_node_ ## flag(struct btree *b) \   argument
    180  static inline struct bset *btree_bset_first(struct btree *b)   in btree_bset_first()
    185  static inline struct bset *btree_bset_last(struct btree *b)   in btree_bset_last()
    256  static inline void rw_unlock(bool w, struct btree *b)   in rw_unlock()
    263  void bch_btree_node_read_done(struct btree *b);
    267  void bch_btree_set_root(struct btree *b);
    270  struct btree *parent);
    273  struct btree *parent);
    [all …]
|
btree.c
    329  struct btree *b = container_of(cl, struct btree, io);   in btree_node_write_endio()
    590  struct btree *b = kzalloc(sizeof(struct btree), gfp);   in mca_bucket_alloc()
    750  struct btree *b;   in bch_btree_cache_free()
    857  struct btree *b;   in mca_find()
    889  struct btree *b;   in mca_cannibalize()
    927  struct btree *b;   in mca_alloc()
    1012  struct btree *b;   in bch_btree_node_get()
    1066  struct btree *b;   in btree_node_prefetch()
    1126  struct btree *b;   in __bch_btree_node_alloc()
    1354  struct btree *b;
    [all …]
|
extents.c
    129  struct btree *b = container_of(keys, struct btree, keys);   in bch_bkey_dump()
    169  struct btree *b = container_of(bk, struct btree, keys);   in bch_btree_ptr_invalid()
    174  static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)   in btree_ptr_bad_expensive()
    208  struct btree *b = container_of(bk, struct btree, keys);   in bch_btree_ptr_bad()
    233  struct btree *b = container_of(bk, struct btree, keys);   in bch_btree_ptr_insert_fixup()
    343  struct cache_set *c = container_of(b, struct btree, keys)->c;   in bch_extent_insert_fixup()
    517  struct btree *b = container_of(bk, struct btree, keys);   in bch_extent_invalid()
    522  static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,   in bch_extent_bad_expensive()
    554  struct btree *b = container_of(bk, struct btree, keys);   in bch_extent_bad()
    600  struct btree *b = container_of(bk, struct btree, keys);   in bch_extent_merge()
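Nearly every extents.c hit recovers the enclosing struct btree from a pointer to its embedded btree_keys member via container_of(): the key-handling callbacks receive only the embedded struct and walk back to the parent object. A self-contained illustration of the idiom with cut-down structs (the real bcache types carry many more fields, and the kernel's container_of() adds type checking omitted here):

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): recover the enclosing structure from a pointer
 * to one of its members, without the kernel macro's type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Cut-down stand-ins for bcache's btree_keys / struct btree. */
struct btree_keys_sketch {
	unsigned nsets;
};

struct btree_sketch {
	int level;
	struct btree_keys_sketch keys;	/* embedded, as in struct btree */
};

/* A callback that, like bch_extent_bad(), only gets the embedded keys
 * pointer and climbs back up to the node that contains it. */
static int node_level_from_keys(struct btree_keys_sketch *bk)
{
	struct btree_sketch *b = container_of(bk, struct btree_sketch, keys);

	return b->level;
}

int main(void)
{
	struct btree_sketch node = { .level = 2 };

	printf("level=%d\n", node_level_from_keys(&node.keys));
	return 0;
}
```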
|
/linux/fs/xfs/libxfs/

xfs_da_btree.c
    764  btree = icnodehdr.btree;   in xfs_da3_root_split()
    803  btree = nodehdr.btree;   in xfs_da3_root_split()
    1085  btree = nodehdr.btree;   in xfs_da3_node_add()
    1495  btree = nodehdr.btree;   in xfs_da3_fixhashpath()
    1534  btree = nodehdr.btree;   in xfs_da3_node_remove()
    1538  memmove(&btree[index], &btree[index + 1], tmp);   in xfs_da3_node_remove()
    1740  btree = nodehdr.btree;   in xfs_da3_node_lookup_int()
    2199  btree = nodehdr.btree;   in xfs_da3_path_shift()
    2485  btree = deadhdr.btree;   in xfs_da3_swap_lastblock()
    2571  btree = par_hdr.btree;   in xfs_da3_swap_lastblock()
    [all …]
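The xfs_da3_node_remove() hit at line 1538 shows the usual way a directory/attribute node drops one entry from its in-block array: the entries after `index` are shifted down with a single memmove() over the tail of the array. A standalone sketch of that shift, with a simplified entry type in place of the real xfs node entry and a plain count where the on-disk header would be:

```c
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for an xfs da-btree node entry: a hash/block pair. */
struct node_entry_sketch {
	unsigned int hashval;
	unsigned int blkno;
};

/*
 * Remove entry 'index' from a node's btree[] array by shifting the tail down
 * one slot, mirroring the memmove() in xfs_da3_node_remove().  Returns the
 * new entry count.
 */
static unsigned remove_node_entry(struct node_entry_sketch *btree,
				  unsigned count, unsigned index)
{
	if (index + 1 < count) {
		size_t tmp = (count - index - 1) * sizeof(*btree);

		memmove(&btree[index], &btree[index + 1], tmp);
	}
	return count - 1;
}

int main(void)
{
	struct node_entry_sketch btree[] = {
		{ 10, 100 }, { 20, 200 }, { 30, 300 },
	};
	unsigned count = remove_node_entry(btree, 3, 1);

	for (unsigned i = 0; i < count; i++)
		printf("%u -> %u\n", btree[i].hashval, btree[i].blkno);
	return 0;
}
```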
|
/linux/Documentation/admin-guide/device-mapper/

persistent-data.rst
    14  - Another btree-based caching target posted to dm-devel
    72  dm-btree.[hc]
    73  dm-btree-remove.c
    74  dm-btree-spine.c
    75  dm-btree-internal.h
    77  Currently there is only one data structure, a hierarchical btree.
    81  The btree is 'hierarchical' in that you can define it to be composed
    83  thin-provisioning target uses a btree with two levels of nesting.
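The persistent-data.rst fragments describe the dm-btree as hierarchical: a value stored at one level can itself be the root of another btree, and the thin-provisioning target nests two such levels (commonly described as device id at the outer level and block number at the inner level, though that detail is not in the hits above). A toy sketch of the idea, with sorted-array "trees" and linear lookups standing in for the real on-disk btrees:

```c
#include <stdio.h>

/* Toy "btree": a (key, value) array; the value is either user data or, for
 * the outer level, an index identifying the inner tree's root. */
struct kv { unsigned long long key, value; };

static int lookup(const struct kv *tree, unsigned n,
		  unsigned long long key, unsigned long long *value)
{
	for (unsigned i = 0; i < n; i++)	/* linear scan for brevity */
		if (tree[i].key == key) {
			*value = tree[i].value;
			return 0;
		}
	return -1;				/* key not mapped */
}

int main(void)
{
	/* Inner trees: per-"device" block mappings (hypothetical numbers). */
	struct kv dev0_map[] = { { 0, 1000 }, { 1, 1001 } };
	struct kv dev1_map[] = { { 0, 2000 } };
	struct kv *inner[]   = { dev0_map, dev1_map };
	unsigned inner_len[] = { 2, 1 };

	/* Outer tree: device id -> index of the inner tree ("root"). */
	struct kv outer[] = { { 0, 0 }, { 1, 1 } };

	/* Two-level lookup: device 1, virtual block 0. */
	unsigned long long root, data_block;

	if (!lookup(outer, 2, 1, &root) &&
	    !lookup(inner[root], inner_len[root], 0, &data_block))
		printf("mapped to data block %llu\n", data_block);
	return 0;
}
```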
|