Lines matching refs: b
78 insert_l(trans, &i[0])->b == insert_l(trans, &i[-1])->b; in same_leaf_as_prev()
85 insert_l(trans, &i[0])->b == insert_l(trans, &i[1])->b; in same_leaf_as_next()
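
Hits 78 and 85 are the adjacency checks same_leaf_as_prev()/same_leaf_as_next(): an update shares a leaf with its neighbour in the updates array when both entries resolve to the same btree node. A minimal stand-alone model of that check (the entry and node types below are simplified stand-ins, not the real bcachefs structures):

    #include <stdbool.h>
    #include <stdio.h>

    struct node  { int id; };             /* stand-in for struct btree                  */
    struct entry { struct node *b; };     /* stand-in for a btree_insert_entry's leaf   */

    /* True when this entry targets the same leaf as the previous entry. */
    static bool same_leaf_as_prev(struct entry *updates, struct entry *i)
    {
        return i != updates && i[0].b == i[-1].b;
    }

    /* True when this entry targets the same leaf as the next entry. */
    static bool same_leaf_as_next(struct entry *updates, unsigned nr, struct entry *i)
    {
        return i + 1 < updates + nr && i[0].b == i[1].b;
    }

    int main(void)
    {
        struct node a = {1}, b = {2};
        struct entry updates[] = { {&a}, {&a}, {&b} };

        printf("%d %d\n",
               same_leaf_as_prev(updates, &updates[1]),     /* 1: both point at node a    */
               same_leaf_as_next(updates, 3, &updates[1])); /* 0: next entry is node b    */
        return 0;
    }
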
90 struct btree *b) in bch2_btree_node_prep_for_write() argument
94 if (unlikely(btree_node_just_written(b)) && in bch2_btree_node_prep_for_write()
95 bch2_btree_post_write_cleanup(c, b)) in bch2_btree_node_prep_for_write()
96 bch2_trans_node_reinit_iter(trans, b); in bch2_btree_node_prep_for_write()
102 if (want_new_bset(c, b)) in bch2_btree_node_prep_for_write()
103 bch2_btree_init_next(trans, b); in bch2_btree_node_prep_for_write()
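
Hits 90-103 cover bch2_btree_node_prep_for_write(): before modifying a locked leaf it finishes any just-completed write (which may require reinitializing iterators into the node) and, if the current bset is worth closing, starts a new one. A rough stand-alone model of that flow; the flag name and the want_new_bset() heuristic below are simplifications, not the real helpers:

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
        bool just_written;   /* a write to this node completed recently         */
        int  bset_used;      /* space used in the currently open bset           */
        int  bset_max;       /* capacity of a bset in this model                */
        int  nr_bsets;       /* how many bsets have been started                */
    };

    /* Placeholder heuristic for deciding to close the current bset and
     * open a fresh one (e.g. the open bset is nearly full). */
    static bool want_new_bset(struct node *b)
    {
        return b->bset_used * 4 >= b->bset_max * 3;
    }

    static void prep_for_write(struct node *b)
    {
        if (b->just_written) {
            /* post-write cleanup: in-memory state is compacted, so any
             * iterators pointing into the node must be reinitialized   */
            b->just_written = false;
        }

        if (want_new_bset(b)) {
            b->bset_used = 0;
            b->nr_bsets++;
        }
    }

    int main(void)
    {
        struct node b = { .just_written = true, .bset_used = 90,
                          .bset_max = 100, .nr_bsets = 1 };

        prep_for_write(&b);
        printf("just_written=%d nr_bsets=%d\n", b.just_written, b.nr_bsets); /* 0 2 */
        return 0;
    }
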
112 bch2_btree_node_unlock_write(trans, trans->paths + i->path, insert_l(trans, i)->b); in trans_lock_write_fail()
127 if (bch2_btree_node_lock_write(trans, trans->paths + i->path, &insert_l(trans, i)->b->c)) in bch2_trans_lock_write()
131 bch2_btree_node_prep_for_write(trans, trans->paths + i->path, insert_l(trans, i)->b); in bch2_trans_lock_write()
145 trans->paths + i->path, insert_l(trans, i)->b); in bch2_trans_unlock_updates_write()
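
Hits 112-145 are the write-lock handling around a commit: bch2_trans_lock_write() write-locks the leaf of every update, trans_lock_write_fail() backs out the locks already taken when one acquisition fails, and bch2_trans_unlock_updates_write() drops them after the commit. A small all-or-nothing model of that pattern, with plain booleans standing in for the real six locks:

    #include <stdbool.h>
    #include <stdio.h>

    struct node { bool write_locked; };

    /* Pretend trylock; fails if already held. Unlike the real code, this
     * model does not skip duplicate leaves via same_leaf_as_prev(). */
    static bool node_lock_write(struct node *b)
    {
        if (b->write_locked)
            return false;
        b->write_locked = true;
        return true;
    }

    static void node_unlock_write(struct node *b)
    {
        b->write_locked = false;
    }

    /* Lock every update's leaf, or unlock the ones already taken and fail. */
    static bool trans_lock_write(struct node **leaves, unsigned nr)
    {
        for (unsigned i = 0; i < nr; i++) {
            if (!node_lock_write(leaves[i])) {
                while (i--)
                    node_unlock_write(leaves[i]);   /* roll back on failure */
                return false;
            }
        }
        return true;
    }

    static void trans_unlock_updates_write(struct node **leaves, unsigned nr)
    {
        for (unsigned i = 0; i < nr; i++)
            node_unlock_write(leaves[i]);
    }

    int main(void)
    {
        struct node a = {0}, b = {0};
        struct node *leaves[] = { &a, &b };

        printf("locked=%d\n", trans_lock_write(leaves, 2));     /* 1   */
        trans_unlock_updates_write(leaves, 2);
        printf("a=%d b=%d\n", a.write_locked, b.write_locked);  /* 0 0 */
        return 0;
    }
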
155 struct btree *b, in bch2_btree_bset_insert_key() argument
162 EBUG_ON(btree_node_just_written(b)); in bch2_btree_bset_insert_key()
163 EBUG_ON(bset_written(b, btree_bset_last(b))); in bch2_btree_bset_insert_key()
165 EBUG_ON(bpos_lt(insert->k.p, b->data->min_key)); in bch2_btree_bset_insert_key()
166 EBUG_ON(bpos_gt(insert->k.p, b->data->max_key)); in bch2_btree_bset_insert_key()
167 EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b)); in bch2_btree_bset_insert_key()
168 EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos)); in bch2_btree_bset_insert_key()
171 k = bch2_btree_node_iter_peek_all(node_iter, b); in bch2_btree_bset_insert_key()
172 if (k && bkey_cmp_left_packed(b, k, &insert->k.p)) in bch2_btree_bset_insert_key()
184 btree_account_key_drop(b, k); in bch2_btree_bset_insert_key()
188 push_whiteout(b, insert->k.p); in bch2_btree_bset_insert_key()
191 if (k >= btree_bset_last(b)->start) { in bch2_btree_bset_insert_key()
193 bch2_bset_delete(b, k, clobber_u64s); in bch2_btree_bset_insert_key()
196 bch2_btree_path_fix_key_modified(trans, b, k); in bch2_btree_bset_insert_key()
204 btree_account_key_drop(b, k); in bch2_btree_bset_insert_key()
210 if (k >= btree_bset_last(b)->start) { in bch2_btree_bset_insert_key()
214 bch2_btree_path_fix_key_modified(trans, b, k); in bch2_btree_bset_insert_key()
218 k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b)); in bch2_btree_bset_insert_key()
220 bch2_bset_insert(b, k, insert, clobber_u64s); in bch2_btree_bset_insert_key()
224 bch2_btree_node_iter_fix(trans, path, b, node_iter, k, in bch2_btree_bset_insert_key()
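
The large cluster of hits from 155 to 224 is bch2_btree_bset_insert_key(): after sanity checks that the key lies inside the node's bounds and fits, it looks up the insert position, drops or whites out any existing key there, then inserts the new key into the node's last (still unwritten) bset and fixes up node iterators. The sketch below models only the replace-or-insert decision on a sorted array; packed keys, whiteouts, and iterator fixups from the real function are left out:

    #include <stdio.h>
    #include <string.h>

    #define MAX_KEYS 16

    struct bset_model {
        int keys[MAX_KEYS];
        int nr;
    };

    /* Insert `key` keeping the array sorted; if a key already exists at the
     * same position, overwrite it in place (the real code may instead drop
     * the old key and push a whiteout when it lives in a written bset). */
    static void bset_insert_key(struct bset_model *t, int key)
    {
        int i = 0;

        while (i < t->nr && t->keys[i] < key)
            i++;

        if (i < t->nr && t->keys[i] == key)
            return;                      /* same position: overwrite in place */

        memmove(&t->keys[i + 1], &t->keys[i], (t->nr - i) * sizeof(t->keys[0]));
        t->keys[i] = key;
        t->nr++;
    }

    int main(void)
    {
        struct bset_model t = { .keys = {10, 30}, .nr = 2 };

        bset_insert_key(&t, 20);
        bset_insert_key(&t, 30);         /* already present: overwritten in place */

        for (int i = 0; i < t.nr; i++)
            printf("%d ", t.keys[i]);    /* 10 20 30 */
        printf("\n");
        return 0;
    }
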
234 struct btree *b = container_of(w, struct btree, writes[i]); in __btree_node_flush() local
237 unsigned idx = w - b->writes; in __btree_node_flush()
239 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); in __btree_node_flush()
241 old = READ_ONCE(b->flags); in __btree_node_flush()
253 } while (!try_cmpxchg(&b->flags, &old, new)); in __btree_node_flush()
255 btree_node_write_if_need(trans, b, SIX_LOCK_read); in __btree_node_flush()
256 six_unlock_read(&b->c.lock); in __btree_node_flush()
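
Hits 234-256 are __btree_node_flush(): it takes a read lock on the node, then updates b->flags with a read-modify-write loop built on try_cmpxchg() before writing the node out if needed. The same lock-free flag-update loop, written here against C11 atomics rather than the kernel's try_cmpxchg(), and with an illustrative flag bit rather than the real enum:

    #include <stdatomic.h>
    #include <stdio.h>

    #define BTREE_NODE_NEED_WRITE (1u << 0)   /* illustrative flag bit */

    static _Atomic unsigned node_flags;

    /* Set a flag bit with a compare-and-exchange retry loop: load the old
     * value, compute the new one, and retry if another thread raced us. */
    static void set_need_write(void)
    {
        unsigned old = atomic_load(&node_flags);
        unsigned new;

        do {
            new = old | BTREE_NODE_NEED_WRITE;
            /* on failure, `old` is refreshed with the current value */
        } while (!atomic_compare_exchange_weak(&node_flags, &old, new));
    }

    int main(void)
    {
        set_need_write();
        printf("flags=%#x\n", atomic_load(&node_flags));   /* 0x1 */
        return 0;
    }
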
273 struct btree *b, u64 seq) in bch2_btree_add_journal_pin() argument
275 struct btree_write *w = btree_current_write(b); in bch2_btree_add_journal_pin()
278 btree_node_write_idx(b) == 0 in bch2_btree_add_journal_pin()
296 struct btree *b = path_l(path)->b; in bch2_btree_insert_key_leaf() local
297 struct bset_tree *t = bset_tree_last(b); in bch2_btree_insert_key_leaf()
298 struct bset *i = bset(b, t); in bch2_btree_insert_key_leaf()
300 int old_live_u64s = b->nr.live_u64s; in bch2_btree_insert_key_leaf()
303 if (unlikely(!bch2_btree_bset_insert_key(trans, path, b, in bch2_btree_insert_key_leaf()
309 bch2_btree_add_journal_pin(c, b, journal_seq); in bch2_btree_insert_key_leaf()
311 if (unlikely(!btree_node_dirty(b))) { in bch2_btree_insert_key_leaf()
313 set_btree_node_dirty_acct(c, b); in bch2_btree_insert_key_leaf()
316 live_u64s_added = (int) b->nr.live_u64s - old_live_u64s; in bch2_btree_insert_key_leaf()
319 if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0) in bch2_btree_insert_key_leaf()
320 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added); in bch2_btree_insert_key_leaf()
321 if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0) in bch2_btree_insert_key_leaf()
322 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added); in bch2_btree_insert_key_leaf()
325 bch2_maybe_compact_whiteouts(c, b)) in bch2_btree_insert_key_leaf()
326 bch2_trans_node_reinit_iter(trans, b); in bch2_btree_insert_key_leaf()
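
Hits 296-326 are bch2_btree_insert_key_leaf(): insert the key, pin the journal sequence number, mark the node dirty, then account how many live u64s the insert added or removed and shrink the cached sibling-merge estimates (sib_u64s[]) by the same amount, where U16_MAX means "not computed yet". The accounting step in isolation, with made-up numbers and a simplified leaf type:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    struct leaf_model {
        int      live_u64s;
        uint16_t sib_u64s[2];   /* merge estimates for prev/next sibling; UINT16_MAX = unknown */
    };

    /* After an insert that net-removed keys, shrink the sibling merge
     * estimates by the same amount, clamping at zero. */
    static void account_insert(struct leaf_model *b, int old_live_u64s)
    {
        int added = b->live_u64s - old_live_u64s;

        for (int i = 0; i < 2; i++)
            if (b->sib_u64s[i] != UINT16_MAX && added < 0)
                b->sib_u64s[i] = MAX(0, (int) b->sib_u64s[i] + added);
    }

    int main(void)
    {
        struct leaf_model b = { .live_u64s = 110, .sib_u64s = { 8, UINT16_MAX } };
        int old_live_u64s = b.live_u64s;

        b.live_u64s = 100;      /* the insert overwrote keys, shrinking the node */
        account_insert(&b, old_live_u64s);

        printf("sib[0]=%u sib[1]=%u\n", b.sib_u64s[0], b.sib_u64s[1]);  /* 0 65535 */
        return 0;
    }
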
376 struct btree *b, unsigned u64s) in btree_key_can_insert() argument
378 if (!bch2_btree_node_insert_fits(b, u64s)) in btree_key_can_insert()
388 struct bkey_cached *ck = (void *) path->l[0].b; in btree_key_can_insert_cached_slowpath()
426 struct bkey_cached *ck = (void *) path->l[0].b; in btree_key_can_insert_cached()
620 ? btree_key_can_insert(trans, insert_l(trans, i)->b, u64s) in bch2_trans_commit_write_locked()
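
The remaining hits (376-620) are the "does it fit" checks done under the write locks: btree_key_can_insert() refuses the commit when the new key's u64s would not fit in the leaf, the cached-key variants at 388/426 do the analogous check against a bkey_cached, and bch2_trans_commit_write_locked() picks between them per update. A toy version of the fits check, with an invented leaf type and capacity:

    #include <stdbool.h>
    #include <stdio.h>

    struct leaf_model {
        unsigned u64s_used;
        unsigned u64s_total;
    };

    /* Can a key occupying `u64s` 64-bit words still be inserted?
     * The real function returns an error the caller turns into a node split. */
    static bool key_can_insert(const struct leaf_model *b, unsigned u64s)
    {
        return b->u64s_used + u64s <= b->u64s_total;
    }

    int main(void)
    {
        struct leaf_model b = { .u64s_used = 250, .u64s_total = 256 };

        printf("%d %d\n", key_can_insert(&b, 4), key_can_insert(&b, 10));  /* 1 0 */
        return 0;
    }
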