Lines Matching refs: b (bcachefs btree locking helpers)

22 return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b); in is_btree_node()
141 bch2_btree_node_unlock_write(trans, path, path->l[level].b); in btree_node_unlock()
144 six_unlock_type(&path->l[level].b->c.lock, lock_type); in btree_node_unlock()
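The two matches inside btree_node_unlock() above suggest a two-step release: a write-locked level is handed to bch2_btree_node_unlock_write() first, and whatever lock type remains is then dropped with six_unlock_type(). A minimal userspace sketch of that shape (all names hypothetical, not the kernel's types):

#include <stdio.h>

/* Each level records which lock type it holds; unlocking a
 * write-locked level downgrades to intent before dropping the rest. */
enum lock_type { UNLOCKED = -1, READ, INTENT, WRITE };

struct level {
	enum lock_type held;
};

static void unlock_write(struct level *l)
{
	l->held = INTENT;		/* write component dropped, intent kept */
}

static void unlock_type(struct level *l)
{
	l->held = UNLOCKED;		/* drop whatever type was held */
}

static void node_unlock(struct level *l)
{
	if (l->held == WRITE)
		unlock_write(l);	/* bch2_btree_node_unlock_write() step */
	if (l->held != UNLOCKED)
		unlock_type(l);		/* six_unlock_type() step */
}

int main(void)
{
	struct level l = { .held = WRITE };

	node_unlock(&l);
	printf("held after unlock: %d\n", (int) l.held);
	return 0;
}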
174 __bch2_btree_node_unlock_write(struct btree_trans *trans, struct btree *b) in __bch2_btree_node_unlock_write() argument
176 if (!b->c.lock.write_lock_recurse) { in __bch2_btree_node_unlock_write()
180 trans_for_each_path_with_node(trans, b, linked, i) in __bch2_btree_node_unlock_write()
181 linked->l[b->c.level].lock_seq++; in __bch2_btree_node_unlock_write()
184 six_unlock_write(&b->c.lock); in __bch2_btree_node_unlock_write()
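six_unlock_write() advances the lock's sequence number, so __bch2_btree_node_unlock_write() first walks every path in the transaction that points at this node and advances its cached lock_seq in step, keeping path->l[level].lock_seq == six_lock_seq() true across the unlock; the write_lock_recurse check skips the bump while the write lock is still recursively held. A small userspace model of that bookkeeping (hypothetical names, seq handling simplified):

#include <stdio.h>

#define MAX_PATHS 2

/* A lock whose sequence number advances when the write lock is finally
 * dropped, and paths caching the sequence observed when they locked. */
struct model_lock {
	unsigned write_lock_recurse;
	unsigned seq;
};

struct model_path {
	struct model_lock	*b;	/* node this path points at, or NULL */
	unsigned		lock_seq;
};

static struct model_path paths[MAX_PATHS];

static void unlock_write(struct model_lock *lock)
{
	if (!lock->write_lock_recurse) {
		/* The final write unlock advances lock->seq; advance the
		 * cached seq of every path still on this node so that
		 * path->lock_seq == lock->seq keeps holding afterwards. */
		for (int i = 0; i < MAX_PATHS; i++)
			if (paths[i].b == lock)
				paths[i].lock_seq++;
		lock->seq++;		/* models six_unlock_write() */
	} else {
		lock->write_lock_recurse--;
	}
}

int main(void)
{
	struct model_lock node = { 0, 0 };

	paths[0] = (struct model_path) { .b = &node, .lock_seq = node.seq };
	unlock_write(&node);
	printf("path seq %u, lock seq %u: relock would %s\n",
	       paths[0].lock_seq, node.seq,
	       paths[0].lock_seq == node.seq ? "succeed" : "fail");
	return 0;
}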
189 struct btree *b) in bch2_btree_node_unlock_write_inlined() argument
191 EBUG_ON(path->l[b->c.level].b != b); in bch2_btree_node_unlock_write_inlined()
192 EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock)); in bch2_btree_node_unlock_write_inlined()
193 EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write); in bch2_btree_node_unlock_write_inlined()
195 mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); in bch2_btree_node_unlock_write_inlined()
196 __bch2_btree_node_unlock_write(trans, b); in bch2_btree_node_unlock_write_inlined()
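bch2_btree_node_unlock_write_inlined() is a downgrade, not a full unlock: after the EBUG_ON() sanity checks (right node, matching seq, write lock actually held), the level is re-marked BTREE_NODE_INTENT_LOCKED and only the write component is released, so the path keeps its intent hold. A sketch of the state transition, assuming a simplified lock-state enum:

#include <assert.h>
#include <stdio.h>

enum node_locked_type {
	BTREE_NODE_UNLOCKED,
	BTREE_NODE_READ_LOCKED,
	BTREE_NODE_INTENT_LOCKED,
	BTREE_NODE_WRITE_LOCKED,
};

struct level {
	enum node_locked_type locked;
};

static void node_unlock_write(struct level *l)
{
	/* Mirrors the EBUG_ON(): we must really hold the write lock. */
	assert(l->locked == BTREE_NODE_WRITE_LOCKED);

	/* Re-mark as intent, then drop only the write component; the
	 * intent hold survives, so no other writer can get in, but
	 * readers are no longer excluded. */
	l->locked = BTREE_NODE_INTENT_LOCKED;
	/* ... six_unlock_write(&b->c.lock) would happen here ... */
}

int main(void)
{
	struct level l = { .locked = BTREE_NODE_WRITE_LOCKED };

	node_unlock_write(&l);
	printf("intent still held: %d\n",
	       l.locked == BTREE_NODE_INTENT_LOCKED);
	return 0;
}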
228 struct btree_bkey_cached_common *b, in __btree_node_lock_nopath() argument
235 trans->locking = b; in __btree_node_lock_nopath()
237 int ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait, in __btree_node_lock_nopath()
243 trace_btree_path_lock(trans, _THIS_IP_, b); in __btree_node_lock_nopath()
249 struct btree_bkey_cached_common *b, in btree_node_lock_nopath() argument
253 return __btree_node_lock_nopath(trans, b, type, false, ip); in btree_node_lock_nopath()
257 struct btree_bkey_cached_common *b, in btree_node_lock_nopath_nofail() argument
260 int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_); in btree_node_lock_nopath_nofail()
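The nopath variants take a node's lock without going through a btree_path. The fragments show the transaction publishing the lock it is about to block on in trans->locking before calling six_lock_ip_waiter(), presumably for deadlock detection to inspect; the _nofail variant treats failure as a bug. A userspace model (hypothetical names; clearing trans->locking afterwards is an assumption of this sketch):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct node_lock { int dummy; };

struct trans {
	struct node_lock *locking;	/* lock we are blocked on, or NULL */
};

/* Stand-in for six_lock_ip_waiter(); pretend the acquire succeeds. */
static int lock_blocking(struct node_lock *lock)
{
	(void) lock;
	return 0;
}

static int node_lock_nopath(struct trans *trans, struct node_lock *lock)
{
	trans->locking = lock;		/* visible while we block */
	int ret = lock_blocking(lock);
	trans->locking = NULL;
	return ret;
}

/* _nofail: the caller knows this acquire cannot deadlock, so a
 * failure would be a bug rather than a transaction restart. */
static void node_lock_nopath_nofail(struct trans *trans, struct node_lock *lock)
{
	int ret = node_lock_nopath(trans, lock);
	assert(!ret);
}

int main(void)
{
	struct trans trans = { NULL };
	struct node_lock l = { 0 };

	node_lock_nopath_nofail(&trans, &l);
	printf("locked, trans->locking cleared: %d\n", trans.locking == NULL);
	return 0;
}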
270 struct btree_bkey_cached_common *b, in btree_node_lock_increment() argument
278 if (&path->l[level].b->c == b && in btree_node_lock_increment()
280 six_lock_increment(&b->lock, (enum six_lock_type) want); in btree_node_lock_increment()
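btree_node_lock_increment() avoids a real acquire when another path in the same transaction already holds this node with at least the wanted lock type; six_lock_increment() then just takes another hold on the lock. A sketch of that scan, assuming lock types are ordered so "at least as strong" is a plain comparison:

#include <stdbool.h>
#include <stdio.h>

enum lock_type { T_READ, T_INTENT, T_WRITE };

struct node { int holds; };

struct path {
	struct node	*b;
	enum lock_type	held;
};

static bool lock_increment(struct path *paths, int nr,
			   struct node *b, enum lock_type want)
{
	for (int i = 0; i < nr; i++)
		if (paths[i].b == b && paths[i].held >= want) {
			b->holds++;	/* models six_lock_increment() */
			return true;
		}
	return false;
}

int main(void)
{
	struct node n = { .holds = 1 };
	struct path paths[1] = { { .b = &n, .held = T_INTENT } };

	printf("read via increment:  %d\n",
	       lock_increment(paths, 1, &n, T_READ));
	printf("write via increment: %d\n",
	       lock_increment(paths, 1, &n, T_WRITE));
	return 0;
}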
289 struct btree_bkey_cached_common *b, in btree_node_lock() argument
299 if (likely(six_trylock_type(&b->lock, type)) || in btree_node_lock()
300 btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) || in btree_node_lock()
301 !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) { in btree_node_lock()
303 path->l[b->level].lock_taken_time = local_clock(); in btree_node_lock()
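btree_node_lock() itself is a three-tier cascade: six_trylock_type() as the fast path, then btree_node_lock_increment() to piggyback on a sibling path's hold, then the blocking btree_node_lock_nopath() slowpath; on any success the level's lock_taken_time is stamped, which the fragments suggest feeds lock-hold-time statistics. A compressed userspace model (hypothetical names; the slowpath always "succeeds" here):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct node { bool locked; };

static bool trylock(struct node *b)
{
	if (b->locked)
		return false;
	b->locked = true;
	return true;
}

static bool lock_increment(struct node *b)
{
	(void) b;
	return false;		/* no sibling path holds this node */
}

static int lock_slowpath(struct node *b)
{
	b->locked = true;	/* pretend the blocking acquire worked */
	return 0;
}

static int node_lock(struct node *b, unsigned long long *taken_time)
{
	int ret = 0;

	/* 1) trylock fast path, 2) reuse a sibling path's hold,
	 * 3) blocking slowpath that can return a restart error. */
	if (trylock(b) ||
	    lock_increment(b) ||
	    !(ret = lock_slowpath(b)))
		*taken_time = (unsigned long long) clock();  /* hold-time stats */

	return ret;
}

int main(void)
{
	struct node n = { false };
	unsigned long long t = 0;

	printf("lock returned %d\n", node_lock(&n, &t));
	return 0;
}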
311 struct btree_bkey_cached_common *b, bool);
315 struct btree_bkey_cached_common *b, in __btree_node_lock_write() argument
318 EBUG_ON(&path->l[b->level].b->c != b); in __btree_node_lock_write()
319 EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock)); in __btree_node_lock_write()
320 EBUG_ON(!btree_node_intent_locked(path, b->level)); in __btree_node_lock_write()
327 mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED); in __btree_node_lock_write()
329 return likely(six_trylock_write(&b->lock)) in __btree_node_lock_write()
331 : __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail); in __btree_node_lock_write()
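__btree_node_lock_write() upgrades an intent lock to a write lock: the EBUG_ON()s require that this path already holds the node intent-locked with a matching seq, the level is marked BTREE_NODE_WRITE_LOCKED up front, and six_trylock_write() covers the uncontended case while the out-of-line __bch2_btree_node_lock_write() handles the rest. A sketch of the upgrade rule, assuming (as in SIX locks) write needs the intent holder plus zero readers:

#include <assert.h>
#include <stdio.h>

struct lock {
	int readers;
	int intent_held;
};

static int trylock_write(struct lock *l)
{
	return l->intent_held && !l->readers;
}

static int lock_write_slowpath(struct lock *l)
{
	/* The real slowpath waits for readers to drain or restarts the
	 * transaction; this model just reports contention. */
	(void) l;
	return -1;
}

static int node_lock_write(struct lock *l)
{
	assert(l->intent_held);	/* mirrors the EBUG_ON()s */
	return trylock_write(l) ? 0 : lock_write_slowpath(l);
}

int main(void)
{
	struct lock l = { .readers = 1, .intent_held = 1 };

	printf("with a reader:   %d\n", node_lock_write(&l));
	l.readers = 0;
	printf("readers drained: %d\n", node_lock_write(&l));
	return 0;
}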
337 struct btree_bkey_cached_common *b) in bch2_btree_node_lock_write() argument
339 return __btree_node_lock_write(trans, path, b, false); in bch2_btree_node_lock_write()
370 (!IS_ERR_OR_NULL(path->l[level].b) && in bch2_btree_node_relock()
382 (!IS_ERR_OR_NULL(path->l[level].b) && in bch2_btree_node_relock_notrace()
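Both relock variants gate on the same condition: the level must hold a live node pointer (not an ERR_PTR sentinel) and the node's current six_lock_seq() must equal the path's cached lock_seq; a mismatch means a writer modified the node since the path last held it, so the relock must fail. A minimal model (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

struct lock { unsigned seq; };

struct level {
	struct lock	*b;		/* NULL models IS_ERR_OR_NULL() */
	unsigned	lock_seq;
};

static bool relock(struct level *l)
{
	return l->b && l->lock_seq == l->b->seq;
}

int main(void)
{
	struct lock node = { .seq = 7 };
	struct level l = { .b = &node, .lock_seq = 7 };

	printf("unchanged node: %d\n", relock(&l));
	node.seq++;			/* a writer modified the node */
	printf("after a write:  %d\n", relock(&l));
	return 0;
}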
431 path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up); in __btree_path_set_level_up()
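This line pairs with is_btree_node() at line 22: an invalid level stores an ERR_PTR-encoded error rather than a node pointer, so one IS_ERR_OR_NULL() test covers both "no node" and "error" states. A userspace rendition of that sentinel trick (the kernel's real macros live in <linux/err.h>; the errno value below is a stand-in, not the real BCH_ERR_* value):

#include <stdint.h>
#include <stdio.h>

/* Small negative errnos are encoded at the very top of the pointer
 * range, so a single test covers both NULL and error values. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long err)
{
	return (void *) (intptr_t) err;
}

static int IS_ERR_OR_NULL(const void *p)
{
	return !p || (uintptr_t) p >= (uintptr_t) -MAX_ERRNO;
}

int main(void)
{
	int x;
	void *b = ERR_PTR(-5);	/* stand-in errno */

	printf("error sentinel is node: %d\n", !IS_ERR_OR_NULL(b));
	printf("real pointer is node:   %d\n", !IS_ERR_OR_NULL(&x));
	return 0;
}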
445 struct btree_bkey_cached_common *b,