Lines matching refs: b (fs/bcachefs/btree_locking.c; each entry: source line, matching code, enclosing function)

10 void bch2_btree_lock_init(struct btree_bkey_cached_common *b,  in bch2_btree_lock_init()  argument
14 __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp); in bch2_btree_lock_init()
15 lockdep_set_notrack_class(&b->lock); in bch2_btree_lock_init()
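Lines 10-15 show bch2_btree_lock_init() setting up a node's SIX lock (shared/intent/exclusive) and opting it out of lockdep tracking. Below is a minimal userspace model of the state being initialized; six_lock_model and its fields are illustrative stand-ins, not the kernel's types.

#include <pthread.h>

enum six_lock_type { SIX_LOCK_read, SIX_LOCK_intent, SIX_LOCK_write };

/* Toy stand-in for struct six_lock: a held count per lock type, a
 * sequence number bumped when the node is modified, and a lock
 * protecting the wait list. */
struct six_lock_model {
	pthread_mutex_t	wait_lock;
	unsigned	n[3];
	unsigned	seq;
};

static void six_lock_model_init(struct six_lock_model *l)
{
	pthread_mutex_init(&l->wait_lock, NULL);
	l->n[SIX_LOCK_read] = l->n[SIX_LOCK_intent] = l->n[SIX_LOCK_write] = 0;
	l->seq = 0;
}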
22 struct btree_bkey_cached_common *b, in bch2_btree_node_lock_counts() argument
31 if (IS_ERR_OR_NULL(b)) in bch2_btree_node_lock_counts()
35 if (path != skip && &path->l[level].b->c == b) { in bch2_btree_node_lock_counts()
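Lines 22-35 are the heart of bch2_btree_node_lock_counts(): bail on an error pointer, then walk every path in the transaction and tally the locks held on the given node at the given level, optionally skipping the caller's own path. A simplified, self-contained sketch of that loop follows; struct path_model and its fields are illustrative, not the kernel's struct btree_path.

#include <stddef.h>

enum { SIX_LOCK_read, SIX_LOCK_intent, SIX_LOCK_write };

struct six_lock_count { unsigned n[3]; };

/* Simplified path: the node each level points at and the lock type
 * held on it (-1 = unlocked). */
struct path_model {
	const void	*node[4];
	int		locked[4];
};

static struct six_lock_count
node_lock_counts(struct path_model *paths, size_t nr,
		 const struct path_model *skip,
		 const void *node, unsigned level)
{
	struct six_lock_count ret = { .n = { 0 } };

	for (size_t i = 0; i < nr; i++) {
		const struct path_model *path = &paths[i];

		/* mirrors "if (path != skip && &path->l[level].b->c == b)" */
		if (path != skip &&
		    path->node[level] == node &&
		    path->locked[level] >= 0)
			ret.n[path->locked[level]]++;
	}
	return ret;
}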
48 struct btree_path *path, struct btree *b) in bch2_btree_node_unlock_write() argument
50 bch2_btree_node_unlock_write_inlined(trans, path, b); in bch2_btree_node_unlock_write()
297 struct btree_bkey_cached_common *b; in bch2_check_for_deadlock() local
349 b = &READ_ONCE(path->l[top->level].b)->c; in bch2_check_for_deadlock()
351 if (IS_ERR_OR_NULL(b)) { in bch2_check_for_deadlock()
374 if (list_empty_careful(&b->lock.wait_list)) in bch2_check_for_deadlock()
377 raw_spin_lock(&b->lock.wait_lock); in bch2_check_for_deadlock()
378 list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) { in bch2_check_for_deadlock()
379 BUG_ON(b != trans->locking); in bch2_check_for_deadlock()
393 raw_spin_unlock(&b->lock.wait_lock); in bch2_check_for_deadlock()
401 raw_spin_unlock(&b->lock.wait_lock); in bch2_check_for_deadlock()
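Lines 297-401 are from the cycle detector, bch2_check_for_deadlock(): it loads the node a blocked path points at (line 349), skips error pointers, does a lockless list_empty_careful() fast path (line 374), then takes the lock's wait_lock spinlock and walks every transaction queued on it, asserting each waiter really records this lock as its blocker (line 379). A userspace model of that walk, assuming a singly linked wait list; all names are illustrative.

#include <assert.h>
#include <pthread.h>

struct waiter {
	struct waiter	*next;
	const void	*blocked_on;	/* what this transaction waits for */
};

struct lock_model {
	pthread_mutex_t	wait_lock;
	struct waiter	*wait_list;
};

static void for_each_waiter(struct lock_model *l,
			    void (*fn)(struct waiter *, void *), void *arg)
{
	/* racy fast path, like list_empty_careful() on line 374 */
	if (!l->wait_list)
		return;

	pthread_mutex_lock(&l->wait_lock);
	for (struct waiter *w = l->wait_list; w; w = w->next) {
		/* mirrors BUG_ON(b != trans->locking) on line 379 */
		assert(w->blocked_on == l);
		fn(w, arg);
	}
	pthread_mutex_unlock(&l->wait_lock);
}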
423 struct btree_bkey_cached_common *b, in __bch2_btree_node_lock_write() argument
426 int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read]; in __bch2_btree_node_lock_write()
435 six_lock_readers_add(&b->lock, -readers); in __bch2_btree_node_lock_write()
436 ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write, in __bch2_btree_node_lock_write()
438 six_lock_readers_add(&b->lock, readers); in __bch2_btree_node_lock_write()
441 mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED); in __bch2_btree_node_lock_write()
448 struct btree_bkey_cached_common *b) in bch2_btree_node_lock_write_nofail() argument
450 int ret = __btree_node_lock_write(trans, path, b, true); in bch2_btree_node_lock_write_nofail()
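Lines 423-450 show the write-lock trick: a transaction that itself holds read locks on the node would deadlock waiting for readers to drain, so __bch2_btree_node_lock_write() counts its own read holds, subtracts them from the lock's reader count, takes the write lock, then adds them back (the _nofail variant asserts success). A compact model of that dance with illustrative counters; the real lock can block or return an error where this sketch only trylocks.

/* Illustrative six-lock state: reader count plus a write bit. */
struct six_model {
	int	readers;
	int	write_locked;
};

static void six_readers_add(struct six_model *l, int nr)
{
	l->readers += nr;	/* like six_lock_readers_add() */
}

static int trylock_write(struct six_model *l)
{
	if (l->readers || l->write_locked)
		return -1;
	l->write_locked = 1;
	return 0;
}

/* Hide our own read holds so they cannot block our own write-lock
 * attempt, then restore the count whether or not the lock was taken. */
static int lock_write_with_own_readers(struct six_model *l, int my_readers)
{
	int ret;

	six_readers_add(l, -my_readers);	/* line 435 */
	ret = trylock_write(l);			/* line 436 */
	six_readers_add(l, my_readers);		/* line 438 */
	return ret;
}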
483 f->b = path->l[l].b; in btree_path_get_locks()
507 path->l[l].b = upgrade in btree_path_get_locks()
519 struct btree *b = btree_path_node(path, level); in __bch2_btree_node_relock() local
525 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || in __bch2_btree_node_relock()
526 (btree_node_lock_seq_matches(path, b, level) && in __bch2_btree_node_relock()
527 btree_node_lock_increment(trans, &b->c, level, want))) { in __bch2_btree_node_relock()
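Lines 519-527 show the two relock strategies in __bch2_btree_node_relock(): re-take the lock only if its sequence number still matches what the path recorded (meaning no writer touched the node in between), or, failing that, piggyback on a lock another path in the same transaction already holds via btree_node_lock_increment(). A sketch under those assumptions; types are illustrative, and the real increment bumps a per-transaction hold count rather than touching the lock itself.

#include <stdbool.h>

struct six_model {
	unsigned	seq;		/* bumped on each modification */
	unsigned	readers;
};

/* like six_relock_type(): succeed only if nothing changed since the
 * lock was dropped, i.e. the sequence number still matches */
static bool six_relock_read(struct six_model *l, unsigned want_seq)
{
	if (l->seq != want_seq)
		return false;
	l->readers++;
	return true;
}

static bool relock_or_increment(struct six_model *l, unsigned path_seq,
				unsigned *trans_holds_on_node)
{
	if (six_relock_read(l, path_seq))
		return true;

	/* btree_node_lock_increment() case: another path in this
	 * transaction already locks the node, so just take a reference */
	if (*trans_holds_on_node) {
		(*trans_holds_on_node)++;
		return true;
	}
	return false;
}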
542 struct btree *b = path->l[level].b; in bch2_btree_node_upgrade() local
567 ? six_lock_tryupgrade(&b->c.lock) in bch2_btree_node_upgrade()
568 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) in bch2_btree_node_upgrade()
571 if (btree_node_lock_seq_matches(path, b, level) && in bch2_btree_node_upgrade()
572 btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) { in bch2_btree_node_upgrade()
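Lines 542-572 show bch2_btree_node_upgrade() promoting a read hold to intent: if the path still holds the read lock, try an in-place upgrade; if the lock was dropped, try to re-take it as intent under the saved sequence number; as a last resort, borrow an intent hold from another path in the transaction (line 572). A minimal model of the first two attempts, with illustrative fields; the real six_lock_tryupgrade() likewise fails while another thread holds intent.

#include <stdbool.h>

struct six_model {
	unsigned	seq;
	unsigned	readers;
	bool		intent;		/* at most one intent holder */
};

/* like six_lock_tryupgrade(): trade our read hold for intent, which
 * only works while no one else holds intent */
static bool tryupgrade(struct six_model *l)
{
	if (l->intent)
		return false;
	l->readers--;
	l->intent = true;
	return true;
}

/* like six_relock_type(..., SIX_LOCK_intent, seq): re-take as intent
 * only if the node is unchanged and intent is free */
static bool relock_intent(struct six_model *l, unsigned want_seq)
{
	if (l->seq != want_seq || l->intent)
		return false;
	l->intent = true;
	return true;
}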
704 prt_printf(&buf, "node %s ", IS_ERR(f.b) ? bch2_err_str(PTR_ERR(f.b)) : in __bch2_btree_path_upgrade()
705 !f.b ? "(null)" : "(node)"); in __bch2_btree_path_upgrade()
707 IS_ERR_OR_NULL(f.b) ? 0 : f.b->c.lock.seq, in __bch2_btree_path_upgrade()
737 six_lock_downgrade(&path->l[l].b->c.lock); in __bch2_btree_path_downgrade()
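Line 737 is the downgrade path: __bch2_btree_path_downgrade() converts intent holds back to read with six_lock_downgrade(), which never blocks. In the same illustrative counter model used above:

#include <stdbool.h>

struct six_model {
	unsigned	readers;
	bool		intent;
};

/* like six_lock_downgrade(): trade intent for read; cannot fail, since
 * an intent holder already excludes other intent and write holders */
static void downgrade(struct six_model *l)
{
	l->intent = false;
	l->readers++;
}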
786 if (IS_ERR_OR_NULL(f->b)) { in bch2_trans_relock_fail()
787 prt_str(&buf, bch2_err_str(PTR_ERR(f->b))); in bch2_trans_relock_fail()
789 prt_printf(&buf, "%u", f->b->c.lock.seq); in bch2_trans_relock_fail()
792 bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l); in bch2_trans_relock_fail()
795 c = six_lock_counts(&f->b->c.lock); in bch2_trans_relock_fail()
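Lines 786-795 show the relock-failure report: print either the error from the failed node pointer or the lock's current sequence number, then dump lock counts both as the transaction sees them (bch2_btree_node_lock_counts()) and as the lock itself reports (six_lock_counts()). A sketch with printf standing in for the kernel's printbuf helpers; the function and parameter names are illustrative.

#include <stdio.h>

struct six_lock_count { unsigned n[3]; };	/* read/intent/write */

static void relock_fail_report(long node_err, unsigned lock_seq,
			       struct six_lock_count held_by_trans,
			       struct six_lock_count held_total)
{
	if (node_err)
		printf("node error %ld\n", node_err);
	else
		printf("lock seq %u\n", lock_seq);

	printf("held by trans: r %u i %u w %u\n",
	       held_by_trans.n[0], held_by_trans.n[1], held_by_trans.n[2]);
	printf("lock totals:   r %u i %u w %u\n",
	       held_total.n[0], held_total.n[1], held_total.n[2]);
}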
870 bch2_btree_node_unlock_write(trans, path, path->l[l].b); in bch2_trans_unlock_write()
909 path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock)); in __bch2_btree_path_verify_locks()
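Line 909 is the core invariant __bch2_btree_path_verify_locks() checks: a path that claims to hold a lock must have a cached lock_seq equal to the live lock's sequence; a mismatch means the node changed while the path believed it held the lock. As an assertion in the illustrative model:

#include <assert.h>

struct six_model { unsigned seq; };

static void verify_path_lock(unsigned path_lock_seq,
			     const struct six_model *l)
{
	/* mirrors "path->l[l].lock_seq != six_lock_seq(...)" on line 909 */
	assert(path_lock_seq == l->seq);
}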