Lines matching refs: b — bcache extent/btree-pointer code, grouped by enclosing function

bch_bkey_dump():
  128  struct btree *b = container_of(keys, struct btree, keys);   /* local */
  136  size_t n = PTR_BUCKET_NR(b->c, k, j);
  139  if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
  141      PTR_BUCKET(b->c, k, j)->prio);
  144  pr_cont(" %s\n", bch_ptr_status(b->c, k));
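Nearly every function in this listing opens by recovering the enclosing struct btree from an embedded struct btree_keys via container_of(), which is how b->c (the cache_set) becomes reachable. A minimal userspace sketch of that pattern follows; the struct fields are hypothetical stand-ins, and only the embedded-member layout matters:

#include <stddef.h>
#include <stdio.h>

/* Userspace rendition of the kernel's container_of(): walk back from a
 * pointer to an embedded member to the start of the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct btree_keys { int nsets; };              /* stand-in fields */
struct cache_set  { unsigned bucket_size; };   /* stand-in fields */

struct btree {
	struct cache_set *c;
	struct btree_keys keys;  /* embedded, as in bcache's struct btree */
};

int main(void)
{
	struct cache_set cs = { .bucket_size = 1024 };
	struct btree bt = { .c = &cs };
	struct btree_keys *keys = &bt.keys;

	/* The recurring line from the listing:
	 * struct btree *b = container_of(keys, struct btree, keys); */
	struct btree *b = container_of(keys, struct btree, keys);

	printf("recovered b == &bt? %d; b->c->bucket_size = %u\n",
	       b == &bt, b->c->bucket_size);
	return 0;
}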
bch_btree_ptr_invalid():
  168  struct btree *b = container_of(bk, struct btree, keys);   /* local */
  170  return __bch_btree_ptr_invalid(b->c, k);
btree_ptr_bad_expensive():
  173  static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)   /* argument */
  179  if (mutex_trylock(&b->c->bucket_lock)) {
  181  if (ptr_available(b->c, k, i)) {
  182  g = PTR_BUCKET(b->c, k, i);
  186  (b->c->gc_mark_valid &&
  191  mutex_unlock(&b->c->bucket_lock);
  196  mutex_unlock(&b->c->bucket_lock);
  198  btree_bug(b,
  200      buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
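Both btree_ptr_bad_expensive() and bch_extent_bad_expensive() (further below) share the same shape: the costly consistency check runs only if bucket_lock can be taken without blocking, and the lock is dropped on both the normal path (line 191) and the error path (line 196) before reporting via btree_bug(). A condensed pthread sketch of that shape, with hypothetical fields standing in for the real bucket state:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

struct bucket { unsigned char gen; };  /* stand-in for struct bucket */

/* Condensed shape of the *_bad_expensive() helpers: validate only when the
 * lock is uncontended, unlock before reporting, never block the caller. */
static bool bucket_bad_expensive(struct bucket *g, unsigned char key_gen,
				 bool gc_mark_valid)
{
	if (pthread_mutex_trylock(&bucket_lock) == 0) {
		if (gc_mark_valid && g->gen != key_gen) {
			pthread_mutex_unlock(&bucket_lock);  /* error-path unlock */
			fprintf(stderr, "inconsistent ptr: bucket gen %u, key gen %u\n",
				g->gen, key_gen);            /* stands in for btree_bug() */
			return true;
		}
		pthread_mutex_unlock(&bucket_lock);          /* normal-path unlock */
	}
	/* Lock contended (or check passed): treat the pointer as fine. */
	return false;
}

int main(void)
{
	struct bucket g = { .gen = 3 };

	printf("match: bad=%d\n", bucket_bad_expensive(&g, 3, true));
	printf("drift: bad=%d\n", bucket_bad_expensive(&g, 2, true));
	return 0;
}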
bch_btree_ptr_bad():
  207  struct btree *b = container_of(bk, struct btree, keys);   /* local */
  216  if (!ptr_available(b->c, k, i) ||
  217      ptr_stale(b->c, k, i))
  220  if (expensive_debug_checks(b->c) &&
  221      btree_ptr_bad_expensive(b, k))
bch_btree_ptr_insert_fixup():
  232  struct btree *b = container_of(bk, struct btree, keys);   /* local */
  235  btree_current_write(b)->prio_blocked++;
bch_extent_insert_fixup():
  323  static bool bch_extent_insert_fixup(struct btree_keys *b,   /* argument */
  328  struct cache_set *c = container_of(b, struct btree, keys)->c;
  410  if (bkey_written(b, k)) {
  423  top = bch_bset_search(b, bset_tree_last(b),
  425  bch_bset_insert(b, top, k);
  429  bch_bset_insert(b, k, &temp.key);
  435  bch_bset_fix_invalidated_key(b, k);
  445  if (bkey_written(b, k) &&
  454  bch_bset_fix_invalidated_key(b, k);
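Note that here b is the struct btree_keys * itself rather than the recovered btree, so line 328 runs container_of() in the opposite direction to reach the cache_set. Lines 410–454 belong to the overlap-resolution core of bch_extent_insert_fixup(): when an insert lands strictly inside an existing written extent, the old key cannot simply be truncated in place, so a copy covering the top half is inserted (the bch_bset_insert() calls at 425/429) and the bottom half is kept. A simplified interval version of that split, using hypothetical start/end pairs instead of bkeys:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flat extent: [start, end) on the device. */
struct ext { uint64_t start, end; };

/* Simplified middle-overlap split: inserting `ins` strictly inside `old`
 * leaves two surviving fragments, [old.start, ins.start) and
 * [ins.end, old.end). */
static int split_middle(struct ext old, struct ext ins,
			struct ext *bottom, struct ext *top)
{
	if (!(old.start < ins.start && ins.end < old.end))
		return 0;  /* not a strict middle overlap */
	*bottom = (struct ext){ old.start, ins.start };
	*top    = (struct ext){ ins.end, old.end };
	return 1;
}

int main(void)
{
	struct ext old = { 0, 100 }, ins = { 40, 60 }, bottom, top;

	if (split_middle(old, ins, &bottom, &top))
		printf("kept [%llu,%llu) and [%llu,%llu), inserted [%llu,%llu)\n",
		       (unsigned long long)bottom.start, (unsigned long long)bottom.end,
		       (unsigned long long)top.start, (unsigned long long)top.end,
		       (unsigned long long)ins.start, (unsigned long long)ins.end);
	return 0;
}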
bch_extent_invalid():
  502  struct btree *b = container_of(bk, struct btree, keys);   /* local */
  504  return __bch_extent_invalid(b->c, k);
bch_extent_bad_expensive():
  507  static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,   /* argument */
  510  struct bucket *g = PTR_BUCKET(b->c, k, ptr);
  513  if (mutex_trylock(&b->c->bucket_lock)) {
  514  if (b->c->gc_mark_valid &&
  523  mutex_unlock(&b->c->bucket_lock);
  528  mutex_unlock(&b->c->bucket_lock);
  530  btree_bug(b,
  532      buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
bch_extent_bad():
  539  struct btree *b = container_of(bk, struct btree, keys);   /* local */
  548  if (!ptr_available(b->c, k, i))
  552  stale = ptr_stale(b->c, k, i);
  560  btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
  562      stale, b->c->need_gc);
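The staleness test at lines 552–562 relies on small wrapping bucket generations: a pointer is stale by however many generations its bucket has advanced since the key was written, and drift beyond BUCKET_GC_GEN_MAX indicates a bug, tripping the btree_bug_on() at line 560. A sketch of that wrapped-generation arithmetic; the cap value and helper name here are illustrative stand-ins, not bcache's definitions:

#include <stdint.h>
#include <stdio.h>

#define BUCKET_GC_GEN_MAX 96  /* illustrative cap; bcache defines the real one */

/* Stand-in for ptr_stale(): generations are 8-bit and wrap, so staleness
 * is the wrapped difference between the bucket's current gen and the gen
 * recorded in the key's pointer. */
static inline uint8_t gen_stale(uint8_t bucket_gen, uint8_t key_gen)
{
	return (uint8_t)(bucket_gen - key_gen);
}

int main(void)
{
	printf("%u\n", gen_stale(5, 5));     /* 0: pointer current */
	printf("%u\n", gen_stale(7, 5));     /* 2: bucket reused since */
	printf("%u\n", gen_stale(1, 255));   /* 2: wraps correctly */
	/* gen_stale(...) > BUCKET_GC_GEN_MAX would be the bug case above */
	return 0;
}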
bch_extent_merge():
  585  struct btree *b = container_of(bk, struct btree, keys);   /* local */
  588  if (key_merging_disabled(b->c))
  593      PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
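Finally, lines 588–593 show two of bch_extent_merge()'s early-out conditions: merging can be switched off for debugging, and two extents may only fuse if each corresponding pointer pair resolves to the same bucket. A sketch of that same-bucket test, with a hypothetical bucket-number helper in place of PTR_BUCKET_NR():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKET_SIZE 1024u  /* hypothetical; the real size comes from the superblock */

/* Hypothetical stand-in for PTR_BUCKET_NR(): which bucket an offset falls in. */
static inline uint64_t ptr_bucket_nr(uint64_t ptr_offset)
{
	return ptr_offset / BUCKET_SIZE;
}

/* Mirrors the test at line 593: refuse the merge if any pointer pair of the
 * left and right keys lands in different buckets. */
static bool ptrs_same_bucket(const uint64_t *l, const uint64_t *r, unsigned n)
{
	for (unsigned i = 0; i < n; i++)
		if (ptr_bucket_nr(l[i]) != ptr_bucket_nr(r[i]))
			return false;
	return true;
}

int main(void)
{
	uint64_t l[] = { 100, 2048 }, r[] = { 900, 2500 };

	printf("%d\n", ptrs_same_bucket(l, r, 2));  /* 1: buckets {0,2} match */
	r[1] = 4096;
	printf("%d\n", ptrs_same_bucket(l, r, 2));  /* 0: bucket 4 vs bucket 2 */
	return 0;
}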