Lines matching references to the identifier c in drivers/md/bcache/btree.c
98 #define PTR_HASH(c, k) \ argument
99 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
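PTR_HASH() above collapses a key's first pointer to a single word: shifting by bucket_bits discards the offset inside the bucket, so every key pointing into the same bucket yields the same value, and OR-ing in the pointer's generation keeps stale keys from matching a reused bucket. A minimal userspace sketch of the same idea, using made-up simplified types rather than bcache's real bkey layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the bcache structures. */
struct toy_key { uint64_t ptr0; uint8_t gen; };
struct toy_set { unsigned bucket_bits; };   /* ilog2(bucket size in sectors) */

static uint64_t toy_ptr_hash(const struct toy_set *c, const struct toy_key *k)
{
	/* Same shape as PTR_HASH(): bucket number | generation. */
	return (k->ptr0 >> c->bucket_bits) | k->gen;
}

int main(void)
{
	struct toy_set c = { .bucket_bits = 10 };                 /* 1024-sector buckets */
	struct toy_key a = { .ptr0 = (5 << 10) | 37,  .gen = 3 }; /* bucket 5, offset 37 */
	struct toy_key b = { .ptr0 = (5 << 10) | 200, .gen = 3 }; /* same bucket, other offset */

	/* Both print the same value, which is what mca_find() relies on. */
	printf("%llu %llu\n",
	       (unsigned long long)toy_ptr_hash(&c, &a),
	       (unsigned long long)toy_ptr_hash(&c, &b));
	return 0;
}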
108 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache); in write_block()
115 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
117 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
121 bset_magic(&b->c->cache->sb)); in bch_btree_init_next()
127 void bkey_put(struct cache_set *c, struct bkey *k) in bkey_put() argument
132 if (ptr_available(c, k, i)) in bkey_put()
133 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); in bkey_put()
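bkey_put() drops the allocation pins a key holds: for each pointer that still targets a live cache, the pin count of the bucket it points at is decremented so the allocator may eventually reuse that bucket. A toy version of the same loop, with an invented layout standing in for ptr_available()/PTR_BUCKET():

#include <linux/atomic.h>

struct toy_bucket { atomic_t pin; };

struct toy_key {
	unsigned           nr_ptrs;
	struct toy_bucket *bucket[3];   /* NULL stands in for !ptr_available() */
};

static void toy_key_put(struct toy_key *k)
{
	unsigned i;

	for (i = 0; i < k->nr_ptrs; i++)
		if (k->bucket[i])
			atomic_dec(&k->bucket[i]->pin);
}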
158 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
159 iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; in bch_btree_node_read_done()
177 if (b->written + set_blocks(i, block_bytes(b->c->cache)) > in bch_btree_node_read_done()
182 if (i->magic != bset_magic(&b->c->cache->sb)) in bch_btree_node_read_done()
203 b->written += set_blocks(i, block_bytes(b->c->cache)); in bch_btree_node_read_done()
209 i = ((void *) i) + block_bytes(b->c->cache)) in bch_btree_node_read_done()
213 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
223 bset_magic(&b->c->cache->sb)); in bch_btree_node_read_done()
225 mempool_free(iter, &b->c->fill_iter); in bch_btree_node_read_done()
229 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
230 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
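bch_btree_node_read_done() walks the node buffer one on-disk bset at a time: each set begins on a block boundary, declares its own length, and must carry the cache set's magic before its keys are merged into the in-memory node; anything that fails validation produces the bch_cache_set_error() shown above. A rough userspace sketch of that advance-by-blocks walk, with an invented toy header in place of the real struct bset:

#include <stddef.h>
#include <stdint.h>

#define TOY_MAGIC 0xbcacheULL

struct toy_set {                    /* hypothetical on-disk set header */
	uint64_t magic;
	uint32_t keys;              /* 8-byte words of key data that follow */
};

/* Round a set's byte size up to whole blocks, like set_blocks(). */
static size_t toy_set_blocks(const struct toy_set *s, size_t block_bytes)
{
	size_t bytes = sizeof(*s) + (size_t)s->keys * 8;

	return (bytes + block_bytes - 1) / block_bytes;
}

/* Returns blocks accepted, or 0 if a set claims to run past the node. */
static size_t toy_read_done(const void *buf, size_t blocks, size_t block_bytes)
{
	size_t written = 0;

	while (written < blocks) {
		const struct toy_set *s = (const void *)
			((const char *)buf + written * block_bytes);

		if (s->magic != TOY_MAGIC)
			break;                          /* end of written data */
		if (written + toy_set_blocks(s, block_bytes) > blocks)
			return 0;                       /* corrupt length */
		written += toy_set_blocks(s, block_bytes);
	}
	return written;
}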
252 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
260 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
266 bch_bbio_free(bio, b->c); in bch_btree_node_read()
272 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
276 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
277 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
283 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
284 wake_up_allocators(b->c); in btree_complete_write()
288 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
307 bch_bbio_free(b->bio, b->c); in CLOSURE_CALLBACK()
333 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); in btree_node_write_endio()
347 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
351 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); in do_btree_node_write()
384 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
395 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
420 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
427 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, in __bch_btree_node_write()
428 &b->c->cache->btree_sectors_written); in __bch_btree_node_write()
430 b->written += set_blocks(i, block_bytes(b->c->cache)); in __bch_btree_node_write()
496 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
518 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \ argument
519 ? c->root->level : 1) * 8 + 16)
520 #define mca_can_free(c) \ argument
521 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
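mca_reserve() keeps eight cached nodes per level of the tree (or one level's worth while there is no valid root yet) plus sixteen spares, and mca_can_free() is simply whatever the cache holds beyond that floor, clamped at zero. The same arithmetic written out as plain C, with made-up fields standing in for the cache_set members:

/* Hypothetical stand-in for the relevant cache_set fields. */
struct toy_cache {
	unsigned root_level;        /* 0 for a leaf-only tree */
	unsigned btree_cache_used;  /* nodes currently held in memory */
};

static unsigned toy_mca_reserve(const struct toy_cache *c)
{
	unsigned level = c->root_level ? c->root_level : 1;

	return level * 8 + 16;
}

static int toy_mca_can_free(const struct toy_cache *c)
{
	int spare = (int)c->btree_cache_used - (int)toy_mca_reserve(c);

	return spare > 0 ? spare : 0;   /* max_t(int, 0, ...) */
}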
529 b->c->btree_cache_used--; in mca_data_free()
530 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
539 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
551 ilog2(b->c->btree_pages), in mca_data_alloc()
554 b->c->btree_cache_used++; in mca_data_alloc()
555 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
557 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
580 static struct btree *mca_bucket_alloc(struct cache_set *c, in mca_bucket_alloc() argument
598 b->c = c; in mca_bucket_alloc()
610 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
667 struct cache_set *c = shrink->private_data; in bch_mca_scan() local
673 if (c->shrinker_disabled) in bch_mca_scan()
676 if (c->btree_cache_alloc_lock) in bch_mca_scan()
681 mutex_lock(&c->bucket_lock); in bch_mca_scan()
682 else if (!mutex_trylock(&c->bucket_lock)) in bch_mca_scan()
692 nr /= c->btree_pages; in bch_mca_scan()
695 nr = min_t(unsigned long, nr, mca_can_free(c)); in bch_mca_scan()
698 btree_cache_used = c->btree_cache_used; in bch_mca_scan()
699 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
712 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { in bch_mca_scan()
727 mutex_unlock(&c->bucket_lock); in bch_mca_scan()
728 return freed * c->btree_pages; in bch_mca_scan()
734 struct cache_set *c = shrink->private_data; in bch_mca_count() local
736 if (c->shrinker_disabled) in bch_mca_count()
739 if (c->btree_cache_alloc_lock) in bch_mca_count()
742 return mca_can_free(c) * c->btree_pages; in bch_mca_count()
745 void bch_btree_cache_free(struct cache_set *c) in bch_btree_cache_free() argument
752 if (c->shrink) in bch_btree_cache_free()
753 shrinker_free(c->shrink); in bch_btree_cache_free()
755 mutex_lock(&c->bucket_lock); in bch_btree_cache_free()
758 if (c->verify_data) in bch_btree_cache_free()
759 list_move(&c->verify_data->list, &c->btree_cache); in bch_btree_cache_free()
761 free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb))); in bch_btree_cache_free()
764 list_splice(&c->btree_cache_freeable, in bch_btree_cache_free()
765 &c->btree_cache); in bch_btree_cache_free()
767 while (!list_empty(&c->btree_cache)) { in bch_btree_cache_free()
768 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
782 while (!list_empty(&c->btree_cache_freed)) { in bch_btree_cache_free()
783 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
790 mutex_unlock(&c->bucket_lock); in bch_btree_cache_free()
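bch_btree_cache_free() tears the cache down by splicing btree_cache_freeable back onto btree_cache and then popping nodes off one at a time until that list and btree_cache_freed are both empty. The list-draining idiom it relies on, shown in isolation on a hypothetical node type (locking and the per-node cleanup omitted):

#include <linux/list.h>
#include <linux/slab.h>

struct toy_node {
	struct list_head list;
	/* ... payload ... */
};

static void toy_drain(struct list_head *cache, struct list_head *freeable)
{
	struct toy_node *b;

	list_splice(freeable, cache);   /* fold the second list in */

	while (!list_empty(cache)) {
		b = list_first_entry(cache, struct toy_node, list);
		list_del(&b->list);
		kfree(b);
	}
}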
793 int bch_btree_cache_alloc(struct cache_set *c) in bch_btree_cache_alloc() argument
797 for (i = 0; i < mca_reserve(c); i++) in bch_btree_cache_alloc()
798 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) in bch_btree_cache_alloc()
801 list_splice_init(&c->btree_cache, in bch_btree_cache_alloc()
802 &c->btree_cache_freeable); in bch_btree_cache_alloc()
805 mutex_init(&c->verify_lock); in bch_btree_cache_alloc()
807 c->verify_ondisk = (void *) in bch_btree_cache_alloc()
809 ilog2(meta_bucket_pages(&c->cache->sb))); in bch_btree_cache_alloc()
810 if (!c->verify_ondisk) { in bch_btree_cache_alloc()
819 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); in bch_btree_cache_alloc()
821 if (c->verify_data && in bch_btree_cache_alloc()
822 c->verify_data->keys.set->data) in bch_btree_cache_alloc()
823 list_del_init(&c->verify_data->list); in bch_btree_cache_alloc()
825 c->verify_data = NULL; in bch_btree_cache_alloc()
828 c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid); in bch_btree_cache_alloc()
829 if (!c->shrink) { in bch_btree_cache_alloc()
834 c->shrink->count_objects = bch_mca_count; in bch_btree_cache_alloc()
835 c->shrink->scan_objects = bch_mca_scan; in bch_btree_cache_alloc()
836 c->shrink->seeks = 4; in bch_btree_cache_alloc()
837 c->shrink->batch = c->btree_pages * 2; in bch_btree_cache_alloc()
838 c->shrink->private_data = c; in bch_btree_cache_alloc()
840 shrinker_register(c->shrink); in bch_btree_cache_alloc()
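The block above uses the kernel's dynamically allocated shrinker API: shrinker_alloc() returns a struct shrinker, the caller fills in count_objects/scan_objects plus tuning fields such as seeks and batch, and shrinker_register() arms it; bch_btree_cache_free() later releases it with shrinker_free(). A stripped-down sketch of the same registration pattern for a hypothetical cache (locking and the bcache-specific reclaim logic elided):

#include <linux/errno.h>
#include <linux/shrinker.h>

struct toy_cache {
	struct shrinker *shrink;
	unsigned long    nr_cached;   /* objects we could drop under pressure */
};

static unsigned long toy_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct toy_cache *c = shrink->private_data;

	return c->nr_cached;                    /* how much is reclaimable */
}

static unsigned long toy_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct toy_cache *c = shrink->private_data;
	unsigned long want = sc->nr_to_scan < c->nr_cached ?
			     sc->nr_to_scan : c->nr_cached;

	c->nr_cached -= want;                   /* really: walk and free objects */
	return want ? want : SHRINK_STOP;
}

static int toy_register_shrinker(struct toy_cache *c)
{
	c->shrink = shrinker_alloc(0, "toy-cache");
	if (!c->shrink)
		return -ENOMEM;

	c->shrink->count_objects = toy_count;
	c->shrink->scan_objects  = toy_scan;
	c->shrink->seeks         = 4;           /* same weighting bcache uses */
	c->shrink->private_data  = c;

	shrinker_register(c->shrink);
	return 0;
}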
847 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) in mca_hash() argument
849 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; in mca_hash()
852 static struct btree *mca_find(struct cache_set *c, struct bkey *k) in mca_find() argument
857 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
858 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
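mca_find() resolves a key to its cached node by hashing PTR_HASH() with hash_32() into c->bucket_hash and walking the chain under RCU, comparing PTR_HASH() values rather than whole keys. A condensed sketch of that lookup shape for a hypothetical table (insertion, writer-side locking, and the reference the real code then takes on the node are all left out):

#include <linux/hash.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#define TOY_HASH_BITS 10

struct toy_node {
	u64               hash_val;   /* what PTR_HASH() computes in bcache */
	struct hlist_node hash;
};

static struct hlist_head toy_table[1 << TOY_HASH_BITS];

static struct toy_node *toy_find(u64 hash_val)
{
	struct hlist_head *head = &toy_table[hash_32(hash_val, TOY_HASH_BITS)];
	struct toy_node *n, *ret = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(n, head, hash)
		if (n->hash_val == hash_val) {
			ret = n;
			break;
		}
	rcu_read_unlock();

	return ret;
}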
866 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) in mca_cannibalize_lock() argument
868 spin_lock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
869 if (likely(c->btree_cache_alloc_lock == NULL)) { in mca_cannibalize_lock()
870 c->btree_cache_alloc_lock = current; in mca_cannibalize_lock()
871 } else if (c->btree_cache_alloc_lock != current) { in mca_cannibalize_lock()
873 prepare_to_wait(&c->btree_cache_wait, &op->wait, in mca_cannibalize_lock()
875 spin_unlock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
878 spin_unlock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
883 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, in mca_cannibalize() argument
888 trace_bcache_btree_cache_cannibalize(c); in mca_cannibalize()
890 if (mca_cannibalize_lock(c, op)) in mca_cannibalize()
893 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
897 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
911 void bch_cannibalize_unlock(struct cache_set *c) in bch_cannibalize_unlock() argument
913 spin_lock(&c->btree_cannibalize_lock); in bch_cannibalize_unlock()
914 if (c->btree_cache_alloc_lock == current) { in bch_cannibalize_unlock()
915 c->btree_cache_alloc_lock = NULL; in bch_cannibalize_unlock()
916 wake_up(&c->btree_cache_wait); in bch_cannibalize_unlock()
918 spin_unlock(&c->btree_cannibalize_lock); in bch_cannibalize_unlock()
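mca_cannibalize_lock() is a small ownership lock: under btree_cannibalize_lock the first task to arrive records itself in btree_cache_alloc_lock, any other task parks itself on btree_cache_wait and reports failure so its caller can unwind and retry, and bch_cannibalize_unlock() clears the owner and wakes the waiters. A bare-bones sketch of that pattern, with the btree_op plumbing replaced by a plain wait queue entry and the retry policy left to the caller:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct toy_owner_lock {
	spinlock_t          lock;
	struct task_struct *owner;     /* NULL when free */
	wait_queue_head_t   wait;
};

/* 0 on success; -EINTR tells the caller to back off, sleep and retry. */
static int toy_owner_trylock(struct toy_owner_lock *l, struct wait_queue_entry *wqe)
{
	int ret = 0;

	spin_lock(&l->lock);
	if (!l->owner) {
		l->owner = current;                  /* we may now cannibalize */
	} else if (l->owner != current) {
		prepare_to_wait(&l->wait, wqe, TASK_UNINTERRUPTIBLE);
		ret = -EINTR;
	}
	spin_unlock(&l->lock);

	return ret;
}

static void toy_owner_unlock(struct toy_owner_lock *l)
{
	spin_lock(&l->lock);
	if (l->owner == current) {
		l->owner = NULL;
		wake_up(&l->wait);
	}
	spin_unlock(&l->lock);
}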
921 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, in mca_alloc() argument
928 lockdep_assert_held(&c->bucket_lock); in mca_alloc()
930 if (mca_find(c, k)) in mca_alloc()
936 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
943 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
952 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
963 list_move(&b->list, &c->btree_cache); in mca_alloc()
965 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
975 &b->c->expensive_debug_checks); in mca_alloc()
978 &b->c->expensive_debug_checks); in mca_alloc()
985 b = mca_cannibalize(c, op, k); in mca_alloc()
1004 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, in bch_btree_node_get() argument
1013 b = mca_find(c, k); in bch_btree_node_get()
1019 mutex_lock(&c->bucket_lock); in bch_btree_node_get()
1020 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
1021 mutex_unlock(&c->bucket_lock); in bch_btree_node_get()
1034 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1065 mutex_lock(&parent->c->bucket_lock); in btree_node_prefetch()
1066 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1067 mutex_unlock(&parent->c->bucket_lock); in btree_node_prefetch()
1082 BUG_ON(b == b->c->root); in btree_node_free()
1108 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1109 bch_bucket_free(b->c, &b->key); in btree_node_free()
1111 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1118 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, in __bch_btree_node_alloc() argument
1125 mutex_lock(&c->bucket_lock); in __bch_btree_node_alloc()
1129 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait)) in __bch_btree_node_alloc()
1132 bkey_put(c, &k.key); in __bch_btree_node_alloc()
1133 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); in __bch_btree_node_alloc()
1135 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1140 cache_bug(c, in __bch_btree_node_alloc()
1146 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1148 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1153 bch_bucket_free(c, &k.key); in __bch_btree_node_alloc()
1155 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1157 trace_bcache_btree_node_alloc_fail(c); in __bch_btree_node_alloc()
1161 static struct btree *bch_btree_node_alloc(struct cache_set *c, in bch_btree_node_alloc() argument
1165 return __bch_btree_node_alloc(c, op, level, op != NULL, parent); in bch_btree_node_alloc()
1171 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1175 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1187 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1189 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1196 bch_inc_gen(b->c->cache, in make_btree_freeing_key()
1197 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1199 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1204 struct cache_set *c = b->c; in btree_check_reserve() local
1205 struct cache *ca = c->cache; in btree_check_reserve()
1206 unsigned int reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1208 mutex_lock(&c->bucket_lock); in btree_check_reserve()
1212 prepare_to_wait(&c->btree_cache_wait, &op->wait, in btree_check_reserve()
1214 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1218 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1220 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
1225 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, in __bch_btree_mark_key() argument
1241 if (!ptr_available(c, k, i)) in __bch_btree_mark_key()
1244 g = PTR_BUCKET(c, k, i); in __bch_btree_mark_key()
1249 if (ptr_stale(c, k, i)) { in __bch_btree_mark_key()
1250 stale = max(stale, ptr_stale(c, k, i)); in __bch_btree_mark_key()
1256 c, "inconsistent ptrs: mark = %llu, level = %i", in __bch_btree_mark_key()
1277 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1279 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) in bch_initial_mark_key() argument
1284 if (ptr_available(c, k, i) && in bch_initial_mark_key()
1285 !ptr_stale(c, k, i)) { in bch_initial_mark_key()
1286 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key()
1296 __bch_btree_mark_key(c, level, k); in bch_initial_mark_key()
1299 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats) in bch_update_bucket_in_use() argument
1301 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets; in bch_update_bucket_in_use()
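bch_update_bucket_in_use() reduces occupancy to one integer percentage. For example, with c->nbuckets = 100000 and c->avail_nbuckets = 25000, stats->in_use becomes (100000 - 25000) * 100 / 100000 = 75, i.e. 75% of buckets in use.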
1334 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1378 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1382 block_bytes(b->c->cache)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1416 block_bytes(b->c->cache)) > blocks) in btree_gc_coalesce()
1432 block_bytes(b->c->cache)) > in btree_gc_coalesce()
1441 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1518 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
1579 static size_t btree_gc_min_nodes(struct cache_set *c) in btree_gc_min_nodes() argument
1597 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES; in btree_gc_min_nodes()
1615 bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1624 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1655 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1671 if (atomic_read(&b->c->search_inflight) && in btree_gc_recurse()
1672 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { in btree_gc_recurse()
1718 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1726 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1731 static void btree_gc_start(struct cache_set *c) in btree_gc_start() argument
1736 if (!c->gc_mark_valid) in btree_gc_start()
1739 mutex_lock(&c->bucket_lock); in btree_gc_start()
1741 c->gc_done = ZERO_KEY; in btree_gc_start()
1743 ca = c->cache; in btree_gc_start()
1754 c->gc_mark_valid = 0; in btree_gc_start()
1755 mutex_unlock(&c->bucket_lock); in btree_gc_start()
1758 static void bch_btree_gc_finish(struct cache_set *c) in bch_btree_gc_finish() argument
1765 mutex_lock(&c->bucket_lock); in bch_btree_gc_finish()
1767 set_gc_sectors(c); in bch_btree_gc_finish()
1768 c->gc_mark_valid = 1; in bch_btree_gc_finish()
1769 c->need_gc = 0; in bch_btree_gc_finish()
1771 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) in bch_btree_gc_finish()
1772 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), in bch_btree_gc_finish()
1777 for (i = 0; i < c->devices_max_used; i++) { in bch_btree_gc_finish()
1778 struct bcache_device *d = c->devices[i]; in bch_btree_gc_finish()
1782 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) in bch_btree_gc_finish()
1790 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), in bch_btree_gc_finish()
1796 c->avail_nbuckets = 0; in bch_btree_gc_finish()
1798 ca = c->cache; in bch_btree_gc_finish()
1809 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1820 c->avail_nbuckets++; in bch_btree_gc_finish()
1823 mutex_unlock(&c->bucket_lock); in bch_btree_gc_finish()
1826 static void bch_btree_gc(struct cache_set *c) in bch_btree_gc() argument
1834 trace_bcache_gc_start(c); in bch_btree_gc()
1840 btree_gc_start(c); in bch_btree_gc()
1844 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats); in bch_btree_gc()
1853 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); in bch_btree_gc()
1855 bch_btree_gc_finish(c); in bch_btree_gc()
1856 wake_up_allocators(c); in bch_btree_gc()
1858 bch_time_stats_update(&c->btree_gc_time, start_time); in bch_btree_gc()
1862 bch_update_bucket_in_use(c, &stats); in bch_btree_gc()
1863 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); in bch_btree_gc()
1865 trace_bcache_gc_end(c); in bch_btree_gc()
1867 bch_moving_gc(c); in bch_btree_gc()
1870 static bool gc_should_run(struct cache_set *c) in gc_should_run() argument
1872 struct cache *ca = c->cache; in gc_should_run()
1877 if (atomic_read(&c->sectors_to_gc) < 0) in gc_should_run()
1885 struct cache_set *c = arg; in bch_gc_thread() local
1888 wait_event_interruptible(c->gc_wait, in bch_gc_thread()
1890 test_bit(CACHE_SET_IO_DISABLE, &c->flags) || in bch_gc_thread()
1891 gc_should_run(c)); in bch_gc_thread()
1894 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) in bch_gc_thread()
1897 set_gc_sectors(c); in bch_gc_thread()
1898 bch_btree_gc(c); in bch_gc_thread()
1905 int bch_gc_thread_start(struct cache_set *c) in bch_gc_thread_start() argument
1907 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); in bch_gc_thread_start()
1908 return PTR_ERR_OR_ZERO(c->gc_thread); in bch_gc_thread_start()
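bch_gc_thread() is a standard kthread loop: it sleeps on c->gc_wait via wait_event_interruptible() until the thread should stop, the cache set is being torn down, or gc_should_run() reports that enough sectors have been written, then runs one GC pass and goes back to sleep; bch_gc_thread_start() simply spawns it with kthread_run(). A skeletal version of that loop for a hypothetical device, with the actual GC work reduced to a stub:

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct toy_dev {
	struct task_struct *gc_thread;
	wait_queue_head_t   gc_wait;
	atomic_t            sectors_to_gc;   /* counted down by the write path */
};

static bool toy_gc_should_run(struct toy_dev *d)
{
	return atomic_read(&d->sectors_to_gc) < 0;
}

static int toy_gc_thread(void *arg)
{
	struct toy_dev *d = arg;

	while (!kthread_should_stop()) {
		wait_event_interruptible(d->gc_wait,
					 kthread_should_stop() ||
					 toy_gc_should_run(d));
		if (kthread_should_stop())
			break;

		/* ... reset the sector budget and run one GC pass ... */
	}
	return 0;
}

static int toy_gc_thread_start(struct toy_dev *d)
{
	d->gc_thread = kthread_run(toy_gc_thread, d, "toy_gc");
	return PTR_ERR_OR_ZERO(d->gc_thread);
}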
1920 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1922 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1936 b->c->gc_stats.nodes++; in bch_btree_check_recurse()
1955 struct cache_set *c = check_state->c; in bch_btree_check_thread() local
1965 bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); in bch_btree_check_thread()
1966 k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); in bch_btree_check_thread()
1985 &c->root->keys, in bch_btree_check_thread()
2007 btree_node_prefetch(c->root, p); in bch_btree_check_thread()
2008 c->gc_stats.nodes++; in bch_btree_check_thread()
2010 ret = bcache_btree(check_recurse, p, c->root, &op); in bch_btree_check_thread()
2018 bch_cannibalize_unlock(c); in bch_btree_check_thread()
2019 finish_wait(&c->btree_cache_wait, &(&op)->wait); in bch_btree_check_thread()
2052 int bch_btree_check(struct cache_set *c) in bch_btree_check() argument
2061 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) in bch_btree_check()
2062 bch_initial_mark_key(c, c->root->level, k); in bch_btree_check()
2064 bch_initial_mark_key(c, c->root->level + 1, &c->root->key); in bch_btree_check()
2066 if (c->root->level == 0) in bch_btree_check()
2070 check_state.c = c; in bch_btree_check()
2078 rw_lock(0, c->root, c->root->level); in bch_btree_check()
2121 rw_unlock(0, c->root); in bch_btree_check()
2125 void bch_initial_gc_finish(struct cache_set *c) in bch_initial_gc_finish() argument
2127 struct cache *ca = c->cache; in bch_initial_gc_finish()
2130 bch_btree_gc_finish(c); in bch_initial_gc_finish()
2132 mutex_lock(&c->bucket_lock); in bch_initial_gc_finish()
2158 mutex_unlock(&c->bucket_lock); in bch_initial_gc_finish()
2210 bkey_put(b->c, k); in bch_btree_insert_keys()
2262 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5; in btree_split()
2269 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
2274 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2349 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2353 bkey_put(b->c, &n2->key); in btree_split()
2357 bkey_put(b->c, &n1->key); in btree_split()
2410 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2412 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2413 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2485 int bch_btree_insert(struct cache_set *c, struct keylist *keys, in bch_btree_insert() argument
2501 ret = bch_btree_map_leaf_nodes(&op.op, c, in bch_btree_insert()
2512 bkey_put(c, k); in bch_btree_insert()
2531 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2533 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2535 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2537 b->c->root = b; in bch_btree_set_root()
2539 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2574 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, in __bch_btree_map_nodes() argument
2577 return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags); in __bch_btree_map_nodes()
2609 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, in bch_btree_map_keys() argument
2612 return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags); in bch_btree_map_keys()
2685 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, in bch_refill_keybuf() argument
2699 bch_btree_map_keys(&refill.op, c, &buf->last_scanned, in bch_refill_keybuf()
2785 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, in bch_keybuf_next_rescan() argument
2802 bch_refill_keybuf(c, buf, end, pred); in bch_keybuf_next_rescan()