Lines matching refs:b in drivers/md/bcache/bset.c (references to the identifier b, usually the struct btree_keys pointer; each hit shows its source line number and enclosing function)

21 void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)  in bch_dump_bset()  argument
31 if (b->ops->key_dump) in bch_dump_bset()
32 b->ops->key_dump(b, k); in bch_dump_bset()
37 bkey_cmp(k, b->ops->is_extents ? in bch_dump_bset()
43 void bch_dump_bucket(struct btree_keys *b) in bch_dump_bucket() argument
48 for (i = 0; i <= b->nsets; i++) in bch_dump_bucket()
49 bch_dump_bset(b, b->set[i].data, in bch_dump_bucket()
50 bset_sector_offset(b, b->set[i].data)); in bch_dump_bucket()
54 int __bch_count_data(struct btree_keys *b) in __bch_count_data() argument
60 if (b->ops->is_extents) in __bch_count_data()
61 for_each_key(b, k, &iter) in __bch_count_data()
66 void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) in __bch_check_keys() argument
73 for_each_key(b, k, &iter) { in __bch_check_keys()
74 if (b->ops->is_extents) { in __bch_check_keys()
79 if (bch_ptr_invalid(b, k)) in __bch_check_keys()
86 if (bch_ptr_bad(b, k)) in __bch_check_keys()
97 if (p && bkey_cmp(p, &b->key) > 0) in __bch_check_keys()
102 bch_dump_bucket(b); in __bch_check_keys()
116 bkey_cmp(k, iter->b->ops->is_extents ? in bch_btree_iter_next_check()
118 bch_dump_bucket(iter->b); in bch_btree_iter_next_check()
268 static inline size_t btree_keys_bytes(struct btree_keys *b) in btree_keys_bytes() argument
270 return PAGE_SIZE << b->page_order; in btree_keys_bytes()
273 static inline size_t btree_keys_cachelines(struct btree_keys *b) in btree_keys_cachelines() argument
275 return btree_keys_bytes(b) / BSET_CACHELINE; in btree_keys_cachelines()
279 static inline size_t bset_tree_bytes(struct btree_keys *b) in bset_tree_bytes() argument
281 return btree_keys_cachelines(b) * sizeof(struct bkey_float); in bset_tree_bytes()
285 static inline size_t bset_prev_bytes(struct btree_keys *b) in bset_prev_bytes() argument
287 return btree_keys_cachelines(b) * sizeof(uint8_t); in bset_prev_bytes()
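The four helpers above (source lines 268–287) derive every auxiliary allocation size from the single `page_order` field. A minimal userspace sketch of the same arithmetic; `MY_PAGE_SIZE`, `MY_BSET_CACHELINE` and `struct my_bkey_float` are stand-ins for the kernel's PAGE_SIZE, bcache's 128-byte BSET_CACHELINE and the 4-byte `struct bkey_float`:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define MY_PAGE_SIZE      4096u  /* stand-in for PAGE_SIZE */
#define MY_BSET_CACHELINE  128u  /* bcache's BSET_CACHELINE */

/* stand-in for struct bkey_float, which packs into 4 bytes */
struct my_bkey_float { uint32_t packed; };

static size_t keys_bytes(unsigned int page_order)
{
	return (size_t)MY_PAGE_SIZE << page_order;  /* btree_keys_bytes() */
}

static size_t keys_cachelines(unsigned int page_order)
{
	return keys_bytes(page_order) / MY_BSET_CACHELINE;
}

int main(void)
{
	unsigned int order = 3;  /* a 32 KiB btree node */

	printf("node bytes: %zu\n", keys_bytes(order));
	printf("cachelines: %zu\n", keys_cachelines(order));
	printf("tree bytes: %zu\n",
	       keys_cachelines(order) * sizeof(struct my_bkey_float));
	printf("prev bytes: %zu\n",
	       keys_cachelines(order) * sizeof(uint8_t));
	return 0;
}
```

Because everything is derived from `page_order`, the free path can recompute these sizes instead of storing them, which is what `bch_btree_keys_free()` below relies on.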
292 void bch_btree_keys_free(struct btree_keys *b) in bch_btree_keys_free() argument
294 struct bset_tree *t = b->set; in bch_btree_keys_free()
296 if (bset_prev_bytes(b) < PAGE_SIZE) in bch_btree_keys_free()
300 get_order(bset_prev_bytes(b))); in bch_btree_keys_free()
302 if (bset_tree_bytes(b) < PAGE_SIZE) in bch_btree_keys_free()
306 get_order(bset_tree_bytes(b))); in bch_btree_keys_free()
308 free_pages((unsigned long) t->data, b->page_order); in bch_btree_keys_free()
315 int bch_btree_keys_alloc(struct btree_keys *b, in bch_btree_keys_alloc() argument
319 struct bset_tree *t = b->set; in bch_btree_keys_alloc()
323 b->page_order = page_order; in bch_btree_keys_alloc()
325 t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order); in bch_btree_keys_alloc()
329 t->tree = bset_tree_bytes(b) < PAGE_SIZE in bch_btree_keys_alloc()
330 ? kmalloc(bset_tree_bytes(b), gfp) in bch_btree_keys_alloc()
331 : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b))); in bch_btree_keys_alloc()
335 t->prev = bset_prev_bytes(b) < PAGE_SIZE in bch_btree_keys_alloc()
336 ? kmalloc(bset_prev_bytes(b), gfp) in bch_btree_keys_alloc()
337 : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b))); in bch_btree_keys_alloc()
343 bch_btree_keys_free(b); in bch_btree_keys_alloc()
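Source lines 329–337 pick an allocator by size: a sub-page lookup table comes from kmalloc(), anything larger from __get_free_pages(), and bch_btree_keys_free() (lines 296–306) applies the same size test when releasing. A userspace sketch of the pattern, with malloc()/aligned_alloc() standing in for the two kernel allocators:

```c
#include <stdio.h>
#include <stdlib.h>

#define MY_PAGE_SIZE 4096u  /* stand-in for PAGE_SIZE */

/* Small tables from the general-purpose allocator, large ones as
 * whole pages, mirroring the kmalloc()/__get_free_pages() split in
 * bch_btree_keys_alloc(). */
static void *table_alloc(size_t bytes)
{
	if (bytes < MY_PAGE_SIZE)
		return malloc(bytes);                 /* kernel: kmalloc() */

	/* round up to whole pages, like get_order() does */
	size_t pages = (bytes + MY_PAGE_SIZE - 1) / MY_PAGE_SIZE;

	return aligned_alloc(MY_PAGE_SIZE, pages * MY_PAGE_SIZE);
}

/* The free path re-derives the size and branches the same way, just
 * as bch_btree_keys_free() recomputes bset_tree_bytes() and
 * bset_prev_bytes() rather than remembering which allocator it used. */
static void table_free(void *p, size_t bytes)
{
	if (bytes < MY_PAGE_SIZE)
		free(p);  /* kernel: kfree() */
	else
		free(p);  /* kernel: free_pages(..., get_order(bytes)) */
}

int main(void)
{
	void *t = table_alloc(512);    /* small: general allocator path */
	void *p = table_alloc(65536);  /* large: page allocation path */

	table_free(t, 512);
	table_free(p, 65536);
	printf("both paths exercised\n");
	return 0;
}
```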
347 void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops, in bch_btree_keys_init() argument
350 b->ops = ops; in bch_btree_keys_init()
351 b->expensive_debug_checks = expensive_debug_checks; in bch_btree_keys_init()
352 b->nsets = 0; in bch_btree_keys_init()
353 b->last_set_unwritten = 0; in bch_btree_keys_init()
419 unsigned int b = fls(j); in __to_inorder() local
420 unsigned int shift = fls(size - 1) - b; in __to_inorder()
422 j ^= 1U << (b - 1); in __to_inorder()
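Source lines 419–422 are the core of __to_inorder(): mapping a node of a 1-indexed, breadth-first tree array (nodes occupy indices 1..size-1) to its in-order rank using only bit operations. A self-contained sketch; fls_() is a userspace stand-in for the kernel's fls(), and the `extra` term, which compensates for a partially filled last tree level, is derived here the way bcache's tree-build path computes it, so treat that expression as an assumption:

```c
#include <stdio.h>
#include <assert.h>

/* fls(): 1-based index of the highest set bit; fls_(1) == 1 */
static unsigned int fls_(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

/* Breadth-first node j -> in-order rank, mirroring __to_inorder() */
static unsigned int to_inorder(unsigned int j, unsigned int size,
			       unsigned int extra)
{
	unsigned int b = fls_(j);
	unsigned int shift = fls_(size - 1) - b;

	j ^= 1U << (b - 1);  /* strip the top bit: the path from the root */
	j <<= 1;             /* descend one level...                      */
	j |= 1;              /* ...toward the leftmost in-order slot      */
	j <<= shift;

	if (j > extra)       /* fold back positions past the ragged level */
		j -= (j - extra) >> 1;

	return j;
}

int main(void)
{
	unsigned int size = 6;  /* nodes are indices 1..5 */
	/* extra = (size - rounddown_pow_of_two(size - 1)) << 1 */
	unsigned int extra = (size - (1U << (fls_(size - 1) - 1))) << 1;
	unsigned int seen = 0;

	for (unsigned int j = 1; j < size; j++) {
		unsigned int pos = to_inorder(j, size, extra);

		printf("node %u -> in-order %u\n", j, pos);
		assert(pos >= 1 && pos < size && !(seen & (1U << pos)));
		seen |= 1U << pos;
	}
	return 0;
}
```

For size = 6 this maps nodes 1..5 to ranks 4, 2, 5, 1, 3, the in-order traversal of the five-node breadth-first tree, with no divisions or pointer chasing.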
637 static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t) in bset_alloc_tree() argument
639 if (t != b->set) { in bset_alloc_tree()
647 while (t < b->set + MAX_BSETS) in bset_alloc_tree()
651 static void bch_bset_build_unwritten_tree(struct btree_keys *b) in bch_bset_build_unwritten_tree() argument
653 struct bset_tree *t = bset_tree_last(b); in bch_bset_build_unwritten_tree()
655 BUG_ON(b->last_set_unwritten); in bch_bset_build_unwritten_tree()
656 b->last_set_unwritten = 1; in bch_bset_build_unwritten_tree()
658 bset_alloc_tree(b, t); in bch_bset_build_unwritten_tree()
660 if (t->tree != b->set->tree + btree_keys_cachelines(b)) { in bch_bset_build_unwritten_tree()
666 void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic) in bch_bset_init_next() argument
668 if (i != b->set->data) { in bch_bset_init_next()
669 b->set[++b->nsets].data = i; in bch_bset_init_next()
670 i->seq = b->set->data->seq; in bch_bset_init_next()
678 bch_bset_build_unwritten_tree(b); in bch_bset_init_next()
690 void bch_bset_build_written_tree(struct btree_keys *b) in bch_bset_build_written_tree() argument
692 struct bset_tree *t = bset_tree_last(b); in bch_bset_build_written_tree()
696 b->last_set_unwritten = 0; in bch_bset_build_written_tree()
698 bset_alloc_tree(b, t); in bch_bset_build_written_tree()
702 b->set->tree + btree_keys_cachelines(b) - t->tree); in bch_bset_build_written_tree()
738 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k) in bch_bset_fix_invalidated_key() argument
743 for (t = b->set; t <= bset_tree_last(b); t++) in bch_bset_fix_invalidated_key()
749 if (!t->size || !bset_written(b, t)) in bch_bset_fix_invalidated_key()
783 static void bch_bset_fix_lookup_table(struct btree_keys *b, in bch_bset_fix_lookup_table() argument
820 if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree) in bch_bset_fix_lookup_table()
840 bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r) in bch_bkey_try_merge() argument
842 if (!b->ops->key_merge) in bch_bkey_try_merge()
854 return b->ops->key_merge(b, l, r); in bch_bkey_try_merge()
857 void bch_bset_insert(struct btree_keys *b, struct bkey *where, in bch_bset_insert() argument
860 struct bset_tree *t = bset_tree_last(b); in bch_bset_insert()
862 BUG_ON(!b->last_set_unwritten); in bch_bset_insert()
863 BUG_ON(bset_byte_offset(b, t->data) + in bch_bset_insert()
865 PAGE_SIZE << b->page_order); in bch_bset_insert()
873 bch_bset_fix_lookup_table(b, t, where); in bch_bset_insert()
876 unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, in bch_btree_insert_key() argument
880 struct bset *i = bset_tree_last(b)->data; in bch_btree_insert_key()
886 BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); in bch_btree_insert_key()
893 if (b->ops->is_extents) in bch_btree_insert_key()
898 m = bch_btree_iter_stack_init(b, &iter, preceding_key_p); in bch_btree_insert_key()
900 if (b->ops->insert_fixup(b, k, &iter.iter, replace_key)) in bch_btree_insert_key()
906 bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) { in bch_btree_insert_key()
914 bch_bkey_try_merge(b, prev, k)) in bch_btree_insert_key()
924 bch_bkey_try_merge(b, k, m)) in bch_btree_insert_key()
927 bch_bset_insert(b, m, k); in bch_btree_insert_key()
1015 struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t, in __bch_bset_search() argument
1038 } else if (bset_written(b, t)) { in __bch_bset_search()
1054 BUG_ON(!b->nsets && in __bch_bset_search()
1060 if (btree_keys_expensive_checks(b)) { in __bch_bset_search()
1061 BUG_ON(bset_written(b, t) && in __bch_bset_search()
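__bch_bset_search() maintains a lower-bound invariant: it returns the first key that is not less than the search key, using the cacheline-indexed tree for written sets (line 1038) and a plain binary search for unwritten ones. The invariant in isolation, sketched over a sorted int array:

```c
#include <stdio.h>

/* Index of the first element >= search, or n if none exists: the
 * contract __bch_bset_search() upholds, minus the bset machinery. */
static unsigned int lower_bound(const int *a, unsigned int n, int search)
{
	unsigned int lo = 0, hi = n;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (a[mid] < search)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

int main(void)
{
	const int keys[] = { 3, 7, 7, 12, 20 };

	printf("%u\n", lower_bound(keys, 5, 7));   /* 1: first 7      */
	printf("%u\n", lower_bound(keys, 5, 13));  /* 4: the 20       */
	printf("%u\n", lower_bound(keys, 5, 25));  /* 5: past the end */
	return 0;
}
```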
1103 static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b, in __bch_btree_iter_stack_init() argument
1114 iter->iter.b = b; in __bch_btree_iter_stack_init()
1117 for (; start <= bset_tree_last(b); start++) { in __bch_btree_iter_stack_init()
1118 ret = bch_bset_search(b, start, search); in __bch_btree_iter_stack_init()
1125 struct bkey *bch_btree_iter_stack_init(struct btree_keys *b, in bch_btree_iter_stack_init() argument
1129 return __bch_btree_iter_stack_init(b, iter, search, b->set); in bch_btree_iter_stack_init()
1135 struct btree_iter_set b __maybe_unused; in __bch_btree_iter_next()
1150 heap_pop(iter, b, cmp); in __bch_btree_iter_next()
1165 struct btree_keys *b, ptr_filter_fn fn) in bch_btree_iter_next_filter() argument
1171 } while (ret && fn(b, ret)); in bch_btree_iter_next_filter()
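Source lines 1135–1150 drive a multiway merge: the iterator holds one position per bset and repeatedly yields the smallest head, and bch_btree_iter_next_filter() (lines 1165–1171) keeps pulling until the filter (bch_ptr_bad() in practice) stops rejecting keys. A compact sketch of both layers; a linear scan stands in for the kernel's heap, and is_odd() is a toy predicate:

```c
#include <stdio.h>
#include <stdbool.h>

/* one sorted input stream, like struct btree_iter_set */
struct stream { const int *pos, *end; };

/* Yield the smallest head among all streams and advance that stream,
 * the job heap_pop()/heap_sift() do in __bch_btree_iter_next(). */
static const int *iter_next(struct stream *s, unsigned int n)
{
	struct stream *best = NULL;

	for (unsigned int i = 0; i < n; i++)
		if (s[i].pos != s[i].end &&
		    (!best || *s[i].pos < *best->pos))
			best = &s[i];

	return best ? best->pos++ : NULL;
}

/* Filtering wrapper, shaped like bch_btree_iter_next_filter() */
static const int *iter_next_filter(struct stream *s, unsigned int n,
				   bool (*bad)(int))
{
	const int *k;

	do {
		k = iter_next(s, n);
	} while (k && bad(*k));

	return k;
}

static bool is_odd(int v) { return v & 1; }  /* toy bch_ptr_bad() */

int main(void)
{
	static const int a[] = { 1, 4, 9 }, b[] = { 2, 3, 10 };
	struct stream s[] = { { a, a + 3 }, { b, b + 3 } };
	const int *k;

	while ((k = iter_next_filter(s, 2, is_odd)))
		printf("%d ", *k);  /* prints: 2 4 10 */
	printf("\n");
	return 0;
}
```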
1194 static void btree_mergesort(struct btree_keys *b, struct bset *out, in btree_mergesort() argument
1207 heap_sift(iter, i, b->ops->sort_cmp); in btree_mergesort()
1210 if (b->ops->sort_fixup && fixup) in btree_mergesort()
1211 k = b->ops->sort_fixup(iter, &tmp.k); in btree_mergesort()
1216 k = __bch_btree_iter_next(iter, b->ops->sort_cmp); in btree_mergesort()
1218 if (bad(b, k)) in btree_mergesort()
1224 } else if (!bch_bkey_try_merge(b, last, k)) { in btree_mergesort()
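In the merge loop above (line 1224), a key is appended to the output only when the previous output key refuses to absorb it via bch_bkey_try_merge(). The same shape with toy extents; ext_try_merge() is a hypothetical stand-in for b->ops->key_merge(), which for real bcache extents also checks pointers and size limits:

```c
#include <stdio.h>
#include <stdbool.h>

/* a toy extent covering [start, start + len) */
struct ext { unsigned int start, len; };

/* Absorb r into l when they are exactly adjacent; the role
 * b->ops->key_merge() plays for extents, heavily simplified. */
static bool ext_try_merge(struct ext *l, const struct ext *r)
{
	if (l->start + l->len != r->start)
		return false;
	l->len += r->len;
	return true;
}

int main(void)
{
	/* already sorted, as btree_mergesort()'s iterator guarantees */
	const struct ext in[] = { {0, 2}, {2, 3}, {8, 1}, {9, 4} };
	struct ext out[4];
	unsigned int n = 0;

	for (unsigned int i = 0; i < 4; i++)
		/* start a new output key only if the last one refuses */
		if (!n || !ext_try_merge(&out[n - 1], &in[i]))
			out[n++] = in[i];

	for (unsigned int i = 0; i < n; i++)
		printf("[%u,+%u) ", out[i].start, out[i].len);
	printf("\n");  /* prints: [0,+5) [8,+5) */
	return 0;
}
```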
1235 static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, in __btree_sort() argument
1256 btree_mergesort(b, out, iter, fixup, false); in __btree_sort()
1257 b->nsets = start; in __btree_sort()
1259 if (!start && order == b->page_order) { in __btree_sort()
1271 out->magic = b->set->data->magic; in __btree_sort()
1272 out->seq = b->set->data->seq; in __btree_sort()
1273 out->version = b->set->data->version; in __btree_sort()
1274 swap(out, b->set->data); in __btree_sort()
1276 b->set[start].data->keys = out->keys; in __btree_sort()
1277 memcpy(b->set[start].data->start, out->start, in __btree_sort()
1286 bch_bset_build_written_tree(b); in __btree_sort()
1292 void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, in bch_btree_sort_partial() argument
1295 size_t order = b->page_order, keys = 0; in bch_btree_sort_partial()
1297 int oldsize = bch_count_data(b); in bch_btree_sort_partial()
1299 __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]); in bch_btree_sort_partial()
1304 for (i = start; i <= b->nsets; i++) in bch_btree_sort_partial()
1305 keys += b->set[i].data->keys; in bch_btree_sort_partial()
1307 order = get_order(__set_bytes(b->set->data, keys)); in bch_btree_sort_partial()
1310 __btree_sort(b, &iter.iter, start, order, false, state); in bch_btree_sort_partial()
1312 EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize); in bch_btree_sort_partial()
1315 void bch_btree_sort_and_fix_extents(struct btree_keys *b, in bch_btree_sort_and_fix_extents() argument
1319 __btree_sort(b, iter, 0, b->page_order, true, state); in bch_btree_sort_and_fix_extents()
1322 void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new, in bch_btree_sort_into() argument
1328 bch_btree_iter_stack_init(b, &iter, NULL); in bch_btree_sort_into()
1330 btree_mergesort(b, new->set->data, &iter.iter, false, true); in bch_btree_sort_into()
1339 void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state) in bch_btree_sort_lazy() argument
1345 if (!b->nsets) in bch_btree_sort_lazy()
1348 for (i = b->nsets - 1; i >= 0; --i) { in bch_btree_sort_lazy()
1351 if (b->set[i].data->keys < crit) { in bch_btree_sort_lazy()
1352 bch_btree_sort_partial(b, i, state); in bch_btree_sort_lazy()
1358 if (b->nsets + 1 == MAX_BSETS) { in bch_btree_sort_lazy()
1359 bch_btree_sort(b, state); in bch_btree_sort_lazy()
1364 bch_bset_build_written_tree(b); in bch_btree_sort_lazy()
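bch_btree_sort_lazy() walks the sets newest to oldest with a geometrically growing threshold, resorting from the first set whose key count is still below it, and forces a full sort only when the node is about to run out of bset slots. A sketch of just that decision; MAX_SETS and the crit/factor values are illustrative, not bcache's actual tuning:

```c
#include <stdio.h>

#define MAX_SETS 4  /* illustrative stand-in for MAX_BSETS */

/* Return the first set index to sort (sets i..nsets-1 collapse into
 * one), MAX_SETS for "sort the whole node", or -1 for "do nothing",
 * following the shape of bch_btree_sort_lazy(): older sets tolerate
 * a geometrically larger key count before being pulled in. */
static int lazy_sort_from(const unsigned int keys[], int nsets,
			  unsigned int crit, unsigned int factor)
{
	if (!nsets)
		return -1;

	for (int i = nsets - 1; i >= 0; --i) {
		crit *= factor;
		if (keys[i] < crit)
			return i;
	}

	/* sort everything rather than overflow the set array */
	return nsets + 1 == MAX_SETS ? MAX_SETS : -1;
}

int main(void)
{
	const unsigned int keys[] = { 900, 300, 40 };

	/* thresholds seen by sets 2, 1, 0: 48, 144, 432 */
	printf("sort from set %d\n", lazy_sort_from(keys, 3, 16, 3));
	return 0;
}
```

Here set 2 holds 40 keys, under its threshold of 48, so only the newest set is resorted; the two larger, older sets are left untouched.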
1367 void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats) in bch_btree_keys_stats() argument
1371 for (i = 0; i <= b->nsets; i++) { in bch_btree_keys_stats()
1372 struct bset_tree *t = &b->set[i]; in bch_btree_keys_stats()
1376 if (bset_written(b, t)) { in bch_btree_keys_stats()