Lines matching references to b in fs/bcachefs/btree_update_interior.c (bcachefs btree interior-node update code). Line numbers are source line numbers; matches are grouped by containing function, with a note on whether b is an argument or a local variable there.

bch2_btree_node_check_topology()  [b: argument]
  48  int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
  51      struct bpos node_min = b->key.k.type == KEY_TYPE_btree_ptr_v2
  52          ? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key
  53          : b->data->min_key;
  60      BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
  61             !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
  62                      b->data->min_key));
  66      bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
  68      if (b == btree_node_root(c, b)) {
  69          if (!bpos_eq(b->data->min_key, POS_MIN)) {
  72              bch2_bpos_to_text(&buf, b->data->min_key);
  79          if (!bpos_eq(b->data->max_key, SPOS_MAX)) {
  82              bch2_bpos_to_text(&buf, b->data->max_key);
  90      if (!b->c.level)
 125      if (!bpos_eq(prev.k->k.p, b->key.k.p)) {
 139      bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
 141      bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
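
Note: bch2_btree_node_check_topology() verifies that an interior node's children exactly tile its [min_key, max_key] range (the checks around lines 60-62 and 125 above), and that for KEY_TYPE_btree_ptr_v2 nodes the bounds cached in the pointer key agree with the node's on-disk header. A minimal self-contained sketch of the tiling check, with uint64_t standing in for struct bpos (illustrative only, not bcachefs code):

#include <stdbool.h>
#include <stdint.h>

struct child_range { uint64_t min, max; };  /* stand-in for child min/max bpos */

static bool children_tile_parent(uint64_t parent_min, uint64_t parent_max,
                                 const struct child_range *kids, unsigned nr)
{
        uint64_t expect = parent_min;

        for (unsigned i = 0; i < nr; i++) {
                if (kids[i].min != expect)
                        return false;           /* hole or overlap at this boundary */
                expect = kids[i].max + 1;       /* analogue of bpos_successor() */
        }

        /* The last child must end exactly at the parent's max_key. */
        return nr && kids[nr - 1].max == parent_max;
}
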
__bch2_btree_calc_format()  [b: argument]
 152  static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
 157      for_each_bset(b, t)
 158          bset_tree_for_each_key(b, t, k)
 160              uk = bkey_unpack_key(b, k);

bch2_btree_calc_format()  [b: argument]
 165  static struct bkey_format bch2_btree_calc_format(struct btree *b)
 170      bch2_bkey_format_add_pos(&s, b->data->min_key);
 171      bch2_bkey_format_add_pos(&s, b->data->max_key);
 172      __bch2_btree_calc_format(&s, b);

bch2_btree_node_format_fits()  [b: argument]
 205  static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
 209      size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f);
 211      return __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b);
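
Note: bch2_btree_node_format_fits() asks whether every key in b, repacked with a candidate format, would still fit in the node's buffer; btree_node_u64s_with_format() does the size accounting. A hedged sketch of that accounting with simplified inputs (the real code derives per-key sizes from struct bkey_format field widths):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static size_t node_u64s_with_format(size_t nr_packed_keys, size_t nr_unpacked_keys,
                                    size_t val_u64s,
                                    size_t new_key_u64s, size_t unpacked_key_u64s)
{
        /* Packable keys shrink or grow to the new packed size; keys that
         * cannot be packed stay at the full unpacked size. */
        return nr_packed_keys * new_key_u64s +
               nr_unpacked_keys * unpacked_key_u64s +
               val_u64s;
}

static bool format_fits(size_t u64s, size_t header_bytes, size_t buf_bytes)
{
        /* Mirrors __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b) */
        return header_bytes + u64s * sizeof(uint64_t) < buf_bytes;
}
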
__btree_node_free()  [b: argument]
 216  static void __btree_node_free(struct btree_trans *trans, struct btree *b)
 220      trace_and_count(c, btree_node_free, trans, b);
 222      BUG_ON(btree_node_write_blocked(b));
 223      BUG_ON(btree_node_dirty(b));
 224      BUG_ON(btree_node_need_write(b));
 225      BUG_ON(b == btree_node_root(c, b));
 226      BUG_ON(b->ob.nr);
 227      BUG_ON(!list_empty(&b->write_blocked));
 228      BUG_ON(b->will_make_reachable);
 230      clear_btree_node_noevict(b);

bch2_btree_node_free_inmem()  [b: argument]
 235          struct btree *b)
 239      bch2_btree_node_lock_write_nofail(trans, path, &b->c);
 241      __btree_node_free(trans, b);
 244      bch2_btree_node_hash_remove(&c->btree_cache, b);
 247      six_unlock_write(&b->c.lock);
 248      mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 250      bch2_trans_node_drop(trans, b);

bch2_btree_node_free_never_used()  [b: argument]
 255          struct btree *b)
 258      struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
 260      BUG_ON(!list_empty(&b->write_blocked));
 261      BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as));
 263      b->will_make_reachable = 0;
 266      clear_btree_node_will_make_reachable(b);
 267      clear_btree_node_accessed(b);
 268      clear_btree_node_dirty_acct(c, b);
 269      clear_btree_node_need_write(b);
 272      __bch2_btree_node_hash_remove(&c->btree_cache, b);
 275      BUG_ON(p->nr >= ARRAY_SIZE(p->b));
 276      p->b[p->nr++] = b;
 278      six_unlock_intent(&b->c.lock);
 280      bch2_trans_node_drop(trans, b);

__bch2_btree_node_alloc()  [b: local]
 292      struct btree *b;
 302      b = bch2_btree_node_mem_alloc(trans, interior_node);
 303      if (IS_ERR(b))
 304          return b;
 306      BUG_ON(b->ob.nr);
 354      bkey_copy(&b->key, &tmp.k);
 355      b->ob = obs;
 356      six_unlock_write(&b->c.lock);
 357      six_unlock_intent(&b->c.lock);
 359      return b;
 361      bch2_btree_node_to_freelist(c, b);

bch2_btree_node_alloc()  [b: local]
 370      struct btree *b;
 377      b = p->b[--p->nr];
 379      btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
 380      btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
 382      set_btree_node_accessed(b);
 383      set_btree_node_dirty_acct(c, b);
 384      set_btree_node_need_write(b);
 386      bch2_bset_init_first(b, &b->data->keys);
 387      b->c.level = level;
 388      b->c.btree_id = as->btree_id;
 389      b->version_ondisk = c->sb.version;
 391      memset(&b->nr, 0, sizeof(b->nr));
 392      b->data->magic = cpu_to_le64(bset_magic(c));
 393      memset(&b->data->_ptr, 0, sizeof(b->data->_ptr));
 394      b->data->flags = 0;
 395      SET_BTREE_NODE_ID(b->data, as->btree_id);
 396      SET_BTREE_NODE_LEVEL(b->data, level);
 398      if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
 399          struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(&b->key);
 402          bp->v.seq = b->data->keys.seq;
 406      SET_BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data, true);
 408      bch2_btree_build_aux_trees(b);
 410      ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
 413      trace_and_count(c, btree_node_alloc, trans, b);
 415      return b;

btree_set_min()  [b: argument]
 418  static void btree_set_min(struct btree *b, struct bpos pos)
 420      if (b->key.k.type == KEY_TYPE_btree_ptr_v2)
 421          bkey_i_to_btree_ptr_v2(&b->key)->v.min_key = pos;
 422      b->data->min_key = pos;

btree_set_max()  [b: argument]
 425  static void btree_set_max(struct btree *b, struct bpos pos)
 427      b->key.k.p = pos;
 428      b->data->max_key = pos;
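
Note: btree_set_min()/btree_set_max() show that a node's range is recorded twice: in the key that points at the node (b->key, including the min_key field of a btree_ptr_v2) and in the node's own header (b->data), and the two copies must be updated together; this is the same invariant the BUG_ON at lines 60-62 enforces. A toy mirror of that double bookkeeping (types simplified, not bcachefs code):

#include <stdbool.h>
#include <stdint.h>

struct node_header { uint64_t min_key, max_key; };                      /* b->data */
struct node_key    { uint64_t pos; bool is_v2; uint64_t v2_min_key; };  /* b->key  */
struct node        { struct node_key key; struct node_header hdr; };

static void node_set_min(struct node *n, uint64_t pos)
{
        if (n->key.is_v2)
                n->key.v2_min_key = pos;  /* cached copy in the pointer key */
        n->hdr.min_key = pos;             /* on-disk header copy */
}

static void node_set_max(struct node *n, uint64_t pos)
{
        n->key.pos = pos;  /* interior keys index children by their max_key */
        n->hdr.max_key = pos;
}
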
bch2_btree_node_alloc_replacement()
 433          struct btree *b)
 435      struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level);
 436      struct bkey_format format = bch2_btree_calc_format(b);
 442      if (!bch2_btree_node_format_fits(as->c, b, b->nr, &format))
 443          format = b->format;
 445      SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
 447      btree_set_min(n, b->data->min_key);
 448      btree_set_max(n, b->data->max_key);
 453      bch2_btree_sort_into(as->c, n, b);

__btree_root_alloc()  [b: local]
 462      struct btree *b = bch2_btree_node_alloc(as, trans, level);
 464      btree_set_min(b, POS_MIN);
 465      btree_set_max(b, SPOS_MAX);
 466      b->data->format = bch2_btree_calc_format(b);
 468      btree_node_set_format(b, b->data->format);
 469      bch2_btree_build_aux_trees(b);
 471      return b;

bch2_btree_reserve_put()  [b: local]
 483      struct btree *b = p->b[--p->nr];
 492      a->ob = b->ob;
 493      b->ob.nr = 0;
 494      bkey_copy(&a->k, &b->key);
 496      bch2_open_buckets_put(c, &b->ob);
 501      btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
 502      btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
 503      __btree_node_free(trans, b);
 504      bch2_btree_node_to_freelist(c, b);

bch2_btree_reserve_get()  [b: local]
 516      struct btree *b;
 534      b = __bch2_btree_node_alloc(trans, &as->disk_res, cl,
 536      if (IS_ERR(b)) {
 537          ret = PTR_ERR(b);
 541      p->b[p->nr++] = b;

btree_update_add_key()  [b: argument]
 584          struct keylist *keys, struct btree *b)
 586      struct bkey_i *k = &b->key;
 592      bkey_i_to_btree_ptr_v2(keys->top)->v.mem_ptr = b->c.level + 1;

btree_node_seq_matches()  [b: argument]
 655  static noinline __no_kmsan_checks bool btree_node_seq_matches(struct btree *b, __le64 seq)
 657      struct btree_node *b_data = READ_ONCE(b->data);

btree_update_nodes_written()  [b: local]
 665      struct btree *b;
 703      b = as->old_nodes[i];
 706      btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
 707      bool seq_matches = btree_node_seq_matches(b, as->old_nodes_seq[i]);
 708      six_unlock_read(&b->c.lock);
 712      wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner,
 757      b = READ_ONCE(as->b);
 758      if (b) {
 772          as->btree_id, b->c.level, b->key.k.p);
 774      btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
 775      mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 776      path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
 777      path->l[b->c.level].b = b;
 779      bch2_btree_node_lock_write_nofail(trans, path, &b->c);
 784      if (list_empty(&b->write_blocked))
 785          clear_btree_node_write_blocked(b);
 791      if (as->b == b) {
 792          BUG_ON(!b->c.level);
 793          BUG_ON(!btree_node_dirty(b));
 796          struct bset *last = btree_bset_last(b);
 802          bch2_btree_add_journal_pin(c, b, journal_seq);
 809          set_btree_node_never_write(b);
 815      mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 816      six_unlock_write(&b->c.lock);
 818      btree_node_write_if_need(trans, b, SIX_LOCK_intent);
 819      btree_node_unlock(trans, path, b->c.level);
 827      b = as->new_nodes[i];
 829      BUG_ON(b->will_make_reachable != (unsigned long) as);
 830      b->will_make_reachable = 0;
 831      clear_btree_node_will_make_reachable(b);
 836      b = as->new_nodes[i];
 838      btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
 839      btree_node_write_if_need(trans, b, SIX_LOCK_read);
 840      six_unlock_read(&b->c.lock);
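
Note: in btree_update_nodes_written(), a stale struct btree pointer may refer to memory that has since been reused for a different node, so identity is confirmed by comparing the sequence number stamped into the node's data at creation (btree_node_seq_matches(), line 655) before waiting for writes to drain (lines 706-712). A simplified sketch of the pattern, locking reduced to comments:

#include <stdbool.h>
#include <stdint.h>

struct node_data { uint64_t seq; };
struct node      { struct node_data *data; };

static bool node_seq_matches(const struct node *b, uint64_t seq)
{
        const struct node_data *d = b->data;  /* real code: READ_ONCE(b->data) */

        return d && d->seq == seq;
}

static void wait_for_old_node_write(struct node *b, uint64_t expected_seq)
{
        /* read-lock b, check identity, unlock (as at lines 706-708) */
        if (!node_seq_matches(b, expected_seq))
                return;  /* memory was reused for a different node */

        /* only now block: wait_on_bit_io(&b->flags, write_in_flight, ...) */
}
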
btree_update_updated_node()  [b: argument]
 887  static void btree_update_updated_node(struct btree_update *as, struct btree *b)
 892      BUG_ON(as->update_level_end < b->c.level);
 893      BUG_ON(!btree_node_dirty(b));
 894      BUG_ON(!b->c.level);
 900      as->b = b;
 901      as->update_level_end = b->c.level;
 903      set_btree_node_write_blocked(b);
 904      list_add(&as->write_blocked_list, &b->write_blocked);

btree_update_reparent()
 922      child->b = NULL;

btree_update_updated_root()  [b: argument]
 929  static void btree_update_updated_root(struct btree_update *as, struct btree *b)
 931      struct bkey_i *insert = &b->key;
 942          b->c.btree_id, b->c.level,

bch2_btree_update_add_new_node()  [b: argument]
 964  static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
 972      BUG_ON(b->will_make_reachable);
 974      as->new_nodes[as->nr_new_nodes++] = b;
 975      b->will_make_reachable = 1UL|(unsigned long) as;
 976      set_btree_node_will_make_reachable(b);
 980      btree_update_add_key(as, &as->new_keys, b);
 982      if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
 983          unsigned bytes = vstruct_end(&b->data->keys) - (void *) b->data;
 986          bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =

btree_update_drop_new_node()  [b: argument]
 994  static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
1006      v = xchg(&b->will_make_reachable, 0);
1007      clear_btree_node_will_make_reachable(b);
1016      if (as->new_nodes[i] == b)

bch2_btree_update_get_open_buckets()  [b: argument]
1028  static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
1030      while (b->ob.nr)
1032          b->ob.v[--b->ob.nr];
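
Note: bch2_btree_update_get_open_buckets() (lines 1028-1032) moves ownership of the node's open-bucket references onto the btree_update, draining b->ob in the process. The underlying pattern is a destructive transfer between two counted arrays; a self-contained sketch:

#include <stddef.h>

/* Pop every element off src and push it onto dst; src ends up empty.
 * Caller guarantees dst has capacity (the real code sizes as->open_buckets
 * to hold the buckets of all preallocated nodes). */
static void move_all(unsigned *dst, size_t *dst_nr,
                     unsigned *src, size_t *src_nr)
{
        while (*src_nr)
                dst[(*dst_nr)++] = src[--(*src_nr)];
}
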
bch2_btree_interior_update_will_free_node()  [b: argument]
1047          struct btree *b)
1053      set_btree_node_dying(b);
1055      if (btree_node_fake(b))
1068      list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
1079      clear_btree_node_dirty_acct(c, b);
1080      clear_btree_node_need_write(b);
1081      clear_btree_node_write_blocked(b);
1091      w = btree_current_write(b);
1096      w = btree_prev_write(b);
1112      btree_update_drop_new_node(c, b);
1114      btree_update_add_key(as, &as->old_keys, b);
1116      as->old_nodes[as->nr_old_nodes] = b;
1117      as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq;

bch2_btree_update_start()  [b: local]
1204      if (bch2_btree_node_insert_fits(path->l[level_end].b,
1208      split = path->l[level_end].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
1242      struct btree *b = btree_path_node(path, path->level);
1243      as->node_start = b->data->min_key;
1244      as->node_end = b->data->max_key;
1245      as->node_needed_rewrite = btree_node_rewrite_reason(b);
1246      as->node_written = b->written;
1247      as->node_sectors = btree_buf_bytes(b) >> 9;
1248      as->node_remaining = __bch2_btree_u64s_remaining(b,
1249          btree_bkey_last(b, bset_tree_last(b)));

bch2_btree_set_root_inmem()  [b: argument]
1321  static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
1325      list_del_init(&b->list);
1329      bch2_btree_id_root(c, b->c.btree_id)->b = b;

bch2_btree_set_root()  [b: argument]
1338          struct btree *b,
1343      trace_and_count(c, btree_node_set_root, trans, b);
1345      struct btree *old = btree_node_root(c, b);
1359      bch2_btree_set_root_inmem(c, b);
1361      btree_update_updated_root(as, b);

bch2_insert_fixup_btree_ptr()  [b: argument]
1379          struct btree *b,
1392      bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);
1396          .level = b->c.level,
1397          .btree = b->c.btree_id,
1401          bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), from)) {
1412          b->c.btree_id, b->c.level,
1415      while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
1416             bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
1417          bch2_btree_node_iter_advance(node_iter, b);
1419      bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
1420      set_btree_node_dirty_acct(c, b);
1422      old = READ_ONCE(b->flags);
1429      } while (!try_cmpxchg(&b->flags, &old, new));
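
Note: the tail of bch2_insert_fixup_btree_ptr() (lines 1422-1429) updates b->flags with a lock-free compare-and-swap retry loop: read the current flags, compute the new value, retry if another CPU changed the word in between. A standalone C11 rendition of that loop (the flag bit names are illustrative; the kernel code uses READ_ONCE()/try_cmpxchg()):

#include <stdatomic.h>

#define NODE_FLAG_A (1u << 0)  /* illustrative flag bits */
#define NODE_FLAG_B (1u << 1)

static void update_flags(_Atomic unsigned *flags)
{
        unsigned old = atomic_load(flags), new;

        do {
                new = (old & ~NODE_FLAG_A) | NODE_FLAG_B;
                /* on failure, old is refreshed with the current value */
        } while (!atomic_compare_exchange_weak(flags, &old, new));
}
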
bch2_btree_insert_keys_interior()  [b: argument]
1438          struct btree *b,
1445      BUG_ON(btree_node_type(b) != BKEY_TYPE_btree);
1447      while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
1448             (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0))
1452          insert != keys->top && bpos_le(insert->k.p, b->key.k.p);
1454          bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert);
1456      int ret = bch2_btree_node_check_topology(trans, b);

__btree_split_node()  [b: argument]
1493          struct btree *b,
1504      unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
1516      SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1);
1521      for_each_btree_node_key(b, k, &iter) {
1525          uk = bkey_unpack_key(b, k);
1527          if (b->c.level &&
1530              (bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p) ||
1541          nr_keys[i].val_u64s += bkeyp_val_u64s(&b->format, k);
1544      btree_set_min(n[0], b->data->min_key);
1547      btree_set_max(n[1], b->data->max_key);
1557      if (__vstruct_bytes(struct btree_node, u64s) > btree_buf_bytes(b))
1558          n[i]->data->format = b->format;
1564      for_each_btree_node_key(b, k, &iter) {
1572          ? &b->format: &bch2_bkey_format_current, k))
1575      bch2_bkey_unpack(b, (void *) out[i], k);
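
Note: __btree_split_node() walks b's keys in order and assigns roughly the first 3/5 of the live u64s to the first new node (n1_u64s at line 1504), leaving the rest for the second, so the left node ends up fuller. A hedged sketch of choosing that pivot:

#include <stddef.h>

/* Return the index of the first key that goes to the right-hand node,
 * given per-key sizes in u64s. Simplified: the real code also skips
 * keys deleted in the journal and tracks packed/unpacked counts. */
static size_t split_pivot(const size_t *key_u64s, size_t nr_keys,
                          size_t total_live_u64s)
{
        size_t placed = 0, target = total_live_u64s * 3 / 5;

        for (size_t i = 0; i < nr_keys; i++) {
                placed += key_u64s[i];
                if (placed >= target)
                        return i + 1;
        }
        return nr_keys;
}
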
btree_split_insert_keys()  [b: argument]
1612          struct btree *b,
1618          bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
1621      bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
1623      int ret = bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);

btree_split()  [b: argument]
1632          btree_path_idx_t path, struct btree *b,
1636      struct btree *parent = btree_node_parent(trans->paths + path, b);
1642      bch2_verify_btree_nr_keys(b);
1643      BUG_ON(!parent && (b != btree_node_root(c, b)));
1644      BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1));
1646      ret = bch2_btree_node_check_topology(trans, b);
1650      if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
1653          trace_and_count(c, btree_node_split, trans, b);
1655          n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level);
1656          n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level);
1658          __btree_split_node(as, trans, b, n, keys);
1696          n3 = __btree_root_alloc(as, trans, b->c.level + 1);
1715          trace_and_count(c, btree_node_compact, trans, b);
1717          n1 = bch2_btree_node_alloc_replacement(as, trans, b);
1754      bch2_btree_interior_update_will_free_node(as, b);
1773      bch2_btree_node_free_inmem(trans, trans->paths + path, b);

bch2_btree_insert_node()  [b: argument]
1828          btree_path_idx_t path_idx, struct btree *b,
1834      int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
1835      int old_live_u64s = b->nr.live_u64s;
1840      BUG_ON(!b->c.level);
1841      BUG_ON(!as || as->b);
1844      if (!btree_node_intent_locked(path, b->c.level)) {
1848          __func__, b->c.level);
1858      ret = bch2_btree_node_lock_write(trans, path, &b->c);
1862      bch2_btree_node_prep_for_write(trans, path, b);
1864      if (!bch2_btree_node_insert_fits(b, bch2_keylist_u64s(keys))) {
1865          bch2_btree_node_unlock_write(trans, path, b);
1870      ret = bch2_btree_node_check_topology(trans, b) ?:
1871          bch2_btree_insert_keys_interior(as, trans, path, b,
1872              path->l[b->c.level].iter, keys);
1874      bch2_btree_node_unlock_write(trans, path, b);
1878      trans_for_each_path_with_node(trans, b, linked, i)
1879          bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
1883      live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
1884      u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
1886      if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
1887          b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
1888      if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
1889          b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
1892          bch2_maybe_compact_whiteouts(c, b))
1893          bch2_trans_node_reinit_iter(trans, b);
1895      btree_update_updated_node(as, b);
1896      bch2_btree_node_unlock_write(trans, path, b);
1903      if (b->c.level >= as->update_level_end) {
1904          trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b);
1908      return btree_split(as, trans, path_idx, b, keys);
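
Note: bch2_btree_insert_node() tries the cheap path first: write-lock the node, and if the keys fit (bch2_btree_node_insert_fits(), line 1864) insert in place; otherwise unlock and fall through to btree_split(), restarting the transaction if the update no longer covers this level (lines 1903-1908). A control-flow sketch with locking and error handling elided:

enum insert_outcome { INSERT_DONE, INSERT_SPLIT, INSERT_RESTART };

/* level/update_level_end mirror b->c.level and as->update_level_end. */
static enum insert_outcome insert_or_split(int fits, unsigned level,
                                           unsigned update_level_end)
{
        if (fits)
                return INSERT_DONE;     /* bch2_btree_insert_keys_interior() */

        if (level >= update_level_end)
                return INSERT_RESTART;  /* trans_restart_split_race */

        return INSERT_SPLIT;            /* btree_split() */
}
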
bch2_btree_split_leaf()  [b: local]
1916      struct btree *b = path_l(trans->paths + path)->b;
1927      ret = btree_split(as, trans, path, b, NULL);

__btree_increase_depth()  [b: local]
1948      struct btree *n, *b = bch2_btree_id_root(c, path->btree_id)->b;
1950      BUG_ON(!btree_node_locked(path, b->c.level));
1952      n = __btree_root_alloc(as, trans, b->c.level + 1);
1966      bch2_keylist_add(&as->parent_keys, &b->key);
1978      list_add_tail(&b->list, &c->btree_cache.live[btree_node_pinned(b)].list);

bch2_btree_increase_depth()  [b: local]
1987      struct btree *b = bch2_btree_id_root(c, trans->paths[path].btree_id)->b;
1989      if (btree_node_fake(b))
1993      bch2_btree_update_start(trans, trans->paths + path, b->c.level,

__bch2_foreground_maybe_merge()  [b: local]
2014      struct btree *b, *m, *n, *prev, *next, *parent;
2042      b = trans->paths[path].l[level].b;
2044      if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
2045          (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
2046          b->sib_u64s[sib] = U16_MAX;
2051          ? bpos_predecessor(b->data->min_key)
2052          : bpos_successor(b->data->max_key);
2062      m = trans->paths[sib_path].l[level].b;
2064      if (btree_node_parent(trans->paths + path, b) !=
2066          b->sib_u64s[sib] = U16_MAX;
2072          next = b;
2074          prev = b;
2105      sib_u64s = btree_node_u64s_with_format(b->nr, &b->format, &new_f) +
2116      b->sib_u64s[sib] = sib_u64s;
2118      if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
2121      parent = btree_node_parent(trans->paths + path, b);
2131      trace_and_count(c, btree_node_merge, trans, b);
2133      n = bch2_btree_node_alloc(as, trans, b->c.level);
2136          max(BTREE_NODE_SEQ(b->data),
2168      bch2_btree_interior_update_will_free_node(as, b);
2176      bch2_btree_node_free_inmem(trans, trans->paths + path, b);
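
Note: __bch2_foreground_maybe_merge() computes what b plus its sibling would occupy if repacked with a common format (line 2105), caches the result in b->sib_u64s[sib], and only merges when that total is at or below c->btree_foreground_merge_threshold (line 2118). A simplified sketch of the decision:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool should_merge(uint16_t *sib_u64s_cache,
                         size_t combined_u64s, size_t merge_threshold)
{
        /* U16_MAX in sib_u64s[] doubles as "merge not possible, don't recheck". */
        *sib_u64s_cache = combined_u64s < UINT16_MAX
                ? (uint16_t) combined_u64s : UINT16_MAX;

        return *sib_u64s_cache <= merge_threshold;
}
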
get_iter_to_node()  [b: argument]
2206          struct btree *b)
2208      bch2_trans_node_iter_init(trans, iter, b->c.btree_id, b->key.k.p,
2209          BTREE_MAX_DEPTH, b->c.level,
2216      if (btree_iter_path(trans, iter)->l[b->c.level].b != b) {
2218          BUG_ON(!btree_node_dying(b));
2223      BUG_ON(!btree_node_hashed(b));

bch2_btree_node_rewrite()  [b: argument]
2232          struct btree *b,
2245      parent = btree_node_parent(path, b);
2246      as = bch2_btree_update_start(trans, path, b->c.level,
2252      n = bch2_btree_node_alloc_replacement(as, trans, b);
2263      trace_and_count(c, btree_node_rewrite, trans, b);
2275      bch2_btree_interior_update_will_free_node(as, b);
2280      bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b);

bch2_btree_node_rewrite_key()  [b: local]
2305      struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
2306      int ret = PTR_ERR_OR_ZERO(b);
2310      bool found = b && btree_ptr_hash_val(&b->key) == btree_ptr_hash_val(k);
2312          ? bch2_btree_node_rewrite(trans, &iter, b, 0, flags)

bch2_btree_node_rewrite_pos()  [b: local]
2330      struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
2331      int ret = PTR_ERR_OR_ZERO(b);
2335      ret = bch2_btree_node_rewrite(trans, &iter, b, target, flags);

bch2_btree_node_rewrite_key_get_iter()  [b: argument]
2342          struct btree *b, unsigned flags)
2345      int ret = get_iter_to_node(trans, &iter, b);
2349      ret = bch2_btree_node_rewrite(trans, &iter, b, 0, flags);

bch2_btree_node_rewrite_async()  [b: argument]
2386  void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
2393      a->btree_id = b->c.btree_id;
2394      a->level = b->c.level;
2398      bch2_bkey_buf_copy(&a->key, c, &b->key);

__bch2_btree_node_update_key()  [b: argument]
2467          struct btree *b, struct btree *new_hash,
2478      ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1,
2479          bkey_i_to_s_c(&b->key),
2481      bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1,
2491          new_hash, b->c.level, b->c.btree_id);
2495      parent = btree_node_parent(btree_iter_path(trans, iter), b);
2504      BUG_ON(path2->level != b->c.level);
2516      BUG_ON(btree_node_root(c, b) != b);
2526          b->c.btree_id, b->c.level,
2534      bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c);
2540      __bch2_btree_node_hash_remove(&c->btree_cache, b);
2542      bkey_copy(&b->key, new_key);
2543      ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
2547      bkey_copy(&b->key, new_key);
2550      bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b);
2557      bch2_btree_node_hash_remove(&c->btree_cache, b);
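
Note: the btree node cache hashes nodes by their pointer key, so __bch2_btree_node_update_key() must unhash the node before overwriting b->key whenever the new key hashes differently (lines 2540-2543), all under the node's write lock; the caller below only passes a new_hash node when btree_ptr_hash_val(new_key) != b->hash_val (line 2583). A toy illustration of the ordering (the hash function and table are stand-ins):

#include <stdint.h>

struct toy_node { uint64_t key, hash_val; };

static uint64_t toy_hash(uint64_t key)
{
        return key * 0x9e3779b97f4a7c15ull;  /* any mixing function works here */
}

static void toy_hash_remove(struct toy_node *b) { (void) b; /* unhash b */ }
static void toy_hash_insert(struct toy_node *b) { (void) b; /* rehash b */ }

static void update_key(struct toy_node *b, uint64_t new_key)
{
        if (toy_hash(new_key) != b->hash_val) {
                toy_hash_remove(b);  /* must happen before the key changes */
                b->key = new_key;
                b->hash_val = toy_hash(new_key);
                toy_hash_insert(b);
        } else {
                b->key = new_key;    /* hash unchanged: overwrite in place */
        }
}
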
bch2_btree_node_update_key()  [b: argument]
2564          struct btree *b, struct bkey_i *new_key,
2573      ret = bch2_btree_path_upgrade(trans, path, b->c.level + 1);
2583      if (btree_ptr_hash_val(new_key) != b->hash_val) {
2598      ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,

bch2_btree_node_update_key_get_iter()  [b: argument]
2611          struct btree *b, struct bkey_i *new_key,
2615      int ret = get_iter_to_node(trans, &iter, b);
2620          !bch2_bkey_has_device(bkey_i_to_s(&b->key), ptr->dev));
2622      ret = bch2_btree_node_update_key(trans, &iter, b, new_key,

bch2_btree_set_root_for_read()  [b: argument]
2634  void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
2636      BUG_ON(btree_node_root(c, b));
2638      bch2_btree_set_root_inmem(c, b);

bch2_btree_root_alloc_fake_trans()  [b: local]
2645      struct btree *b;
2655      b = bch2_btree_node_mem_alloc(trans, false);
2658      ret = PTR_ERR_OR_ZERO(b);
2662      set_btree_node_fake(b);
2663      set_btree_node_need_rewrite(b);
2664      b->c.level = level;
2665      b->c.btree_id = id;
2667      bkey_btree_ptr_init(&b->key);
2668      b->key.k.p = SPOS_MAX;
2669      *((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id;
2671      bch2_bset_init_first(b, &b->data->keys);
2672      bch2_btree_build_aux_trees(b);
2674      b->data->flags = 0;
2675      btree_set_min(b, POS_MIN);
2676      btree_set_max(b, SPOS_MAX);
2677      b->data->format = bch2_btree_calc_format(b);
2678      btree_node_set_format(b, b->data->format);
2680      ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
2681          b->c.level, b->c.btree_id);
2684      bch2_btree_set_root_inmem(c, b);
2686      six_unlock_write(&b->c.lock);
2687      six_unlock_intent(&b->c.lock);