Lines matching refs: wb

118 struct btree_write_buffered_key *wb) in wb_flush_one_slowpath() argument
124 trans->journal_res.seq = wb->journal_seq; in wb_flush_one_slowpath()
126 return bch2_trans_update(trans, iter, &wb->k, in wb_flush_one_slowpath()
136 struct btree_write_buffered_key *wb, in wb_flush_one() argument
144 EBUG_ON(!wb->journal_seq); in wb_flush_one()
146 EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq); in wb_flush_one()
152 if (!*accounting_accumulated && wb->k.k.type == KEY_TYPE_accounting) { in wb_flush_one()
157 bch2_accounting_accumulate(bkey_i_to_accounting(&wb->k), in wb_flush_one()
180 if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) { in wb_flush_one()
182 return wb_flush_one_slowpath(trans, iter, wb); in wb_flush_one()
185 EBUG_ON(!bpos_eq(wb->k.k.p, path->pos)); in wb_flush_one()
187 bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq); in wb_flush_one()
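
Taken together, wb_flush_one() and wb_flush_one_slowpath() implement a fits-in-leaf fast path with a transactional fallback: the key is appended directly while the node write lock is held if it fits, otherwise the slow path commits it through a full transaction using the key's original journal seq. A minimal userspace sketch of that shape, with hypothetical leaf/key types in place of the bcachefs ones:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical stand-ins for a btree leaf and a buffered key. */
struct leaf  { char data[4096]; size_t used; };
struct wbkey { const char *bytes; size_t len; };

static bool insert_fits(const struct leaf *l, size_t len)
{
	return l->used + len <= sizeof(l->data);
}

/* Slow path: in the real code, a journalled transaction that may split
 * the node; here just a placeholder failure. */
static int flush_one_slowpath(struct leaf *l, const struct wbkey *k)
{
	(void)l; (void)k;
	return -1;
}

/* Fast path: append in place while the node write lock is held, counting
 * fast-path hits the way *fast does in the listing. */
static int flush_one(struct leaf *l, const struct wbkey *k, size_t *fast)
{
	if (!insert_fits(l, k->len))
		return flush_one_slowpath(l, k);

	memcpy(l->data + l->used, k->bytes, k->len);
	l->used += k->len;
	(*fast)++;
	return 0;
}
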
204 struct btree_write_buffered_key *wb) in btree_write_buffered_insert() argument
209 bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k), in btree_write_buffered_insert()
212 trans->journal_res.seq = wb->journal_seq; in btree_write_buffered_insert()
215 bch2_trans_update(trans, &iter, &wb->k, in btree_write_buffered_insert()
221 static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb) in move_keys_from_inc_to_flushing() argument
223 struct bch_fs *c = container_of(wb, struct bch_fs, btree_write_buffer); in move_keys_from_inc_to_flushing()
226 if (!wb->inc.keys.nr) in move_keys_from_inc_to_flushing()
229 bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin, in move_keys_from_inc_to_flushing()
232 darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr)); in move_keys_from_inc_to_flushing()
233 darray_resize(&wb->sorted, wb->flushing.keys.size); in move_keys_from_inc_to_flushing()
235 if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) { in move_keys_from_inc_to_flushing()
236 swap(wb->flushing.keys, wb->inc.keys); in move_keys_from_inc_to_flushing()
240 size_t nr = min(darray_room(wb->flushing.keys), in move_keys_from_inc_to_flushing()
241 wb->sorted.size - wb->flushing.keys.nr); in move_keys_from_inc_to_flushing()
242 nr = min(nr, wb->inc.keys.nr); in move_keys_from_inc_to_flushing()
244 memcpy(&darray_top(wb->flushing.keys), in move_keys_from_inc_to_flushing()
245 wb->inc.keys.data, in move_keys_from_inc_to_flushing()
246 sizeof(wb->inc.keys.data[0]) * nr); in move_keys_from_inc_to_flushing()
248 memmove(wb->inc.keys.data, in move_keys_from_inc_to_flushing()
249 wb->inc.keys.data + nr, in move_keys_from_inc_to_flushing()
250 sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr)); in move_keys_from_inc_to_flushing()
252 wb->flushing.keys.nr += nr; in move_keys_from_inc_to_flushing()
253 wb->inc.keys.nr -= nr; in move_keys_from_inc_to_flushing()
255 if (!wb->inc.keys.nr) in move_keys_from_inc_to_flushing()
256 bch2_journal_pin_drop(j, &wb->inc.pin); in move_keys_from_inc_to_flushing()
258 bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin, in move_keys_from_inc_to_flushing()
267 BUG_ON(wb->sorted.size < wb->flushing.keys.nr); in move_keys_from_inc_to_flushing()
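
move_keys_from_inc_to_flushing() either swaps the two darrays outright (when flushing is empty and the sorted index is large enough) or copies what fits and shifts the rest of inc down. A self-contained sketch of that two-case move, assuming a plain array in place of darray:

#include <stddef.h>
#include <string.h>

/* Hypothetical fixed-capacity key array standing in for a darray. */
struct keybuf { int *data; size_t nr, size; };

#define SWAP(a, b) do { struct keybuf t = (a); (a) = (b); (b) = t; } while (0)

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Move as many keys as possible from inc to flushing, preserving order:
 * swap the whole arrays when flushing is empty, else copy what fits and
 * shift the remainder of inc down. */
static void move_inc_to_flushing(struct keybuf *flushing, struct keybuf *inc,
				 size_t sorted_size)
{
	if (!inc->nr)
		return;

	if (!flushing->nr && sorted_size >= inc->nr) {
		SWAP(*flushing, *inc);
		return;
	}

	size_t nr = min_sz(flushing->size - flushing->nr,
			   sorted_size - flushing->nr);
	nr = min_sz(nr, inc->nr);

	memcpy(flushing->data + flushing->nr, inc->data,
	       sizeof(inc->data[0]) * nr);
	memmove(inc->data, inc->data + nr,
		sizeof(inc->data[0]) * (inc->nr - nr));

	flushing->nr += nr;
	inc->nr      -= nr;
}
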
289 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_btree_write_buffer_flush_locked() local
303 mutex_lock(&wb->inc.lock); in bch2_btree_write_buffer_flush_locked()
304 move_keys_from_inc_to_flushing(wb); in bch2_btree_write_buffer_flush_locked()
305 mutex_unlock(&wb->inc.lock); in bch2_btree_write_buffer_flush_locked()
307 for (size_t i = 0; i < wb->flushing.keys.nr; i++) { in bch2_btree_write_buffer_flush_locked()
308 wb->sorted.data[i].idx = i; in bch2_btree_write_buffer_flush_locked()
309 wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree; in bch2_btree_write_buffer_flush_locked()
310 memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos)); in bch2_btree_write_buffer_flush_locked()
312 wb->sorted.nr = wb->flushing.keys.nr; in bch2_btree_write_buffer_flush_locked()
328 wb_sort(wb->sorted.data, wb->sorted.nr); in bch2_btree_write_buffer_flush_locked()
330 darray_for_each(wb->sorted, i) { in bch2_btree_write_buffer_flush_locked()
331 struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx]; in bch2_btree_write_buffer_flush_locked()
338 for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++) in bch2_btree_write_buffer_flush_locked()
339 prefetch(&wb->flushing.keys.data[n->idx]); in bch2_btree_write_buffer_flush_locked()
349 if (i + 1 < &darray_top(wb->sorted) && in bch2_btree_write_buffer_flush_locked()
351 struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx]; in bch2_btree_write_buffer_flush_locked()
431 trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr); in bch2_btree_write_buffer_flush_locked()
433 sort_nonatomic(wb->flushing.keys.data, in bch2_btree_write_buffer_flush_locked()
434 wb->flushing.keys.nr, in bch2_btree_write_buffer_flush_locked()
435 sizeof(wb->flushing.keys.data[0]), in bch2_btree_write_buffer_flush_locked()
438 darray_for_each(wb->flushing.keys, i) { in bch2_btree_write_buffer_flush_locked()
449 bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin, in bch2_btree_write_buffer_flush_locked()
486 struct btree_write_buffered_key *dst = wb->flushing.keys.data; in bch2_btree_write_buffer_flush_locked()
488 darray_for_each(wb->flushing.keys, i) in bch2_btree_write_buffer_flush_locked()
491 wb->flushing.keys.nr = dst - wb->flushing.keys.data; in bch2_btree_write_buffer_flush_locked()
496 bch2_journal_pin_drop(j, &wb->flushing.pin); in bch2_btree_write_buffer_flush_locked()
497 wb->flushing.keys.nr = 0; in bch2_btree_write_buffer_flush_locked()
501 trace_write_buffer_flush(trans, wb->flushing.keys.nr, overwritten, fast, 0); in bch2_btree_write_buffer_flush_locked()
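
The flush path never sorts the full keys: it fills wb->sorted with small (btree, pos, idx) refs, sorts those, and treats any key whose successor ref has the same (btree, pos) as overwritten, so only the newest update per position is applied. A sketch of that newest-wins pass, with hypothetical flattened types standing in for wb_key_ref and struct bpos:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Hypothetical lightweight ref: idx points back into the key array, which
 * is in insertion (journal) order. */
struct ref { unsigned btree; unsigned long pos; size_t idx; };

static int ref_cmp(const void *l, const void *r)
{
	const struct ref *a = l, *b = r;

	if (a->btree != b->btree)
		return a->btree < b->btree ? -1 : 1;
	if (a->pos != b->pos)
		return a->pos < b->pos ? -1 : 1;
	return (a->idx > b->idx) - (a->idx < b->idx);
}

/* After sorting, equal positions are adjacent and ordered oldest-first,
 * so everything but the last ref of a run is a dead update. */
static void mark_overwritten(struct ref *refs, size_t nr, bool *overwritten)
{
	qsort(refs, nr, sizeof(refs[0]), ref_cmp);

	for (size_t i = 0; i + 1 < nr; i++)
		if (refs[i].btree == refs[i + 1].btree &&
		    refs[i].pos == refs[i + 1].pos)
			overwritten[refs[i].idx] = true;
}
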
557 struct btree_write_buffer *wb = &c->btree_write_buffer; in btree_write_buffer_flush_seq() local
565 *did_work |= wb->inc.keys.nr || wb->flushing.keys.nr; in btree_write_buffer_flush_seq()
571 mutex_lock(&wb->flushing.lock); in btree_write_buffer_flush_seq()
573 mutex_unlock(&wb->flushing.lock); in btree_write_buffer_flush_seq()
576 (wb->inc.pin.seq && wb->inc.pin.seq <= max_seq) || in btree_write_buffer_flush_seq()
577 (wb->flushing.pin.seq && wb->flushing.pin.seq <= max_seq))); in btree_write_buffer_flush_seq()
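
btree_write_buffer_flush_seq() loops until neither buffer pins a journal entry at or before max_seq, reporting through *did_work whether anything was buffered. The shape of that loop, sketched with a hypothetical flat state struct in place of the journal pins:

#include <stdbool.h>

/* Hypothetical pin state: lowest journal seq each buffer still holds,
 * 0 meaning nothing pinned. */
struct wb_pins { unsigned long inc_seq, flushing_seq; };

static int flush_locked(struct wb_pins *p)
{
	/* stand-in for bch2_btree_write_buffer_flush_locked() */
	p->inc_seq = p->flushing_seq = 0;
	return 0;
}

static int flush_to_seq(struct wb_pins *p, unsigned long max_seq, bool *did_work)
{
	int ret = 0;

	while (!ret &&
	       ((p->inc_seq      && p->inc_seq      <= max_seq) ||
		(p->flushing_seq && p->flushing_seq <= max_seq))) {
		*did_work = true;
		ret = flush_locked(p);
	}

	return ret;
}
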
619 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_btree_write_buffer_flush_nocheck_rw() local
622 if (mutex_trylock(&wb->flushing.lock)) { in bch2_btree_write_buffer_flush_nocheck_rw()
624 mutex_unlock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_nocheck_rw()
691 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_btree_write_buffer_flush_work() local
694 mutex_lock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_work()
698 mutex_unlock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_work()
703 static void wb_accounting_sort(struct btree_write_buffer *wb) in wb_accounting_sort() argument
705 eytzinger0_sort(wb->accounting.data, wb->accounting.nr, in wb_accounting_sort()
706 sizeof(wb->accounting.data[0]), in wb_accounting_sort()
713 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_accounting_key_to_wb_slowpath() local
718 int ret = darray_push(&wb->accounting, new); in bch2_accounting_key_to_wb_slowpath()
722 wb_accounting_sort(wb); in bch2_accounting_key_to_wb_slowpath()
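
The accounting table is kept in sorted (eytzinger) order so the fast path can search it cheaply; the slow path here appends the new key and re-sorts via wb_accounting_sort(). The same idea with ordinary sorted order and qsort(), all names hypothetical:

#include <stdlib.h>

/* Hypothetical accounting entry keyed by pos; the real code keeps these
 * in eytzinger (cache-friendly BFS) order, plain sorted order here. */
struct acct { unsigned long pos; long delta; };

static int acct_cmp(const void *l, const void *r)
{
	const struct acct *a = l, *b = r;
	return (a->pos > b->pos) - (a->pos < b->pos);
}

/* Slow path: a key with an unseen position showed up; append it and
 * restore the search order so the fast path can look keys up again. */
static int acct_insert_slowpath(struct acct **tbl, size_t *nr, struct acct new_key)
{
	struct acct *t = realloc(*tbl, (*nr + 1) * sizeof(*t));
	if (!t)
		return -1;

	t[(*nr)++] = new_key;
	qsort(t, *nr, sizeof(*t), acct_cmp);
	*tbl = t;
	return 0;
}
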
730 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_journal_key_to_wb_slowpath() local
733 ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL); in bch2_journal_key_to_wb_slowpath()
734 if (!ret && dst->wb == &wb->flushing) in bch2_journal_key_to_wb_slowpath()
735 ret = darray_resize(&wb->sorted, wb->flushing.keys.size); in bch2_journal_key_to_wb_slowpath()
738 if (dst->wb == &c->btree_write_buffer.flushing) { in bch2_journal_key_to_wb_slowpath()
739 mutex_unlock(&dst->wb->lock); in bch2_journal_key_to_wb_slowpath()
740 dst->wb = &c->btree_write_buffer.inc; in bch2_journal_key_to_wb_slowpath()
741 bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin, in bch2_journal_key_to_wb_slowpath()
749 dst->room = darray_room(dst->wb->keys); in bch2_journal_key_to_wb_slowpath()
750 if (dst->wb == &wb->flushing) in bch2_journal_key_to_wb_slowpath()
751 dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr); in bch2_journal_key_to_wb_slowpath()
755 struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys); in bch2_journal_key_to_wb_slowpath()
759 dst->wb->keys.nr++; in bch2_journal_key_to_wb_slowpath()
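
bch2_journal_key_to_wb_slowpath() runs when dst->room hits zero: grow the destination (and the sorted index, when filling flushing), fall back from flushing to inc on allocation failure, then recompute the room reservation so the fast path can keep pushing without rechecking capacity. A sketch of just the grow-and-reserve part, with a hypothetical growable buffer in place of darray:

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical destination buffer; room counts pushes allowed before the
 * next capacity check, mirroring dst->room in the listing. */
struct dstbuf { int *data; size_t nr, size, room; };

static int make_room(struct dstbuf *d, size_t want)
{
	if (d->size - d->nr >= want)
		return 0;

	size_t new_size = d->size ? d->size * 2 : 8;
	int *p = realloc(d->data, new_size * sizeof(*p));
	if (!p)
		return -1;

	d->data = p;
	d->size = new_size;
	return 0;
}

static int push_key_slowpath(struct dstbuf *d, int key)
{
	int ret = make_room(d, 1);
	if (ret)
		return ret;

	d->room = d->size - d->nr - 1;	/* reservation after this push */
	d->data[d->nr++] = key;
	return 0;
}
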
766 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_journal_keys_to_write_buffer_start() local
768 if (mutex_trylock(&wb->flushing.lock)) { in bch2_journal_keys_to_write_buffer_start()
769 mutex_lock(&wb->inc.lock); in bch2_journal_keys_to_write_buffer_start()
770 move_keys_from_inc_to_flushing(wb); in bch2_journal_keys_to_write_buffer_start()
777 if (!wb->inc.keys.nr) { in bch2_journal_keys_to_write_buffer_start()
778 dst->wb = &wb->flushing; in bch2_journal_keys_to_write_buffer_start()
780 mutex_unlock(&wb->flushing.lock); in bch2_journal_keys_to_write_buffer_start()
781 dst->wb = &wb->inc; in bch2_journal_keys_to_write_buffer_start()
784 mutex_lock(&wb->inc.lock); in bch2_journal_keys_to_write_buffer_start()
785 dst->wb = &wb->inc; in bch2_journal_keys_to_write_buffer_start()
788 dst->room = darray_room(dst->wb->keys); in bch2_journal_keys_to_write_buffer_start()
789 if (dst->wb == &wb->flushing) in bch2_journal_keys_to_write_buffer_start()
790 dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr); in bch2_journal_keys_to_write_buffer_start()
793 bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin, in bch2_journal_keys_to_write_buffer_start()
796 darray_for_each(wb->accounting, i) in bch2_journal_keys_to_write_buffer_start()
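
bch2_journal_keys_to_write_buffer_start() prefers the flushing buffer but only via trylock, so writers never wait on an in-progress flush: if flushing's lock is free it drains inc into it and appends there, otherwise it appends to inc. A sketch of that lock ordering with pthreads, types hypothetical:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct twobuf {
	pthread_mutex_t flushing_lock, inc_lock;
	size_t flushing_nr, inc_nr;
};

/* Returns true when appending to flushing (flushing_lock stays held),
 * false for inc; inc_lock is held on return either way, matching the
 * unlock order in the _end() path of the listing. */
static bool begin_append(struct twobuf *b)
{
	if (pthread_mutex_trylock(&b->flushing_lock) == 0) {
		pthread_mutex_lock(&b->inc_lock);
		/* move_keys_from_inc_to_flushing() would run here */
		if (!b->inc_nr)
			return true;
		/* inc not fully drained: keep appending to inc so
		 * ordering between the two buffers is preserved */
		pthread_mutex_unlock(&b->flushing_lock);
		return false;
	}
	/* flusher is busy: append to inc without blocking */
	pthread_mutex_lock(&b->inc_lock);
	return false;
}
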
802 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_journal_keys_to_write_buffer_end() local
806 darray_for_each(wb->accounting, i) in bch2_journal_keys_to_write_buffer_end()
815 if (live_accounting_keys * 2 < wb->accounting.nr) { in bch2_journal_keys_to_write_buffer_end()
816 struct btree_write_buffered_key *dst = wb->accounting.data; in bch2_journal_keys_to_write_buffer_end()
818 darray_for_each(wb->accounting, src) in bch2_journal_keys_to_write_buffer_end()
821 wb->accounting.nr = dst - wb->accounting.data; in bch2_journal_keys_to_write_buffer_end()
822 wb_accounting_sort(wb); in bch2_journal_keys_to_write_buffer_end()
825 if (!dst->wb->keys.nr) in bch2_journal_keys_to_write_buffer_end()
826 bch2_journal_pin_drop(&c->journal, &dst->wb->pin); in bch2_journal_keys_to_write_buffer_end()
833 if (dst->wb == &wb->flushing) in bch2_journal_keys_to_write_buffer_end()
834 mutex_unlock(&wb->flushing.lock); in bch2_journal_keys_to_write_buffer_end()
835 mutex_unlock(&wb->inc.lock); in bch2_journal_keys_to_write_buffer_end()
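
The _end() path accumulates each accounting key into the journal entry and, once more than half of the table has accumulated to zero, filters the dead entries out in place with the same dst/src pass the flush path uses, then re-sorts. That in-place filter, sketched with a hypothetical two-counter key:

#include <stdbool.h>
#include <stddef.h>

struct acct_key { long v[2]; };	/* hypothetical counters */

static bool acct_is_zero(const struct acct_key *k)
{
	return !k->v[0] && !k->v[1];
}

/* If over half the accounting keys accumulated to zero, compact the
 * array in place and return the new count (caller re-sorts afterward). */
static size_t compact_accounting(struct acct_key *keys, size_t nr, size_t live)
{
	if (live * 2 >= nr)
		return nr;

	struct acct_key *dst = keys;

	for (struct acct_key *src = keys; src < keys + nr; src++)
		if (!acct_is_zero(src))
			*dst++ = *src;

	return dst - keys;
}
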
840 static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size) in wb_keys_resize() argument
842 if (wb->keys.size >= new_size) in wb_keys_resize()
845 if (!mutex_trylock(&wb->lock)) in wb_keys_resize()
848 int ret = darray_resize(&wb->keys, new_size); in wb_keys_resize()
849 mutex_unlock(&wb->lock); in wb_keys_resize()
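
wb_keys_resize() is deliberately best-effort: if the buffer's lock is contended (a flush in progress), it returns -EINTR rather than block, and the caller can retry later. A userspace sketch, assuming realloc in place of darray_resize():

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct keybuf { pthread_mutex_t lock; int *data; size_t nr, size; };

static int keys_resize(struct keybuf *k, size_t new_size)
{
	if (k->size >= new_size)
		return 0;

	if (pthread_mutex_trylock(&k->lock))
		return -EINTR;	/* contended: let the caller retry */

	int ret = 0;
	int *p = realloc(k->data, new_size * sizeof(*p));
	if (p) {
		k->data = p;
		k->size = new_size;
	} else {
		ret = -ENOMEM;
	}

	pthread_mutex_unlock(&k->lock);
	return ret;
}
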
855 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_btree_write_buffer_resize() local
857 return wb_keys_resize(&wb->flushing, new_size) ?: in bch2_btree_write_buffer_resize()
858 wb_keys_resize(&wb->inc, new_size); in bch2_btree_write_buffer_resize()
863 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_fs_btree_write_buffer_exit() local
865 BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) && in bch2_fs_btree_write_buffer_exit()
868 darray_exit(&wb->accounting); in bch2_fs_btree_write_buffer_exit()
869 darray_exit(&wb->sorted); in bch2_fs_btree_write_buffer_exit()
870 darray_exit(&wb->flushing.keys); in bch2_fs_btree_write_buffer_exit()
871 darray_exit(&wb->inc.keys); in bch2_fs_btree_write_buffer_exit()
876 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_fs_btree_write_buffer_init_early() local
878 mutex_init(&wb->inc.lock); in bch2_fs_btree_write_buffer_init_early()
879 mutex_init(&wb->flushing.lock); in bch2_fs_btree_write_buffer_init_early()
880 INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work); in bch2_fs_btree_write_buffer_init_early()
885 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_fs_btree_write_buffer_init() local
890 return darray_make_room(&wb->inc.keys, initial_size) ?: in bch2_fs_btree_write_buffer_init()
891 darray_make_room(&wb->flushing.keys, initial_size) ?: in bch2_fs_btree_write_buffer_init()
892 darray_make_room(&wb->sorted, initial_size); in bch2_fs_btree_write_buffer_init()
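
Both bch2_btree_write_buffer_resize() and the init path chain their fallible steps with the GNU `a ?: b` extension: evaluate a, and if it is non-zero (an error) return it, otherwise evaluate b. A tiny sketch of the idiom with hypothetical allocators standing in for darray_make_room():

static int alloc_inc(void)      { return 0; }
static int alloc_flushing(void) { return 0; }
static int alloc_sorted(void)   { return 0; }

/* GNU C: "x ?: y" yields x when x is non-zero, else y, so this returns
 * the first error encountered, or 0 once all three succeed. */
static int wb_init(void)
{
	return alloc_inc()      ?:
	       alloc_flushing() ?:
	       alloc_sorted();
}
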