Lines matching refs: m — one entry per source line referencing m; each shows the source line number, the matching code, and the enclosing function, with "argument" or "local" noting how m is bound there.
138 static void trace_io_move_fail2(struct data_update *m, in trace_io_move_fail2() argument
144 struct bch_fs *c = m->op.c; in trace_io_move_fail2()
145 struct bkey_s_c old = bkey_i_to_s_c(m->k.k); in trace_io_move_fail2()
161 if ((ptr_bit & m->data_opts.rewrite_ptrs) && in trace_io_move_fail2()
173 bch2_data_update_opts_to_text(&buf, c, &m->op.opts, &m->data_opts); in trace_io_move_fail2()
194 static void trace_data_update2(struct data_update *m, in trace_data_update2() argument
198 struct bch_fs *c = m->op.c; in trace_data_update2()
213 static void trace_io_move_created_rebalance2(struct data_update *m, in trace_io_move_created_rebalance2() argument
217 struct bch_fs *c = m->op.c; in trace_io_move_created_rebalance2()
220 bch2_data_update_opts_to_text(&buf, c, &m->op.opts, &m->data_opts); in trace_io_move_created_rebalance2()
236 static int data_update_invalid_bkey(struct data_update *m, in data_update_invalid_bkey() argument
240 struct bch_fs *c = m->op.c; in data_update_invalid_bkey()
245 prt_printf(&buf, "\nop.nonce: %u", m->op.nonce); in data_update_invalid_bkey()
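
The four helpers above (lines 138-245) share one shape: format the data-update options and the old/new keys into a printbuf, emit it, then free the buffer. A minimal userspace model of that grow-and-append pattern follows; printbuf_model and prt_printf_model are invented names for illustration, not the bcachefs printbuf API.

/* Accumulate formatted output into a growable buffer, then emit once. */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct printbuf_model { char *buf; size_t pos, size; };

static void prt_printf_model(struct printbuf_model *out, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        int n = vsnprintf(NULL, 0, fmt, args); /* measure first */
        va_end(args);

        if (out->pos + n + 1 > out->size) {    /* grow on demand */
                out->size = (out->pos + n + 1) * 2;
                out->buf = realloc(out->buf, out->size);
                if (!out->buf)
                        exit(1);
        }

        va_start(args, fmt);
        vsnprintf(out->buf + out->pos, n + 1, fmt, args);
        va_end(args);
        out->pos += n;
}

int main(void)
{
        struct printbuf_model buf = {0};

        prt_printf_model(&buf, "op.nonce: %u", 42);    /* cf. line 245 */
        puts(buf.buf);
        free(buf.buf);
        return 0;
}
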
267 struct data_update *m = container_of(op, struct data_update, op); in __bch2_data_update_index_update() local
270 bch2_trans_iter_init(trans, &iter, m->btree_id, in __bch2_data_update_index_update()
276 struct bkey_s_c old = bkey_i_to_s_c(m->k.k); in __bch2_data_update_index_update()
299 trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i), in __bch2_data_update_index_update()
337 if ((ptr_bit & m->data_opts.rewrite_ptrs) && in __bch2_data_update_index_update()
340 bch2_extent_ptr_set_cached(c, &m->op.opts, in __bch2_data_update_index_update()
347 if (m->data_opts.rewrite_ptrs && in __bch2_data_update_index_update()
349 bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) { in __bch2_data_update_index_update()
350 trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:"); in __bch2_data_update_index_update()
367 trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:"); in __bch2_data_update_index_update()
386 durability - ptr_durability >= m->op.opts.data_replicas) { in __bch2_data_update_index_update()
389 bch2_extent_ptr_set_cached(c, &m->op.opts, in __bch2_data_update_index_update()
401 bch2_extent_normalize_by_opts(c, &m->op.opts, bkey_i_to_s(insert)); in __bch2_data_update_index_update()
429 .btree = m->btree_id, in __bch2_data_update_index_update()
433 ret = data_update_invalid_bkey(m, old, k, insert); in __bch2_data_update_index_update()
437 ret = bch2_trans_log_str(trans, bch2_data_update_type_strs[m->type]) ?: in __bch2_data_update_index_update()
438 bch2_trans_log_bkey(trans, m->btree_id, 0, m->k.k) ?: in __bch2_data_update_index_update()
439 bch2_insert_snapshot_whiteouts(trans, m->btree_id, in __bch2_data_update_index_update()
441 bch2_insert_snapshot_whiteouts(trans, m->btree_id, in __bch2_data_update_index_update()
450 trace_data_update2(m, old, k, insert); in __bch2_data_update_index_update()
454 trace_io_move_created_rebalance2(m, old, k, insert); in __bch2_data_update_index_update()
460 m->data_opts.btree_insert_flags); in __bch2_data_update_index_update()
468 trace_io_move_finish2(m, &new->k_i, insert); in __bch2_data_update_index_update()
482 if (m->stats) { in __bch2_data_update_index_update()
484 atomic64_inc(&m->stats->keys_raced); in __bch2_data_update_index_update()
486 &m->stats->sectors_raced); in __bch2_data_update_index_update()
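
The index-update lines above (337-340, 347-350) drive a per-pointer bitmask: pointer i corresponds to bit (1 << i) of data_opts.rewrite_ptrs, and pointers whose bit is set are demoted to cached once the new copy supersedes them. A standalone model of that walk; struct model_ptr and the function are invented for illustration, not the bcachefs API.

#include <stdbool.h>
#include <stdio.h>

struct model_ptr { unsigned dev; bool cached; };

/* Walk extent pointers; bit (1 << i) of rewrite_ptrs selects pointer i. */
static void demote_rewritten_ptrs(struct model_ptr *ptrs, unsigned nr,
                                  unsigned rewrite_ptrs)
{
        unsigned ptr_bit = 1;

        for (unsigned i = 0; i < nr; i++, ptr_bit <<= 1)
                if (ptr_bit & rewrite_ptrs)
                        ptrs[i].cached = true; /* superseded by the new write */
}

int main(void)
{
        struct model_ptr ptrs[] = { { 0, false }, { 1, false }, { 2, false } };

        demote_rewritten_ptrs(ptrs, 3, 0x5);   /* rewrite pointers 0 and 2 */

        for (unsigned i = 0; i < 3; i++)
                printf("ptr %u: dev %u cached=%d\n", i, ptrs[i].dev, ptrs[i].cached);
        return 0;
}
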
505 void bch2_data_update_read_done(struct data_update *m) in bch2_data_update_read_done() argument
507 m->read_done = true; in bch2_data_update_read_done()
510 BUG_ON(!m->op.wbio.bio.bi_vcnt); in bch2_data_update_read_done()
512 m->op.crc = m->rbio.pick.crc; in bch2_data_update_read_done()
513 m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9; in bch2_data_update_read_done()
515 this_cpu_add(m->op.c->counters[BCH_COUNTER_io_move_write], m->k.k->k.size); in bch2_data_update_read_done()
517 closure_call(&m->op.cl, bch2_write, NULL, NULL); in bch2_data_update_read_done()
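
Line 513 sizes the write bio from the read's extent checksum state: crc.compressed_size counts 512-byte sectors, so shifting left by 9 converts sectors to bytes. A trivial self-contained check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t compressed_size_sectors = 8;  /* 8 x 512-byte sectors */
        uint32_t bi_size_bytes = compressed_size_sectors << 9;

        assert(bi_size_bytes == 4096);         /* 8 * 512 */
        return 0;
}
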
656 void bch2_data_update_to_text(struct printbuf *out, struct data_update *m) in bch2_data_update_to_text() argument
658 prt_str(out, bch2_data_update_type_strs[m->type]); in bch2_data_update_to_text()
661 bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts); in bch2_data_update_to_text()
665 bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k)); in bch2_data_update_to_text()
668 void bch2_data_update_inflight_to_text(struct printbuf *out, struct data_update *m) in bch2_data_update_inflight_to_text() argument
670 bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k)); in bch2_data_update_inflight_to_text()
673 bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts); in bch2_data_update_inflight_to_text()
675 if (!m->read_done) { in bch2_data_update_inflight_to_text()
678 bch2_read_bio_to_text(out, &m->rbio); in bch2_data_update_inflight_to_text()
682 bch2_write_op_to_text(out, &m->op); in bch2_data_update_inflight_to_text()
730 static int __bch2_data_update_bios_init(struct data_update *m, struct bch_fs *c, in __bch2_data_update_bios_init() argument
736 m->bvecs = kmalloc_array(nr_vecs, sizeof*(m->bvecs), GFP_KERNEL); in __bch2_data_update_bios_init()
737 if (!m->bvecs) in __bch2_data_update_bios_init()
740 bio_init(&m->rbio.bio, NULL, m->bvecs, nr_vecs, REQ_OP_READ); in __bch2_data_update_bios_init()
741 bio_init(&m->op.wbio.bio, NULL, m->bvecs, nr_vecs, 0); in __bch2_data_update_bios_init()
743 if (bch2_bio_alloc_pages(&m->op.wbio.bio, buf_bytes, GFP_KERNEL)) { in __bch2_data_update_bios_init()
744 kfree(m->bvecs); in __bch2_data_update_bios_init()
745 m->bvecs = NULL; in __bch2_data_update_bios_init()
749 rbio_init(&m->rbio.bio, c, *io_opts, NULL); in __bch2_data_update_bios_init()
750 m->rbio.data_update = true; in __bch2_data_update_bios_init()
751 m->rbio.bio.bi_iter.bi_size = buf_bytes; in __bch2_data_update_bios_init()
752 m->rbio.bio.bi_iter.bi_sector = bkey_start_offset(&m->k.k->k); in __bch2_data_update_bios_init()
753 m->op.wbio.bio.bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0); in __bch2_data_update_bios_init()
757 int bch2_data_update_bios_init(struct data_update *m, struct bch_fs *c, in bch2_data_update_bios_init() argument
760 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(m->k.k)); in bch2_data_update_bios_init()
766 bkey_for_each_ptr_decode(&m->k.k->k, ptrs, p, entry) in bch2_data_update_bios_init()
769 return __bch2_data_update_bios_init(m, c, io_opts, buf_bytes); in bch2_data_update_bios_init()
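
bch2_data_update_bios_init() walks the decoded pointers to derive buf_bytes, then __bch2_data_update_bios_init() sizes one bvec array shared by the read bio and the write bio (lines 740-741). Assuming page-granular sizing via the kernel's DIV_ROUND_UP macro (an assumption; that computation is not visible in this listing), a minimal model:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned buf_bytes = 5 * 4096 + 100;   /* hypothetical buffer size */
        unsigned nr_vecs = DIV_ROUND_UP(buf_bytes, PAGE_SIZE);

        printf("buf_bytes=%u -> nr_vecs=%u\n", buf_bytes, nr_vecs); /* -> 6 */
        return 0;
}
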
772 static int can_write_extent(struct bch_fs *c, struct data_update *m) in can_write_extent() argument
774 if ((m->op.flags & BCH_WRITE_alloc_nowait) && in can_write_extent()
775 unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(m->op.watermark))) in can_write_extent()
778 unsigned target = m->op.flags & BCH_WRITE_only_specified_devs in can_write_extent()
779 ? m->op.target in can_write_extent()
783 darray_for_each(m->op.devs_have, i) in can_write_extent()
797 if (!dev_buckets_free(ca, usage, m->op.watermark)) in can_write_extent()
801 if (nr_replicas >= m->op.nr_replicas) in can_write_extent()
807 if (nr_replicas < m->op.nr_replicas) in can_write_extent()
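
can_write_extent() (lines 772-807) answers "is there anywhere to write this?" before allocating: walk the devices in the write target, skip any already in op.devs_have (they hold a copy), count only devices with free buckets at the op's watermark, and fail once too few qualify. A userspace model under those assumptions; every structure here is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct model_dev { unsigned idx; bool has_free_buckets; };

static bool dev_in_list(const unsigned *have, unsigned nr_have, unsigned idx)
{
        for (unsigned i = 0; i < nr_have; i++)
                if (have[i] == idx)
                        return true;
        return false;
}

static int can_write_extent_model(const struct model_dev *devs, unsigned nr_devs,
                                  const unsigned *devs_have, unsigned nr_have,
                                  unsigned nr_replicas_wanted)
{
        unsigned nr_replicas = 0;

        for (unsigned i = 0; i < nr_devs; i++) {
                if (dev_in_list(devs_have, nr_have, devs[i].idx))
                        continue;              /* extent already lives here */
                if (!devs[i].has_free_buckets)
                        continue;              /* nothing allocatable */
                if (++nr_replicas >= nr_replicas_wanted)
                        return 0;              /* enough devices found */
        }
        return -1;     /* stands in for the kernel's error code */
}

int main(void)
{
        struct model_dev devs[] = { { 0, true }, { 1, false }, { 2, true } };
        unsigned have[] = { 0 };

        /* dev 0 is skipped, dev 1 is full, dev 2 qualifies -> 0 (success) */
        printf("%d\n", can_write_extent_model(devs, 3, have, 1, 1));
        return 0;
}
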
815 struct data_update *m, in bch2_data_update_init() argument
839 bch2_bkey_buf_init(&m->k); in bch2_data_update_init()
840 bch2_bkey_buf_reassemble(&m->k, c, k); in bch2_data_update_init()
841 m->type = data_opts.btree_insert_flags & BCH_WATERMARK_copygc in bch2_data_update_init()
844 m->btree_id = btree_id; in bch2_data_update_init()
845 m->data_opts = data_opts; in bch2_data_update_init()
846 m->ctxt = ctxt; in bch2_data_update_init()
847 m->stats = ctxt ? ctxt->stats : NULL; in bch2_data_update_init()
849 bch2_write_op_init(&m->op, c, *io_opts); in bch2_data_update_init()
850 m->op.pos = bkey_start_pos(k.k); in bch2_data_update_init()
851 m->op.version = k.k->bversion; in bch2_data_update_init()
852 m->op.target = data_opts.target; in bch2_data_update_init()
853 m->op.write_point = wp; in bch2_data_update_init()
854 m->op.nr_replicas = 0; in bch2_data_update_init()
855 m->op.flags |= BCH_WRITE_pages_stable| in bch2_data_update_init()
859 m->data_opts.write_flags; in bch2_data_update_init()
860 m->op.compression_opt = io_opts->background_compression; in bch2_data_update_init()
861 m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK; in bch2_data_update_init()
865 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(m->k.k)); in bch2_data_update_init()
876 if (ptr_bit & m->data_opts.rewrite_ptrs) { in bch2_data_update_init()
880 m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p); in bch2_data_update_init()
882 } else if (!(ptr_bit & m->data_opts.kill_ptrs)) { in bch2_data_update_init()
883 bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev); in bch2_data_update_init()
894 m->op.nonce = p.crc.nonce + p.crc.offset; in bch2_data_update_init()
895 m->op.csum_type = p.crc.csum_type; in bch2_data_update_init()
899 m->op.incompressible = true; in bch2_data_update_init()
917 m->op.nr_replicas = min(durability_removing, durability_required) + in bch2_data_update_init()
918 m->data_opts.extra_replicas; in bch2_data_update_init()
926 m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1); in bch2_data_update_init()
928 m->op.nr_replicas_required = m->op.nr_replicas; in bch2_data_update_init()
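
Lines 917-918 and 926 set how many new replicas the write op targets: the durability being removed, capped by what the options require, plus any explicitly requested extras, and never less than one (conditions surrounding line 926 in the full source are not visible in this listing). Modeled standalone:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static unsigned nr_replicas_for_update(unsigned durability_removing,
                                       unsigned durability_required,
                                       unsigned extra_replicas)
{
        unsigned n = MIN(durability_removing, durability_required) + extra_replicas;

        return MAX(n, 1);      /* always write at least one replica */
}

int main(void)
{
        printf("%u\n", nr_replicas_for_update(2, 2, 0)); /* -> 2 */
        printf("%u\n", nr_replicas_for_update(0, 2, 0)); /* -> 1 (clamped) */
        return 0;
}
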
935 if (!m->op.nr_replicas) { in bch2_data_update_init()
936 m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs; in bch2_data_update_init()
937 m->data_opts.rewrite_ptrs = 0; in bch2_data_update_init()
940 ret = bch2_extent_drop_ptrs(trans, iter, k, io_opts, &m->data_opts); in bch2_data_update_init()
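
Lines 935-940 handle the degenerate case: if the computed replica count is zero, nothing new gets written, so the rewrite bits are folded into the kill mask and the update degrades to a plain pointer drop via bch2_extent_drop_ptrs(). The bitmask move in isolation:

#include <assert.h>

int main(void)
{
        unsigned rewrite_ptrs = 0x3, kill_ptrs = 0x4;

        kill_ptrs |= rewrite_ptrs;     /* rewrites become drops */
        rewrite_ptrs = 0;

        assert(kill_ptrs == 0x7 && rewrite_ptrs == 0);
        return 0;
}
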
959 ret = can_write_extent(c, m); in bch2_data_update_init()
964 ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors, in bch2_data_update_init()
965 m->data_opts.extra_replicas in bch2_data_update_init()
984 ret = bch2_update_unwritten_extent(trans, m) ?: in bch2_data_update_init()
991 ret = __bch2_data_update_bios_init(m, c, io_opts, buf_bytes); in bch2_data_update_init()
1002 bch2_disk_reservation_put(c, &m->op.res); in bch2_data_update_init()
1004 bch2_bkey_buf_exit(&m->k, c); in bch2_data_update_init()