Lines matching refs:bio in fs/bcachefs/io_write.c
105 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio) in bch2_bio_free_pages_pool() argument
110 bio_for_each_segment_all(bv, bio, iter) in bch2_bio_free_pages_pool()
113 bio->bi_vcnt = 0; in bch2_bio_free_pages_pool()
136 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio, in bch2_bio_alloc_pages_pool() argument
145 BUG_ON(!bio_add_page(bio, page, len, 0)); in bch2_bio_alloc_pages_pool()
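The two pool helpers above are a matched pair: bounce pages are drawn from a filesystem-wide mempool and returned to it when the write completes. A minimal sketch of the same pattern against the stock block-layer API; page_pool is a hypothetical stand-in for the per-filesystem pool:

#include <linux/bio.h>
#include <linux/mempool.h>

/* Hypothetical stand-in for the per-filesystem page pool. */
static mempool_t *page_pool;

/* Return every page in the bio to the pool and empty the bio,
 * mirroring the bio_for_each_segment_all() walk at line 110. */
static void free_pages_pool(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter;

	bio_for_each_segment_all(bv, bio, iter)
		mempool_free(bv->bv_page, page_pool);

	bio->bi_vcnt = 0;
}

/* Fill the bio with @bytes of pool pages. The BUG_ON() mirrors
 * line 145: it cannot fire as long as the bio was allocated with
 * enough bvec slots for @bytes. */
static void alloc_pages_pool(struct bio *bio, size_t bytes)
{
	while (bytes) {
		unsigned len = min_t(size_t, PAGE_SIZE, bytes);
		struct page *page = mempool_alloc(page_pool, GFP_NOFS);

		BUG_ON(!bio_add_page(bio, page, len, 0));
		bytes -= len;
	}
}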
492 n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set)); in bch2_submit_wbio_replicas()
494 n->bio.bi_end_io = wbio->bio.bi_end_io; in bch2_submit_wbio_replicas()
495 n->bio.bi_private = wbio->bio.bi_private; in bch2_submit_wbio_replicas()
500 n->bio.bi_opf = wbio->bio.bi_opf; in bch2_submit_wbio_replicas()
501 bio_inc_remaining(&wbio->bio); in bch2_submit_wbio_replicas()
515 n->bio.bi_iter.bi_sector = ptr->offset; in bch2_submit_wbio_replicas()
519 bio_sectors(&n->bio)); in bch2_submit_wbio_replicas()
521 bio_set_dev(&n->bio, ca->disk_sb.bdev); in bch2_submit_wbio_replicas()
524 bio_endio(&n->bio); in bch2_submit_wbio_replicas()
528 submit_bio(&n->bio); in bch2_submit_wbio_replicas()
530 n->bio.bi_status = BLK_STS_REMOVED; in bch2_submit_wbio_replicas()
531 bio_endio(&n->bio); in bch2_submit_wbio_replicas()
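Lines 492-531 fan a single logical write out to its replicas: every pointer but the last gets a clone, each clone inherits the parent's completion context, and bio_inc_remaining() keeps the parent's end_io from running until all clones have completed (BLK_STS_REMOVED short-circuits pointers whose device is gone). A reduced sketch of that fan-out, with the pointer/device lookup elided, fs_bio_set standing in for c->replica_set, and error propagation simplified:

#include <linux/bio.h>

/* Completion for a replica clone: propagate the error, drop the
 * clone, and decrement the parent's __bi_remaining. The parent's
 * own ->bi_end_io runs only when the last holder calls in. */
static void replica_endio(struct bio *clone)
{
	struct bio *parent = clone->bi_private;

	if (clone->bi_status)
		parent->bi_status = clone->bi_status;

	bio_put(clone);
	bio_endio(parent);
}

static void submit_replicas(struct bio *parent, struct block_device **bdevs,
			    sector_t *sectors, unsigned nr)
{
	for (unsigned i = 0; i < nr; i++) {
		struct bio *n;

		if (i + 1 < nr) {
			n = bio_alloc_clone(bdevs[i], parent, GFP_NOFS,
					    &fs_bio_set);
			n->bi_end_io	= replica_endio;
			n->bi_private	= parent;
			/* One more completion the parent waits for: */
			bio_inc_remaining(parent);
		} else {
			/* Last pointer reuses the parent bio itself. */
			n = parent;
			bio_set_dev(n, bdevs[i]);
		}

		n->bi_iter.bi_sector = sectors[i];
		submit_bio(n);
	}
}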
679 bch2_bio_free_pages_pool(op->c, &op->wbio.bio); in CLOSURE_CALLBACK()
727 static void bch2_write_endio(struct bio *bio) in bch2_write_endio() argument
729 struct closure *cl = bio->bi_private; in bch2_write_endio()
731 struct bch_write_bio *wbio = to_wbio(bio); in bch2_write_endio()
739 wbio->submit_time, !bio->bi_status); in bch2_write_endio()
741 if (unlikely(bio->bi_status)) { in bch2_write_endio()
747 bch2_blk_status_to_str(bio->bi_status)); in bch2_write_endio()
753 bch2_blk_status_to_str(bio->bi_status)); in bch2_write_endio()
770 bch2_bio_free_pages_pool(c, bio); in bch2_write_endio()
773 bio_put(bio); in bch2_write_endio()
776 bio_endio(&parent->bio); in bch2_write_endio()
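bch2_write_endio() (lines 727-776) encodes the ownership rules on completion: latency is accounted against submit_time, errors are logged, bounce pages go back to the pool, clones the write path allocated are dropped, and the parent bio (if any) is completed. The shape of that, with struct write_bio as a reduced hypothetical stand-in for struct bch_write_bio:

/* Reduced stand-in for struct bch_write_bio; the embedded bio
 * must be the last member (see the bioset sketch at the end). */
struct write_bio {
	struct bio	*parent;	/* NULL for the top-level bio */
	unsigned	bounce:1;	/* pages came from the pool  */
	unsigned	put_bio:1;	/* we own this (cloned) bio  */
	u64		submit_time;
	struct bio	bio;
};

static void write_endio(struct bio *bio)
{
	struct write_bio *wbio = container_of(bio, struct write_bio, bio);
	struct bio *parent = wbio->parent;	/* read before bio_put() */

	if (wbio->bounce)
		free_pages_pool(bio);		/* first sketch above */
	if (wbio->put_bio)
		bio_put(bio);			/* frees wbio too */

	if (parent)
		bio_endio(parent);
}

bio_put() on a bioset-allocated bio returns the whole front-padded allocation, which is why parent is loaded before the put.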
809 static struct bio *bch2_write_bio_alloc(struct bch_fs *c, in bch2_write_bio_alloc()
811 struct bio *src, in bch2_write_bio_alloc()
816 struct bio *bio; in bch2_write_bio_alloc() local
826 bio = bio_alloc_bioset(NULL, pages, 0, in bch2_write_bio_alloc()
828 wbio = wbio_init(bio); in bch2_write_bio_alloc()
831 wbio->bio.bi_opf = src->bi_opf; in bch2_write_bio_alloc()
834 bch2_bio_map(bio, buf, output_available); in bch2_write_bio_alloc()
835 return bio; in bch2_write_bio_alloc()
844 bch2_bio_alloc_pages_pool(c, bio, in bch2_write_bio_alloc()
848 if (bio->bi_iter.bi_size < output_available) in bch2_write_bio_alloc()
850 bch2_bio_alloc_pages(bio, in bch2_write_bio_alloc()
852 bio->bi_iter.bi_size, in bch2_write_bio_alloc()
855 return bio; in bch2_write_bio_alloc()
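bch2_write_bio_alloc() (lines 809-855) fills the freshly allocated bio one of two ways: map an already-contiguous output buffer (bch2_bio_map()), or back it with pool pages, falling back to plain page allocation if the bio came up short. A sketch of the two strategies, assuming a non-vmalloc buffer:

#include <linux/bio.h>
#include <linux/mm.h>

static struct bio *write_bio_alloc(struct bio_set *bs, void *buf, size_t size)
{
	unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct bio *bio = bio_alloc_bioset(NULL, pages, REQ_OP_WRITE,
					   GFP_NOFS, bs);

	if (buf) {
		/* Existing buffer: add its pages directly, roughly
		 * what bch2_bio_map() does (assumes the buffer is
		 * not vmalloc memory). */
		while (size) {
			unsigned len = min_t(size_t,
					     PAGE_SIZE - offset_in_page(buf),
					     size);

			BUG_ON(!bio_add_page(bio, virt_to_page(buf), len,
					     offset_in_page(buf)));
			buf += len;
			size -= len;
		}
	} else {
		/* Bounce path: pool pages, as in the first sketch. */
		alloc_pages_pool(bio, size);
	}

	return bio;
}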
862 struct bio *bio = &op->wbio.bio; in bch2_write_rechecksum() local
871 int ret = bch2_rechecksum_bio(c, bio, op->version, op->crc, in bch2_write_rechecksum()
878 bio_advance(bio, op->crc.offset << 9); in bch2_write_rechecksum()
879 bio->bi_iter.bi_size = op->crc.live_size << 9; in bch2_write_rechecksum()
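The bio surgery on lines 878-879 is worth calling out: crc offsets are in 512-byte sectors, so both the advance and the trim shift by 9 to get bytes. In isolation:

/* Skip @offset sectors of already-accounted data, then clip the
 * bio to the @live sectors that will actually be written. */
static void trim_to_live(struct bio *bio, unsigned offset, unsigned live)
{
	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = live << 9;
}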
887 struct bio *bio = &op->wbio.bio; in bch2_write_prep_encoded_data() local
891 BUG_ON(bio_sectors(bio) != op->crc.compressed_size); in bch2_write_prep_encoded_data()
916 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio); in bch2_write_prep_encoded_data()
921 ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio); in bch2_write_prep_encoded_data()
929 ret = bch2_bio_uncompress_inplace(op, bio); in bch2_write_prep_encoded_data()
956 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio); in bch2_write_prep_encoded_data()
960 ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio); in bch2_write_prep_encoded_data()
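bch2_write_prep_encoded_data() works in a fixed order: verify the existing checksum over the encoded payload, transform it (encrypt, or uncompress in place at line 929), then checksum again for the crc that goes to disk. A sketch of the verify-then-encrypt step using the signatures visible in the listing; the crc comparison helper is an assumption:

static int verify_then_encrypt(struct bch_fs *c, struct bch_write_op *op,
			       struct nonce nonce, struct bio *bio)
{
	struct bch_csum csum;

	/* Verify what we were handed before touching it: */
	csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
	if (bch2_crc_cmp(op->crc.csum, csum))	/* assumed helper */
		return -EIO;

	/* Encrypt in place with the same nonce: */
	return bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
}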
984 struct bio **_dst) in bch2_write_extent()
987 struct bio *src = &op->wbio.bio, *dst = src; in bch2_write_extent()
1337 struct bio *bio = &op->wbio.bio; in bch2_nocow_write() local
1389 bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top); in bch2_nocow_write()
1406 bio = &op->wbio.bio; in bch2_nocow_write()
1407 if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) { in bch2_nocow_write()
1408 bio = bio_split(bio, k.k->p.offset - op->pos.offset, in bch2_nocow_write()
1410 wbio_init(bio)->put_bio = true; in bch2_nocow_write()
1411 bio->bi_opf = op->wbio.bio.bi_opf; in bch2_nocow_write()
1416 op->pos.offset += bio_sectors(bio); in bch2_nocow_write()
1417 op->written += bio_sectors(bio); in bch2_nocow_write()
1419 bio->bi_end_io = bch2_write_endio; in bch2_nocow_write()
1420 bio->bi_private = &op->cl; in bch2_nocow_write()
1421 bio->bi_opf |= REQ_OP_WRITE; in bch2_nocow_write()
1424 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user, in bch2_nocow_write()
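The nocow path (lines 1337-1424) may span several extents, so lines 1407-1411 split the bio at each key boundary: bio_split() peels off the prefix covered by the current key (the returned bio, which then gets put_bio set since it was freshly allocated), while the original bio keeps the tail for the next iteration. The boundary math in isolation:

/* If the current key ends before the bio does, peel off and
 * return the covered prefix; the original bio is advanced past
 * it and keeps the remainder. */
static struct bio *split_at_key(struct bio *bio, sector_t bio_start,
				sector_t key_end, struct bio_set *bs)
{
	if (key_end < bio_start + bio_sectors(bio))
		return bio_split(bio, key_end - bio_start, GFP_KERNEL, bs);

	return bio;
}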
1498 struct bio *bio = NULL; in __bch2_write() local
1554 ret = bch2_write_extent(op, wp, &bio); in __bch2_write()
1570 bio->bi_end_io = bch2_write_endio; in __bch2_write()
1571 bio->bi_private = &op->cl; in __bch2_write()
1572 bio->bi_opf |= REQ_OP_WRITE; in __bch2_write()
1574 closure_get(bio->bi_private); in __bch2_write()
1579 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user, in __bch2_write()
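Both submission sites (lines 1419-1424 and 1570-1579) hang every in-flight bio off the write op's closure: bi_private points at op->cl and a reference is taken per submission, so the op's continuation can only run after the last endio drops its ref. The minimal shape, assuming the closure library (include/linux/closure.h in trees that carry it):

#include <linux/closure.h>

static void op_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	bio_put(bio);
	closure_put(cl);	/* last put runs the op's continuation */
}

static void submit_one(struct closure *cl, struct bio *bio)
{
	bio->bi_end_io	 = op_endio;
	bio->bi_private	 = cl;
	bio->bi_opf	|= REQ_OP_WRITE;

	closure_get(cl);	/* pairs with closure_put() in op_endio() */
	submit_bio(bio);
}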
1610 struct bio *bio = &op->wbio.bio; in bch2_write_data_inline() local
1631 sectors = bio_sectors(bio); in bch2_write_data_inline()
1639 iter = bio->bi_iter; in bch2_write_data_inline()
1641 memcpy_from_bio(id->v.data, bio, iter); in bch2_write_data_inline()
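bch2_write_data_inline() copies the bio's payload straight into the inline-data extent; memcpy_from_bio() is a bcachefs utility, but the equivalent is simple with the stock segment iterator. Note that bio_for_each_segment() walks a private copy of bi_iter, so the bio itself is not consumed:

#include <linux/bio.h>
#include <linux/highmem.h>

static void copy_bio_to_buf(void *dst, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		void *src = bvec_kmap_local(&bv);

		memcpy(dst, src, bv.bv_len);
		kunmap_local(src);
		dst += bv.bv_len;
	}
}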
1673 struct bio *bio = &op->wbio.bio; in CLOSURE_CALLBACK() local
1690 wbio_init(bio)->put_bio = false; in CLOSURE_CALLBACK()
1692 if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) { in CLOSURE_CALLBACK()
1710 this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio)); in CLOSURE_CALLBACK()
1711 bch2_increment_clock(c, bio_sectors(bio), WRITE); in CLOSURE_CALLBACK()
1713 data_len = min_t(u64, bio->bi_iter.bi_size, in CLOSURE_CALLBACK()
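The top-level entry point (lines 1673-1713) rejects writes that are not block-aligned before doing any accounting. With block_size a power of two, the check on line 1692 is the usual mask trick:

/* A write is block-aligned iff its byte size has no bits set
 * below the block size (block_size must be a power of two). */
static bool write_aligned(struct bio *bio, unsigned block_size)
{
	return !(bio->bi_iter.bi_size & (block_size - 1));
}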
1775 if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) || in bch2_fs_io_write_init()
1776 bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0)) in bch2_fs_io_write_init()
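Finally, the two bioset_init() calls on lines 1775-1776 use front padding: offsetof(struct bch_write_bio, bio) reserves room for the containing struct in front of every bio the set hands out, which is what makes the to_wbio() container_of() casts throughout the file safe. Reusing the reduced struct write_bio from the completion sketch above:

static struct bio_set write_bioset;

static int init_write_bioset(void)
{
	/* Pool reserve of 4 entries; BIOSET_NEED_BVECS as for
	 * c->bio_write (c->replica_set passes 0 instead). */
	return bioset_init(&write_bioset, 4,
			   offsetof(struct write_bio, bio),
			   BIOSET_NEED_BVECS);
}

static struct write_bio *alloc_write_bio(unsigned nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(NULL, nr_vecs, REQ_OP_WRITE,
					   GFP_NOFS, &write_bioset);

	return container_of(bio, struct write_bio, bio);
}

This is also why the embedded bio has to be the last member: the bioset allocates front_pad bytes plus the bio (and any inline bvecs), so fields placed after it would not be backed by the allocation.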