Lines Matching refs:bio (NVMe target block-device backend: the nvmet_bdev_* I/O command handlers)

171 static void nvmet_bio_done(struct bio *bio)  in nvmet_bio_done()  argument
173 struct nvmet_req *req = bio->bi_private; in nvmet_bio_done()
175 nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); in nvmet_bio_done()
176 nvmet_req_bio_put(req, bio); in nvmet_bio_done()
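
The completion path at 171-176 is the single end_io used by every handler below: the request pointer rides in bio->bi_private, and nvmet_req_bio_put() drops the bio reference only when the bio is not the one embedded in the request. A minimal sketch of the same pattern, with hypothetical my_req/my_complete() standing in for the nvmet types and an unconditional bio_put() because the sketch assumes an allocated (not inline) bio:

    #include <linux/bio.h>

    struct my_req;                                           /* hypothetical */
    void my_complete(struct my_req *req, blk_status_t sts);  /* hypothetical */

    /* Called by the block layer once the bio (and any chained bios) finish. */
    static void my_bio_done(struct bio *bio)
    {
            struct my_req *req = bio->bi_private;    /* stashed at submit time */

            my_complete(req, bio->bi_status);        /* map status, finish req */
            bio_put(bio);                            /* drop the submit reference */
    }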
180 static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, in nvmet_bdev_alloc_bip() argument
194 bip = bio_integrity_alloc(bio, GFP_NOIO, in nvmet_bdev_alloc_bip()
201 bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); in nvmet_bdev_alloc_bip()
203 bip_set_seed(bip, bio->bi_iter.bi_sector >> in nvmet_bdev_alloc_bip()
209 rc = bio_integrity_add_page(bio, miter->page, len, in nvmet_bdev_alloc_bip()
226 static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, in nvmet_bdev_alloc_bip() argument
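
Lines 180-209 are the CONFIG_BLK_DEV_INTEGRITY helper that attaches a protection-information payload to the data bio; the second declaration at 226 is the no-op stub compiled when integrity support is off. A condensed sketch of the allocation steps, assuming a caller-supplied single PI page (the real helper walks a sg_mapping_iter over the metadata SGL; my_alloc_bip is a hypothetical name):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/err.h>

    /* Attach a one-page protection-information buffer to @bio. */
    static int my_alloc_bip(struct bio *bio, struct block_device *bdev,
                            struct page *prot_page)
    {
            struct blk_integrity *bi = bdev_get_integrity(bdev);
            struct bio_integrity_payload *bip;
            unsigned int len;

            if (!bi)
                    return -ENODEV;

            bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
            if (IS_ERR(bip))
                    return PTR_ERR(bip);

            /* PI bytes needed for the data sectors carried by this bio. */
            len = bio_integrity_bytes(bi, bio_sectors(bio));
            bip->bip_iter.bi_size = len;

            /* The seed is the start LBA in integrity-interval units. */
            bip_set_seed(bip, bio->bi_iter.bi_sector >>
                              (bi->interval_exp - SECTOR_SHIFT));

            if (bio_integrity_add_page(bio, prot_page, len, 0) != len)
                    return -ENOMEM;
            return 0;
    }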
236 struct bio *bio; in nvmet_bdev_execute_rw() local
269 bio = &req->b.inline_bio; in nvmet_bdev_execute_rw()
270 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); in nvmet_bdev_execute_rw()
272 bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt)); in nvmet_bdev_execute_rw()
274 bio_set_dev(bio, req->ns->bdev); in nvmet_bdev_execute_rw()
275 bio->bi_iter.bi_sector = sector; in nvmet_bdev_execute_rw()
276 bio->bi_private = req; in nvmet_bdev_execute_rw()
277 bio->bi_end_io = nvmet_bio_done; in nvmet_bdev_execute_rw()
278 bio->bi_opf = op; in nvmet_bdev_execute_rw()
286 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) in nvmet_bdev_execute_rw()
288 struct bio *prev = bio; in nvmet_bdev_execute_rw()
291 rc = nvmet_bdev_alloc_bip(req, bio, in nvmet_bdev_execute_rw()
294 bio_io_error(bio); in nvmet_bdev_execute_rw()
299 bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt)); in nvmet_bdev_execute_rw()
300 bio_set_dev(bio, req->ns->bdev); in nvmet_bdev_execute_rw()
301 bio->bi_iter.bi_sector = sector; in nvmet_bdev_execute_rw()
302 bio->bi_opf = op; in nvmet_bdev_execute_rw()
304 bio_chain(bio, prev); in nvmet_bdev_execute_rw()
313 rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter); in nvmet_bdev_execute_rw()
315 bio_io_error(bio); in nvmet_bdev_execute_rw()
320 submit_bio(bio); in nvmet_bdev_execute_rw()
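
nvmet_bdev_execute_rw() (236-320) maps the request scatterlist into one bio, and when bio_add_page() can no longer extend it, chains a fresh bio with bio_chain() so the first bio's end_io fires only after every chained bio has completed; each full bio is submitted at chain time and the final one after the loop. A sketch of that loop with the inline-bio fast path and PI handling omitted (my_submit_sgl and its done/priv parameters are hypothetical; the two-argument bio_alloc() matches the kernel version shown in this listing, roughly v5.12, and changed in later kernels):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    static void my_submit_sgl(struct block_device *bdev, sector_t sector,
                              struct scatterlist *sgl, int sg_cnt,
                              unsigned int op, bio_end_io_t *done, void *priv)
    {
            struct scatterlist *sg;
            struct bio *bio;
            int i;

            bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
            bio_set_dev(bio, bdev);
            bio->bi_iter.bi_sector = sector;
            bio->bi_private = priv;          /* completion context */
            bio->bi_end_io = done;           /* fires after the whole chain */
            bio->bi_opf = op;

            for_each_sg(sgl, sg, sg_cnt, i) {
                    while (bio_add_page(bio, sg_page(sg), sg->length,
                                        sg->offset) != sg->length) {
                            struct bio *prev = bio;

                            /*
                             * bio is full: allocate a successor and chain it,
                             * so prev's completion is held until the new bio
                             * (and everything chained after it) completes.
                             */
                            bio = bio_alloc(GFP_KERNEL,
                                            bio_max_segs(sg_cnt - i));
                            bio_set_dev(bio, bdev);
                            bio->bi_iter.bi_sector = sector;
                            bio->bi_opf = op;

                            bio_chain(bio, prev);
                            submit_bio(prev);
                    }
                    sector += sg->length >> SECTOR_SHIFT;
            }
            submit_bio(bio);
    }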
326 struct bio *bio = &req->b.inline_bio; in nvmet_bdev_execute_flush() local
331 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); in nvmet_bdev_execute_flush()
332 bio_set_dev(bio, req->ns->bdev); in nvmet_bdev_execute_flush()
333 bio->bi_private = req; in nvmet_bdev_execute_flush()
334 bio->bi_end_io = nvmet_bio_done; in nvmet_bdev_execute_flush()
335 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; in nvmet_bdev_execute_flush()
337 submit_bio(bio); in nvmet_bdev_execute_flush()
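
The flush handler (326-337) issues a zero-length write with REQ_PREFLUSH, which the block layer treats as a pure cache flush since the bio carries no data pages. The same thing with an allocated bio instead of the request-embedded inline bio (my_flush is a hypothetical wrapper):

    #include <linux/bio.h>

    /* Issue an empty flush to @bdev; @done/@priv are the completion hook. */
    static void my_flush(struct block_device *bdev, bio_end_io_t *done,
                         void *priv)
    {
            struct bio *bio = bio_alloc(GFP_KERNEL, 0);

            bio_set_dev(bio, bdev);
            bio->bi_private = priv;
            bio->bi_end_io = done;
            /* No data: a zero-length PREFLUSH write is a pure flush. */
            bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
            submit_bio(bio);
    }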
348 struct nvme_dsm_range *range, struct bio **bio) in nvmet_bdev_discard_range() argument
356 GFP_KERNEL, 0, bio); in nvmet_bdev_discard_range()
367 struct bio *bio = NULL; in nvmet_bdev_execute_discard() local
377 status = nvmet_bdev_discard_range(req, &range, &bio); in nvmet_bdev_execute_discard()
382 if (bio) { in nvmet_bdev_execute_discard()
383 bio->bi_private = req; in nvmet_bdev_execute_discard()
384 bio->bi_end_io = nvmet_bio_done; in nvmet_bdev_execute_discard()
386 bio_io_error(bio); in nvmet_bdev_execute_discard()
388 submit_bio(bio); in nvmet_bdev_execute_discard()
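
For DSM discard (348-388), __blkdev_issue_discard() accumulates each range onto a growing bio chain through its struct bio ** argument; the caller attaches the completion to the final bio and either submits it or fails it with bio_io_error() if range processing stopped early. A sketch over two ranges (my_discard is hypothetical; the 0 flags argument matches the kernel version in this listing):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int my_discard(struct block_device *bdev,
                          sector_t lba[2], sector_t nlb[2],
                          bio_end_io_t *done, void *priv)
    {
            struct bio *bio = NULL;
            int i, ret = 0;

            for (i = 0; i < 2; i++) {
                    /* Each call chains another discard bio onto *bio. */
                    ret = __blkdev_issue_discard(bdev, lba[i], nlb[i],
                                                 GFP_KERNEL, 0, &bio);
                    if (ret)
                            break;
            }
            if (bio) {
                    bio->bi_private = priv;
                    bio->bi_end_io = done;
                    if (ret)
                            bio_io_error(bio);  /* fail fast, still complete */
                    else
                            submit_bio(bio);
            }
            return ret;
    }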
415 struct bio *bio = NULL; in nvmet_bdev_execute_write_zeroes() local
428 GFP_KERNEL, &bio, 0); in nvmet_bdev_execute_write_zeroes()
429 if (bio) { in nvmet_bdev_execute_write_zeroes()
430 bio->bi_private = req; in nvmet_bdev_execute_write_zeroes()
431 bio->bi_end_io = nvmet_bio_done; in nvmet_bdev_execute_write_zeroes()
432 submit_bio(bio); in nvmet_bdev_execute_write_zeroes()
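
Write-zeroes (415-432) follows the same shape with __blkdev_issue_zeroout(): the helper may build a chain of bios, and the caller hooks its completion onto the returned bio before submitting; if no bio was built, the caller completes the request directly with the translated status. A sketch (my_write_zeroes is hypothetical):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int my_write_zeroes(struct block_device *bdev, sector_t sector,
                               sector_t nr_sects, bio_end_io_t *done,
                               void *priv)
    {
            struct bio *bio = NULL;
            int ret;

            ret = __blkdev_issue_zeroout(bdev, sector, nr_sects,
                                         GFP_KERNEL, &bio, 0);
            if (bio) {
                    bio->bi_private = priv;
                    bio->bi_end_io = done;
                    submit_bio(bio);
            }
            /* If bio is NULL, the caller must complete with @ret itself. */
            return ret;
    }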