Lines Matching refs:bio (block/blk-map.c)

48 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)  in bio_copy_from_iter()  argument
53 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_from_iter()
79 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
84 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_to_iter()
109 static int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
111 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
122 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
123 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
125 bio_free_pages(bio); in bio_uncopy_user()
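
The bio_uncopy_user() lines above are the teardown half of the copy (bounce) mapping: bi_private carries the bio_map_data saved at map time, reads are copied back from the bounce pages into the caller's iov_iter, and the pages are then freed. A condensed sketch of that flow in the context of blk-map.c, with the surrounding null-mapping checks omitted; the kfree() of the bio_map_data is an assumption based on the listed lines.

static int uncopy_user_sketch(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;	/* saved at map time */
	int ret = 0;

	if (bio_data_dir(bio) == READ)
		ret = bio_copy_to_iter(bio, bmd->iter);	/* bounce pages -> user iter */
	bio_free_pages(bio);				/* drop the bounce pages */
	kfree(bmd);					/* assumed: bmd freed here */
	return ret;
}
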
136 struct bio *bio; in bio_copy_user_iov() local
157 bio = bio_kmalloc(nr_pages, gfp_mask); in bio_copy_user_iov()
158 if (!bio) in bio_copy_user_iov()
160 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq)); in bio_copy_user_iov()
192 if (bio_add_page(bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
203 map_data->offset += bio->bi_iter.bi_size; in bio_copy_user_iov()
210 ret = bio_copy_from_iter(bio, iter); in bio_copy_user_iov()
218 ret = bio_copy_from_iter(bio, &iter2); in bio_copy_user_iov()
223 zero_fill_bio(bio); in bio_copy_user_iov()
224 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_copy_user_iov()
227 bio->bi_private = bmd; in bio_copy_user_iov()
229 ret = blk_rq_append_bio(rq, bio); in bio_copy_user_iov()
235 bio_free_pages(bio); in bio_copy_user_iov()
236 bio_uninit(bio); in bio_copy_user_iov()
237 kfree(bio); in bio_copy_user_iov()
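
Taken together, the bio_copy_user_iov() lines are the setup half of the same bounce path: a bio is kmalloc'ed and initialised over its inline vecs, bounce pages are attached with bio_add_page(), data is copied in from the iterator for writes (or the bio is zero-filled and the iterator advanced), and the bio is appended to the request. A minimal sketch of that shape; alloc_bounce_pages() is a hypothetical stand-in for the page-allocation loop, and the rq_map_data, null-mapping and bio_map_data bookkeeping are left out.

static int copy_user_iov_sketch(struct request *rq, struct iov_iter *iter,
				gfp_t gfp_mask)
{
	unsigned int nr_pages = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	ret = alloc_bounce_pages(bio, iter, gfp_mask);	/* hypothetical bio_add_page() loop */
	if (ret)
		goto cleanup;

	if (op_is_write(req_op(rq))) {
		ret = bio_copy_from_iter(bio, iter);	/* user -> bounce pages */
		if (ret)
			goto cleanup;
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ret;
}
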
243 static void blk_mq_map_bio_put(struct bio *bio) in blk_mq_map_bio_put() argument
245 if (bio->bi_opf & REQ_ALLOC_CACHE) { in blk_mq_map_bio_put()
246 bio_put(bio); in blk_mq_map_bio_put()
248 bio_uninit(bio); in blk_mq_map_bio_put()
249 kfree(bio); in blk_mq_map_bio_put()
253 static struct bio *blk_rq_map_bio_alloc(struct request *rq, in blk_rq_map_bio_alloc()
256 struct bio *bio; in blk_rq_map_bio_alloc() local
259 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask, in blk_rq_map_bio_alloc()
261 if (!bio) in blk_rq_map_bio_alloc()
264 bio = bio_kmalloc(nr_vecs, gfp_mask); in blk_rq_map_bio_alloc()
265 if (!bio) in blk_rq_map_bio_alloc()
267 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq)); in blk_rq_map_bio_alloc()
269 return bio; in blk_rq_map_bio_alloc()
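
blk_rq_map_bio_alloc() and blk_mq_map_bio_put() form a matched pair: the alloc side either takes the cached bio_alloc_bioset() path when REQ_ALLOC_CACHE is set or falls back to bio_kmalloc() plus bio_init(), and the put side frees the bio the same way it was obtained. A sketch of the pairing; the listing truncates the bio_set argument, so fs_bio_set below is an assumption, and the real eligibility test for the cached path is reduced to a single flag check.

static struct bio *map_bio_alloc_sketch(struct request *rq,
					unsigned int nr_vecs, gfp_t gfp)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_ALLOC_CACHE) {
		/* cached path: bio comes from a bio_set (assumed fs_bio_set) */
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp,
				       &fs_bio_set);
	} else {
		bio = bio_kmalloc(nr_vecs, gfp);
		if (bio)
			bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
				 req_op(rq));
	}
	return bio;
}

static void map_bio_put_sketch(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);			/* returns to the bio_set */
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}
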
276 struct bio *bio; in bio_map_user_iov() local
282 bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask); in bio_map_user_iov()
283 if (!bio) in bio_map_user_iov()
285 ret = bio_iov_iter_get_pages(bio, iter); in bio_map_user_iov()
288 ret = blk_rq_append_bio(rq, bio); in bio_map_user_iov()
294 bio_release_pages(bio, false); in bio_map_user_iov()
296 blk_mq_map_bio_put(bio); in bio_map_user_iov()
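
bio_map_user_iov() is the zero-copy counterpart: bio_iov_iter_get_pages() pins the user pages straight into the bio instead of bouncing them, so a failure after that point must drop both the page references and the bio. A sketch of the flow implied by the listed lines:

static int map_user_iov_sketch(struct request *rq, struct iov_iter *iter,
			       gfp_t gfp_mask)
{
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (!bio)
		return -ENOMEM;

	ret = bio_iov_iter_get_pages(bio, iter);	/* pin user pages into the bio */
	if (ret)
		goto put_bio;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto release_pages;
	return 0;

release_pages:
	bio_release_pages(bio, false);			/* unpin without dirtying */
put_bio:
	blk_mq_map_bio_put(bio);
	return ret;
}
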
300 static void bio_invalidate_vmalloc_pages(struct bio *bio) in bio_invalidate_vmalloc_pages() argument
303 if (bio->bi_private && !op_is_write(bio_op(bio))) { in bio_invalidate_vmalloc_pages()
306 for (i = 0; i < bio->bi_vcnt; i++) in bio_invalidate_vmalloc_pages()
307 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
308 invalidate_kernel_vmap_range(bio->bi_private, len); in bio_invalidate_vmalloc_pages()
313 static void bio_map_kern_endio(struct bio *bio) in bio_map_kern_endio() argument
315 bio_invalidate_vmalloc_pages(bio); in bio_map_kern_endio()
316 bio_uninit(bio); in bio_map_kern_endio()
317 kfree(bio); in bio_map_kern_endio()
320 static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op, in bio_map_kern()
324 struct bio *bio; in bio_map_kern() local
326 bio = bio_kmalloc(nr_vecs, gfp_mask); in bio_map_kern()
327 if (!bio) in bio_map_kern()
329 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, op); in bio_map_kern()
331 bio->bi_private = data; in bio_map_kern()
332 if (!bio_add_vmalloc(bio, data, len)) { in bio_map_kern()
333 bio_uninit(bio); in bio_map_kern()
334 kfree(bio); in bio_map_kern()
338 bio_add_virt_nofail(bio, data, len); in bio_map_kern()
340 bio->bi_end_io = bio_map_kern_endio; in bio_map_kern()
341 return bio; in bio_map_kern()
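
bio_map_kern() wraps an existing kernel buffer in a bio without copying. The vmalloc case is why bi_private matters here: it is set to the buffer so that bio_map_kern_endio(), via bio_invalidate_vmalloc_pages() above, can invalidate the vmap range once a read completes; linearly mapped buffers are added with the _nofail variant instead. A condensed sketch, assuming bio_add_vmalloc() and bio_add_virt_nofail() behave as the listed call sites suggest and that the vec count is just a page-granular upper bound:

static struct bio *map_kern_sketch(void *data, unsigned int len,
				   enum req_op op, gfp_t gfp_mask)
{
	unsigned int nr_vecs = DIV_ROUND_UP(offset_in_page(data) + len,
					    PAGE_SIZE);
	struct bio *bio;

	bio = bio_kmalloc(nr_vecs, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, op);

	if (is_vmalloc_addr(data)) {
		bio->bi_private = data;		/* lets completion invalidate the vmap range */
		if (!bio_add_vmalloc(bio, data, len)) {
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}
	} else {
		bio_add_virt_nofail(bio, data, len);	/* linear mapping cannot fail */
	}

	bio->bi_end_io = bio_map_kern_endio;	/* frees the bio on completion */
	return bio;
}
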
344 static void bio_copy_kern_endio(struct bio *bio) in bio_copy_kern_endio() argument
346 bio_free_pages(bio); in bio_copy_kern_endio()
347 bio_uninit(bio); in bio_copy_kern_endio()
348 kfree(bio); in bio_copy_kern_endio()
351 static void bio_copy_kern_endio_read(struct bio *bio) in bio_copy_kern_endio_read() argument
353 char *p = bio->bi_private; in bio_copy_kern_endio_read()
357 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_kern_endio_read()
362 bio_copy_kern_endio(bio); in bio_copy_kern_endio_read()
375 static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op, in bio_copy_kern()
381 struct bio *bio; in bio_copy_kern() local
392 bio = bio_kmalloc(nr_pages, gfp_mask); in bio_copy_kern()
393 if (!bio) in bio_copy_kern()
395 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, op); in bio_copy_kern()
411 if (bio_add_page(bio, page, bytes, 0) < bytes) in bio_copy_kern()
419 bio->bi_end_io = bio_copy_kern_endio; in bio_copy_kern()
421 bio->bi_end_io = bio_copy_kern_endio_read; in bio_copy_kern()
422 bio->bi_private = data; in bio_copy_kern()
425 return bio; in bio_copy_kern()
428 bio_free_pages(bio); in bio_copy_kern()
429 bio_uninit(bio); in bio_copy_kern()
430 kfree(bio); in bio_copy_kern()
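
bio_copy_kern() is the bounce variant for kernel buffers that blk_rq_map_kern() decides cannot be mapped in place (historically unaligned or on-stack buffers): fresh pages are added to the bio, writes are copied in up front, and for reads the bio_copy_kern_endio_read() handler copies the data back into the original buffer through bi_private before freeing the pages. A sketch of that read-completion half, using the memcpy_from_bvec() helper:

static void copy_kern_read_endio_sketch(struct bio *bio)
{
	char *p = bio->bi_private;		/* original destination buffer */
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);	/* bounce page -> caller's buffer */
		p += bvec->bv_len;
	}

	bio_free_pages(bio);			/* free the bounce pages */
	bio_uninit(bio);
	kfree(bio);
}
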
438 int blk_rq_append_bio(struct request *rq, struct bio *bio) in blk_rq_append_bio() argument
446 ret = bio_split_rw_at(bio, lim, &nr_segs, max_bytes); in blk_rq_append_bio()
454 if (rq->bio) { in blk_rq_append_bio()
455 if (!ll_back_merge_fn(rq, bio, nr_segs)) in blk_rq_append_bio()
457 rq->biotail->bi_next = bio; in blk_rq_append_bio()
458 rq->biotail = bio; in blk_rq_append_bio()
459 rq->__data_len += bio->bi_iter.bi_size; in blk_rq_append_bio()
460 bio_crypt_free_ctx(bio); in blk_rq_append_bio()
465 rq->bio = rq->biotail = bio; in blk_rq_append_bio()
466 rq->__data_len = bio->bi_iter.bi_size; in blk_rq_append_bio()
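
blk_rq_append_bio() is the common funnel for all of the paths above: the bio is first split and segment-counted against the queue limits (bio_split_rw_at()), then either back-merged onto the request's existing chain via biotail or installed as the first bio, with __data_len tracking the total size. Passthrough drivers that build their own bios can call it directly; a hedged caller-side sketch for a single-page payload:

static int attach_bio_sketch(struct request *rq, struct page *page,
			     unsigned int len, gfp_t gfp)
{
	struct bio *bio;
	int ret;

	bio = bio_kmalloc(1, gfp);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, NULL, bio->bi_inline_vecs, 1, req_op(rq));

	if (bio_add_page(bio, page, len, 0) < len) {
		ret = -EINVAL;
		goto free_bio;
	}

	ret = blk_rq_append_bio(rq, bio);	/* merges into rq or becomes rq->bio */
	if (ret)
		goto free_bio;
	return 0;

free_bio:
	bio_uninit(bio);
	kfree(bio);
	return ret;
}
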
475 struct bio *bio; in blk_rq_map_user_bvec() local
482 bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL); in blk_rq_map_user_bvec()
483 if (!bio) in blk_rq_map_user_bvec()
485 bio_iov_bvec_set(bio, iter); in blk_rq_map_user_bvec()
487 ret = blk_rq_append_bio(rq, bio); in blk_rq_map_user_bvec()
489 blk_mq_map_bio_put(bio); in blk_rq_map_user_bvec()
514 struct bio *bio = NULL; in blk_rq_map_user_iov() local
550 if (!bio) in blk_rq_map_user_iov()
551 bio = rq->bio; in blk_rq_map_user_iov()
557 blk_rq_unmap_user(bio); in blk_rq_map_user_iov()
559 rq->bio = NULL; in blk_rq_map_user_iov()
623 int blk_rq_unmap_user(struct bio *bio) in blk_rq_unmap_user() argument
625 struct bio *next_bio; in blk_rq_unmap_user()
628 while (bio) { in blk_rq_unmap_user()
629 if (bio->bi_private) { in blk_rq_unmap_user()
630 ret2 = bio_uncopy_user(bio); in blk_rq_unmap_user()
634 bio_release_pages(bio, bio_data_dir(bio) == READ); in blk_rq_unmap_user()
637 if (bio_integrity(bio)) in blk_rq_unmap_user()
638 bio_integrity_unmap_user(bio); in blk_rq_unmap_user()
640 next_bio = bio; in blk_rq_unmap_user()
641 bio = bio->bi_next; in blk_rq_unmap_user()
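
blk_rq_unmap_user() walks the request's bio chain and undoes whichever mapping was used per bio: a non-NULL bi_private means a copy mapping and goes through bio_uncopy_user(), otherwise the pinned pages are released (and dirtied for reads), with integrity payloads unmapped as well; the blk_rq_map_user_iov() error path at lines 550-559 unwinds a partially built request the same way. A hedged driver-side sketch of the full map/execute/unmap lifecycle for a user buffer; the opcode and the synchronous-issue style are assumptions, and the signatures are those of current kernels.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static int passthru_user_io_sketch(struct request_queue *q,
				   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto free_rq;
	bio = rq->bio;				/* save: needed for unmap after completion */

	status = blk_execute_rq(rq, false);	/* synchronous issue */

	ret = blk_rq_unmap_user(bio);		/* copy back or unpin, free the bios */
	if (!ret)
		ret = blk_status_to_errno(status);
free_rq:
	blk_mq_free_request(rq);
	return ret;
}
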
665 struct bio *bio; in blk_rq_map_kern() local
674 bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask); in blk_rq_map_kern()
676 bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask); in blk_rq_map_kern()
678 if (IS_ERR(bio)) in blk_rq_map_kern()
679 return PTR_ERR(bio); in blk_rq_map_kern()
681 ret = blk_rq_append_bio(rq, bio); in blk_rq_map_kern()
683 bio_uninit(bio); in blk_rq_map_kern()
684 kfree(bio); in blk_rq_map_kern()
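
Finally, blk_rq_map_kern() picks between the two kernel-buffer paths: buffers that can be mapped in place go through bio_map_kern(), the rest are bounced via bio_copy_kern(), and in both cases the bio's end_io handler tears the mapping down, so the caller only maps, executes and frees the request. A hedged usage sketch; the blk_rq_map_kern() signature is assumed to match the listing's kernel version, while older kernels pass the request_queue as an extra first argument.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static int passthru_kern_io_sketch(struct request_queue *q,
				   void *buf, unsigned int len)
{
	struct request *rq;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(rq, buf, len, GFP_KERNEL);
	if (ret)
		goto free_rq;

	status = blk_execute_rq(rq, false);	/* bio end_io frees the mapping */
	ret = blk_status_to_errno(status);
free_rq:
	blk_mq_free_request(rq);
	return ret;
}
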