Lines matching references to 'bio' (kernel source line numbers and containing functions shown per line)
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_first_bvec() argument
23 *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in bio_get_first_bvec()
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_last_bvec() argument
28 struct bvec_iter iter = bio->bi_iter; in bio_get_last_bvec()
31 bio_get_first_bvec(bio, bv); in bio_get_last_bvec()
32 if (bv->bv_len == bio->bi_iter.bi_size) in bio_get_last_bvec()
35 bio_advance_iter(bio, &iter, iter.bi_size); in bio_get_last_bvec()
42 *bv = bio->bi_io_vec[idx]; in bio_get_last_bvec()
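
The fragments above come from helpers that fetch the first and last segment of an I/O vector. A minimal userspace sketch of the same idea, using made-up stand-in types rather than the kernel's struct bio and struct bio_vec: the first segment is read straight from the vector, the last one is whatever the walk ends on.

/*
 * Simplified model of "first/last segment of an I/O vector".
 * The struct names and fields are stand-ins, not the kernel types.
 */
#include <stdio.h>

struct seg { unsigned int off, len; };

struct io {
	struct seg *vec;     /* segment array */
	unsigned int nsegs;  /* number of segments */
};

static struct seg io_first_seg(const struct io *io)
{
	return io->vec[0];
}

static struct seg io_last_seg(const struct io *io)
{
	/* the kernel advances an iterator; an index is enough here */
	return io->vec[io->nsegs - 1];
}

int main(void)
{
	struct seg v[] = { { 0, 512 }, { 512, 1024 }, { 0, 4096 } };
	struct io io = { v, 3 };

	printf("first: %u bytes, last: %u bytes\n",
	       io_first_seg(&io).len, io_last_seg(&io).len);
	return 0;
}
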
53 struct request *prev_rq, struct bio *prev, struct bio *next) in bio_will_gap()
66 bio_get_first_bvec(prev_rq->bio, &pb); in bio_will_gap()
88 static inline bool req_gap_back_merge(struct request *req, struct bio *bio) in req_gap_back_merge() argument
90 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
93 static inline bool req_gap_front_merge(struct request *req, struct bio *bio) in req_gap_front_merge() argument
95 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
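
req_gap_back_merge() and req_gap_front_merge() ask whether gluing a bio onto a request would leave a gap across the device's virtual boundary between the adjacent segments. A hedged sketch of that style of check, with an assumed boundary mask instead of the kernel's queue_limits:

/*
 * Sketch of a "virtual boundary gap" test between two adjacent segments.
 * Simplified model: the pair gaps if the end of the previous segment or
 * the start of the next one is not aligned to the boundary mask. Field
 * names are illustrative, not the kernel's struct bio_vec.
 */
#include <stdbool.h>
#include <stdio.h>

struct seg { unsigned int off, len; };

static bool segs_gap(const struct seg *prev, const struct seg *next,
		     unsigned int boundary_mask)
{
	if (!boundary_mask)		/* no boundary: never a gap */
		return false;
	return ((prev->off + prev->len) & boundary_mask) ||
	       (next->off & boundary_mask);
}

int main(void)
{
	struct seg a = { 0, 4096 }, b = { 256, 512 };

	/* 4 KiB virtual boundary (mask 0xfff): b starts mid-boundary, so gap */
	printf("gap: %d\n", segs_gap(&a, &b, 0xfff));
	return 0;
}
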
108 static struct bio *bio_submit_split(struct bio *bio, int split_sectors) in bio_submit_split() argument
111 bio->bi_status = errno_to_blk_status(split_sectors); in bio_submit_split()
112 bio_endio(bio); in bio_submit_split()
117 struct bio *split; in bio_submit_split()
119 split = bio_split(bio, split_sectors, GFP_NOIO, in bio_submit_split()
120 &bio->bi_bdev->bd_disk->bio_split); in bio_submit_split()
123 bio_chain(split, bio); in bio_submit_split()
124 trace_block_split(split, bio->bi_iter.bi_sector); in bio_submit_split()
125 WARN_ON_ONCE(bio_zone_write_plugging(bio)); in bio_submit_split()
126 submit_bio_noacct(bio); in bio_submit_split()
130 return bio; in bio_submit_split()
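
bio_submit_split() uses the convention that a negative split_sectors carries an errno and fails the bio, zero means no split is needed, and a positive value is where to cut before resubmitting the remainder. A simplified userspace model of that convention; the type, helper name, and messages are stand-ins, not the kernel API:

/*
 * Split-or-fail pattern in the spirit of bio_submit_split():
 * negative = error, zero = fits as-is, positive = cut point.
 */
#include <errno.h>
#include <stdio.h>

struct io { unsigned int sectors; };

static struct io *submit_split(struct io *io, int split_sectors)
{
	if (split_sectors < 0) {
		fprintf(stderr, "failing I/O: %d\n", split_sectors);
		return NULL;			/* bio_endio() analogue */
	}
	if (split_sectors == 0)
		return io;			/* within the limits already */

	/* front part is trimmed; the remainder would be resubmitted */
	printf("splitting at %d of %u sectors\n", split_sectors, io->sectors);
	io->sectors = split_sectors;
	return io;
}

int main(void)
{
	struct io io = { 1024 };

	submit_split(&io, 256);
	submit_split(&io, -EINVAL);
	return 0;
}
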
133 struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim, in bio_split_discard() argument
148 return bio; in bio_split_discard()
150 if (bio_sectors(bio) <= max_discard_sectors) in bio_split_discard()
151 return bio; in bio_split_discard()
159 tmp = bio->bi_iter.bi_sector + split_sectors - in bio_split_discard()
166 return bio_submit_split(bio, split_sectors); in bio_split_discard()
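
The bio_split_discard() fragments pull the split point back so that the remainder starts on a discard-granularity boundary. A small arithmetic sketch of that adjustment, with an assumed granularity and alignment offset:

/*
 * Sketch of aligning a discard split to the device's discard granularity,
 * in the spirit of the tmp/split_sectors arithmetic above. The parameter
 * names and the demo values are assumptions.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

static unsigned int align_discard_split(sector_t start, unsigned int split,
					unsigned int granularity,
					unsigned int alignment)
{
	sector_t end = start + split - alignment;
	unsigned int rem = (unsigned int)(end % granularity);

	/* pull the split point back so the remainder starts on a boundary */
	if (split > rem)
		split -= rem;
	return split;
}

int main(void)
{
	/* 1 MiB granularity expressed in 512-byte sectors = 2048 */
	printf("split = %u\n", align_discard_split(100, 5000, 2048, 0));
	return 0;
}
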
169 struct bio *bio_split_write_zeroes(struct bio *bio, in bio_split_write_zeroes() argument
174 return bio; in bio_split_write_zeroes()
175 if (bio_sectors(bio) <= lim->max_write_zeroes_sectors) in bio_split_write_zeroes()
176 return bio; in bio_split_write_zeroes()
177 return bio_submit_split(bio, lim->max_write_zeroes_sectors); in bio_split_write_zeroes()
201 static inline unsigned get_max_io_size(struct bio *bio, in get_max_io_size() argument
206 bool is_atomic = bio->bi_opf & REQ_ATOMIC; in get_max_io_size()
221 blk_boundary_sectors_left(bio->bi_iter.bi_sector, in get_max_io_size()
225 start = bio->bi_iter.bi_sector & (pbs - 1); in get_max_io_size()
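
get_max_io_size() clamps the transfer by whatever is left before the next boundary and then trims the end so it lands on a physical-block multiple relative to the starting offset. A standalone sketch of that arithmetic under assumed limits (power-of-two physical block size, optional boundary):

/*
 * Sketch of clamping an I/O by "sectors left to the next boundary" and
 * rounding the end down to a physical-block multiple. Values and helper
 * names are stand-ins, not the kernel's queue_limits interface.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

static unsigned int boundary_sectors_left(sector_t sector, unsigned int boundary)
{
	return boundary - (unsigned int)(sector % boundary);
}

static unsigned int max_io_sectors(sector_t sector, unsigned int max_sectors,
				   unsigned int boundary, unsigned int pbs)
{
	unsigned int start = (unsigned int)(sector & (pbs - 1));
	unsigned int len;

	if (boundary) {
		unsigned int left = boundary_sectors_left(sector, boundary);

		if (left < max_sectors)
			max_sectors = left;
	}

	/* round the end of the window down to a physical-block multiple */
	len = (start + max_sectors) & ~(pbs - 1);
	return len > start ? len - start : 0;
}

int main(void)
{
	/* 4 KiB physical blocks (8 sectors), 128 KiB boundary (256 sectors) */
	printf("%u\n", max_io_sectors(1000, 512, 256, 8));
	return 0;
}
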
311 int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim, in bio_split_rw_at() argument
318 bio_for_each_bvec(bv, bio, iter) { in bio_split_rw_at()
344 if (bio->bi_opf & REQ_ATOMIC) in bio_split_rw_at()
351 if (bio->bi_opf & REQ_NOWAIT) in bio_split_rw_at()
368 bio_clear_polled(bio); in bio_split_rw_at()
373 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, in bio_split_rw() argument
376 return bio_submit_split(bio, in bio_split_rw()
377 bio_split_rw_at(bio, lim, nr_segs, in bio_split_rw()
378 get_max_io_size(bio, lim) << SECTOR_SHIFT)); in bio_split_rw()
388 struct bio *bio_split_zone_append(struct bio *bio, in bio_split_zone_append() argument
394 split_sectors = bio_split_rw_at(bio, lim, nr_segs, in bio_split_zone_append()
398 return bio_submit_split(bio, split_sectors); in bio_split_zone_append()
412 struct bio *bio_split_to_limits(struct bio *bio) in bio_split_to_limits() argument
414 const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits; in bio_split_to_limits()
417 return __bio_split_to_limits(bio, lim, &nr_segs); in bio_split_to_limits()
428 if (!rq->bio) in blk_recalc_rq_segments()
431 switch (bio_op(rq->bio)) { in blk_recalc_rq_segments()
435 struct bio *bio = rq->bio; in blk_recalc_rq_segments() local
437 for_each_bio(bio) in blk_recalc_rq_segments()
536 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
545 for_each_bio(bio) { in __blk_bios_map_sg()
546 bio_for_each_bvec(bvec, bio, iter) { in __blk_bios_map_sg()
563 if (likely(bio->bi_iter.bi_size)) { in __blk_bios_map_sg()
583 else if (rq->bio) in __blk_rq_map_sg()
584 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg); in __blk_rq_map_sg()
621 static inline int ll_new_hw_segment(struct request *req, struct bio *bio, in ll_new_hw_segment() argument
624 if (!blk_cgroup_mergeable(req, bio)) in ll_new_hw_segment()
627 if (blk_integrity_merge_bio(req->q, req, bio) == false) in ll_new_hw_segment()
642 if (bio_integrity(bio)) in ll_new_hw_segment()
644 bio); in ll_new_hw_segment()
652 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs) in ll_back_merge_fn() argument
654 if (req_gap_back_merge(req, bio)) in ll_back_merge_fn()
657 integrity_req_gap_back_merge(req, bio)) in ll_back_merge_fn()
659 if (!bio_crypt_ctx_back_mergeable(req, bio)) in ll_back_merge_fn()
661 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_back_merge_fn()
667 return ll_new_hw_segment(req, bio, nr_segs); in ll_back_merge_fn()
670 static int ll_front_merge_fn(struct request *req, struct bio *bio, in ll_front_merge_fn() argument
673 if (req_gap_front_merge(req, bio)) in ll_front_merge_fn()
676 integrity_req_gap_front_merge(req, bio)) in ll_front_merge_fn()
678 if (!bio_crypt_ctx_front_mergeable(req, bio)) in ll_front_merge_fn()
680 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_front_merge_fn()
681 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) { in ll_front_merge_fn()
686 return ll_new_hw_segment(req, bio, nr_segs); in ll_front_merge_fn()
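
Both ll_back_merge_fn() and ll_front_merge_fn() apply the same size gate before delegating to ll_new_hw_segment(): refuse if the combined sector count would exceed the request's maximum. A trivial model of that check with stand-in types and an assumed limit:

/*
 * Size gate shared by the back- and front-merge paths: reject when the
 * existing request plus the incoming bio exceeds the per-request maximum.
 */
#include <stdbool.h>
#include <stdio.h>

struct req { unsigned int sectors; };
struct io  { unsigned int sectors; };

static bool merge_fits(const struct req *rq, const struct io *io,
		       unsigned int max_sectors)
{
	return rq->sectors + io->sectors <= max_sectors;
}

int main(void)
{
	struct req rq = { 2048 };
	struct io io = { 1024 };

	printf("fits: %d\n", merge_fits(&rq, &io, 2560));	/* 0: too big */
	printf("fits: %d\n", merge_fits(&rq, &io, 4096));	/* 1 */
	return 0;
}
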
696 if (blk_rq_sectors(req) + bio_sectors(next->bio) > in req_attempt_discard_merge()
712 if (req_gap_back_merge(req, next->bio)) in ll_merge_requests_fn()
726 if (!blk_cgroup_mergeable(req, next->bio)) in ll_merge_requests_fn()
753 struct bio *bio; in blk_rq_set_mixed_merge() local
763 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_set_mixed_merge()
764 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) && in blk_rq_set_mixed_merge()
765 (bio->bi_opf & REQ_FAILFAST_MASK) != ff); in blk_rq_set_mixed_merge()
766 bio->bi_opf |= ff; in blk_rq_set_mixed_merge()
771 static inline blk_opf_t bio_failfast(const struct bio *bio) in bio_failfast() argument
773 if (bio->bi_opf & REQ_RAHEAD) in bio_failfast()
776 return bio->bi_opf & REQ_FAILFAST_MASK; in bio_failfast()
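
bio_failfast() treats a readahead bio as if every failfast bit were set and otherwise reports only the failfast bits already on the bio. A standalone model of that logic; the flag values here are illustrative, not the kernel's REQ_* encoding:

/*
 * Model of the bio_failfast() logic shown above: readahead implies the
 * full failfast mask, otherwise only the bio's own failfast bits count.
 */
#include <stdio.h>

#define RAHEAD		(1u << 0)
#define FAILFAST_DEV	(1u << 1)
#define FAILFAST_XPORT	(1u << 2)
#define FAILFAST_DRIVER	(1u << 3)
#define FAILFAST_MASK	(FAILFAST_DEV | FAILFAST_XPORT | FAILFAST_DRIVER)

static unsigned int io_failfast(unsigned int opf)
{
	if (opf & RAHEAD)
		return FAILFAST_MASK;
	return opf & FAILFAST_MASK;
}

int main(void)
{
	printf("%#x\n", io_failfast(RAHEAD));		/* full mask */
	printf("%#x\n", io_failfast(FAILFAST_DEV));	/* just that bit */
	return 0;
}
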
785 struct bio *bio, bool front_merge) in blk_update_mixed_merge() argument
788 if (bio->bi_opf & REQ_RAHEAD) in blk_update_mixed_merge()
789 bio->bi_opf |= REQ_FAILFAST_MASK; in blk_update_mixed_merge()
793 req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK; in blk_update_mixed_merge()
821 struct bio *bio) in blk_atomic_write_mergeable_rq_bio() argument
823 return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC); in blk_atomic_write_mergeable_rq_bio()
900 req->biotail->bi_next = next->bio; in attempt_merge()
921 next->bio = NULL; in attempt_merge()
958 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) in blk_rq_merge_ok() argument
960 if (!rq_mergeable(rq) || !bio_mergeable(bio)) in blk_rq_merge_ok()
963 if (req_op(rq) != bio_op(bio)) in blk_rq_merge_ok()
967 if (bio_data_dir(bio) != rq_data_dir(rq)) in blk_rq_merge_ok()
971 if (!blk_cgroup_mergeable(rq, bio)) in blk_rq_merge_ok()
975 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
979 if (!bio_crypt_rq_ctx_compatible(rq, bio)) in blk_rq_merge_ok()
983 if (rq->write_hint != bio->bi_write_hint) in blk_rq_merge_ok()
986 if (rq->ioprio != bio_prio(bio)) in blk_rq_merge_ok()
989 if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false) in blk_rq_merge_ok()
995 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) in blk_try_merge() argument
999 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge()
1001 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
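
blk_try_merge() classifies by position: back-merge when the bio starts exactly where the request ends, front-merge when the bio ends exactly where the request starts. A small sketch of the same test with stand-in types:

/*
 * Positional classification in the spirit of blk_try_merge().
 * Enum values and struct layout are illustrative only.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

enum merge { MERGE_NONE, MERGE_BACK, MERGE_FRONT };

struct req { sector_t pos; unsigned int sectors; };
struct io  { sector_t pos; unsigned int sectors; };

static enum merge try_merge(const struct req *rq, const struct io *io)
{
	if (rq->pos + rq->sectors == io->pos)
		return MERGE_BACK;
	if (rq->pos - io->sectors == io->pos)	/* i.e. io ends at rq->pos */
		return MERGE_FRONT;
	return MERGE_NONE;
}

int main(void)
{
	struct req rq = { 1000, 8 };
	struct io back = { 1008, 8 }, front = { 992, 8 }, other = { 64, 8 };

	printf("%d %d %d\n", try_merge(&rq, &back), try_merge(&rq, &front),
	       try_merge(&rq, &other));
	return 0;
}
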
1017 struct bio *bio, unsigned int nr_segs) in bio_attempt_back_merge() argument
1019 const blk_opf_t ff = bio_failfast(bio); in bio_attempt_back_merge()
1021 if (!ll_back_merge_fn(req, bio, nr_segs)) in bio_attempt_back_merge()
1024 trace_block_bio_backmerge(bio); in bio_attempt_back_merge()
1025 rq_qos_merge(req->q, req, bio); in bio_attempt_back_merge()
1030 blk_update_mixed_merge(req, bio, false); in bio_attempt_back_merge()
1033 blk_zone_write_plug_bio_merged(bio); in bio_attempt_back_merge()
1035 req->biotail->bi_next = bio; in bio_attempt_back_merge()
1036 req->biotail = bio; in bio_attempt_back_merge()
1037 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_back_merge()
1039 bio_crypt_free_ctx(bio); in bio_attempt_back_merge()
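
The back-merge bookkeeping visible above links the bio after the current tail, makes it the new tail, and grows the request's byte count. A minimal linked-list model of those three steps, using userspace stand-ins rather than the kernel structures:

/*
 * Back-merge bookkeeping: append to the tail and grow the length, in the
 * spirit of biotail->bi_next / biotail / __data_len above.
 */
#include <stddef.h>
#include <stdio.h>

struct io { unsigned int bytes; struct io *next; };
struct req { struct io *head, *tail; unsigned int data_len; };

static void back_merge(struct req *rq, struct io *io)
{
	rq->tail->next = io;	/* req->biotail->bi_next = bio */
	rq->tail = io;		/* req->biotail = bio */
	rq->data_len += io->bytes;
}

int main(void)
{
	struct io a = { 4096, NULL }, b = { 8192, NULL };
	struct req rq = { &a, &a, 4096 };

	back_merge(&rq, &b);
	printf("len=%u tail=%u\n", rq.data_len, rq.tail->bytes);
	return 0;
}
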
1046 struct bio *bio, unsigned int nr_segs) in bio_attempt_front_merge() argument
1048 const blk_opf_t ff = bio_failfast(bio); in bio_attempt_front_merge()
1058 if (!ll_front_merge_fn(req, bio, nr_segs)) in bio_attempt_front_merge()
1061 trace_block_bio_frontmerge(bio); in bio_attempt_front_merge()
1062 rq_qos_merge(req->q, req, bio); in bio_attempt_front_merge()
1067 blk_update_mixed_merge(req, bio, true); in bio_attempt_front_merge()
1069 bio->bi_next = req->bio; in bio_attempt_front_merge()
1070 req->bio = bio; in bio_attempt_front_merge()
1072 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge()
1073 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_front_merge()
1075 bio_crypt_do_front_merge(req, bio); in bio_attempt_front_merge()
1082 struct request *req, struct bio *bio) in bio_attempt_discard_merge() argument
1088 if (blk_rq_sectors(req) + bio_sectors(bio) > in bio_attempt_discard_merge()
1092 rq_qos_merge(q, req, bio); in bio_attempt_discard_merge()
1094 req->biotail->bi_next = bio; in bio_attempt_discard_merge()
1095 req->biotail = bio; in bio_attempt_discard_merge()
1096 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_discard_merge()
1108 struct bio *bio, in blk_attempt_bio_merge() argument
1112 if (!blk_rq_merge_ok(rq, bio)) in blk_attempt_bio_merge()
1115 switch (blk_try_merge(rq, bio)) { in blk_attempt_bio_merge()
1117 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1118 return bio_attempt_back_merge(rq, bio, nr_segs); in blk_attempt_bio_merge()
1121 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1122 return bio_attempt_front_merge(rq, bio, nr_segs); in blk_attempt_bio_merge()
1125 return bio_attempt_discard_merge(q, rq, bio); in blk_attempt_bio_merge()
1153 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1164 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == in blk_attempt_plug_merge()
1185 struct bio *bio, unsigned int nr_segs) in blk_bio_list_merge() argument
1194 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { in blk_bio_list_merge()
1209 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_try_merge() argument
1214 switch (elv_merge(q, &rq, bio)) { in blk_mq_sched_try_merge()
1216 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1218 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK) in blk_mq_sched_try_merge()
1225 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1227 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK) in blk_mq_sched_try_merge()
1234 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; in blk_mq_sched_try_merge()
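
blk_attempt_bio_merge() and blk_mq_sched_try_merge() share a "classify, then dispatch" shape: switch on the merge kind, consult the scheduler for back and front merges, and call the matching attempt helper. A rough sketch of that shape; the enum, helpers, and return convention are illustrative only:

/*
 * Classify-then-dispatch sketch: back and front merges can be vetoed by
 * the scheduler hook, discard merges are attempted directly.
 */
#include <stdbool.h>
#include <stdio.h>

enum merge { MERGE_NONE, MERGE_BACK, MERGE_FRONT, MERGE_DISCARD };

static bool do_back(void)    { puts("back merge");    return true; }
static bool do_front(void)   { puts("front merge");   return true; }
static bool do_discard(void) { puts("discard merge"); return true; }

static bool attempt_merge(enum merge kind, bool sched_allows)
{
	switch (kind) {
	case MERGE_BACK:
		return sched_allows && do_back();
	case MERGE_FRONT:
		return sched_allows && do_front();
	case MERGE_DISCARD:
		return do_discard();	/* no scheduler check on this path */
	default:
		return false;
	}
}

int main(void)
{
	attempt_merge(MERGE_BACK, true);
	attempt_merge(MERGE_FRONT, false);
	return 0;
}
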