Lines matching refs:bio
135 static inline unsigned int throtl_bio_data_size(struct bio *bio) in throtl_bio_data_size() argument
138 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) in throtl_bio_data_size()
140 return bio->bi_iter.bi_size; in throtl_bio_data_size()
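These matches appear to come from the Linux kernel's block/blk-throttle.c. The first group is the size helper used for byte accounting. A hedged reconstruction follows; the body of the DISCARD branch is not matched by the search, and the 512-byte (one sector) value is what mainline uses there, not something visible above.

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* discards can be huge; mainline accounts them as one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}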
161 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, in throtl_qnode_add_bio() argument
164 bool rw = bio_data_dir(bio); in throtl_qnode_add_bio()
170 if (bio_flagged(bio, BIO_TG_BPS_THROTTLED) || in throtl_qnode_add_bio()
171 bio_flagged(bio, BIO_BPS_THROTTLED)) { in throtl_qnode_add_bio()
172 bio_list_add(&qn->bios_iops, bio); in throtl_qnode_add_bio()
175 bio_list_add(&qn->bios_bps, bio); in throtl_qnode_add_bio()
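throtl_qnode_add_bio() shows the central data-structure decision: each qnode keeps two bio lists. A bio that already carries a bps-throttled flag (either the per-group BIO_TG_BPS_THROTTLED or the global BIO_BPS_THROTTLED) has paid its byte budget and only waits for iops, so it goes on bios_iops; everything else queues on bios_bps. A sketch, assuming the elided lines do per-direction queue accounting:

static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct throtl_service_queue *sq)
{
	bool rw = bio_data_dir(bio);

	if (bio_flagged(bio, BIO_TG_BPS_THROTTLED) ||
	    bio_flagged(bio, BIO_BPS_THROTTLED)) {
		/* bytes already charged: this bio only waits for iops */
		bio_list_add(&qn->bios_iops, bio);
	} else {
		/* still owes its byte budget */
		bio_list_add(&qn->bios_bps, bio);
	}
	sq->nr_queued[rw]++;	/* assumed bookkeeping; elided above */
}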
193 static struct bio *throtl_peek_queued(struct list_head *queued) in throtl_peek_queued()
196 struct bio *bio; in throtl_peek_queued() local
202 bio = bio_list_peek(&qn->bios_iops); in throtl_peek_queued()
203 if (!bio) in throtl_peek_queued()
204 bio = bio_list_peek(&qn->bios_bps); in throtl_peek_queued()
205 WARN_ON_ONCE(!bio); in throtl_peek_queued()
206 return bio; in throtl_peek_queued()
225 static struct bio *throtl_pop_queued(struct throtl_service_queue *sq, in throtl_pop_queued()
230 struct bio *bio; in throtl_pop_queued() local
236 bio = bio_list_pop(&qn->bios_iops); in throtl_pop_queued()
237 if (bio) { in throtl_pop_queued()
240 bio = bio_list_pop(&qn->bios_bps); in throtl_pop_queued()
241 if (bio) in throtl_pop_queued()
244 WARN_ON_ONCE(!bio); in throtl_pop_queued()
256 return bio; in throtl_pop_queued()
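throtl_peek_queued() and throtl_pop_queued() consume the split queues in a fixed order: the iops list is always served before the bps list, since those bios are one stage further along, and the WARN_ON_ONCE() documents the invariant that a qnode on the queued list is never empty. A sketch of the peek side, assuming the usual empty-list early return; pop follows the same order and additionally rotates qnodes and hands back a throtl_grp reference through its tg_to_put parameter (only hinted at in the matches):

static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))		/* assumed early return */
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios_iops);	/* iops waiters first */
	if (!bio)
		bio = bio_list_peek(&qn->bios_bps);
	WARN_ON_ONCE(!bio);	/* a queued qnode must hold a bio */
	return bio;
}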
766 static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_within_iops_limit() argument
769 bool rw = bio_data_dir(bio); in tg_within_iops_limit()
789 static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_within_bps_limit() argument
792 bool rw = bio_data_dir(bio); in tg_within_bps_limit()
796 unsigned int bio_size = throtl_bio_data_size(bio); in tg_within_bps_limit()
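tg_within_iops_limit() and tg_within_bps_limit() compute how long the group must wait before this bio fits the respective budget; zero means it may pass now, and the bps variant sizes the bio with throtl_bio_data_size(). The kernel's slice bookkeeping is not visible above, so here is a minimal, self-contained user-space model of the arithmetic only, with hypothetical names (wait_msecs, disp_bytes, elapsed_ms):

#include <stdio.h>

/* Toy model: budget accrues at bps_limit bytes/sec; the wait is however
 * long it takes the budget to cover the already-dispatched bytes plus
 * this bio. This mimics the shape of the check, not the kernel's slices. */
static unsigned long wait_msecs(unsigned long long disp_bytes,
				unsigned int bio_size,
				unsigned long long bps_limit,
				unsigned long elapsed_ms)
{
	unsigned long long budget = bps_limit * elapsed_ms / 1000;

	if (disp_bytes + bio_size <= budget)
		return 0;	/* within limit: dispatch immediately */
	return (unsigned long)((disp_bytes + bio_size - budget) *
			       1000 / bps_limit);
}

int main(void)
{
	/* 1 MiB/s limit, 512 KiB already dispatched, 1 MiB bio, 100 ms in */
	printf("wait %lu ms\n",
	       wait_msecs(512ULL << 10, 1U << 20, 1ULL << 20, 100));
	return 0;
}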
826 static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bps_bio() argument
828 unsigned int bio_size = throtl_bio_data_size(bio); in throtl_charge_bps_bio()
831 if (!bio_flagged(bio, BIO_BPS_THROTTLED) && in throtl_charge_bps_bio()
832 !bio_flagged(bio, BIO_TG_BPS_THROTTLED)) { in throtl_charge_bps_bio()
833 bio_set_flag(bio, BIO_TG_BPS_THROTTLED); in throtl_charge_bps_bio()
834 tg->bytes_disp[bio_data_dir(bio)] += bio_size; in throtl_charge_bps_bio()
838 static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_iops_bio() argument
840 bio_clear_flag(bio, BIO_TG_BPS_THROTTLED); in throtl_charge_iops_bio()
841 tg->io_disp[bio_data_dir(bio)]++; in throtl_charge_iops_bio()
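The charge helpers mirror the split. throtl_charge_bps_bio() adds the bio's size to bytes_disp[] exactly once, guarded by both throttled flags, and marks the bio with the per-group BIO_TG_BPS_THROTTLED so a requeue cannot double-charge it; throtl_charge_iops_bio() clears that mark and bumps io_disp[]. The matches show nearly the whole of both bodies:

static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio)
{
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* charge the bytes only once, even across requeues */
	if (!bio_flagged(bio, BIO_BPS_THROTTLED) &&
	    !bio_flagged(bio, BIO_TG_BPS_THROTTLED)) {
		bio_set_flag(bio, BIO_TG_BPS_THROTTLED);
		tg->bytes_disp[bio_data_dir(bio)] += bio_size;
	}
}

static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
{
	/* the per-group bps mark has served its purpose by now */
	bio_clear_flag(bio, BIO_TG_BPS_THROTTLED);
	tg->io_disp[bio_data_dir(bio)]++;
}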
859 static unsigned long tg_dispatch_bps_time(struct throtl_grp *tg, struct bio *bio) in tg_dispatch_bps_time() argument
861 bool rw = bio_data_dir(bio); in tg_dispatch_bps_time()
867 bio_flagged(bio, BIO_BPS_THROTTLED) || in tg_dispatch_bps_time()
868 bio_flagged(bio, BIO_TG_BPS_THROTTLED)) in tg_dispatch_bps_time()
872 bps_wait = tg_within_bps_limit(tg, bio, bps_limit); in tg_dispatch_bps_time()
878 static unsigned long tg_dispatch_iops_time(struct throtl_grp *tg, struct bio *bio) in tg_dispatch_iops_time() argument
880 bool rw = bio_data_dir(bio); in tg_dispatch_iops_time()
888 iops_wait = tg_within_iops_limit(tg, bio, iops_limit); in tg_dispatch_iops_time()
898 static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio) in tg_dispatch_time() argument
900 bool rw = bio_data_dir(bio); in tg_dispatch_time()
910 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_dispatch_time()
912 wait = tg_dispatch_bps_time(tg, bio); in tg_dispatch_time()
920 throtl_charge_bps_bio(tg, bio); in tg_dispatch_time()
922 return tg_dispatch_iops_time(tg, bio); in tg_dispatch_time()
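tg_dispatch_time() fixes the order of the two checks: the bio first waits out its bps budget (tg_dispatch_bps_time() short-circuits for bios already flagged bps-throttled, per source lines 867-868), is then charged for its bytes, and only after that waits on iops. The assertion at source line 910 appears to enforce that only the head bio of the per-direction queue is ever evaluated. A condensed sketch:

static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
{
	unsigned long wait;

	wait = tg_dispatch_bps_time(tg, bio);
	if (wait)
		return wait;		/* still over the byte budget */

	/* bps satisfied: account the bytes, then check the iops budget */
	throtl_charge_bps_bio(tg, bio);
	return tg_dispatch_iops_time(tg, bio);
}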
934 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, in throtl_add_bio_tg() argument
938 bool rw = bio_data_dir(bio); in throtl_add_bio_tg()
952 throtl_qnode_add_bio(bio, qn, sq); in throtl_add_bio_tg()
959 if (bio_flagged(bio, BIO_BPS_THROTTLED) && in throtl_add_bio_tg()
960 bio == throtl_peek_queued(&sq->queued[rw])) in throtl_add_bio_tg()
970 struct bio *bio; in tg_update_disptime() local
972 bio = throtl_peek_queued(&sq->queued[READ]); in tg_update_disptime()
973 if (bio) in tg_update_disptime()
974 read_wait = tg_dispatch_time(tg, bio); in tg_update_disptime()
976 bio = throtl_peek_queued(&sq->queued[WRITE]); in tg_update_disptime()
977 if (bio) in tg_update_disptime()
978 write_wait = tg_dispatch_time(tg, bio); in tg_update_disptime()
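throtl_add_bio_tg() feeds throtl_qnode_add_bio() and, per source lines 959-960, special-cases a bps-throttled bio that lands at the head of the queue. tg_update_disptime() then decides when the group should next run: it peeks the head bio of each direction and takes the earlier of the two dispatch times. A sketch, with the requeue at the end assumed from the usual pattern:

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		read_wait = tg_dispatch_time(tg, bio);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		write_wait = tg_dispatch_time(tg, bio);

	min_wait = min(read_wait, write_wait);
	/* elided: tg->disptime = jiffies + min_wait, then requeue tg */
}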
1009 struct bio *bio; in tg_dispatch_one_bio() local
1017 bio = throtl_pop_queued(sq, &tg_to_put, rw); in tg_dispatch_one_bio()
1019 throtl_charge_iops_bio(tg, bio); in tg_dispatch_one_bio()
1029 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
1032 bio_set_flag(bio, BIO_BPS_THROTTLED); in tg_dispatch_one_bio()
1033 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
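tg_dispatch_one_bio() is where a bio climbs the cgroup hierarchy: it is popped, charged for iops at this level, and then either re-queued on the parent group (which applies its own limits) or, at the top of the tree, flagged BIO_BPS_THROTTLED and queued for actual issue. An excerpt-style sketch; locking and the tg_to_put reference handling are elided, and parent_sq is assumed to be the top-level service queue in the else branch:

	bio = throtl_pop_queued(sq, &tg_to_put, rw);
	throtl_charge_iops_bio(tg, bio);

	if (parent_tg) {
		/* one level up: the parent applies its own limits */
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
	} else {
		/* top of tree: bytes fully accounted, ready for issue */
		bio_set_flag(bio, BIO_BPS_THROTTLED);
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     parent_sq);
	}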
1051 struct bio *bio; in throtl_dispatch_tg() local
1055 while ((bio = throtl_peek_queued(&sq->queued[READ])) && in throtl_dispatch_tg()
1056 tg_dispatch_time(tg, bio) == 0) { in throtl_dispatch_tg()
1065 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && in throtl_dispatch_tg()
1066 tg_dispatch_time(tg, bio) == 0) { in throtl_dispatch_tg()
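throtl_dispatch_tg() drains the group: reads first, then writes, each while the head bio's dispatch time is zero. Mainline bounds both loops with per-direction quotas so neither direction can starve the other; those bounds are elided by the search, so the ones below (max_nr_reads, max_nr_writes) are assumptions about shape, not the exact values:

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_dispatch_time(tg, bio) == 0) {
		tg_dispatch_one_bio(tg, READ);
		if (++nr_reads >= max_nr_reads)	/* assumed fairness bound */
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_dispatch_time(tg, bio) == 0) {
		tg_dispatch_one_bio(tg, WRITE);
		if (++nr_writes >= max_nr_writes)
			break;
	}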
1212 struct bio *bio; in blk_throtl_dispatch_work_fn() local
1220 while ((bio = throtl_pop_queued(td_sq, NULL, rw))) in blk_throtl_dispatch_work_fn()
1221 bio_list_add(&bio_list_on_stack, bio); in blk_throtl_dispatch_work_fn()
1226 while ((bio = bio_list_pop(&bio_list_on_stack))) in blk_throtl_dispatch_work_fn()
1227 submit_bio_noacct_nocheck(bio); in blk_throtl_dispatch_work_fn()
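blk_throtl_dispatch_work_fn() uses the classic two-phase drain: pop everything that is ready onto an on-stack bio_list while holding the queue lock, then submit outside the lock through submit_bio_noacct_nocheck(), which bypasses re-checking so the bios are not throttled a second time. A sketch; the locking calls and the per-direction loop are assumed from the usual pattern:

	struct bio_list bio_list_on_stack;
	struct bio *bio;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(&q->queue_lock);			/* assumed */
	for (rw = READ; rw <= WRITE; rw++)		/* assumed loop */
		while ((bio = throtl_pop_queued(td_sq, NULL, rw)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(&q->queue_lock);		/* assumed */

	while ((bio = bio_list_pop(&bio_list_on_stack)))
		submit_bio_noacct_nocheck(bio);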
1723 static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw) in tg_within_limit() argument
1731 if (bio_flagged(bio, BIO_BPS_THROTTLED)) in tg_within_limit()
1733 tg_dispatch_iops_time(tg, bio) == 0; in tg_within_limit()
1742 tg_dispatch_bps_time(tg, bio) == 0) in tg_within_limit()
1743 throtl_charge_bps_bio(tg, bio); in tg_within_limit()
1748 return tg_dispatch_time(tg, bio) == 0; in tg_within_limit()
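tg_within_limit() is the fast-path check used at submission: a bio that already carries BIO_BPS_THROTTLED only needs the iops budget; otherwise a zero bps wait lets the bytes be charged immediately before the combined check decides. The matches omit the branch preconditions; mainline very likely also requires the per-direction queue to be empty so queued bios keep FIFO order, which the sketch below does not show:

static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
{
	/* bytes were charged further down the tree: only iops matters */
	if (bio_flagged(bio, BIO_BPS_THROTTLED))
		return tg_dispatch_iops_time(tg, bio) == 0;

	/* opportunistically charge bps when the bio can pass right now */
	if (tg_dispatch_bps_time(tg, bio) == 0)
		throtl_charge_bps_bio(tg, bio);

	return tg_dispatch_time(tg, bio) == 0;
}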
1751 bool __blk_throtl_bio(struct bio *bio) in __blk_throtl_bio() argument
1753 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in __blk_throtl_bio()
1754 struct blkcg_gq *blkg = bio->bi_blkg; in __blk_throtl_bio()
1758 bool rw = bio_data_dir(bio); in __blk_throtl_bio()
1767 if (tg_within_limit(tg, bio, rw)) { in __blk_throtl_bio()
1769 throtl_charge_iops_bio(tg, bio); in __blk_throtl_bio()
1783 } else if (bio_issue_as_root_blkg(bio)) { in __blk_throtl_bio()
1792 throtl_charge_bps_bio(tg, bio); in __blk_throtl_bio()
1793 throtl_charge_iops_bio(tg, bio); in __blk_throtl_bio()
1808 bio_set_flag(bio, BIO_BPS_THROTTLED); in __blk_throtl_bio()
1816 tg->bytes_disp[rw], bio->bi_iter.bi_size, in __blk_throtl_bio()
1822 throtl_add_bio_tg(bio, qn, tg); in __blk_throtl_bio()
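__blk_throtl_bio() ties it together. Under the queue lock it walks up the blkg tree: at each level a within-limit bio is charged for iops and climbs; bios issued on behalf of the root blkg are never throttled but are still charged both budgets so the accounting stays coherent; the first over-limit level queues the bio with throtl_add_bio_tg() (after the trace message that dumps bytes_disp and the bio size, source line 1816) and reports it as throttled. Reaching the top of the tree sets BIO_BPS_THROTTLED (source line 1808) and lets the bio proceed. A condensed sketch; at_top_of_tree stands in for the real parent-walk termination test:

	while (true) {
		if (tg_within_limit(tg, bio, rw)) {
			/* within budget at this level: charge iops */
			throtl_charge_iops_bio(tg, bio);
		} else if (bio_issue_as_root_blkg(bio)) {
			/* root-issued: bypass, but keep accounting sane */
			throtl_charge_bps_bio(tg, bio);
			throtl_charge_iops_bio(tg, bio);
		} else {
			break;			/* throttle at this level */
		}

		if (at_top_of_tree) {		/* hypothetical test */
			bio_set_flag(bio, BIO_BPS_THROTTLED);
			goto out_unlock;	/* bio proceeds */
		}
		/* elided: advance tg/qn one level toward the root */
	}

	throtl_add_bio_tg(bio, qn, tg);	/* park it; the timer resumes it */
	throttled = true;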