Lines matching refs:READ in block/blk-throttle.c

332 INIT_LIST_HEAD(&sq->queued[READ]); in throtl_service_queue_init()
356 for (rw = READ; rw <= WRITE; rw++) { in throtl_pd_alloc()
362 tg->bps[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
364 tg->iops[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
366 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
368 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
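
The throtl_pd_alloc() lines above set up the per-direction, per-level limit
tables that most of the references below index into. A minimal userspace
sketch of that layout, assuming the kernel's READ=0/WRITE=1 and
LIMIT_LOW/LIMIT_MAX encodings (struct tg_limits and tg_limits_init are
illustrative names, not kernel API):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };                 /* I/O direction index */
enum { LIMIT_LOW = 0, LIMIT_MAX, LIMIT_CNT }; /* limit level index */

struct tg_limits {
	uint64_t     bps[2][LIMIT_CNT];  /* bytes/sec, indexed [rw][limit] */
	unsigned int iops[2][LIMIT_CNT]; /* IOs/sec,   indexed [rw][limit] */
};

/* Mirror throtl_pd_alloc() above: "no limit" sentinels at LIMIT_MAX,
 * low limits disabled (0) until a user configures them. */
static void tg_limits_init(struct tg_limits *tg)
{
	int rw;

	for (rw = READ; rw <= WRITE; rw++) {
		tg->bps[rw][LIMIT_MAX]  = UINT64_MAX;
		tg->iops[rw][LIMIT_MAX] = UINT_MAX;
		tg->bps[rw][LIMIT_LOW]  = 0;
		tg->iops[rw][LIMIT_LOW] = 0;
	}
}

int main(void)
{
	struct tg_limits tg;

	tg_limits_init(&tg);
	printf("read bps max: %llu\n",
	       (unsigned long long)tg.bps[READ][LIMIT_MAX]);
	return 0;
}

The rw <= WRITE loop bound works because READ and WRITE are the adjacent
values 0 and 1; the same idiom appears in tg_update_has_rules(),
blk_throtl_dispatch_work_fn() and throtl_update_latency_buckets() below.
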
424 for (rw = READ; rw <= WRITE; rw++) { in tg_update_has_rules()
457 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
458 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { in blk_throtl_update_limit_valid()
478 tg->bps[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
480 tg->iops[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
653 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
671 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
687 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
756 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim, in throtl_trim_slice()
813 if (tg->service_queue.nr_queued[READ]) in tg_update_carryover()
814 __tg_update_carryover(tg, READ); in tg_update_carryover()
820 tg->carryover_bytes[READ], tg->carryover_bytes[WRITE], in tg_update_carryover()
821 tg->carryover_ios[READ], tg->carryover_ios[WRITE]); in tg_update_carryover()
1009 bio = throtl_peek_queued(&sq->queued[READ]); in tg_update_disptime()
1092 while ((bio = throtl_peek_queued(&sq->queued[READ])) && in throtl_dispatch_tg()
1136 if (sq->nr_queued[READ] || sq->nr_queued[WRITE]) in throtl_select_dispatch()
1195 sq->nr_queued[READ] + sq->nr_queued[WRITE], in throtl_pending_timer_fn()
1196 sq->nr_queued[READ], sq->nr_queued[WRITE]); in throtl_pending_timer_fn()
1257 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_dispatch_work_fn()
1314 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1315 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1353 throtl_start_new_slice(tg, READ, false); in tg_conf_updated()
1437 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1449 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
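
The two cftype .private initializers above encode which limit a given
cgroup file controls as a byte offset into struct throtl_grp, so a single
show/store handler can service the read/write bps/iops files alike. A
sketch of the same trick, reusing struct tg_limits from the first sketch
(file_desc, bps_files and tg_read_u64 are illustrative, not kernel API):

#include <stddef.h>

struct file_desc {
	const char *name;
	size_t offset; /* offset of a u64 limit inside struct tg_limits */
};

/* Generic getter: one function serves every u64-valued file, e.g.
 * tg_read_u64(tg, bps_files[0].offset). */
static uint64_t tg_read_u64(const struct tg_limits *tg, size_t offset)
{
	return *(const uint64_t *)((const char *)tg + offset);
}

static const struct file_desc bps_files[] = {
	{ "read_bps",  offsetof(struct tg_limits, bps[READ][LIMIT_MAX]) },
	{ "write_bps", offsetof(struct tg_limits, bps[WRITE][LIMIT_MAX]) },
};
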
1504 if (tg->bps_conf[READ][off] == bps_dft && in tg_prfill_limit()
1506 tg->iops_conf[READ][off] == iops_dft && in tg_prfill_limit()
1513 if (tg->bps_conf[READ][off] != U64_MAX) in tg_prfill_limit()
1515 tg->bps_conf[READ][off]); in tg_prfill_limit()
1519 if (tg->iops_conf[READ][off] != UINT_MAX) in tg_prfill_limit()
1521 tg->iops_conf[READ][off]); in tg_prfill_limit()
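
tg_prfill_limit() above relies on the same sentinels: a limit still equal
to U64_MAX/UINT_MAX was never configured and is skipped when formatting
the cgroup file, and if every value is at its default the whole line is
suppressed. A sketch of that "print only what the user set" pattern for
the READ direction (tg_print_read_limits is an illustrative name, and the
rbps/riops keys are borrowed for flavor):

static void tg_print_read_limits(const struct tg_limits *tg, int off)
{
	/* Sentinels mean "not configured"; emit nothing for them,
	 * mirroring the != U64_MAX / != UINT_MAX tests above. */
	if (tg->bps[READ][off] != UINT64_MAX)
		printf(" rbps=%llu", (unsigned long long)tg->bps[READ][off]);
	if (tg->iops[READ][off] != UINT_MAX)
		printf(" riops=%u", tg->iops[READ][off]);
	putchar('\n');
}
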
1571 v[0] = tg->bps_conf[READ][index]; in tg_set_limit()
1573 v[2] = tg->iops_conf[READ][index]; in tg_set_limit()
1617 tg->bps_conf[READ][index] = v[0]; in tg_set_limit()
1619 tg->iops_conf[READ][index] = v[2]; in tg_set_limit()
1623 tg->bps[READ][index] = v[0]; in tg_set_limit()
1625 tg->iops[READ][index] = v[2]; in tg_set_limit()
1628 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW], in tg_set_limit()
1629 tg->bps_conf[READ][LIMIT_MAX]); in tg_set_limit()
1632 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW], in tg_set_limit()
1633 tg->iops_conf[READ][LIMIT_MAX]); in tg_set_limit()
1640 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || in tg_set_limit()
1644 tg->bps[READ][LIMIT_LOW] = 0; in tg_set_limit()
1646 tg->iops[READ][LIMIT_LOW] = 0; in tg_set_limit()
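
Lines 1628-1633 above clamp the effective low limit against the configured
max, so the guaranteed floor can never sit above the hard ceiling. The
same invariant in isolation, as a sketch reusing struct tg_limits
(min_u64/min_uint stand in for the kernel's min() macro):

static inline uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

static inline unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Effective LIMIT_LOW = min(configured low, configured max). */
static void tg_apply_low_limits(const struct tg_limits *conf,
				struct tg_limits *eff)
{
	int rw;

	for (rw = READ; rw <= WRITE; rw++) {
		eff->bps[rw][LIMIT_LOW] = min_u64(conf->bps[rw][LIMIT_LOW],
						  conf->bps[rw][LIMIT_MAX]);
		eff->iops[rw][LIMIT_LOW] = min_uint(conf->iops[rw][LIMIT_LOW],
						    conf->iops[rw][LIMIT_MAX]);
	}
}
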
1757 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]) in __tg_last_low_overflow_time()
1758 rtime = tg->last_low_overflow_time[READ]; in __tg_last_low_overflow_time()
1780 if (!parent->bps[READ][LIMIT_LOW] && in tg_last_low_overflow_time()
1781 !parent->iops[READ][LIMIT_LOW] && in tg_last_low_overflow_time()
1838 if (throtl_low_limit_reached(tg, READ) && in throtl_tg_can_upgrade()
2004 if (tg->bps[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2005 bps = tg->last_bytes_disp[READ] * HZ; in throtl_downgrade_check()
2007 if (bps >= tg->bps[READ][LIMIT_LOW]) in throtl_downgrade_check()
2008 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2018 if (tg->iops[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2019 iops = tg->last_io_disp[READ] * HZ / elapsed_time; in throtl_downgrade_check()
2020 if (iops >= tg->iops[READ][LIMIT_LOW]) in throtl_downgrade_check()
2021 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2037 tg->last_bytes_disp[READ] = 0; in throtl_downgrade_check()
2039 tg->last_io_disp[READ] = 0; in throtl_downgrade_check()
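
The throtl_downgrade_check() references above turn the bytes/IOs
dispatched since the last check into a per-second rate and, whenever a
group still meets its low limit, record the time; the counters are then
reset for the next window. Note that line 2005 only multiplies by HZ; the
64-bit division by elapsed_time happens on an adjacent source line that
does not contain READ and so is not listed (compare the iops case on line
2019, where the division is visible). A sketch of the bps half of that
check, reusing the includes from the first sketch (SKETCH_HZ and the
jiffies-style arguments are illustrative):

/* bps = bytes * ticks-per-second / elapsed ticks. */
#define SKETCH_HZ 250UL

static void downgrade_check_read(uint64_t last_bytes_disp, uint64_t low_bps,
				 unsigned long elapsed_time, /* ticks */
				 unsigned long now,          /* ticks */
				 unsigned long *last_low_overflow_time)
{
	if (!low_bps || !elapsed_time)
		return;

	if (last_bytes_disp * SKETCH_HZ / elapsed_time >= low_bps)
		*last_low_overflow_time = now; /* low limit still met */
}
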
2074 for (rw = READ; rw <= WRITE; rw++) { in throtl_update_latency_buckets()
2105 for (rw = READ; rw <= WRITE; rw++) { in throtl_update_latency_buckets()
2131 td->avg_buckets[READ][i].latency, in throtl_update_latency_buckets()
2132 td->avg_buckets[READ][i].valid, in throtl_update_latency_buckets()
2243 rw == READ ? 'R' : 'W', in __blk_throtl_bio()
2247 sq->nr_queued[READ], sq->nr_queued[WRITE]); in __blk_throtl_bio()
2371 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2373 if (!td->latency_buckets[READ]) { in blk_throtl_init()
2380 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2399 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2414 free_percpu(q->td->latency_buckets[READ]); in blk_throtl_exit()
2435 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register()
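
blk_throtl_init() and blk_throtl_register() above keep one latency bucket
table per I/O direction: the READ and WRITE tables are allocated and freed
separately, and blk_throtl_register() seeds the average read latency with
a baseline (DFL_HD_BASELINE_LATENCY, which by its HD prefix appears to
target rotational disks). A userspace stand-in for the per-direction
allocation, with calloc() replacing __alloc_percpu() (the bucket count and
field names are assumptions, not the kernel's exact layout):

#include <stdlib.h>

#define SKETCH_LATENCY_BUCKETS 9 /* assumed bucket count */

struct latency_bucket_sketch {
	unsigned long total_latency;
	int samples;
};

/* One zeroed bucket array per direction, as latency_buckets[READ] and
 * latency_buckets[WRITE] are allocated separately above. */
static struct latency_bucket_sketch *alloc_latency_buckets(void)
{
	return calloc(SKETCH_LATENCY_BUCKETS,
		      sizeof(struct latency_bucket_sketch));
}
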