Lines matching refs:hctx in block/blk-mq-sched.c

51 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_mark_restart_hctx()  argument
53 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
56 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
60 void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in __blk_mq_sched_restart() argument
62 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in __blk_mq_sched_restart()
73 blk_mq_run_hw_queue(hctx, true); in __blk_mq_sched_restart()
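
The two helpers above implement the SCHED_RESTART handshake: the mark side peeks at the bit with a cheap test before paying for an atomic set, and the restart side clears the bit before kicking the hardware queue so a concurrent mark is not lost. A stand-alone userspace sketch of that pattern follows, using C11 atomics; toy_hctx, toy_mark_restart, toy_restart and run_queue are hypothetical stand-ins, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_S_SCHED_RESTART (1u << 0)

struct toy_hctx {
    atomic_uint state;              /* models the hctx->state bit field */
};

/* Mark that a restart is wanted; skip the atomic RMW if the bit is already set. */
static void toy_mark_restart(struct toy_hctx *hctx)
{
    if (atomic_load_explicit(&hctx->state, memory_order_relaxed) &
        TOY_S_SCHED_RESTART)
        return;                     /* cheap read avoids dirtying the cache line */
    atomic_fetch_or(&hctx->state, TOY_S_SCHED_RESTART);
}

static void run_queue(struct toy_hctx *hctx, bool async)
{
    (void)hctx;
    printf("re-running hardware queue (async=%d)\n", async);
}

/* Clear the flag first, then re-run, so a mark set in between is never lost. */
static void toy_restart(struct toy_hctx *hctx)
{
    unsigned int old = atomic_fetch_and(&hctx->state, ~TOY_S_SCHED_RESTART);

    if (old & TOY_S_SCHED_RESTART)
        run_queue(hctx, true);
}

int main(void)
{
    struct toy_hctx h = { .state = 0 };

    toy_mark_restart(&h);
    toy_restart(&h);                /* bit was set: queue runs once */
    toy_restart(&h);                /* bit already clear: nothing to do */
    return 0;
}
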
87 struct blk_mq_hw_ctx *hctx = in blk_mq_dispatch_hctx_list() local
94 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
103 return blk_mq_dispatch_rq_list(hctx, &hctx_list, count); in blk_mq_dispatch_hctx_list()
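
blk_mq_dispatch_hctx_list peels the leading run of requests that share the first entry's mq_hctx off a mixed list and dispatches them as one batch. A minimal userspace model of that grouping step is sketched below; the toy_rq type and dispatch_leading_run helper are hypothetical, and a plain array stands in for the kernel's linked list.

#include <stddef.h>
#include <stdio.h>

/* Toy request: only the owning hardware-queue id and a tag for show. */
struct toy_rq {
    int hctx_id;
    int tag;
};

/*
 * Take the leading run of requests that share the first entry's hctx and
 * hand them off together, leaving the rest for the next pass.  Returns the
 * number of entries consumed.
 */
static size_t dispatch_leading_run(const struct toy_rq *rqs, size_t n)
{
    size_t count = 1;

    while (count < n && rqs[count].hctx_id == rqs[0].hctx_id)
        count++;

    printf("dispatching %zu request(s) to hctx %d\n", count, rqs[0].hctx_id);
    return count;
}

int main(void)
{
    struct toy_rq list[] = {
        { .hctx_id = 0, .tag = 1 }, { .hctx_id = 0, .tag = 2 },
        { .hctx_id = 1, .tag = 3 }, { .hctx_id = 1, .tag = 4 },
    };
    size_t n = sizeof(list) / sizeof(list[0]);
    size_t off = 0;

    while (off < n)                 /* repeat until the mixed list is empty */
        off += dispatch_leading_run(list + off, n - off);
    return 0;
}
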
116 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in __blk_mq_do_dispatch_sched() argument
118 struct request_queue *q = hctx->queue; in __blk_mq_do_dispatch_sched()
126 if (hctx->dispatch_busy) in __blk_mq_do_dispatch_sched()
129 max_dispatch = hctx->queue->nr_requests; in __blk_mq_do_dispatch_sched()
135 if (e->type->ops.has_work && !e->type->ops.has_work(hctx)) in __blk_mq_do_dispatch_sched()
138 if (!list_empty_careful(&hctx->dispatch)) { in __blk_mq_do_dispatch_sched()
147 rq = e->type->ops.dispatch_request(hctx); in __blk_mq_do_dispatch_sched()
170 if (rq->mq_hctx != hctx) in __blk_mq_do_dispatch_sched()
199 dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count); in __blk_mq_do_dispatch_sched()
207 static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_sched() argument
212 ret = __blk_mq_do_dispatch_sched(hctx); in blk_mq_do_dispatch_sched()
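
__blk_mq_do_dispatch_sched pulls requests from the elevator in batches: one at a time while the device is pushing back (dispatch_busy), up to nr_requests otherwise, and it bails out with -EAGAIN whenever leftovers are sitting on hctx->dispatch; blk_mq_do_dispatch_sched simply repeats the pass while progress is made. The sketch below models only that control flow; toy_sched and toy_do_dispatch_sched are hypothetical names, and no real budget or tag accounting is attempted.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy state standing in for the elevator and the hardware queue. */
struct toy_sched {
    int pending;                    /* requests the elevator still holds */
    bool dispatch_busy;             /* device pushed back recently */
    bool leftovers;                 /* something is parked on hctx->dispatch */
    int nr_requests;                /* normal batch limit */
};

/*
 * One pass: pull at most max_dispatch requests from the scheduler, but bail
 * out with -EAGAIN if leftovers have to be flushed first.  Returns 1 if a
 * batch was issued, 0 if there was nothing to do.
 */
static int toy_do_dispatch_sched(struct toy_sched *s)
{
    int max_dispatch = s->dispatch_busy ? 1 : s->nr_requests;
    int count = 0;

    do {
        if (s->leftovers)
            return -EAGAIN;         /* the leftover list takes priority */
        if (!s->pending)
            break;                  /* elevator reports no more work */
        s->pending--;
        count++;
    } while (count < max_dispatch);

    if (count)
        printf("issued a batch of %d request(s)\n", count);
    return count > 0;
}

int main(void)
{
    struct toy_sched s = { .pending = 5, .nr_requests = 2 };

    /* Keep issuing batches while the previous pass made progress. */
    while (toy_do_dispatch_sched(&s) == 1)
        ;
    return 0;
}
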
218 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_next_ctx() argument
221 unsigned short idx = ctx->index_hw[hctx->type]; in blk_mq_next_ctx()
223 if (++idx == hctx->nr_ctx) in blk_mq_next_ctx()
226 return hctx->ctxs[idx]; in blk_mq_next_ctx()
237 static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_ctx() argument
239 struct request_queue *q = hctx->queue; in blk_mq_do_dispatch_ctx()
241 struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from); in blk_mq_do_dispatch_ctx()
248 if (!list_empty_careful(&hctx->dispatch)) { in blk_mq_do_dispatch_ctx()
253 if (!sbitmap_any_bit_set(&hctx->ctx_map)) in blk_mq_do_dispatch_ctx()
260 rq = blk_mq_dequeue_from_ctx(hctx, ctx); in blk_mq_do_dispatch_ctx()
284 ctx = blk_mq_next_ctx(hctx, rq->mq_ctx); in blk_mq_do_dispatch_ctx()
288 WRITE_ONCE(hctx->dispatch_from, ctx); in blk_mq_do_dispatch_ctx()
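
Without an elevator, blk_mq_do_dispatch_ctx walks the software contexts round-robin, taking one request per context and remembering where it stopped in hctx->dispatch_from so no context is starved; blk_mq_next_ctx supplies the wrap-around successor. A small userspace model of that fairness scheme follows, with toy_hctx, toy_next_ctx and toy_do_dispatch_ctx as hypothetical stand-ins and simple counters in place of real queues.

#include <stdio.h>

#define NR_CTX 4

/* Toy per-CPU software queues: each is just a count of queued requests. */
struct toy_hctx {
    int ctx_pending[NR_CTX];
    int dispatch_from;              /* where the previous pass stopped */
};

/* Wrap-around successor, in the spirit of blk_mq_next_ctx() above. */
static int toy_next_ctx(int idx)
{
    return (idx + 1) % NR_CTX;
}

/*
 * Dequeue one request per software context, starting where the previous
 * call left off, so no context is starved.  Returns 1 while progress is
 * possible, 0 once every context is empty.
 */
static int toy_do_dispatch_ctx(struct toy_hctx *hctx)
{
    int idx = hctx->dispatch_from;
    int i;

    for (i = 0; i < NR_CTX; i++, idx = toy_next_ctx(idx)) {
        if (hctx->ctx_pending[idx]) {
            hctx->ctx_pending[idx]--;
            printf("dispatched one request from ctx %d\n", idx);
            hctx->dispatch_from = toy_next_ctx(idx);
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    struct toy_hctx h = { .ctx_pending = { 2, 0, 1, 3 } };

    while (toy_do_dispatch_ctx(&h))
        ;
    return 0;
}
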
292 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in __blk_mq_sched_dispatch_requests() argument
294 struct request_queue *q = hctx->queue; in __blk_mq_sched_dispatch_requests()
303 if (!list_empty_careful(&hctx->dispatch)) { in __blk_mq_sched_dispatch_requests()
304 spin_lock(&hctx->lock); in __blk_mq_sched_dispatch_requests()
305 if (!list_empty(&hctx->dispatch)) in __blk_mq_sched_dispatch_requests()
306 list_splice_init(&hctx->dispatch, &rq_list); in __blk_mq_sched_dispatch_requests()
307 spin_unlock(&hctx->lock); in __blk_mq_sched_dispatch_requests()
324 blk_mq_sched_mark_restart_hctx(hctx); in __blk_mq_sched_dispatch_requests()
325 if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) { in __blk_mq_sched_dispatch_requests()
327 ret = blk_mq_do_dispatch_sched(hctx); in __blk_mq_sched_dispatch_requests()
329 ret = blk_mq_do_dispatch_ctx(hctx); in __blk_mq_sched_dispatch_requests()
332 ret = blk_mq_do_dispatch_sched(hctx); in __blk_mq_sched_dispatch_requests()
333 } else if (hctx->dispatch_busy) { in __blk_mq_sched_dispatch_requests()
335 ret = blk_mq_do_dispatch_ctx(hctx); in __blk_mq_sched_dispatch_requests()
337 blk_mq_flush_busy_ctxs(hctx, &rq_list); in __blk_mq_sched_dispatch_requests()
338 blk_mq_dispatch_rq_list(hctx, &rq_list, 0); in __blk_mq_sched_dispatch_requests()
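
__blk_mq_sched_dispatch_requests establishes the ordering: requests the driver already bounced back onto hctx->dispatch are spliced off under hctx->lock and issued first, and only if they all go through does the elevator (or the per-ctx path) get a turn. The sketch below models that priority with a pthread mutex standing in for the hctx lock; toy_hctx, toy_issue and toy_sched_dispatch are hypothetical, and the issue step always pretends to succeed.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy hardware queue: a lock-protected leftover count plus a scheduler count. */
struct toy_hctx {
    pthread_mutex_t lock;
    int leftover;                   /* models hctx->dispatch */
    int sched_pending;              /* models requests held by the elevator */
};

static bool toy_issue(struct toy_hctx *hctx, int n, const char *src)
{
    (void)hctx;
    printf("issued %d request(s) from %s\n", n, src);
    return true;                    /* pretend the device accepted everything */
}

/*
 * Leftovers the driver already bounced are spliced off under the lock and
 * issued first; only if they all go through does the elevator get a turn.
 */
static void toy_sched_dispatch(struct toy_hctx *hctx)
{
    int grabbed = 0;

    if (hctx->leftover) {
        pthread_mutex_lock(&hctx->lock);
        grabbed = hctx->leftover;   /* splice into a local list */
        hctx->leftover = 0;
        pthread_mutex_unlock(&hctx->lock);
    }

    if (grabbed && !toy_issue(hctx, grabbed, "leftover list"))
        return;                     /* device still busy: leave the elevator alone */

    if (hctx->sched_pending) {
        toy_issue(hctx, hctx->sched_pending, "elevator");
        hctx->sched_pending = 0;
    }
}

int main(void)
{
    static struct toy_hctx h = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .leftover = 2,
        .sched_pending = 3,
    };

    toy_sched_dispatch(&h);
    return 0;
}
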
344 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_dispatch_requests() argument
346 struct request_queue *q = hctx->queue; in blk_mq_sched_dispatch_requests()
349 if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) in blk_mq_sched_dispatch_requests()
352 hctx->run++; in blk_mq_sched_dispatch_requests()
358 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) { in blk_mq_sched_dispatch_requests()
359 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) in blk_mq_sched_dispatch_requests()
360 blk_mq_run_hw_queue(hctx, true); in blk_mq_sched_dispatch_requests()
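
The public entry point retries the internal dispatch once when it returns -EAGAIN (leftovers still pending) and otherwise punts to an asynchronous queue run, which keeps flushes from being starved. A tiny model of that retry shape, with hypothetical toy_dispatch and toy_run_async helpers:

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Stand-in for the internal dispatch: asks to be re-run once, then succeeds. */
static int toy_dispatch(void)
{
    return ++attempts == 1 ? -EAGAIN : 0;
}

static void toy_run_async(void)
{
    printf("deferring to an asynchronous queue run\n");
}

int main(void)
{
    /* Retry once inline; if it still wants re-running, punt to a worker. */
    if (toy_dispatch() == -EAGAIN) {
        if (toy_dispatch() == -EAGAIN)
            toy_run_async();
    }
    printf("done after %d attempt(s)\n", attempts);
    return 0;
}
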
369 struct blk_mq_hw_ctx *hctx; in blk_mq_sched_bio_merge() local
379 hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); in blk_mq_sched_bio_merge()
380 type = hctx->type; in blk_mq_sched_bio_merge()
381 if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) || in blk_mq_sched_bio_merge()
407 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, in blk_mq_sched_bypass_insert() argument
433 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_sched_insert_request() local
437 if (blk_mq_sched_bypass_insert(hctx, rq)) { in blk_mq_sched_insert_request()
468 e->type->ops.insert_requests(hctx, &list, at_head); in blk_mq_sched_insert_request()
471 __blk_mq_insert_request(hctx, rq, at_head); in blk_mq_sched_insert_request()
477 blk_mq_run_hw_queue(hctx, async); in blk_mq_sched_insert_request()
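
blk_mq_sched_insert_request picks one of three destinations for a new request: the bypass path straight onto hctx->dispatch for flush and passthrough requests, the elevator's insert_requests hook when a scheduler is attached, or the plain software queue otherwise, optionally kicking the hardware queue afterwards. The decision tree is sketched below with hypothetical toy_rq and toy_queue types; the real checks involve request flags and elevator ops rather than plain booleans.

#include <stdbool.h>
#include <stdio.h>

/* Toy request and queue: plain booleans stand in for flags and elevator ops. */
struct toy_rq {
    bool is_flush;
    bool passthrough;
    bool at_head;
};

struct toy_queue {
    bool has_elevator;
};

/*
 * Pick a destination for a new request: bypass straight to the hctx dispatch
 * list, hand it to the elevator, or fall back to the software queue, then
 * optionally kick the hardware queue.
 */
static void toy_insert_request(struct toy_queue *q, struct toy_rq *rq,
                               bool run_queue)
{
    if (rq->is_flush || rq->passthrough)
        printf("bypass: straight onto the hctx dispatch list\n");
    else if (q->has_elevator)
        printf("handed to the elevator (at_head=%d)\n", rq->at_head);
    else
        printf("queued on the software context\n");

    if (run_queue)
        printf("kicking the hardware queue\n");
}

int main(void)
{
    struct toy_queue q = { .has_elevator = true };
    struct toy_rq flush = { .is_flush = true };
    struct toy_rq normal = { .at_head = false };

    toy_insert_request(&q, &flush, true);
    toy_insert_request(&q, &normal, true);
    return 0;
}
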
480 void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, in blk_mq_sched_insert_requests() argument
485 struct request_queue *q = hctx->queue; in blk_mq_sched_insert_requests()
494 e = hctx->queue->elevator; in blk_mq_sched_insert_requests()
496 e->type->ops.insert_requests(hctx, list, false); in blk_mq_sched_insert_requests()
503 if (!hctx->dispatch_busy && !run_queue_async) { in blk_mq_sched_insert_requests()
504 blk_mq_try_issue_list_directly(hctx, list); in blk_mq_sched_insert_requests()
508 blk_mq_insert_requests(hctx, ctx, list); in blk_mq_sched_insert_requests()
511 blk_mq_run_hw_queue(hctx, run_queue_async); in blk_mq_sched_insert_requests()
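
For a batch insert (for example when a plug is flushed), the list either goes to the elevator wholesale, is issued directly when the queue looks idle, or is parked in the software queues followed by an asynchronous run. A rough model of that three-way split, with toy_hctx and toy_insert_requests as hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct toy_hctx {
    bool has_elevator;
    bool dispatch_busy;
};

/*
 * Flushing a batch: with an elevator the whole list goes to it; without one,
 * an idle queue is worth a direct-issue attempt, while a busy one just gets
 * the batch queued followed by an asynchronous run.
 */
static void toy_insert_requests(struct toy_hctx *hctx, int nr_rqs,
                                bool run_queue_async)
{
    if (hctx->has_elevator) {
        printf("elevator takes all %d request(s)\n", nr_rqs);
    } else if (!hctx->dispatch_busy && !run_queue_async) {
        printf("trying to issue %d request(s) directly\n", nr_rqs);
        return;                     /* direct issue handles leftovers itself */
    } else {
        printf("inserted %d request(s) into the software queues\n", nr_rqs);
    }
    printf("running hw queue (async=%d)\n", run_queue_async);
}

int main(void)
{
    struct toy_hctx idle = { .has_elevator = false };
    struct toy_hctx busy = { .dispatch_busy = true };

    toy_insert_requests(&idle, 4, false);   /* direct issue path */
    toy_insert_requests(&busy, 4, false);   /* insert + run path */
    return 0;
}
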
517 struct blk_mq_hw_ctx *hctx, in blk_mq_sched_alloc_map_and_rqs() argument
521 hctx->sched_tags = q->sched_shared_tags; in blk_mq_sched_alloc_map_and_rqs()
525 hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx, in blk_mq_sched_alloc_map_and_rqs()
528 if (!hctx->sched_tags) in blk_mq_sched_alloc_map_and_rqs()
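
blk_mq_sched_alloc_map_and_rqs either points hctx->sched_tags at the queue-wide shared map or allocates a per-hctx map, and the teardown paths further down free only what was individually allocated. That ownership rule is sketched below in userspace terms; toy_tags, toy_alloc_sched_tags and toy_free_sched_tags are hypothetical, with malloc standing in for the real map-and-request allocation.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy tag map: either one shared map for every queue, or one map per hctx. */
struct toy_tags {
    int depth;
};

struct toy_queue {
    bool shared_tags;
    struct toy_tags *shared;        /* models q->sched_shared_tags */
};

struct toy_hctx {
    struct toy_tags *sched_tags;
};

static int toy_alloc_sched_tags(struct toy_queue *q, struct toy_hctx *hctx,
                                int depth)
{
    if (q->shared_tags) {
        hctx->sched_tags = q->shared;       /* borrow, do not allocate */
        return 0;
    }

    hctx->sched_tags = malloc(sizeof(*hctx->sched_tags));
    if (!hctx->sched_tags)
        return -ENOMEM;
    hctx->sched_tags->depth = depth;
    return 0;
}

static void toy_free_sched_tags(struct toy_queue *q, struct toy_hctx *hctx)
{
    if (!q->shared_tags)            /* the shared map is freed once, elsewhere */
        free(hctx->sched_tags);
    hctx->sched_tags = NULL;
}

int main(void)
{
    struct toy_queue q = { .shared_tags = false };
    struct toy_hctx h = { .sched_tags = NULL };

    if (!toy_alloc_sched_tags(&q, &h, 64))
        printf("allocated a per-hctx map of depth %d\n", h.sched_tags->depth);
    toy_free_sched_tags(&q, &h);
    return 0;
}
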
542 struct blk_mq_hw_ctx *hctx; in blk_mq_sched_tags_teardown() local
545 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_sched_tags_teardown()
546 if (hctx->sched_tags) { in blk_mq_sched_tags_teardown()
548 blk_mq_free_rq_map(hctx->sched_tags); in blk_mq_sched_tags_teardown()
549 hctx->sched_tags = NULL; in blk_mq_sched_tags_teardown()
579 struct blk_mq_hw_ctx *hctx; in blk_mq_init_sched() local
603 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_sched()
604 ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i); in blk_mq_init_sched()
615 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_sched()
617 ret = e->ops.init_hctx(hctx, i); in blk_mq_init_sched()
626 blk_mq_debugfs_register_sched_hctx(q, hctx); in blk_mq_init_sched()
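
blk_mq_init_sched walks every hardware queue (tag allocation, then the elevator's init_hctx hook and debugfs registration) and unwinds whatever already succeeded if any step fails; blk_mq_exit_sched at the end of the listing runs the matching per-hctx teardown. The unwind-on-error shape is modelled below with a hypothetical toy_init_hctx that deliberately fails partway through.

#include <errno.h>
#include <stdio.h>

#define NR_HCTX 4

/* Toy per-hctx scheduler state: nonzero means "initialised". */
static int sched_data[NR_HCTX];

/* Hypothetical per-hctx init that fails at index 2 to exercise the unwind. */
static int toy_init_hctx(int i)
{
    if (i == 2)
        return -ENOMEM;
    sched_data[i] = 1;
    return 0;
}

static void toy_exit_hctx(int i)
{
    sched_data[i] = 0;
}

/*
 * Initialise every hardware queue; if one step fails, tear down the queues
 * that already succeeded so no partial scheduler state is left behind.
 */
static int toy_init_sched(void)
{
    int i, ret;

    for (i = 0; i < NR_HCTX; i++) {
        ret = toy_init_hctx(i);
        if (ret) {
            while (i--)
                toy_exit_hctx(i);
            return ret;
        }
    }
    return 0;
}

int main(void)
{
    printf("init_sched returned %d\n", toy_init_sched());
    return 0;
}
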
645 struct blk_mq_hw_ctx *hctx; in blk_mq_sched_free_rqs() local
652 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_sched_free_rqs()
653 if (hctx->sched_tags) in blk_mq_sched_free_rqs()
655 hctx->sched_tags, i); in blk_mq_sched_free_rqs()
662 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_sched() local
666 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_sched()
667 blk_mq_debugfs_unregister_sched_hctx(hctx); in blk_mq_exit_sched()
668 if (e->type->ops.exit_hctx && hctx->sched_data) { in blk_mq_exit_sched()
669 e->type->ops.exit_hctx(hctx, i); in blk_mq_exit_sched()
670 hctx->sched_data = NULL; in blk_mq_exit_sched()
672 flags = hctx->flags; in blk_mq_exit_sched()