Lines matching refs: hctx

50 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
52 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
59 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
61 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
62 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
63 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
69 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
72 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
74 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
75 sbitmap_set_bit(&hctx->ctx_map, bit); in blk_mq_hctx_mark_pending()
78 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
81 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_clear_pending()
83 sbitmap_clear_bit(&hctx->ctx_map, bit); in blk_mq_hctx_clear_pending()
306 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters() local
309 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
310 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_wake_waiters()
311 blk_mq_tag_wakeup_all(hctx->tags, true); in blk_mq_wake_waiters()
351 struct blk_mq_hw_ctx *hctx = data->hctx; in blk_mq_rq_ctx_init() local
357 rq->mq_hctx = hctx; in blk_mq_rq_ctx_init()
427 blk_mq_add_active_requests(data->hctx, nr); in __blk_mq_alloc_requests_batch()
451 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); in __blk_mq_alloc_requests()
475 blk_mq_tag_busy(data->hctx); in __blk_mq_alloc_requests()
513 blk_mq_inc_active_requests(data->hctx); in __blk_mq_alloc_requests()
653 data.hctx = xa_load(&q->hctx_table, hctx_idx); in blk_mq_alloc_request_hctx()
654 if (!blk_mq_hw_queue_mapped(data.hctx)) in blk_mq_alloc_request_hctx()
656 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); in blk_mq_alloc_request_hctx()
664 blk_mq_tag_busy(data.hctx); in blk_mq_alloc_request_hctx()
674 blk_mq_inc_active_requests(data.hctx); in blk_mq_alloc_request_hctx()
709 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request() local
717 blk_mq_dec_active_requests(hctx); in __blk_mq_free_request()
718 blk_mq_put_tag(hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
721 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); in __blk_mq_free_request()
722 blk_mq_sched_restart(hctx); in __blk_mq_free_request()
1059 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, in blk_mq_flush_tag_batch() argument
1062 struct request_queue *q = hctx->queue; in blk_mq_flush_tag_batch()
1064 blk_mq_sub_active_requests(hctx, nr_tags); in blk_mq_flush_tag_batch()
1066 blk_mq_put_tags(hctx->tags, tag_array, nr_tags); in blk_mq_flush_tag_batch()
1322 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_execute_rq_nowait() local
1335 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING); in blk_execute_rq_nowait()
1383 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_execute_rq() local
1396 blk_mq_run_hw_queue(hctx, false); in blk_execute_rq()
1617 struct blk_mq_hw_ctx *hctx; in blk_mq_timeout_work() local
1660 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_timeout_work()
1662 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_timeout_work()
1663 blk_mq_tag_idle(hctx); in blk_mq_timeout_work()
1670 struct blk_mq_hw_ctx *hctx; member
1677 struct blk_mq_hw_ctx *hctx = flush_data->hctx; in flush_busy_ctx() local
1678 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in flush_busy_ctx()
1679 enum hctx_type type = hctx->type; in flush_busy_ctx()
1692 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) in blk_mq_flush_busy_ctxs() argument
1695 .hctx = hctx, in blk_mq_flush_busy_ctxs()
1699 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); in blk_mq_flush_busy_ctxs()
1704 struct blk_mq_hw_ctx *hctx; member
1712 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; in dispatch_rq_from_ctx() local
1713 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in dispatch_rq_from_ctx()
1714 enum hctx_type type = hctx->type; in dispatch_rq_from_ctx()
1728 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_dequeue_from_ctx() argument
1731 unsigned off = start ? start->index_hw[hctx->type] : 0; in blk_mq_dequeue_from_ctx()
1733 .hctx = hctx, in blk_mq_dequeue_from_ctx()
1737 __sbitmap_for_each_set(&hctx->ctx_map, off, in blk_mq_dequeue_from_ctx()
1771 struct blk_mq_hw_ctx *hctx; in blk_mq_dispatch_wake() local
1773 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); in blk_mq_dispatch_wake()
1775 spin_lock(&hctx->dispatch_wait_lock); in blk_mq_dispatch_wake()
1780 sbq = &hctx->tags->bitmap_tags; in blk_mq_dispatch_wake()
1783 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_dispatch_wake()
1785 blk_mq_run_hw_queue(hctx, true); in blk_mq_dispatch_wake()
1795 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, in blk_mq_mark_tag_wait() argument
1803 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && in blk_mq_mark_tag_wait()
1804 !(blk_mq_is_shared_tags(hctx->flags))) { in blk_mq_mark_tag_wait()
1805 blk_mq_sched_mark_restart_hctx(hctx); in blk_mq_mark_tag_wait()
1818 wait = &hctx->dispatch_wait; in blk_mq_mark_tag_wait()
1823 sbq = &hctx->tags->breserved_tags; in blk_mq_mark_tag_wait()
1825 sbq = &hctx->tags->bitmap_tags; in blk_mq_mark_tag_wait()
1826 wq = &bt_wait_ptr(sbq, hctx)->wait; in blk_mq_mark_tag_wait()
1829 spin_lock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1831 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1863 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1874 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1889 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) in blk_mq_update_dispatch_busy() argument
1893 ewma = hctx->dispatch_busy; in blk_mq_update_dispatch_busy()
1903 hctx->dispatch_busy = ewma; in blk_mq_update_dispatch_busy()
1924 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_prep_dispatch_rq() local
1944 if (!blk_mq_mark_tag_wait(hctx, rq)) { in blk_mq_prep_dispatch_rq()
1980 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued, in blk_mq_commit_rqs() argument
1983 if (hctx->queue->mq_ops->commit_rqs && queued) { in blk_mq_commit_rqs()
1984 trace_block_unplug(hctx->queue, queued, !from_schedule); in blk_mq_commit_rqs()
1985 hctx->queue->mq_ops->commit_rqs(hctx); in blk_mq_commit_rqs()
1992 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, in blk_mq_dispatch_rq_list() argument
1996 struct request_queue *q = hctx->queue; in blk_mq_dispatch_rq_list()
2014 WARN_ON_ONCE(hctx != rq->mq_hctx); in blk_mq_dispatch_rq_list()
2030 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_dispatch_rq_list()
2050 blk_mq_commit_rqs(hctx, queued, false); in blk_mq_dispatch_rq_list()
2060 ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) || in blk_mq_dispatch_rq_list()
2061 blk_mq_is_shared_tags(hctx->flags)); in blk_mq_dispatch_rq_list()
2066 spin_lock(&hctx->lock); in blk_mq_dispatch_rq_list()
2067 list_splice_tail_init(list, &hctx->dispatch); in blk_mq_dispatch_rq_list()
2068 spin_unlock(&hctx->lock); in blk_mq_dispatch_rq_list()
2105 needs_restart = blk_mq_sched_needs_restart(hctx); in blk_mq_dispatch_rq_list()
2109 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) in blk_mq_dispatch_rq_list()
2110 blk_mq_run_hw_queue(hctx, true); in blk_mq_dispatch_rq_list()
2112 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); in blk_mq_dispatch_rq_list()
2114 blk_mq_update_dispatch_busy(hctx, true); in blk_mq_dispatch_rq_list()
2118 blk_mq_update_dispatch_busy(hctx, false); in blk_mq_dispatch_rq_list()
2122 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_first_mapped_cpu() argument
2124 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); in blk_mq_first_mapped_cpu()
2127 cpu = cpumask_first(hctx->cpumask); in blk_mq_first_mapped_cpu()
2135 static bool blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_empty_cpumask() argument
2137 return hctx->next_cpu >= nr_cpu_ids; in blk_mq_hctx_empty_cpumask()
2146 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_next_cpu() argument
2149 int next_cpu = hctx->next_cpu; in blk_mq_hctx_next_cpu()
2152 if (hctx->queue->nr_hw_queues == 1 || blk_mq_hctx_empty_cpumask(hctx)) in blk_mq_hctx_next_cpu()
2155 if (--hctx->next_cpu_batch <= 0) { in blk_mq_hctx_next_cpu()
2157 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, in blk_mq_hctx_next_cpu()
2160 next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_hctx_next_cpu()
2161 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_hctx_next_cpu()
2178 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
2179 hctx->next_cpu_batch = 1; in blk_mq_hctx_next_cpu()
2183 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
2194 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) in blk_mq_delay_run_hw_queue() argument
2196 if (unlikely(blk_mq_hctx_stopped(hctx))) in blk_mq_delay_run_hw_queue()
2198 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, in blk_mq_delay_run_hw_queue()
2212 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_run_hw_queue() argument
2221 might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING); in blk_mq_run_hw_queue()
2231 __blk_mq_run_dispatch_ops(hctx->queue, false, in blk_mq_run_hw_queue()
2232 need_run = !blk_queue_quiesced(hctx->queue) && in blk_mq_run_hw_queue()
2233 blk_mq_hctx_has_pending(hctx)); in blk_mq_run_hw_queue()
2238 if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) { in blk_mq_run_hw_queue()
2239 blk_mq_delay_run_hw_queue(hctx, 0); in blk_mq_run_hw_queue()
2243 blk_mq_run_dispatch_ops(hctx->queue, in blk_mq_run_hw_queue()
2244 blk_mq_sched_dispatch_requests(hctx)); in blk_mq_run_hw_queue()
2262 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; in blk_mq_get_sq_hctx() local
2264 if (!blk_mq_hctx_stopped(hctx)) in blk_mq_get_sq_hctx()
2265 return hctx; in blk_mq_get_sq_hctx()
2276 struct blk_mq_hw_ctx *hctx, *sq_hctx; in blk_mq_run_hw_queues() local
2282 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
2283 if (blk_mq_hctx_stopped(hctx)) in blk_mq_run_hw_queues()
2290 if (!sq_hctx || sq_hctx == hctx || in blk_mq_run_hw_queues()
2291 !list_empty_careful(&hctx->dispatch)) in blk_mq_run_hw_queues()
2292 blk_mq_run_hw_queue(hctx, async); in blk_mq_run_hw_queues()
2304 struct blk_mq_hw_ctx *hctx, *sq_hctx; in blk_mq_delay_run_hw_queues() local
2310 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_delay_run_hw_queues()
2311 if (blk_mq_hctx_stopped(hctx)) in blk_mq_delay_run_hw_queues()
2319 if (delayed_work_pending(&hctx->run_work)) in blk_mq_delay_run_hw_queues()
2326 if (!sq_hctx || sq_hctx == hctx || in blk_mq_delay_run_hw_queues()
2327 !list_empty_careful(&hctx->dispatch)) in blk_mq_delay_run_hw_queues()
2328 blk_mq_delay_run_hw_queue(hctx, msecs); in blk_mq_delay_run_hw_queues()
2342 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_stop_hw_queue() argument
2344 cancel_delayed_work(&hctx->run_work); in blk_mq_stop_hw_queue()
2346 set_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_stop_hw_queue()
2361 struct blk_mq_hw_ctx *hctx; in blk_mq_stop_hw_queues() local
2364 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
2365 blk_mq_stop_hw_queue(hctx); in blk_mq_stop_hw_queues()
2369 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_start_hw_queue() argument
2371 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_hw_queue()
2373 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING); in blk_mq_start_hw_queue()
2379 struct blk_mq_hw_ctx *hctx; in blk_mq_start_hw_queues() local
2382 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
2383 blk_mq_start_hw_queue(hctx); in blk_mq_start_hw_queues()
2387 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_start_stopped_hw_queue() argument
2389 if (!blk_mq_hctx_stopped(hctx)) in blk_mq_start_stopped_hw_queue()
2392 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_stopped_hw_queue()
2393 blk_mq_run_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queue()
2399 struct blk_mq_hw_ctx *hctx; in blk_mq_start_stopped_hw_queues() local
2402 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_stopped_hw_queues()
2403 blk_mq_start_stopped_hw_queue(hctx, async || in blk_mq_start_stopped_hw_queues()
2404 (hctx->flags & BLK_MQ_F_BLOCKING)); in blk_mq_start_stopped_hw_queues()
2410 struct blk_mq_hw_ctx *hctx = in blk_mq_run_work_fn() local
2413 blk_mq_run_dispatch_ops(hctx->queue, in blk_mq_run_work_fn()
2414 blk_mq_sched_dispatch_requests(hctx)); in blk_mq_run_work_fn()
2427 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_bypass_insert() local
2429 spin_lock(&hctx->lock); in blk_mq_request_bypass_insert()
2431 list_add(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
2433 list_add_tail(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
2434 spin_unlock(&hctx->lock); in blk_mq_request_bypass_insert()
2437 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, in blk_mq_insert_requests() argument
2442 enum hctx_type type = hctx->type; in blk_mq_insert_requests()
2448 if (!hctx->dispatch_busy && !run_queue_async) { in blk_mq_insert_requests()
2449 blk_mq_run_dispatch_ops(hctx->queue, in blk_mq_insert_requests()
2450 blk_mq_try_issue_list_directly(hctx, list)); in blk_mq_insert_requests()
2468 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_insert_requests()
2471 blk_mq_run_hw_queue(hctx, run_queue_async); in blk_mq_insert_requests()
2478 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_insert_request() local
2521 q->elevator->type->ops.insert_requests(hctx, &list, flags); in blk_mq_insert_request()
2527 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]); in blk_mq_insert_request()
2530 &ctx->rq_lists[hctx->type]); in blk_mq_insert_request()
2531 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_insert_request()
2558 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, in __blk_mq_issue_directly() argument
2573 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_issue_directly()
2576 blk_mq_update_dispatch_busy(hctx, false); in __blk_mq_issue_directly()
2580 blk_mq_update_dispatch_busy(hctx, true); in __blk_mq_issue_directly()
2584 blk_mq_update_dispatch_busy(hctx, false); in __blk_mq_issue_directly()
2616 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, in blk_mq_try_issue_directly() argument
2621 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { in blk_mq_try_issue_directly()
2628 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT); in blk_mq_try_issue_directly()
2632 ret = __blk_mq_issue_directly(hctx, rq, true); in blk_mq_try_issue_directly()
2639 blk_mq_run_hw_queue(hctx, false); in blk_mq_try_issue_directly()
2649 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_issue_directly() local
2651 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { in blk_mq_request_issue_directly()
2658 return __blk_mq_issue_directly(hctx, rq, last); in blk_mq_request_issue_directly()
2663 struct blk_mq_hw_ctx *hctx = NULL; in blk_mq_plug_issue_direct() local
2671 if (hctx != rq->mq_hctx) { in blk_mq_plug_issue_direct()
2672 if (hctx) { in blk_mq_plug_issue_direct()
2673 blk_mq_commit_rqs(hctx, queued, false); in blk_mq_plug_issue_direct()
2676 hctx = rq->mq_hctx; in blk_mq_plug_issue_direct()
2687 blk_mq_run_hw_queue(hctx, false); in blk_mq_plug_issue_direct()
2697 blk_mq_commit_rqs(hctx, queued, false); in blk_mq_plug_issue_direct()
2802 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, in blk_mq_try_issue_list_directly() argument
2822 blk_mq_run_hw_queue(hctx, false); in blk_mq_try_issue_list_directly()
2832 blk_mq_commit_rqs(hctx, queued, false); in blk_mq_try_issue_list_directly()
2945 struct blk_mq_hw_ctx *hctx; in blk_mq_submit_bio() local
3035 hctx = rq->mq_hctx; in blk_mq_submit_bio()
3037 (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) { in blk_mq_submit_bio()
3039 blk_mq_run_hw_queue(hctx, true); in blk_mq_submit_bio()
3041 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq)); in blk_mq_submit_bio()
3482 struct blk_mq_hw_ctx *hctx; member
3490 if (rq->mq_hctx != iter_data->hctx) in blk_mq_has_request()
3496 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_requests() argument
3498 struct blk_mq_tags *tags = hctx->sched_tags ? in blk_mq_hctx_has_requests()
3499 hctx->sched_tags : hctx->tags; in blk_mq_hctx_has_requests()
3501 .hctx = hctx, in blk_mq_hctx_has_requests()
3508 static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_has_online_cpu() argument
3511 enum hctx_type type = hctx->type; in blk_mq_hctx_has_online_cpu()
3520 struct blk_mq_hw_ctx *h = blk_mq_map_queue_type(hctx->queue, in blk_mq_hctx_has_online_cpu()
3523 if (h != hctx) in blk_mq_hctx_has_online_cpu()
3536 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, in blk_mq_hctx_notify_offline() local
3539 if (blk_mq_hctx_has_online_cpu(hctx, cpu)) in blk_mq_hctx_notify_offline()
3549 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); in blk_mq_hctx_notify_offline()
3557 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { in blk_mq_hctx_notify_offline()
3558 while (blk_mq_hctx_has_requests(hctx)) in blk_mq_hctx_notify_offline()
3560 percpu_ref_put(&hctx->queue->q_usage_counter); in blk_mq_hctx_notify_offline()
3574 const struct blk_mq_hw_ctx *hctx) in blk_mq_cpu_mapped_to_hctx() argument
3576 struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue, in blk_mq_cpu_mapped_to_hctx()
3577 hctx->type, cpu); in blk_mq_cpu_mapped_to_hctx()
3579 return mapped_hctx == hctx; in blk_mq_cpu_mapped_to_hctx()
3584 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, in blk_mq_hctx_notify_online() local
3587 if (blk_mq_cpu_mapped_to_hctx(cpu, hctx)) in blk_mq_hctx_notify_online()
3588 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); in blk_mq_hctx_notify_online()
3599 struct blk_mq_hw_ctx *hctx; in blk_mq_hctx_notify_dead() local
3604 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); in blk_mq_hctx_notify_dead()
3605 if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx)) in blk_mq_hctx_notify_dead()
3608 ctx = __blk_mq_get_ctx(hctx->queue, cpu); in blk_mq_hctx_notify_dead()
3609 type = hctx->type; in blk_mq_hctx_notify_dead()
3614 blk_mq_hctx_clear_pending(hctx, ctx); in blk_mq_hctx_notify_dead()
3621 spin_lock(&hctx->lock); in blk_mq_hctx_notify_dead()
3622 list_splice_tail_init(&tmp, &hctx->dispatch); in blk_mq_hctx_notify_dead()
3623 spin_unlock(&hctx->lock); in blk_mq_hctx_notify_dead()
3625 blk_mq_run_hw_queue(hctx, true); in blk_mq_hctx_notify_dead()
3629 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) in blk_mq_remove_cpuhp() argument
3631 if (!(hctx->flags & BLK_MQ_F_STACKING)) in blk_mq_remove_cpuhp()
3633 &hctx->cpuhp_online); in blk_mq_remove_cpuhp()
3635 &hctx->cpuhp_dead); in blk_mq_remove_cpuhp()
3670 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in blk_mq_exit_hctx() argument
3672 struct request *flush_rq = hctx->fq->flush_rq; in blk_mq_exit_hctx()
3674 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_exit_hctx()
3675 blk_mq_tag_idle(hctx); in blk_mq_exit_hctx()
3684 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_exit_hctx()
3686 blk_mq_remove_cpuhp(hctx); in blk_mq_exit_hctx()
3691 list_add(&hctx->hctx_list, &q->unused_hctx_list); in blk_mq_exit_hctx()
3698 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_hw_queues() local
3701 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
3704 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
3710 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) in blk_mq_init_hctx() argument
3712 hctx->queue_num = hctx_idx; in blk_mq_init_hctx()
3714 if (!(hctx->flags & BLK_MQ_F_STACKING)) in blk_mq_init_hctx()
3716 &hctx->cpuhp_online); in blk_mq_init_hctx()
3717 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); in blk_mq_init_hctx()
3719 hctx->tags = set->tags[hctx_idx]; in blk_mq_init_hctx()
3722 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) in blk_mq_init_hctx()
3725 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, in blk_mq_init_hctx()
3726 hctx->numa_node)) in blk_mq_init_hctx()
3729 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) in blk_mq_init_hctx()
3736 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); in blk_mq_init_hctx()
3739 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_init_hctx()
3741 blk_mq_remove_cpuhp(hctx); in blk_mq_init_hctx()
3749 struct blk_mq_hw_ctx *hctx; in blk_mq_alloc_hctx() local
3752 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); in blk_mq_alloc_hctx()
3753 if (!hctx) in blk_mq_alloc_hctx()
3756 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) in blk_mq_alloc_hctx()
3759 atomic_set(&hctx->nr_active, 0); in blk_mq_alloc_hctx()
3762 hctx->numa_node = node; in blk_mq_alloc_hctx()
3764 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); in blk_mq_alloc_hctx()
3765 spin_lock_init(&hctx->lock); in blk_mq_alloc_hctx()
3766 INIT_LIST_HEAD(&hctx->dispatch); in blk_mq_alloc_hctx()
3767 hctx->queue = q; in blk_mq_alloc_hctx()
3768 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; in blk_mq_alloc_hctx()
3770 INIT_LIST_HEAD(&hctx->hctx_list); in blk_mq_alloc_hctx()
3776 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), in blk_mq_alloc_hctx()
3778 if (!hctx->ctxs) in blk_mq_alloc_hctx()
3781 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), in blk_mq_alloc_hctx()
3784 hctx->nr_ctx = 0; in blk_mq_alloc_hctx()
3786 spin_lock_init(&hctx->dispatch_wait_lock); in blk_mq_alloc_hctx()
3787 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); in blk_mq_alloc_hctx()
3788 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); in blk_mq_alloc_hctx()
3790 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); in blk_mq_alloc_hctx()
3791 if (!hctx->fq) in blk_mq_alloc_hctx()
3794 blk_mq_hctx_kobj_init(hctx); in blk_mq_alloc_hctx()
3796 return hctx; in blk_mq_alloc_hctx()
3799 sbitmap_free(&hctx->ctx_map); in blk_mq_alloc_hctx()
3801 kfree(hctx->ctxs); in blk_mq_alloc_hctx()
3803 free_cpumask_var(hctx->cpumask); in blk_mq_alloc_hctx()
3805 kfree(hctx); in blk_mq_alloc_hctx()
3818 struct blk_mq_hw_ctx *hctx; in blk_mq_init_cpu_queues() local
3833 hctx = blk_mq_map_queue_type(q, j, i); in blk_mq_init_cpu_queues()
3834 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) in blk_mq_init_cpu_queues()
3835 hctx->numa_node = cpu_to_node(i); in blk_mq_init_cpu_queues()
3898 struct blk_mq_hw_ctx *hctx; in blk_mq_map_swqueue() local
3902 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
3903 cpumask_clear(hctx->cpumask); in blk_mq_map_swqueue()
3904 hctx->nr_ctx = 0; in blk_mq_map_swqueue()
3905 hctx->dispatch_from = NULL; in blk_mq_map_swqueue()
3935 hctx = blk_mq_map_queue_type(q, j, i); in blk_mq_map_swqueue()
3936 ctx->hctxs[j] = hctx; in blk_mq_map_swqueue()
3942 if (cpumask_test_cpu(i, hctx->cpumask)) in blk_mq_map_swqueue()
3945 cpumask_set_cpu(i, hctx->cpumask); in blk_mq_map_swqueue()
3946 hctx->type = j; in blk_mq_map_swqueue()
3947 ctx->index_hw[hctx->type] = hctx->nr_ctx; in blk_mq_map_swqueue()
3948 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
3954 BUG_ON(!hctx->nr_ctx); in blk_mq_map_swqueue()
3962 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
3969 if (!hctx->nr_ctx) { in blk_mq_map_swqueue()
3977 hctx->tags = NULL; in blk_mq_map_swqueue()
3981 hctx->tags = set->tags[i]; in blk_mq_map_swqueue()
3982 WARN_ON(!hctx->tags); in blk_mq_map_swqueue()
3989 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); in blk_mq_map_swqueue()
3995 for_each_cpu(cpu, hctx->cpumask) { in blk_mq_map_swqueue()
3997 cpumask_clear_cpu(cpu, hctx->cpumask); in blk_mq_map_swqueue()
4003 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_map_swqueue()
4004 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_map_swqueue()
4014 struct blk_mq_hw_ctx *hctx; in queue_set_hctx_shared() local
4017 queue_for_each_hw_ctx(q, hctx, i) { in queue_set_hctx_shared()
4019 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; in queue_set_hctx_shared()
4021 blk_mq_tag_idle(hctx); in queue_set_hctx_shared()
4022 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; in queue_set_hctx_shared()
4114 struct blk_mq_hw_ctx *hctx, *next; in blk_mq_release() local
4117 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_release()
4118 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); in blk_mq_release()
4121 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { in blk_mq_release()
4122 list_del_init(&hctx->hctx_list); in blk_mq_release()
4123 kobject_put(&hctx->kobj); in blk_mq_release()
4234 struct blk_mq_hw_ctx *hctx = NULL, *tmp; in blk_mq_alloc_and_init_hctx() local
4240 hctx = tmp; in blk_mq_alloc_and_init_hctx()
4244 if (hctx) in blk_mq_alloc_and_init_hctx()
4245 list_del_init(&hctx->hctx_list); in blk_mq_alloc_and_init_hctx()
4248 if (!hctx) in blk_mq_alloc_and_init_hctx()
4249 hctx = blk_mq_alloc_hctx(q, set, node); in blk_mq_alloc_and_init_hctx()
4250 if (!hctx) in blk_mq_alloc_and_init_hctx()
4253 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) in blk_mq_alloc_and_init_hctx()
4256 return hctx; in blk_mq_alloc_and_init_hctx()
4259 kobject_put(&hctx->kobj); in blk_mq_alloc_and_init_hctx()
4267 struct blk_mq_hw_ctx *hctx; in blk_mq_realloc_hw_ctxs() local
4287 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); in blk_mq_realloc_hw_ctxs()
4288 WARN_ON_ONCE(!hctx); in blk_mq_realloc_hw_ctxs()
4302 xa_for_each_start(&q->hctx_table, j, hctx, j) in blk_mq_realloc_hw_ctxs()
4303 blk_mq_exit_hctx(q, set, hctx, j); in blk_mq_realloc_hw_ctxs()
4657 struct blk_mq_hw_ctx *hctx; in blk_mq_update_nr_requests() local
4673 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
4674 if (!hctx->tags) in blk_mq_update_nr_requests()
4680 if (hctx->sched_tags) { in blk_mq_update_nr_requests()
4681 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, in blk_mq_update_nr_requests()
4684 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, in blk_mq_update_nr_requests()
4690 q->elevator->type->ops.depth_updated(hctx); in blk_mq_update_nr_requests()
4875 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, in blk_hctx_poll() argument
4882 ret = q->mq_ops->poll(hctx, iob); in blk_hctx_poll()
4905 struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie); in blk_mq_poll() local
4907 return blk_hctx_poll(q, hctx, iob, flags); in blk_mq_poll()
4936 struct blk_mq_hw_ctx *hctx; in blk_mq_cancel_work_sync() local
4941 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_cancel_work_sync()
4942 cancel_delayed_work_sync(&hctx->run_work); in blk_mq_cancel_work_sync()
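
Nearly every queue-wide walk in the listing follows the same shape: queue_for_each_hw_ctx() iterates the hardware contexts of a request queue, a cheap per-hctx check such as blk_mq_hctx_stopped() or blk_mq_hw_queue_mapped() filters out contexts that cannot make progress, and blk_mq_run_hw_queue() (or a stop/idle helper) acts on the rest. The sketch below only restates that pattern; the helper name is hypothetical, and it assumes the file is built next to block/blk-mq.c, since blk_mq_hctx_stopped() and blk_mq_hw_queue_mapped() live in the block layer's private blk-mq.h.

#include <linux/blk-mq.h>

#include "blk-mq.h"     /* blk_mq_hctx_stopped(), blk_mq_hw_queue_mapped() */

/* Hypothetical helper, mirroring blk_mq_run_hw_queues() and friends. */
static void example_run_mapped_hw_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i) {
                /* Skip queues the driver stopped (BLK_MQ_S_STOPPED set). */
                if (blk_mq_hctx_stopped(hctx))
                        continue;
                /* Skip hw queues with no software queues or tags mapped. */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;
                blk_mq_run_hw_queue(hctx, async);
        }
}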
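
A second sketch, again hypothetical, ties together the allocation and execution sites from the listing: blk_mq_alloc_request_hctx() looks the hardware context up in q->hctx_table, and blk_execute_rq() later runs rq->mq_hctx. It issues one passthrough-style command pinned to hardware queue 0; BLK_MQ_REQ_NOWAIT is required by the _hctx allocation variant, and a real driver would fill in its private payload via blk_mq_rq_to_pdu().

#include <linux/err.h>
#include <linux/blk-mq.h>

/* Hypothetical example: issue one driver command on hardware queue 0. */
static blk_status_t example_issue_on_hctx0(struct request_queue *q)
{
        struct request *rq;
        blk_status_t ret;

        rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT, 0);
        if (IS_ERR(rq))
                return BLK_STS_RESOURCE;

        /* Driver-private command setup via blk_mq_rq_to_pdu(rq) goes here. */

        ret = blk_execute_rq(rq, false);        /* runs rq->mq_hctx as needed */
        blk_mq_free_request(rq);
        return ret;
}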