Searched refs:mq_hctx (Results 1 – 14 of 14) sorted by relevance
55 return rqa->mq_hctx > rqb->mq_hctx; in sched_rq_cmp()
61 list_first_entry(rq_list, struct request, queuelist)->mq_hctx; in blk_mq_dispatch_hctx_list()
67 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
143 if (rq->mq_hctx != hctx) in __blk_mq_do_dispatch_sched()
266 } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1)); in blk_mq_do_dispatch_ctx()
413 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_sched_insert_request()
382 rq->mq_hctx = hctx; in blk_mq_rq_ctx_init()
719 rq->mq_hctx = NULL; in __blk_mq_free_request()
1116 cur_hctx = rq->mq_hctx; in blk_mq_end_request_batch()
1210 if (rq->mq_hctx->nr_ctx == 1 || in blk_mq_complete_request_remote()
1358 if (!rq->mq_hctx) in blk_rq_is_poll()
1767 blk_mq_tag_busy(rq->mq_hctx); in __blk_mq_alloc_driver_tag()
2687 if (hctx != rq->mq_hctx) { in blk_mq_plug_issue_direct()
2692 hctx = rq->mq_hctx; in blk_mq_plug_issue_direct()
2735 this_hctx = rq->mq_hctx; in blk_mq_dispatch_plug_list()
2893 hctx_type = rq->mq_hctx->type; in blk_mq_get_cached_request()
[all …]
328 flush_rq->mq_hctx = first_rq->mq_hctx; in blk_kick_flush()
362 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io()
269 __blk_mq_put_driver_tag(rq->mq_hctx, rq); in blk_mq_put_driver_tag()
276 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_get_driver_tag()
284 if (rq->q == q && (!hctx || rq->mq_hctx == hctx)) in bt_iter()
673 return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) | in blk_mq_unique_tag()
903 if (dd_has_write_work(rq->mq_hctx)) in dd_finish_request()
904 blk_mq_sched_mark_restart_hctx(rq->mq_hctx); in dd_finish_request()
384 if (rq->mq_hctx == params->hctx) in hctx_show_busy_rq()
484 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; in virtblk_prep_rq_batch()
487 req->mq_hctx->tags->rqs[req->tag] = req; in virtblk_prep_rq_batch()
489 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK; in virtblk_prep_rq_batch()
525 struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx); in virtio_queue_rqs()
535 if (!next || req->mq_hctx != next->mq_hctx) { in virtio_queue_rqs()
655 struct ublk_queue *ubq = req->mq_hctx->driver_data; in ublk_complete_rq()
749 struct ublk_queue *ubq = req->mq_hctx->driver_data; in __ublk_rq_task_work()
858 struct ublk_queue *ubq = req->mq_hctx->driver_data; in ublk_rq_task_work_fn()
512 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_use_sgls()
779 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_map_data()
929 req->mq_hctx->tags->rqs[req->tag] = req; in nvme_prep_rq_batch()
939 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_queue_rqs()
950 if (!next || req->mq_hctx != next->mq_hctx) { in nvme_queue_rqs()
965 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_unmap_rq()
1222 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in abort_endio()
1285 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_timeout()
1312 nvme_poll(req->mq_hctx, NULL); in nvme_timeout()
192 return req->mq_hctx->queue_num + 1; in nvme_req_qid()
678 if (req->mq_hctx->type == HCTX_TYPE_POLL) in nvme_init_request()
87 struct blk_mq_hw_ctx *mq_hctx; member
1634 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in null_timeout_rq()
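The ublk, virtio_blk, and nvme/pci hits above all rely on the same pattern: the per-hardware-queue driver context stored in hctx->driver_data at init time is recovered from any request through rq->mq_hctx, which blk_mq_rq_ctx_init() sets and __blk_mq_free_request() clears (see the blk-mq.c hits). A minimal sketch of that pattern, with hypothetical names (struct my_queue, my_init_hctx(), my_rq_to_queue()):

	/* Sketch only: hypothetical driver code illustrating the
	 * rq->mq_hctx->driver_data pattern seen in the ublk/virtio_blk/nvme hits. */
	#include <linux/blk-mq.h>

	struct my_queue {
		int qid;			/* hypothetical per-hw-queue state */
	};

	/* Wired up as blk_mq_ops.init_hctx: stash the per-queue context. */
	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *tagset_data,
				unsigned int hctx_idx)
	{
		struct my_queue *queues = tagset_data;	/* e.g. set->driver_data */

		hctx->driver_data = &queues[hctx_idx];
		return 0;
	}

	/* Any request can later be mapped back to its hardware queue's context. */
	static struct my_queue *my_rq_to_queue(struct request *rq)
	{
		return rq->mq_hctx->driver_data;
	}

The same back-pointer is what lets nvme_req_qid() derive a queue id from rq->mq_hctx->queue_num and blk_mq_unique_tag() fold queue_num into the tag.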
Completed in 51 milliseconds
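The virtio_queue_rqs() and nvme_queue_rqs() hits (like blk_mq_plug_issue_direct() and blk_mq_dispatch_plug_list() in the blk-mq.c results) show the other recurring use: a plugged request list can span several hardware queues, so batch submission is flushed whenever the next request's mq_hctx differs from the current one. A rough sketch of that split, assuming the rq_list_for_each_safe() helper from include/linux/blk-mq.h of this kernel era and a hypothetical my_submit_batch(); preparation failures and requeueing are left out:

	/* Sketch only: split a plugged request list into per-hctx batches,
	 * in the style of virtio_queue_rqs()/nvme_queue_rqs(). */
	#include <linux/blk-mq.h>

	/* Hypothetical helper issuing one NULL-terminated batch to a hw queue. */
	static void my_submit_batch(struct blk_mq_hw_ctx *hctx, struct request *head);

	static void my_queue_rqs(struct request **rqlist)
	{
		struct request *req, *next;

		rq_list_for_each_safe(rqlist, req, next) {
			/* Flush the batch accumulated so far once the list ends
			 * or the next request maps to a different hardware queue. */
			if (!next || req->mq_hctx != next->mq_hctx) {
				req->rq_next = NULL;
				my_submit_batch(req->mq_hctx, *rqlist);
				*rqlist = next;
			}
		}
	}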