| /linux/block/ |
| blk-mq-sched.c |
    94  if (rq->mq_hctx != hctx) {  in blk_mq_dispatch_hctx_list()
   126  if (hctx->dispatch_busy)  in __blk_mq_do_dispatch_sched()
   170  if (rq->mq_hctx != hctx)  in __blk_mq_do_dispatch_sched()
   226  return hctx->ctxs[idx];  in blk_mq_next_ctx()
   304  spin_lock(&hctx->lock);  in __blk_mq_sched_dispatch_requests()
   352  hctx->run++;  in blk_mq_sched_dispatch_requests()
   380  type = hctx->type;  in blk_mq_sched_bio_merge()
   528  if (!hctx->sched_tags)  in blk_mq_sched_alloc_map_and_rqs()
   546  if (hctx->sched_tags) {  in blk_mq_sched_tags_teardown()
   653  if (hctx->sched_tags)  in blk_mq_sched_free_rqs()
   [all …]
|
| blk-mq.c |
   334  struct blk_mq_hw_ctx *hctx = data->hctx;  in blk_mq_rq_ctx_init() local
  1301  struct blk_mq_hw_ctx *hctx = flush_data->hctx;  in flush_busy_ctx() local
  1319  .hctx = hctx,  in blk_mq_flush_busy_ctxs()
  1357  .hctx = hctx,  in blk_mq_dequeue_from_ctx()
  2200  hctx->queue->mq_ops->commit_rqs(hctx);  in blk_mq_commit_rqs()
  2469  hctx->queue->mq_ops->commit_rqs(hctx);  in blk_mq_try_issue_list_directly()
  2958  hctx->sched_tags : hctx->tags;  in blk_mq_hctx_has_requests()
  2960  .hctx = hctx,  in blk_mq_hctx_has_requests()
  3381  ctx->index_hw[hctx->type] = hctx->nr_ctx;  in blk_mq_map_swqueue()
  3382  hctx->ctxs[hctx->nr_ctx++] = ctx;  in blk_mq_map_swqueue()
   [all …]
|
| blk-mq-sysfs.c |
    42  sbitmap_free(&hctx->ctx_map);  in blk_mq_hw_sysfs_release()
    44  kfree(hctx->ctxs);  in blk_mq_hw_sysfs_release()
    45  kfree(hctx);  in blk_mq_hw_sysfs_release()
    58  struct blk_mq_hw_ctx *hctx;  in blk_mq_hw_sysfs_show() local
    64  q = hctx->queue;  in blk_mq_hw_sysfs_show()
    80  struct blk_mq_hw_ctx *hctx;  in blk_mq_hw_sysfs_store() local
    86  q = hctx->queue;  in blk_mq_hw_sysfs_store()
   177  if (!hctx->nr_ctx)  in blk_mq_unregister_hctx()
   183  kobject_del(&hctx->kobj);  in blk_mq_unregister_hctx()
   192  if (!hctx->nr_ctx)  in blk_mq_register_hctx()
   [all …]
|
| blk-mq.h |
   166  struct blk_mq_hw_ctx *hctx;  member
   177  return data->hctx->tags;  in blk_mq_tags_from_data()
   178  return data->hctx->sched_tags;  in blk_mq_tags_from_data()
   188  return hctx->nr_ctx && hctx->tags;  in blk_mq_hw_queue_mapped()
   231  atomic_inc(&hctx->nr_active);  in __blk_mq_inc_active_requests()
   240  atomic_sub(val, &hctx->nr_active);  in __blk_mq_sub_active_requests()
   245  __blk_mq_sub_active_requests(hctx, 1);  in __blk_mq_dec_active_requests()
   252  return atomic_read(&hctx->nr_active);  in __blk_mq_active_requests()
   262  __blk_mq_dec_active_requests(hctx);  in __blk_mq_put_driver_tag()
   282  hctx->tags->rqs[rq->tag] = rq;  in blk_mq_get_driver_tag()
   [all …]
|
| blk-mq-debugfs.c |
   363  __acquires(&hctx->lock)  in hctx_dispatch_start()
   367  spin_lock(&hctx->lock);  in hctx_dispatch_start()
   379  __releases(&hctx->lock)  in hctx_dispatch_stop()
   416  struct show_busy_params params = { .m = m, .hctx = hctx };  in hctx_busy_show()
   473  if (hctx->tags)  in hctx_tags_show()
   490  if (hctx->tags)  in hctx_tags_bitmap_show()
   507  if (hctx->sched_tags)  in hctx_sched_tags_show()
   524  if (hctx->sched_tags)  in hctx_sched_tags_bitmap_show()
   545  hctx->run = 0;  in hctx_run_write()
   764  debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);  in blk_mq_debugfs_register_hctx()
   [all …]
|
| blk-mq-tag.c |
    27  if (blk_mq_is_shared_tags(hctx->flags)) {  in __blk_mq_tag_busy()
    28  struct request_queue *q = hctx->queue;  in __blk_mq_tag_busy()
    58  struct blk_mq_tags *tags = hctx->tags;  in __blk_mq_tag_idle()
    80  !hctx_may_queue(data->hctx, bt))  in __blk_mq_get_tag()
   132  ws = bt_wait_ptr(bt, data->hctx);  in blk_mq_get_tag()
   179  ws = bt_wait_ptr(bt, data->hctx);  in blk_mq_get_tag()
   217  struct blk_mq_hw_ctx *hctx;  member
   240  struct blk_mq_hw_ctx *hctx = iter_data->hctx;  in bt_iter() local
   256  if (rq->q == hctx->queue && rq->mq_hctx == hctx)  in bt_iter()
   280  .hctx = hctx,  in bt_for_each()
   [all …]
|
| kyber-iosched.c |
   474  GFP_KERNEL, hctx->numa_node);  in kyber_init_hctx()
   478  for (i = 0; i < hctx->nr_ctx; i++)  in kyber_init_hctx()
   506  hctx->sched_data = khd;  in kyber_init_hctx()
   507  kyber_depth_updated(hctx);  in kyber_init_hctx()
   526  kfree(hctx->sched_data);  in kyber_exit_hctx()
   607  rq->mq_ctx->index_hw[hctx->type]);  in kyber_insert_requests()
   700  blk_mq_run_hw_queue(hctx, true);  in kyber_domain_wake()
   706  struct blk_mq_hw_ctx *hctx)  in kyber_get_domain_token() argument
   754  struct blk_mq_hw_ctx *hctx)  in kyber_dispatch_cur_domain() argument
   969  struct blk_mq_hw_ctx *hctx = data;  in kyber_cur_domain_show() local
   [all …]
|
| blk-mq-sched.h |
    19  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
    20  void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
    24  void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
    28  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
    34  static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_restart() argument
    36  if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))  in blk_mq_sched_restart()
    37  __blk_mq_sched_restart(hctx);  in blk_mq_sched_restart()
    79  static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_has_work() argument
    81  struct elevator_queue *e = hctx->queue->elevator;  in blk_mq_sched_has_work()
    84  return e->type->ops.has_work(hctx);  in blk_mq_sched_has_work()
   [all …]
|
| blk-mq-tag.h |
    23  extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
    37  struct blk_mq_hw_ctx *hctx)  in bt_wait_ptr() argument
    39  if (!hctx)  in bt_wait_ptr()
    41  return sbq_wait_ptr(bt, &hctx->wait_index);  in bt_wait_ptr()
    53  static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)  in blk_mq_tag_busy() argument
    55  if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))  in blk_mq_tag_busy()
    58  return __blk_mq_tag_busy(hctx);  in blk_mq_tag_busy()
    61  static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)  in blk_mq_tag_idle() argument
    63  if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))  in blk_mq_tag_idle()
    66  __blk_mq_tag_idle(hctx);  in blk_mq_tag_idle()
|
| blk-mq-debugfs.h |
    24  struct blk_mq_hw_ctx *hctx);
    25  void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
    32  struct blk_mq_hw_ctx *hctx);
    33  void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
    48  struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_register_hctx() argument
    52  static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_unregister_hctx() argument
    73  struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_register_sched_hctx() argument
    77  static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_unregister_sched_hctx() argument
|
| mq-deadline.c |
   516  struct deadline_data *dd = hctx->queue->elevator->elevator_data;  in dd_dispatch_request()
   562  static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)  in dd_depth_updated() argument
   564  struct request_queue *q = hctx->queue;  in dd_depth_updated()
   566  struct blk_mq_tags *tags = hctx->sched_tags;  in dd_depth_updated()
   576  dd_depth_updated(hctx);  in dd_init_hctx()
   712  struct request_queue *q = hctx->queue;  in dd_insert_request()
   765  static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,  in dd_insert_requests() argument
   768  struct request_queue *q = hctx->queue;  in dd_insert_requests()
   777  dd_insert_request(hctx, rq, at_head);  in dd_insert_requests()
   840  static bool dd_has_work(struct blk_mq_hw_ctx *hctx)  in dd_has_work() argument
   [all …]
|
| blk-flush.c |
   352  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  in mq_flush_data_end_io() local
   370  blk_mq_sched_restart(hctx);  in mq_flush_data_end_io()
   519  void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,  in blk_mq_hctx_set_fq_lock_class() argument
   522  lockdep_set_class(&hctx->fq->mq_flush_lock, key);  in blk_mq_hctx_set_fq_lock_class()
|
| bfq-iosched.c |
  4907  static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)  in bfq_has_work() argument
  4909  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;  in bfq_has_work()
  4921  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;  in __bfq_dispatch_request()
  5071  rq = __bfq_dispatch_request(hctx);  in bfq_dispatch_request()
  5078  bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,  in bfq_dispatch_request()
  5970  struct request_queue *q = hctx->queue;  in bfq_insert_request()
  6069  static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,  in bfq_insert_requests() argument
  6077  bfq_insert_request(hctx, rq, at_head);  in bfq_insert_requests()
  6881  static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)  in bfq_depth_updated() argument
  6884  struct blk_mq_tags *tags = hctx->sched_tags;  in bfq_depth_updated()
   [all …]
|
| /linux/include/linux/ |
| blk-mq.h |
   794  void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
   795  void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
   798  void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
   803  void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
   804  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
   861  #define queue_for_each_hw_ctx(q, hctx, i) \  argument
   863  ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
   865  #define hctx_for_each_ctx(hctx, ctx, i) \  argument
   866  for ((i) = 0; (i) < (hctx)->nr_ctx && \
   867  ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
   [all …]
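The stop/start/run helpers and the two iteration macros above are the hctx-facing part of the public blk-mq API. As a minimal illustrative sketch (not code from the tree; the function name is invented), walking every hardware context of a request_queue and kicking it asynchronously looks like this, which is roughly what the in-kernel blk_mq_run_hw_queues() helper does:

#include <linux/blk-mq.h>

/* Illustrative sketch: iterate all hardware contexts of a queue and ask
 * blk-mq to run each one asynchronously. */
static void example_kick_all_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_run_hw_queue(hctx, true);
}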
|
| /linux/net/dccp/ccids/ |
| ccid3.h |
   104  struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);  in ccid3_hc_tx_sk() local
   105  BUG_ON(hctx == NULL);  in ccid3_hc_tx_sk()
   106  return hctx;  in ccid3_hc_tx_sk()
|
| /linux/drivers/s390/block/ |
| scm_blk.c |
   283  static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,  in scm_blk_request() argument
   286  struct scm_device *scmdev = hctx->queue->queuedata;  in scm_blk_request()
   288  struct scm_queue *sq = hctx->driver_data;  in scm_blk_request()
   332  static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in scm_blk_init_hctx() argument
   341  hctx->driver_data = qd;  in scm_blk_init_hctx()
   346  static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)  in scm_blk_exit_hctx() argument
   348  struct scm_queue *qd = hctx->driver_data;  in scm_blk_exit_hctx()
   351  kfree(hctx->driver_data);  in scm_blk_exit_hctx()
   352  hctx->driver_data = NULL;  in scm_blk_exit_hctx()
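scm_blk shows the per-hctx driver_data pattern that most drivers in this listing follow: allocate per-queue state in ->init_hctx(), stash it in hctx->driver_data, read it back in ->queue_rq(), and free it in ->exit_hctx(). A minimal sketch of that shape, assuming an invented struct my_queue (the my_* names are placeholders, not scm_blk code):

#include <linux/blk-mq.h>
#include <linux/slab.h>

struct my_queue {
	spinlock_t lock;	/* protects per-hctx submission state */
};

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			unsigned int hctx_idx)
{
	struct my_queue *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return -ENOMEM;
	spin_lock_init(&mq->lock);
	hctx->driver_data = mq;		/* read back in ->queue_rq() */
	return 0;
}

static void my_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

Both callbacks would be wired up through the driver's struct blk_mq_ops (.init_hctx / .exit_hctx).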
|
| /linux/drivers/block/rnbd/ |
| rnbd-clt.c |
   167  if (WARN_ON(!q->hctx))  in rnbd_clt_dev_requeue()
   171  blk_mq_run_hw_queue(q->hctx, true);  in rnbd_clt_dev_requeue()
  1117  struct blk_mq_hw_ctx *hctx,  in rnbd_clt_dev_kick_mq_queue() argument
  1120  struct rnbd_queue *q = hctx->driver_data;  in rnbd_clt_dev_kick_mq_queue()
  1123  blk_mq_delay_run_hw_queue(hctx, delay);  in rnbd_clt_dev_kick_mq_queue()
  1181  struct rnbd_queue *q = hctx->driver_data;  in rnbd_rdma_poll()
  1337  struct blk_mq_hw_ctx *hctx)  in rnbd_init_hw_queue() argument
  1341  q->hctx = hctx;  in rnbd_init_hw_queue()
  1347  struct blk_mq_hw_ctx *hctx;  in rnbd_init_mq_hw_queues() local
  1352  rnbd_init_hw_queue(dev, q, hctx);  in rnbd_init_mq_hw_queues()
   [all …]
|
| rnbd-clt.h |
   105  struct blk_mq_hw_ctx *hctx;  member
|
| /linux/drivers/block/ |
| virtio_blk.c |
   186  static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,  in virtblk_map_data() argument
   202  return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);  in virtblk_map_data()
   298  static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)  in virtio_commit_rqs() argument
   300  struct virtio_blk *vblk = hctx->queue->queuedata;  in virtio_commit_rqs()
   301  struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];  in virtio_commit_rqs()
   312  static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,  in virtio_queue_rq() argument
   315  struct virtio_blk *vblk = hctx->queue->queuedata;  in virtio_queue_rq()
   320  int qid = hctx->queue_num;  in virtio_queue_rq()
   333  num = virtblk_map_data(hctx, req, vbr);  in virtio_queue_rq()
   347  blk_mq_stop_hw_queue(hctx);  in virtio_queue_rq()
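The hit at line 347 is the usual back-pressure handling inside ->queue_rq(): when the device ring has no room, the driver stops the hardware queue and returns BLK_STS_DEV_RESOURCE so blk-mq requeues the request until space frees up. A hedged sketch of that shape, with an invented struct my_dev standing in for the driver's real state (this is not virtio_blk code):

#include <linux/blk-mq.h>

struct my_dev {
	unsigned int free_slots;	/* hypothetical ring-space accounting */
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->queue->queuedata;
	struct request *req = bd->rq;

	blk_mq_start_request(req);

	if (!dev->free_slots) {
		/* Ring full: stop the queue; the completion path restarts it
		 * with blk_mq_start_stopped_hw_queues() once slots free up. */
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_DEV_RESOURCE;
	}

	dev->free_slots--;
	/* ...post the request to the hardware ring here... */
	return BLK_STS_OK;
}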
|
| /linux/drivers/nvme/target/ |
| loop.c |
   131  static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,  in nvme_loop_queue_rq() argument
   134  struct nvme_ns *ns = hctx->queue->queuedata;  in nvme_loop_queue_rq()
   135  struct nvme_loop_queue *queue = hctx->driver_data;  in nvme_loop_queue_rq()
   218  static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_loop_init_hctx() argument
   232  blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);  in nvme_loop_init_hctx()
   234  hctx->driver_data = queue;  in nvme_loop_init_hctx()
   238  static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_loop_init_admin_hctx() argument
   246  hctx->driver_data = queue;  in nvme_loop_init_admin_hctx()
|
| /linux/drivers/block/null_blk/ |
| main.c |
  1560  struct nullb_queue *nq = hctx->driver_data;  in null_poll()
  1586  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  in null_timeout_rq() local
  1591  if (hctx->type == HCTX_TYPE_POLL) {  in null_timeout_rq()
  1592  struct nullb_queue *nq = hctx->driver_data;  in null_timeout_rq()
  1612  static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,  in null_queue_rq() argument
  1616  struct nullb_queue *nq = hctx->driver_data;  in null_queue_rq()
  1619  const bool is_poll = hctx->type == HCTX_TYPE_POLL;  in null_queue_rq()
  1621  might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);  in null_queue_rq()
  1678  struct nullb_queue *nq = hctx->driver_data;  in null_exit_hctx()
  1696  struct nullb *nullb = hctx->queue->queuedata;  in null_init_hctx()
   [all …]
|
| /linux/drivers/scsi/ |
| scsi_lib.c |
  1659  static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,  in scsi_queue_rq() argument
  1805  static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)  in scsi_mq_poll() argument
  1807  struct Scsi_Host *shost = hctx->driver_data;  in scsi_mq_poll()
  1810  return shost->hostt->mq_poll(shost, hctx->queue_num);  in scsi_mq_poll()
  1815  static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in scsi_init_hctx() argument
  1820  hctx->driver_data = shost;  in scsi_init_hctx()
  1895  static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)  in scsi_commit_rqs() argument
  1897  struct Scsi_Host *shost = hctx->driver_data;  in scsi_commit_rqs()
  1899  shost->hostt->commit_rqs(shost, hctx->queue_num);  in scsi_commit_rqs()
|
| /linux/drivers/nvme/host/ |
| tcp.c |
   447  static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_tcp_init_hctx() argument
   453  hctx->driver_data = queue;  in nvme_tcp_init_hctx()
   457  static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_tcp_init_admin_hctx() argument
   463  hctx->driver_data = queue;  in nvme_tcp_init_admin_hctx()
  2357  static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)  in nvme_tcp_commit_rqs() argument
  2359  struct nvme_tcp_queue *queue = hctx->driver_data;  in nvme_tcp_commit_rqs()
  2365  static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,  in nvme_tcp_queue_rq() argument
  2368  struct nvme_ns *ns = hctx->queue->queuedata;  in nvme_tcp_queue_rq()
  2369  struct nvme_tcp_queue *queue = hctx->driver_data;  in nvme_tcp_queue_rq()
  2434  static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)  in nvme_tcp_poll() argument
   [all …]
|
| pci.c |
   410  WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);  in nvme_admin_init_hctx()
   412  hctx->driver_data = nvmeq;  in nvme_admin_init_hctx()
   416  static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_init_hctx() argument
   422  WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);  in nvme_init_hctx()
   423  hctx->driver_data = nvmeq;  in nvme_init_hctx()
   521  static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)  in nvme_commit_rqs() argument
   523  struct nvme_queue *nvmeq = hctx->driver_data;  in nvme_commit_rqs()
   918  static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,  in nvme_queue_rq() argument
   921  struct nvme_ns *ns = hctx->queue->queuedata;  in nvme_queue_rq()
   922  struct nvme_queue *nvmeq = hctx->driver_data;  in nvme_queue_rq()
   [all …]
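The nvme hits also illustrate the ->queue_rq()/->commit_rqs() batching contract: queue_rq() only writes the doorbell when bd->last is set, and the core calls ->commit_rqs() if it ends a dispatch batch early, so no posted command is left without a doorbell write. A rough sketch of that split, assuming an invented struct my_sq with a memory-mapped doorbell (not the nvme driver's code):

#include <linux/blk-mq.h>
#include <linux/io.h>

struct my_sq {
	u16 sq_tail;		/* next free submission-queue slot */
	void __iomem *doorbell;	/* hypothetical doorbell register */
};

static void my_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct my_sq *sq = hctx->driver_data;

	writel(sq->sq_tail, sq->doorbell);
}

static blk_status_t my_batched_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
{
	struct my_sq *sq = hctx->driver_data;

	blk_mq_start_request(bd->rq);
	/* ...copy the command into the submission queue at sq_tail... */
	sq->sq_tail++;

	/* Ring the doorbell only for the last request of a batch; when the
	 * core cuts a batch short it calls ->commit_rqs() instead. */
	if (bd->last)
		writel(sq->sq_tail, sq->doorbell);
	return BLK_STS_OK;
}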
|
| /linux/drivers/block/mtip32xx/ |
| mtip32xx.c |
   165  struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];  in mtip_cmd_from_tag() local
   167  return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));  in mtip_cmd_from_tag()
  2059  struct blk_mq_hw_ctx *hctx)  in mtip_hw_submit_io() argument
  2071  nents = blk_rq_map_sg(hctx->queue, rq, command->sg);  in mtip_hw_submit_io()
  3383  static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,  in mtip_check_unal_depth() argument
  3386  struct driver_data *dd = hctx->queue->queuedata;  in mtip_check_unal_depth()
  3410  struct driver_data *dd = hctx->queue->queuedata;  in mtip_issue_reserved_cmd()
  3447  struct driver_data *dd = hctx->queue->queuedata;  in mtip_queue_rq()
  3452  return mtip_issue_reserved_cmd(hctx, rq);  in mtip_queue_rq()
  3454  if (unlikely(mtip_check_unal_depth(hctx, rq)))  in mtip_queue_rq()
   [all …]
|