/linux-6.3-rc2/block/
blk-mq-sched.c
     60  struct blk_mq_hw_ctx *hctx =    in blk_mq_dispatch_hctx_list() (local)
     67  if (rq->mq_hctx != hctx) {    in blk_mq_dispatch_hctx_list()
     99  if (hctx->dispatch_busy)    in __blk_mq_do_dispatch_sched()
    143  if (rq->mq_hctx != hctx)    in __blk_mq_do_dispatch_sched()
    206  return hctx->ctxs[idx];    in blk_mq_next_ctx()
    284  spin_lock(&hctx->lock);    in __blk_mq_sched_dispatch_requests()
    332  hctx->run++;    in blk_mq_sched_dispatch_requests()
    360  type = hctx->type;    in blk_mq_sched_bio_merge()
    509  if (!hctx->sched_tags)    in blk_mq_sched_alloc_map_and_rqs()
    634  if (hctx->sched_tags)    in blk_mq_sched_free_rqs()
    [all …]

blk-mq-sysfs.c
     40  sbitmap_free(&hctx->ctx_map);    in blk_mq_hw_sysfs_release()
     42  kfree(hctx->ctxs);    in blk_mq_hw_sysfs_release()
     43  kfree(hctx);    in blk_mq_hw_sysfs_release()
     55  struct blk_mq_hw_ctx *hctx;    in blk_mq_hw_sysfs_show() (local)
     61  q = hctx->queue;    in blk_mq_hw_sysfs_show()
    151  if (!hctx->nr_ctx)    in blk_mq_unregister_hctx()
    157  kobject_del(&hctx->kobj);    in blk_mq_unregister_hctx()
    166  if (!hctx->nr_ctx)    in blk_mq_register_hctx()
    169  ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);    in blk_mq_register_hctx()
    185  kobject_del(&hctx->kobj);    in blk_mq_register_hctx()
    [all …]

blk-mq.h
    164  struct blk_mq_hw_ctx *hctx;    (member)
    175  return data->hctx->tags;    in blk_mq_tags_from_data()
    176  return data->hctx->sched_tags;    in blk_mq_tags_from_data()
    186  return hctx->nr_ctx && hctx->tags;    in blk_mq_hw_queue_mapped()
    229  atomic_inc(&hctx->nr_active);    in __blk_mq_inc_active_requests()
    238  atomic_sub(val, &hctx->nr_active);    in __blk_mq_sub_active_requests()
    243  __blk_mq_sub_active_requests(hctx, 1);    in __blk_mq_dec_active_requests()
    250  return atomic_read(&hctx->nr_active);    in __blk_mq_active_requests()
    260  __blk_mq_dec_active_requests(hctx);    in __blk_mq_put_driver_tag()
    280  hctx->tags->rqs[rq->tag] = rq;    in blk_mq_get_driver_tag()
    [all …]

blk-mq.c
     376  struct blk_mq_hw_ctx *hctx = data->hctx;    in blk_mq_rq_ctx_init() (local)
    1695  struct blk_mq_hw_ctx *hctx = flush_data->hctx;    in flush_busy_ctx() (local)
    1713  .hctx = hctx,    in blk_mq_flush_busy_ctxs()
    1730  struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;    in dispatch_rq_from_ctx() (local)
    1751  .hctx = hctx,    in blk_mq_dequeue_from_ctx()
    2013  hctx->queue->mq_ops->commit_rqs(hctx);    in blk_mq_commit_rqs()
    3437  hctx->sched_tags : hctx->tags;    in blk_mq_hctx_has_requests()
    3439  .hctx = hctx,    in blk_mq_hctx_has_requests()
    3854  ctx->index_hw[hctx->type] = hctx->nr_ctx;    in blk_mq_map_swqueue()
    3855  hctx->ctxs[hctx->nr_ctx++] = ctx;    in blk_mq_map_swqueue()
    [all …]

blk-mq-debugfs.c
    340  __acquires(&hctx->lock)    in hctx_dispatch_start()
    344  spin_lock(&hctx->lock);    in hctx_dispatch_start()
    356  __releases(&hctx->lock)    in hctx_dispatch_stop()
    393  struct show_busy_params params = { .m = m, .hctx = hctx };    in hctx_busy_show()
    450  if (hctx->tags)    in hctx_tags_show()
    467  if (hctx->tags)    in hctx_tags_bitmap_show()
    484  if (hctx->sched_tags)    in hctx_sched_tags_show()
    501  if (hctx->sched_tags)    in hctx_sched_tags_bitmap_show()
    522  hctx->run = 0;    in hctx_run_write()
    739  debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);    in blk_mq_debugfs_register_hctx()
    [all …]

blk-mq-tag.c
     45  struct request_queue *q = hctx->queue;    in __blk_mq_tag_busy()
     77  struct blk_mq_tags *tags = hctx->tags;    in __blk_mq_tag_idle()
    102  !hctx_may_queue(data->hctx, bt))    in __blk_mq_get_tag()
    154  ws = bt_wait_ptr(bt, data->hctx);    in blk_mq_get_tag()
    201  ws = bt_wait_ptr(bt, data->hctx);    in blk_mq_get_tag()
    238  struct blk_mq_hw_ctx *hctx;    (member)
    262  struct blk_mq_hw_ctx *hctx = iter_data->hctx;    in bt_iter() (local)
    272  tags = hctx->tags;    in bt_iter()
    284  if (rq->q == q && (!hctx || rq->mq_hctx == hctx))    in bt_iter()
    310  .hctx = hctx,    in bt_for_each()
    [all …]

blk-mq-sched.h
     17  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
     18  void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
     22  void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
     26  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
     32  static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)    in blk_mq_sched_restart() (argument)
     34  if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))    in blk_mq_sched_restart()
     35  __blk_mq_sched_restart(hctx);    in blk_mq_sched_restart()
     77  static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)    in blk_mq_sched_has_work() (argument)
     79  struct elevator_queue *e = hctx->queue->elevator;    in blk_mq_sched_has_work()
     82  return e->type->ops.has_work(hctx);    in blk_mq_sched_has_work()
    [all …]

blk-mq-tag.h
     23  extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
     37  struct blk_mq_hw_ctx *hctx)    in bt_wait_ptr() (argument)
     39  if (!hctx)    in bt_wait_ptr()
     41  return sbq_wait_ptr(bt, &hctx->wait_index);    in bt_wait_ptr()
     53  static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)    in blk_mq_tag_busy() (argument)
     55  if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)    in blk_mq_tag_busy()
     56  __blk_mq_tag_busy(hctx);    in blk_mq_tag_busy()
     59  static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)    in blk_mq_tag_idle() (argument)
     61  if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))    in blk_mq_tag_idle()
     64  __blk_mq_tag_idle(hctx);    in blk_mq_tag_idle()

kyber-iosched.c
    477  GFP_KERNEL, hctx->numa_node);    in kyber_init_hctx()
    481  for (i = 0; i < hctx->nr_ctx; i++)    in kyber_init_hctx()
    509  hctx->sched_data = khd;    in kyber_init_hctx()
    510  kyber_depth_updated(hctx);    in kyber_init_hctx()
    529  kfree(hctx->sched_data);    in kyber_exit_hctx()
    610  rq->mq_ctx->index_hw[hctx->type]);    in kyber_insert_requests()
    703  blk_mq_run_hw_queue(hctx, true);    in kyber_domain_wake()
    709  struct blk_mq_hw_ctx *hctx)    in kyber_get_domain_token() (argument)
    757  struct blk_mq_hw_ctx *hctx)    in kyber_dispatch_cur_domain() (argument)
    972  struct blk_mq_hw_ctx *hctx = data;    in kyber_cur_domain_show() (local)
    [all …]

blk-mq-debugfs.h
     25  struct blk_mq_hw_ctx *hctx);
     26  void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
     33  struct blk_mq_hw_ctx *hctx);
     34  void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
     44  struct blk_mq_hw_ctx *hctx)    in blk_mq_debugfs_register_hctx() (argument)
     48  static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)    in blk_mq_debugfs_unregister_hctx() (argument)
     69  struct blk_mq_hw_ctx *hctx)    in blk_mq_debugfs_register_sched_hctx() (argument)
     73  static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)    in blk_mq_debugfs_unregister_sched_hctx() (argument)

mq-deadline.c
    574  struct deadline_data *dd = hctx->queue->elevator->elevator_data;    in dd_dispatch_request()
    620  static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)    in dd_depth_updated() (argument)
    622  struct request_queue *q = hctx->queue;    in dd_depth_updated()
    624  struct blk_mq_tags *tags = hctx->sched_tags;    in dd_depth_updated()
    634  dd_depth_updated(hctx);    in dd_init_hctx()
    773  struct request_queue *q = hctx->queue;    in dd_insert_request()
    827  static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,    in dd_insert_requests() (argument)
    830  struct request_queue *q = hctx->queue;    in dd_insert_requests()
    839  dd_insert_request(hctx, rq, at_head);    in dd_insert_requests()
    850  static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)    in dd_has_write_work() (argument)
    [all …]

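kyber-iosched.c and mq-deadline.c above are the two in-tree blk-mq I/O schedulers. Every hook they implement receives the hctx, and per-hw-queue scheduler state is parked in hctx->sched_data (set in kyber_init_hctx() and dd_init_hctx(), freed in the exit hooks). Below is a minimal sketch of that pattern; the sketch_* names are hypothetical, and a functional scheduler would also supply insert_requests and dispatch_request, as both files do.

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/blk-mq.h>

    #include "elevator.h"    /* in-tree schedulers live in block/ */

    /* hypothetical per-hctx scheduler state, kept in hctx->sched_data */
    struct sketch_hctx_data {
        spinlock_t lock;
        struct list_head rq_list;
    };

    static int sketch_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    {
        struct sketch_hctx_data *shd;

        /* allocate on the hctx's home NUMA node, as kyber_init_hctx() does */
        shd = kzalloc_node(sizeof(*shd), GFP_KERNEL, hctx->numa_node);
        if (!shd)
            return -ENOMEM;
        spin_lock_init(&shd->lock);
        INIT_LIST_HEAD(&shd->rq_list);
        hctx->sched_data = shd;
        return 0;
    }

    static void sketch_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    {
        kfree(hctx->sched_data);
        hctx->sched_data = NULL;
    }

    static bool sketch_has_work(struct blk_mq_hw_ctx *hctx)
    {
        struct sketch_hctx_data *shd = hctx->sched_data;

        return !list_empty_careful(&shd->rq_list);
    }

    static struct elevator_type sketch_sched = {
        .ops = {
            /* a real scheduler also sets insert_requests,
             * dispatch_request, init_sched, exit_sched, ... */
            .init_hctx  = sketch_init_hctx,
            .exit_hctx  = sketch_exit_hctx,
            .has_work   = sketch_has_work,
        },
        .elevator_name  = "sketch",
        .elevator_owner = THIS_MODULE,
    };
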
blk-flush.c
    362  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;    in mq_flush_data_end_io() (local)
    380  blk_mq_sched_restart(hctx);    in mq_flush_data_end_io()
    528  void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,    in blk_mq_hctx_set_fq_lock_class() (argument)
    531  lockdep_set_class(&hctx->fq->mq_flush_lock, key);    in blk_mq_hctx_set_fq_lock_class()

/linux-6.3-rc2/samples/hid/ |
hid_mouse.bpf.c
      9  int BPF_PROG(hid_y_event, struct hid_bpf_ctx *hctx)    in BPF_PROG() (argument)
     12  __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);    in BPF_PROG()
     17  bpf_printk("event: size: %d", hctx->size);    in BPF_PROG()
     55  int BPF_PROG(hid_x_event, struct hid_bpf_ctx *hctx)    in BPF_PROG() (argument)
     58  __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);    in BPF_PROG()
     73  int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)    in BPF_PROG() (argument)
     75  __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);    in BPF_PROG()

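Both HID samples follow the same shape: a BPF program receives a struct hid_bpf_ctx and calls the hid_bpf_get_data() kfunc for a bounded view of the event buffer (or, for hid_rdesc_fixup, the report descriptor). A minimal sketch in the same style follows; treating data[3] as the Y delta is an assumption for illustration, since the real samples derive byte offsets from each device's report descriptor.

    // Sketch of a HID-BPF event filter, assuming the Y delta sits in data[3].
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    // kfunc exported by the HID-BPF core (the samples declare it in
    // hid_bpf_helpers.h)
    extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
                                  unsigned int offset, const size_t __sz) __ksym;

    SEC("fmod_ret/hid_bpf_device_event")
    int BPF_PROG(invert_y_event, struct hid_bpf_ctx *hctx)
    {
        __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);

        if (!data)
            return 0;           /* unable to map the event: leave it alone */

        data[3] = -data[3];     /* invert the (assumed) Y delta in place */
        return 0;               /* 0 = keep processing the modified event */
    }

    char _license[] SEC("license") = "GPL";
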
hid_surface_dial.bpf.c
     14  int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx)    in BPF_PROG() (argument)
     16  __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);    in BPF_PROG()
    105  int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)    in BPF_PROG() (argument)
    107  __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);    in BPF_PROG()

/linux-6.3-rc2/net/dccp/ccids/ |
ccid3.h
    104  struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);    in ccid3_hc_tx_sk() (local)
    105  BUG_ON(hctx == NULL);    in ccid3_hc_tx_sk()
    106  return hctx;    in ccid3_hc_tx_sk()

/linux-6.3-rc2/drivers/hid/bpf/entrypoints/ |
entrypoints.bpf.c
     18  int BPF_PROG(hid_tail_call, struct hid_bpf_ctx *hctx)    in BPF_PROG() (argument)
     20  bpf_tail_call(ctx, &hid_jmp_table, hctx->index);    in BPF_PROG()

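The tail call above is how HID-BPF dispatches to user-attached programs: hctx->index keys into a BPF_MAP_TYPE_PROG_ARRAY. A hedged sketch of that shape follows; the map size and section name are recalled from the in-tree file and should be treated as assumptions.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    #define MAX_PROGS 1024    /* assumed to match HID_BPF_MAX_PROGS */

    struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, MAX_PROGS);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
    } hid_jmp_table SEC(".maps");

    SEC("fmod_ret/__hid_bpf_tail_call")
    int BPF_PROG(hid_tail_call, struct hid_bpf_ctx *hctx)
    {
        /* ctx is the raw context hidden by BPF_PROG(); on success the
         * tail call never returns here */
        bpf_tail_call(ctx, &hid_jmp_table, hctx->index);
        return 0;
    }

    char _license[] SEC("license") = "GPL";
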
/linux-6.3-rc2/include/linux/ |
blk-mq.h
    877  void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
    878  void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
    881  void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
    888  void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
    889  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
    946  #define queue_for_each_hw_ctx(q, hctx, i) \    (argument)
    947  xa_for_each(&(q)->hctx_table, (i), (hctx))
    949  #define hctx_for_each_ctx(hctx, ctx, i) \    (argument)
    950  for ((i) = 0; (i) < (hctx)->nr_ctx && \
    951  ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
    [all …]

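This header is the driver-facing half of the API: the stop/start/run helpers all take an hctx, and queue_for_each_hw_ctx() walks a queue's hardware contexts through the hctx_table xarray. A small sketch of the iteration, with a hypothetical helper name; in practice blk_mq_start_stopped_hw_queues() already wraps exactly this loop.

    #include <linux/blk-mq.h>

    /* hypothetical helper: kick every stopped hw queue of a request_queue */
    static void sketch_restart_hw_queues(struct request_queue *q)
    {
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
            /* a no-op for queues that are not BLK_MQ_S_STOPPED */
            blk_mq_start_stopped_hw_queue(hctx, true /* async */);
    }
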
/linux-6.3-rc2/drivers/s390/block/ |
scm_blk.c
    282  static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,    in scm_blk_request() (argument)
    285  struct scm_device *scmdev = hctx->queue->queuedata;    in scm_blk_request()
    287  struct scm_queue *sq = hctx->driver_data;    in scm_blk_request()
    331  static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,    in scm_blk_init_hctx() (argument)
    340  hctx->driver_data = qd;    in scm_blk_init_hctx()
    345  static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)    in scm_blk_exit_hctx() (argument)
    347  struct scm_queue *qd = hctx->driver_data;    in scm_blk_exit_hctx()
    350  kfree(hctx->driver_data);    in scm_blk_exit_hctx()
    351  hctx->driver_data = NULL;    in scm_blk_exit_hctx()

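scm_blk.c is the most compact example in this list of the per-hctx driver_data lifecycle: allocate state in .init_hctx, fetch it in .queue_rq, free it in .exit_hctx. A hedged sketch of that pattern; the sketch_* names and struct are hypothetical.

    #include <linux/blk-mq.h>
    #include <linux/slab.h>

    struct sketch_queue {
        spinlock_t lock;    /* per-hw-queue submission state */
    };

    static int sketch_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                unsigned int hctx_idx)
    {
        struct sketch_queue *sq = kzalloc(sizeof(*sq), GFP_KERNEL);

        if (!sq)
            return -ENOMEM;
        spin_lock_init(&sq->lock);
        hctx->driver_data = sq;
        return 0;
    }

    static void sketch_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    {
        kfree(hctx->driver_data);
        hctx->driver_data = NULL;
    }

    static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        const struct blk_mq_queue_data *bd)
    {
        struct sketch_queue *sq = hctx->driver_data;
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);
        spin_lock(&sq->lock);
        /* ... hand rq to the hardware ... */
        spin_unlock(&sq->lock);
        return BLK_STS_OK;
    }

    static const struct blk_mq_ops sketch_mq_ops = {
        .queue_rq  = sketch_queue_rq,
        .init_hctx = sketch_init_hctx,
        .exit_hctx = sketch_exit_hctx,
    };
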
/linux-6.3-rc2/drivers/block/rnbd/ |
rnbd-clt.c
     139  if (WARN_ON(!q->hctx))    in rnbd_clt_dev_requeue()
     143  blk_mq_run_hw_queue(q->hctx, true);    in rnbd_clt_dev_requeue()
    1096  struct blk_mq_hw_ctx *hctx,    in rnbd_clt_dev_kick_mq_queue() (argument)
    1099  struct rnbd_queue *q = hctx->driver_data;    in rnbd_clt_dev_kick_mq_queue()
    1102  blk_mq_delay_run_hw_queue(hctx, delay);    in rnbd_clt_dev_kick_mq_queue()
    1160  struct rnbd_queue *q = hctx->driver_data;    in rnbd_rdma_poll()
    1312  struct blk_mq_hw_ctx *hctx)    in rnbd_init_hw_queue() (argument)
    1316  q->hctx = hctx;    in rnbd_init_hw_queue()
    1322  struct blk_mq_hw_ctx *hctx;    in rnbd_init_mq_hw_queues() (local)
    1327  rnbd_init_hw_queue(dev, q, hctx);    in rnbd_init_mq_hw_queues()
    [all …]

rnbd-clt.h
    105  struct blk_mq_hw_ctx *hctx;    (member)

/linux-6.3-rc2/drivers/block/ |
virtio_blk.c
     136  struct virtio_blk *vblk = hctx->queue->queuedata;    in get_virtio_blk_vq()
     137  struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];    in get_virtio_blk_vq()
     396  struct virtio_blk *vblk = hctx->queue->queuedata;    in virtio_commit_rqs()
     397  struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];    in virtio_commit_rqs()
     433  num = virtblk_map_data(hctx, req, vbr);    in virtblk_prep_rq()
     446  struct virtio_blk *vblk = hctx->queue->queuedata;    in virtio_queue_rq()
     450  int qid = hctx->queue_num;    in virtio_queue_rq()
     455  status = virtblk_prep_rq(hctx, vblk, req, vbr);    in virtio_queue_rq()
     467  blk_mq_stop_hw_queue(hctx);    in virtio_queue_rq()
    1209  struct virtio_blk *vblk = hctx->queue->queuedata;    in virtblk_poll()
    [all …]

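virtio_queue_rq() above also illustrates flow control against a finite device ring: on a full ring, stop the hctx and return BLK_STS_DEV_RESOURCE so the core holds and later requeues the request, and batch doorbell writes via bd->last plus the .commit_rqs hook (blk_mq_commit_rqs() in blk-mq.c above is the core-side caller). A sketch under those assumptions, with hypothetical ring helpers.

    #include <linux/blk-mq.h>

    static bool sketch_ring_full(struct blk_mq_hw_ctx *hctx)
    {
        return false;    /* stand-in: a real driver checks ring capacity */
    }

    static void sketch_kick_hardware(struct blk_mq_hw_ctx *hctx)
    {
        /* stand-in: a real driver writes a doorbell register here */
    }

    static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        const struct blk_mq_queue_data *bd)
    {
        blk_mq_start_request(bd->rq);

        if (sketch_ring_full(hctx)) {
            /* the completion path must restart the queue once space
             * frees up, e.g. via blk_mq_start_stopped_hw_queues() */
            blk_mq_stop_hw_queue(hctx);
            return BLK_STS_DEV_RESOURCE;
        }

        /* ... enqueue bd->rq on the ring ... */

        if (bd->last)
            sketch_kick_hardware(hctx);    /* core calls .commit_rqs otherwise */
        return BLK_STS_OK;
    }

virtio-blk performs the matching restart from its completion path via blk_mq_start_stopped_hw_queues().
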
/linux-6.3-rc2/drivers/nvme/target/ |
loop.c
    131  static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,    in nvme_loop_queue_rq() (argument)
    134  struct nvme_ns *ns = hctx->queue->queuedata;    in nvme_loop_queue_rq()
    135  struct nvme_loop_queue *queue = hctx->driver_data;    in nvme_loop_queue_rq()
    218  static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,    in nvme_loop_init_hctx() (argument)
    232  blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);    in nvme_loop_init_hctx()
    234  hctx->driver_data = queue;    in nvme_loop_init_hctx()
    238  static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,    in nvme_loop_init_admin_hctx() (argument)
    246  hctx->driver_data = queue;    in nvme_loop_init_admin_hctx()

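nvme_loop_init_hctx() above is this list's one caller of blk_mq_hctx_set_fq_lock_class(): a stacked driver whose queues are driven from another queue's flush path gives its flush-queue lock a dedicated lockdep class, so nested flush locking is not flagged as recursion. A minimal sketch of the annotation:

    #include <linux/blk-mq.h>
    #include <linux/lockdep.h>

    static struct lock_class_key sketch_hctx_fq_lock_key;

    static int sketch_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                unsigned int hctx_idx)
    {
        /* distinguish this driver's fq->mq_flush_lock for lockdep */
        blk_mq_hctx_set_fq_lock_class(hctx, &sketch_hctx_fq_lock_key);
        hctx->driver_data = data;
        return 0;
    }
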
/linux-6.3-rc2/drivers/block/null_blk/ |
main.c
    1606  struct nullb_queue *nq = hctx->driver_data;    in null_poll()
    1634  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;    in null_timeout_rq() (local)
    1639  if (hctx->type == HCTX_TYPE_POLL) {    in null_timeout_rq()
    1640  struct nullb_queue *nq = hctx->driver_data;    in null_timeout_rq()
    1655  if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)    in null_timeout_rq()
    1664  struct nullb_queue *nq = hctx->driver_data;    in null_queue_rq()
    1667  const bool is_poll = hctx->type == HCTX_TYPE_POLL;    in null_queue_rq()
    1669  might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);    in null_queue_rq()
    1726  struct nullb_queue *nq = hctx->driver_data;    in null_exit_hctx()
    1744  struct nullb *nullb = hctx->queue->queuedata;    in null_init_hctx()
    [all …]

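null_blk above, and apple.c and scsi_lib.c below, implement the .poll hook used for HCTX_TYPE_POLL hardware queues: reap completions without interrupts and report how many requests finished. A sketch with a hypothetical reaping helper:

    #include <linux/blk-mq.h>

    struct sketch_queue;    /* per-hctx state kept in hctx->driver_data */

    static int sketch_reap_completions(struct sketch_queue *sq,
                                       struct io_comp_batch *iob)
    {
        return 0;    /* stand-in: a real driver walks its completion ring */
    }

    static int sketch_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
    {
        struct sketch_queue *sq = hctx->driver_data;

        /* return value: number of requests completed on this pass */
        return sketch_reap_completions(sq, iob);
    }

The hook only runs for hw queues mapped as HCTX_TYPE_POLL, which is why null_timeout_rq() above special-cases that queue type.
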
/linux-6.3-rc2/drivers/nvme/host/ |
apple.c
    732  static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,    in apple_nvme_queue_rq() (argument)
    735  struct nvme_ns *ns = hctx->queue->queuedata;    in apple_nvme_queue_rq()
    736  struct apple_nvme_queue *q = hctx->driver_data;    in apple_nvme_queue_rq()
    775  static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,    in apple_nvme_init_hctx() (argument)
    778  hctx->driver_data = data;    in apple_nvme_init_hctx()
    936  static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,    in apple_nvme_poll() (argument)
    939  struct apple_nvme_queue *q = hctx->driver_data;    in apple_nvme_poll()

/linux-6.3-rc2/drivers/scsi/ |
scsi_lib.c
    1703  static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,    in scsi_queue_rq() (argument)
    1846  static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)    in scsi_mq_poll() (argument)
    1848  struct Scsi_Host *shost = hctx->driver_data;    in scsi_mq_poll()
    1851  return shost->hostt->mq_poll(shost, hctx->queue_num);    in scsi_mq_poll()
    1856  static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,    in scsi_init_hctx() (argument)
    1861  hctx->driver_data = shost;    in scsi_init_hctx()
    1932  static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)    in scsi_commit_rqs() (argument)
    1934  struct Scsi_Host *shost = hctx->driver_data;    in scsi_commit_rqs()
    1936  shost->hostt->commit_rqs(shost, hctx->queue_num);    in scsi_commit_rqs()