
Searched refs: q (results 1 – 25 of 52), sorted by relevance. Each hit shows the source line number, the matching code, and the enclosing function; the trailing "local", "argument", and "member" tags mark what kind of declaration of q was matched.


/block/
blk-pm.c
31 q->dev = dev; in blk_pm_runtime_init()
32 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
63 if (!q->dev) in blk_pre_runtime_suspend()
78 blk_set_pm_only(q); in blk_pre_runtime_suspend()
81 blk_freeze_queue_start(q); in blk_pre_runtime_suspend()
100 blk_clear_pm_only(q); in blk_pre_runtime_suspend()
122 if (!q->dev) in blk_post_runtime_suspend()
135 blk_clear_pm_only(q); in blk_post_runtime_suspend()
152 if (!q->dev) in blk_pre_runtime_resume()
178 if (!q->dev) in blk_post_runtime_resume()
[all …]
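
The blk-pm.c hits cover the queue side of block-layer runtime PM: blk_pm_runtime_init() records the device and marks it RPM_ACTIVE, and the pre/post suspend hooks gate the queue via pm-only mode and a freeze. A minimal caller sketch, assuming a live queue and device; the helper name and the autosuspend delay are illustrative, not from the source:

        #include <linux/blk-pm.h>
        #include <linux/pm_runtime.h>

        /* Hypothetical driver helper: hand the queue over to runtime PM,
         * roughly as the SCSI disk driver does at probe time. */
        static void my_enable_runtime_pm(struct request_queue *q, struct device *dev)
        {
                blk_pm_runtime_init(q, dev);    /* q->dev = dev, q->rpm_status = RPM_ACTIVE */
                pm_runtime_set_autosuspend_delay(dev, 5000);    /* illustrative 5 s idle window */
                pm_runtime_use_autosuspend(dev);
                pm_runtime_allow(dev);
        }
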
elevator.c
75 struct request_queue *q = rq->q; in elv_iosched_allow_bio_merge() local
170 ioc_clear_queue(q); in elevator_exit()
298 if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { in elv_merge()
348 if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) { in elv_attempt_insert_merge()
590 if (q->elevator) { in elevator_switch()
592 elevator_exit(q); in elevator_switch()
603 q->nr_requests = q->tag_set->queue_depth; in elevator_switch()
627 e = q->elevator; in elv_exit_and_release()
628 elevator_exit(q); in elv_exit_and_release()
689 if (!(q->elevator && elevator_match(q->elevator->type, ctx->name))) in elevator_change()
[all …]
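
Several elevator.c hits (elv_merge(), elv_attempt_insert_merge()) go through the one-entry merge cache q->last_merge before any hash lookup. A hedged sketch of that fast path in isolation, assuming the block/ internal headers; my_try_last_merge() is a hypothetical wrapper, not a kernel function:

        #include "elevator.h"   /* elv_bio_merge_ok(), enum elv_merge */
        #include "blk.h"        /* blk_try_merge() */

        /* Retry the most recently merged request first; return
         * ELEVATOR_NO_MERGE so the caller can fall back to the hash lookup. */
        static enum elv_merge my_try_last_merge(struct request_queue *q, struct bio *bio)
        {
                if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio))
                        return blk_try_merge(q->last_merge, bio);
                return ELEVATOR_NO_MERGE;
        }
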
blk-core.c
260 if (queue_is_mq(q)) in blk_free_queue()
384 struct request_queue *q = timer_container_of(q, t, timeout); in blk_rq_timed_out_timer() local
400 if (!q) in blk_alloc_queue()
406 if (q->id < 0) { in blk_alloc_queue()
407 error = q->id; in blk_alloc_queue()
412 if (!q->stats) { in blk_alloc_queue()
420 q->limits = *lim; in blk_alloc_queue()
422 q->node = node_id; in blk_alloc_queue()
467 return q; in blk_alloc_queue()
1099 if (queue_is_mq(q) && q->mq_ops->busy) in blk_lld_busy()
[all …]
blk-mq.c
124 q->mq_freeze_disk_dead = !q->disk || in blk_freeze_set_owner()
377 rq->q = q; in blk_rq_init()
407 rq->q = q; in blk_mq_rq_ctx_init()
575 .q = q, in blk_mq_rq_cache_fill()
616 if (!rq || rq->q != q) in blk_mq_alloc_cached_request()
641 .q = q, in blk_mq_alloc_request()
675 .q = q, in blk_mq_alloc_request_hctx()
754 struct request_queue *q = rq->q; in blk_mq_finish_request() local
771 struct request_queue *q = rq->q; in __blk_mq_free_request() local
3021 .q = q, in blk_mq_get_new_requests()
[all …]
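
Most blk-mq.c hits sit on the request-allocation path, where rq->q is wired up in blk_rq_init()/blk_mq_rq_ctx_init() and the allocation data carries .q = q. A minimal external-caller sketch using the exported API; my_alloc_one() is hypothetical:

        #include <linux/blk-mq.h>

        /* Allocate one passthrough request without sleeping, then free it.
         * An ERR_PTR return signals a frozen/dying queue or tag exhaustion. */
        static int my_alloc_one(struct request_queue *q)
        {
                struct request *rq;

                rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);
                /* ... fill in a driver-private payload here ... */
                blk_mq_free_request(rq);
                return 0;
        }
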
blk-mq-sched.c
349 ctx = blk_mq_get_ctx(q); in blk_mq_sched_bio_merge()
387 q->sched_shared_tags = NULL; in blk_mq_sched_tags_teardown()
433 struct request_queue *q; in blk_mq_free_sched_tags_batch() local
446 if (q->elevator) { in blk_mq_free_sched_tags_batch()
509 struct request_queue *q; in blk_mq_alloc_sched_tags_batch() local
523 if (q->elevator) { in blk_mq_alloc_sched_tags_batch()
536 if (q->elevator) { in blk_mq_alloc_sched_tags_batch()
582 blk_mq_exit_sched(q, eq); in blk_mq_init_sched()
593 q->elevator = NULL; in blk_mq_init_sched()
607 blk_mq_free_rqs(q->tag_set, q->sched_shared_tags, in blk_mq_sched_free_rqs()
[all …]
blk-rq-qos.h
75 return rq_qos_id(q, RQ_QOS_WBT); in wbt_rq_qos()
80 return rq_qos_id(q, RQ_QOS_LATENCY); in iolat_rq_qos()
117 __rq_qos_cleanup(q->rq_qos, bio); in rq_qos_cleanup()
124 __rq_qos_done(q->rq_qos, rq); in rq_qos_done()
130 __rq_qos_issue(q->rq_qos, rq); in rq_qos_issue()
136 __rq_qos_requeue(q->rq_qos, rq); in rq_qos_requeue()
145 if (q->rq_qos) in rq_qos_done_bio()
146 __rq_qos_done_bio(q->rq_qos, bio); in rq_qos_done_bio()
154 __rq_qos_throttle(q->rq_qos, bio); in rq_qos_throttle()
162 __rq_qos_track(q->rq_qos, rq, bio); in rq_qos_track()
[all …]
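
Every hook in blk-rq-qos.h has the same shape: an inline NULL check on q->rq_qos at the call site, with the policy-list walk kept out of line. One representative wrapper, reproduced from the pattern the hits show:

        /* Cheap fast path: most queues have no QoS policy attached, so the
         * common case is a single pointer test and no function call. */
        static inline void rq_qos_done(struct request_queue *q, struct request *rq)
        {
                if (q->rq_qos)
                        __rq_qos_done(q->rq_qos, rq);
        }
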
blk-sysfs.c
72 if (!queue_is_mq(q)) in queue_requests_store()
592 if (!wbt_rq_qos(q)) { in queue_wb_lat_show()
625 rqos = wbt_rq_qos(q); in queue_wb_lat_store()
761 if (!queue_is_mq(q)) in blk_mq_queue_attr_visible()
891 if (queue_is_mq(q)) in blk_register_queue()
903 if (queue_is_mq(q)) in blk_register_queue()
911 if (q->elevator) in blk_register_queue()
934 if (queue_is_mq(q)) in blk_register_queue()
952 if (WARN_ON(!q)) in blk_unregister_queue()
972 if (queue_is_mq(q)) in blk_unregister_queue()
[all …]
blk-mq-sysfs.c
54 struct request_queue *q; in blk_mq_hw_sysfs_show() local
59 q = hctx->queue; in blk_mq_hw_sysfs_show()
64 mutex_lock(&q->elevator_lock); in blk_mq_hw_sysfs_show()
66 mutex_unlock(&q->elevator_lock); in blk_mq_hw_sysfs_show()
201 kobject_put(q->mq_kobj); in blk_mq_sysfs_deinit()
214 kobject_get(q->mq_kobj); in blk_mq_sysfs_init()
249 kobject_del(q->mq_kobj); in blk_mq_sysfs_register()
260 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_sysfs_unregister()
265 kobject_del(q->mq_kobj); in blk_mq_sysfs_unregister()
273 if (!blk_queue_registered(q)) in blk_mq_sysfs_unregister_hctxs()
[all …]
blk-mq.h
47 void blk_mq_exit_queue(struct request_queue *q);
84 return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]); in blk_mq_map_queue_type()
132 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
148 struct request_queue *q; member
254 if (q->mq_ops->put_budget) in blk_mq_put_dispatch_budget()
255 q->mq_ops->put_budget(q, budget_token); in blk_mq_put_dispatch_budget()
260 if (q->mq_ops->get_budget) in blk_mq_get_dispatch_budget()
261 return q->mq_ops->get_budget(q); in blk_mq_get_dispatch_budget()
270 if (rq->q->mq_ops->set_rq_budget_token) in blk_mq_set_rq_budget_token()
276 if (rq->q->mq_ops->get_rq_budget_token) in blk_mq_get_rq_budget_token()
[all …]
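
The budget helpers in blk-mq.h guard on the optional mq_ops callbacks, so drivers without dispatch budgeting (most of them) pay only a pointer test. Reconstructed from the lines above:

        /* A token of 0 means "no budget tracking" when the driver does not
         * implement the hook; SCSI is the main user of real budgets. */
        static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
        {
                if (q->mq_ops->get_budget)
                        return q->mq_ops->get_budget(q);
                return 0;
        }

        static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
                                                      int budget_token)
        {
                if (q->mq_ops->put_budget)
                        q->mq_ops->put_budget(q, budget_token);
        }
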
blk-mq-debugfs.c
23 __acquires(&q->requeue_lock) in queue_requeue_list_start()
27 spin_lock_irq(&q->requeue_lock); in queue_requeue_list_start()
39 __releases(&q->requeue_lock) in queue_requeue_list_stop()
122 if (blk_queue_dying(q)) in queue_state_write()
625 debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs); in blk_mq_debugfs_register()
632 if (q->rq_qos) { in blk_mq_debugfs_register()
661 if (!q->debugfs_dir) in blk_mq_debugfs_register_hctx()
710 if (!q->debugfs_dir) in blk_mq_debugfs_register_sched()
716 q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir); in blk_mq_debugfs_register_sched()
718 debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs); in blk_mq_debugfs_register_sched()
[all …]
blk-ioc.c
69 struct request_queue *q = icq->q; in ioc_destroy_icq() local
114 struct request_queue *q = icq->q; in ioc_release_fn() local
118 spin_unlock(&q->queue_lock); in ioc_release_fn()
125 spin_lock(&q->queue_lock); in ioc_release_fn()
331 if (icq && icq->q == q) in ioc_lookup_icq()
335 if (icq && icq->q == q) in ioc_lookup_icq()
363 q->node); in ioc_create_icq()
373 icq->q = q; in ioc_create_icq()
388 icq = ioc_lookup_icq(q); in ioc_create_icq()
421 icq = ioc_lookup_icq(q); in ioc_find_get_icq()
[all …]
blk.h
70 if (blk_queue_pm_only(q) && in blk_try_enter_queue()
78 blk_queue_exit(q); in blk_try_enter_queue()
93 return __bio_queue_enter(q, bio); in bio_queue_enter()
208 struct request_queue *q = rq->q; in blk_queue_get_max_sectors() local
221 return q->limits.max_sectors; in blk_queue_get_max_sectors()
458 if (req == q->last_merge) in req_set_nomerge()
459 q->last_merge = NULL; in req_set_nomerge()
738 if (!q->mq_freeze_disk_dead) in blk_freeze_acquire_lock()
740 if (!q->mq_freeze_queue_dying) in blk_freeze_acquire_lock()
746 if (!q->mq_freeze_queue_dying) in blk_unfreeze_release_lock()
[all …]
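
The blk.h hits trace the queue-enter fast path: blk_try_enter_queue() takes a percpu reference unless the queue is frozen or in pm-only mode, and bio_queue_enter() only drops to the blocking slow path when that fails. The fast-path wrapper, as the header defines it:

        static inline int bio_queue_enter(struct bio *bio)
        {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);

                /* Lockless percpu-ref grab in the common case ... */
                if (blk_try_enter_queue(q, false))
                        return 0;
                /* ... otherwise sleep until the queue is usable again. */
                return __bio_queue_enter(q, bio);
        }
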
blk-merge.c
457 struct request_queue *q = rq->q; in blk_rq_get_max_sectors() local
503 req_set_nomerge(req->q, req); in ll_new_hw_segment()
518 req_set_nomerge(req->q, req); in ll_back_merge_fn()
537 req_set_nomerge(req->q, req); in ll_front_merge_fn()
558 req_set_nomerge(q, req); in req_attempt_discard_merge()
864 rq_qos_merge(req->q, req, bio); in bio_attempt_back_merge()
901 rq_qos_merge(req->q, req, bio); in bio_attempt_front_merge()
931 rq_qos_merge(q, req, bio); in bio_attempt_discard_merge()
941 req_set_nomerge(q, req); in bio_attempt_discard_merge()
1002 if (rq->q == q) in blk_attempt_plug_merge()
[all …]
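
req_set_nomerge(), called on each of these merge-failure paths, is the small latch defined in blk.h (lines 458 to 459 above): mark the request unmergeable and evict it from the last_merge cache so later bios stop retrying it:

        static inline void req_set_nomerge(struct request_queue *q, struct request *req)
        {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
        }
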
blk-stat.c
52 struct request_queue *q = rq->q; in blk_stat_add() local
148 spin_lock_irqsave(&q->stats->lock, flags); in blk_stat_add_callback()
150 blk_queue_flag_set(QUEUE_FLAG_STATS, q); in blk_stat_add_callback()
159 spin_lock_irqsave(&q->stats->lock, flags); in blk_stat_remove_callback()
161 if (list_empty(&q->stats->callbacks) && !q->stats->accounting) in blk_stat_remove_callback()
162 blk_queue_flag_clear(QUEUE_FLAG_STATS, q); in blk_stat_remove_callback()
188 spin_lock_irqsave(&q->stats->lock, flags); in blk_stat_disable_accounting()
189 if (!--q->stats->accounting && list_empty(&q->stats->callbacks)) in blk_stat_disable_accounting()
199 spin_lock_irqsave(&q->stats->lock, flags); in blk_stat_enable_accounting()
200 if (!q->stats->accounting++ && list_empty(&q->stats->callbacks)) in blk_stat_enable_accounting()
[all …]
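
The blk-stat.c hits show QUEUE_FLAG_STATS being refcounted under q->stats->lock: the flag stays set while any callback or accounting user exists. The enable side, reconstructed from lines 199 to 201 above:

        void blk_stat_enable_accounting(struct request_queue *q)
        {
                unsigned long flags;

                spin_lock_irqsave(&q->stats->lock, flags);
                /* First user, and no callbacks yet: turn stats collection on. */
                if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
                        blk_queue_flag_set(QUEUE_FLAG_STATS, q);
                spin_unlock_irqrestore(&q->stats->lock, flags);
        }
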
blk-mq-debugfs.h
23 void blk_mq_debugfs_register(struct request_queue *q);
24 void blk_mq_debugfs_register_hctx(struct request_queue *q,
27 void blk_mq_debugfs_register_hctxs(struct request_queue *q);
28 void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
30 void blk_mq_debugfs_register_sched(struct request_queue *q);
31 void blk_mq_debugfs_unregister_sched(struct request_queue *q);
32 void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
39 static inline void blk_mq_debugfs_register(struct request_queue *q) in blk_mq_debugfs_register() argument
43 static inline void blk_mq_debugfs_register_hctx(struct request_queue *q, in blk_mq_debugfs_register_hctx() argument
52 static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q) in blk_mq_debugfs_register_hctxs() argument
[all …]
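
This header pairs each prototype with an empty static-inline stub, so callers never need CONFIG_BLK_DEBUG_FS ifdefs of their own. The shape, abbreviated to one function:

        #ifdef CONFIG_BLK_DEBUG_FS
        void blk_mq_debugfs_register(struct request_queue *q);
        #else
        static inline void blk_mq_debugfs_register(struct request_queue *q)
        {
        }
        #endif
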
blk-flush.c
152 struct request_queue *q = rq->q; in blk_flush_complete_seq() local
176 spin_lock(&q->requeue_lock); in blk_flush_complete_seq()
204 struct request_queue *q = flush_rq->q; in flush_end_io() local
231 if (!q->elevator) { in flush_end_io()
299 blk_rq_init(q, flush_rq); in blk_kick_flush()
312 if (!q->elevator) in blk_kick_flush()
330 spin_lock(&q->requeue_lock); in blk_kick_flush()
334 blk_mq_kick_requeue_list(q); in blk_kick_flush()
340 struct request_queue *q = rq->q; in mq_flush_data_end_io() local
346 if (q->elevator) { in mq_flush_data_end_io()
[all …]
blk-rq-qos.c
317 mutex_lock(&q->rq_qos_mutex); in rq_qos_exit()
318 while (q->rq_qos) { in rq_qos_exit()
319 struct rq_qos *rqos = q->rq_qos; in rq_qos_exit()
320 q->rq_qos = rqos->next; in rq_qos_exit()
324 mutex_unlock(&q->rq_qos_mutex); in rq_qos_exit()
345 if (rq_qos_id(q, rqos->id)) in rq_qos_add()
347 rqos->next = q->rq_qos; in rq_qos_add()
348 q->rq_qos = rqos; in rq_qos_add()
354 mutex_lock(&q->debugfs_mutex); in rq_qos_add()
382 mutex_lock(&q->debugfs_mutex); in rq_qos_del()
[all …]
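
rq_qos policies hang off q->rq_qos as a singly linked list: rq_qos_add() pushes at the head, and rq_qos_exit() pops until empty, all under q->rq_qos_mutex. The teardown loop, reconstructed from lines 317 to 324 above (assuming every policy supplies an ->exit hook, as the in-tree ones do):

        void rq_qos_exit(struct request_queue *q)
        {
                mutex_lock(&q->rq_qos_mutex);
                while (q->rq_qos) {
                        struct rq_qos *rqos = q->rq_qos;

                        q->rq_qos = rqos->next;    /* unlink the head ... */
                        rqos->ops->exit(rqos);     /* ... then let the policy free itself */
                }
                mutex_unlock(&q->rq_qos_mutex);
        }
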
blk-cgroup.c
123 struct request_queue *q = blkg->q; in blkg_free_workfn() local
144 blk_put_queue(q); in blkg_free_workfn()
618 q->root_blkg = NULL; in blkg_destroy_all()
688 if (!blkg->q->disk) in blkg_dev_name()
884 q = disk->queue; in blkg_conf_prep()
960 blk_queue_exit(q); in blkg_conf_prep()
969 blk_queue_exit(q); in blkg_conf_prep()
1332 struct request_queue *q = blkg->q; in blkcg_destroy_blkgs() local
1615 if (queue_is_mq(q)) in blkcg_activate_policy()
1679 if (queue_is_mq(q)) in blkcg_activate_policy()
[all …]
mq-deadline.c
184 elv_rqhash_del(q, rq); in deadline_remove_request()
185 if (q->last_merge == rq) in deadline_remove_request()
186 q->last_merge = NULL; in deadline_remove_request()
589 q->elevator = eq; in dd_init_sched()
680 elv_rqhash_add(q, rq); in dd_insert_request()
681 if (!q->last_merge) in dd_insert_request()
682 q->last_merge = rq; in dd_insert_request()
889 struct request_queue *q = data; in deadline_batching_show() local
898 struct request_queue *q = data; in deadline_starved_show() local
907 struct request_queue *q = data; in dd_async_depth_show() local
[all …]
blk-timeout.c
23 bool __blk_should_fake_timeout(struct request_queue *q) in __blk_should_fake_timeout() argument
55 struct request_queue *q = disk->queue; in part_timeout_store() local
60 blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q); in part_timeout_store()
62 blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); in part_timeout_store()
87 kblockd_schedule_work(&req->q->timeout_work); in blk_abort_request()
130 struct request_queue *q = req->q; in blk_add_timer() local
138 req->timeout = q->rq_timeout; in blk_add_timer()
152 if (!timer_pending(&q->timeout) || in blk_add_timer()
153 time_before(expiry, q->timeout.expires)) { in blk_add_timer()
154 unsigned long diff = q->timeout.expires - expiry; in blk_add_timer()
[all …]
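
blk_add_timer() shares one timeout timer per queue, so it only re-arms when the timer is idle or would fire after the new request's deadline. A hedged simplification of the test at lines 152 to 154 above, dropping the slack-tolerance check the real code applies before mod_timer(); my_arm_queue_timer() is hypothetical:

        /* Hypothetical helper: pull the shared queue timer earlier if needed. */
        static void my_arm_queue_timer(struct request_queue *q, unsigned long expiry)
        {
                if (!timer_pending(&q->timeout) ||
                    time_before(expiry, q->timeout.expires))
                        mod_timer(&q->timeout, expiry);
        }
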
blk-mq-sched.h
10 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
12 bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
21 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
23 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
24 void blk_mq_sched_free_rqs(struct request_queue *q);
47 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, in blk_mq_sched_allow_merge() argument
51 struct elevator_queue *e = q->elevator; in blk_mq_sched_allow_merge()
54 return e->type->ops.allow_merge(q, rq, bio); in blk_mq_sched_allow_merge()
62 struct elevator_queue *e = rq->q->elevator; in blk_mq_sched_completed_request()
72 struct request_queue *q = rq->q; in blk_mq_sched_requeue_request() local
[all …]
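
blk_mq_sched_allow_merge() is the scheduler's veto point on the merge path: it forwards to the elevator's allow_merge op when one exists and defaults to permitting the merge. One plausible reconstruction from lines 47 to 54 above (the exact guard differs across kernel versions):

        static inline bool blk_mq_sched_allow_merge(struct request_queue *q,
                                                    struct request *rq,
                                                    struct bio *bio)
        {
                struct elevator_queue *e = q->elevator;

                if (e && e->type->ops.allow_merge)
                        return e->type->ops.allow_merge(q, rq, bio);
                return true;    /* no scheduler objection: merging is fine */
        }
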
blk-mq-tag.c
49 struct request_queue *q = hctx->queue; in __blk_mq_tag_busy() local
87 struct request_queue *q = hctx->queue; in __blk_mq_tag_idle() local
90 &q->queue_flags)) in __blk_mq_tag_idle()
192 data->ctx = blk_mq_get_ctx(data->q); in blk_mq_get_tag()
246 struct request_queue *q; member
270 struct request_queue *q = iter_data->q; in bt_iter() local
291 if (rq->q == q && (!hctx || rq->mq_hctx == hctx)) in bt_iter()
321 .q = q, in bt_for_each()
519 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_queue_tag_busy_iter()
536 blk_queue_exit(q); in blk_mq_queue_tag_busy_iter()
[all …]
bsg-lib.c
275 struct request_queue *q = hctx->queue; in bsg_queue_rq() local
276 struct device *dev = q->queuedata; in bsg_queue_rq()
322 if (q) { in bsg_remove_queue()
327 blk_mq_destroy_queue(q); in bsg_remove_queue()
328 blk_put_queue(q); in bsg_remove_queue()
368 struct request_queue *q; in bsg_setup_queue() local
389 if (IS_ERR(q)) { in bsg_setup_queue()
390 ret = PTR_ERR(q); in bsg_setup_queue()
402 return q; in bsg_setup_queue()
404 blk_mq_destroy_queue(q); in bsg_setup_queue()
[all …]
blk-pm.h
9 static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q) in blk_pm_resume_queue() argument
11 if (!q->dev || !blk_queue_pm_only(q)) in blk_pm_resume_queue()
13 if (pm && q->rpm_status != RPM_SUSPENDED) in blk_pm_resume_queue()
15 pm_request_resume(q->dev); in blk_pm_resume_queue()
21 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
22 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_mark_last_busy()
25 static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q) in blk_pm_resume_queue() argument
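
The blk-pm.h excerpt is nearly the whole helper; completed, it reads as below. It tells the dispatch path whether a request may proceed while the queue is in pm-only mode, kicking an asynchronous runtime resume when the device is asleep:

        static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
        {
                if (!q->dev || !blk_queue_pm_only(q))
                        return 1;       /* runtime PM not active: nothing to do */
                if (pm && q->rpm_status != RPM_SUSPENDED)
                        return 1;       /* PM request and device not fully asleep */
                pm_request_resume(q->dev);      /* wake the device asynchronously */
                return 0;               /* caller must wait for the resume */
        }
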
blk-integrity.c
39 if (!biovec_phys_mergeable(q, &ivprv, &iv)) in blk_rq_count_integrity_sg()
41 if (seg_size + iv.bv_len > queue_max_segment_size(q)) in blk_rq_count_integrity_sg()
138 struct request_queue *q = rq->q; in blk_rq_map_integrity_sg() local
147 if (!biovec_phys_mergeable(q, &ivprv, &iv)) in blk_rq_map_integrity_sg()
178 BUG_ON(segments > queue_max_integrity_segments(q)); in blk_rq_map_integrity_sg()
214 q->limits.max_integrity_segments) in blk_integrity_merge_rq()
237 nr_integrity_segs = blk_rq_count_integrity_sg(q, bio); in blk_integrity_merge_bio()
239 q->limits.max_integrity_segments) in blk_integrity_merge_bio()
276 struct request_queue *q = dev_to_disk(dev)->queue; in flag_store() local
286 lim = queue_limits_start_update(q); in flag_store()
[all …]
