Lines matching references to: q

75 	struct request_queue *q = rq->q;  in elv_iosched_allow_bio_merge()  local
76 struct elevator_queue *e = q->elevator; in elv_iosched_allow_bio_merge()
79 return e->type->ops.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
136 struct elevator_queue *elevator_alloc(struct request_queue *q, in elevator_alloc() argument
141 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); in elevator_alloc()
164 static void elevator_exit(struct request_queue *q) in elevator_exit() argument
166 struct elevator_queue *e = q->elevator; in elevator_exit()
168 lockdep_assert_held(&q->elevator_lock); in elevator_exit()
170 ioc_clear_queue(q); in elevator_exit()
173 blk_mq_exit_sched(q, e); in elevator_exit()
183 void elv_rqhash_del(struct request_queue *q, struct request *rq) in elv_rqhash_del() argument
190 void elv_rqhash_add(struct request_queue *q, struct request *rq) in elv_rqhash_add() argument
192 struct elevator_queue *e = q->elevator; in elv_rqhash_add()
200 void elv_rqhash_reposition(struct request_queue *q, struct request *rq) in elv_rqhash_reposition() argument
203 elv_rqhash_add(q, rq); in elv_rqhash_reposition()
206 struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) in elv_rqhash_find() argument
208 struct elevator_queue *e = q->elevator; in elv_rqhash_find()
280 enum elv_merge elv_merge(struct request_queue *q, struct request **req, in elv_merge() argument
283 struct elevator_queue *e = q->elevator; in elv_merge()
292 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) in elv_merge()
298 if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { in elv_merge()
299 enum elv_merge ret = blk_try_merge(q->last_merge, bio); in elv_merge()
302 *req = q->last_merge; in elv_merge()
307 if (blk_queue_noxmerges(q)) in elv_merge()
313 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
323 return e->type->ops.request_merge(q, req, bio); in elv_merge()
336 bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq, in elv_attempt_insert_merge() argument
342 if (blk_queue_nomerges(q)) in elv_attempt_insert_merge()
348 if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) { in elv_attempt_insert_merge()
353 if (blk_queue_noxmerges(q)) in elv_attempt_insert_merge()
361 __rq = elv_rqhash_find(q, blk_rq_pos(rq)); in elv_attempt_insert_merge()
362 if (!__rq || !blk_attempt_req_merge(q, __rq, rq)) in elv_attempt_insert_merge()
374 void elv_merged_request(struct request_queue *q, struct request *rq, in elv_merged_request() argument
377 struct elevator_queue *e = q->elevator; in elv_merged_request()
380 e->type->ops.request_merged(q, rq, type); in elv_merged_request()
383 elv_rqhash_reposition(q, rq); in elv_merged_request()
385 q->last_merge = rq; in elv_merged_request()
388 void elv_merge_requests(struct request_queue *q, struct request *rq, in elv_merge_requests() argument
391 struct elevator_queue *e = q->elevator; in elv_merge_requests()
394 e->type->ops.requests_merged(q, rq, next); in elv_merge_requests()
396 elv_rqhash_reposition(q, rq); in elv_merge_requests()
397 q->last_merge = rq; in elv_merge_requests()
400 struct request *elv_latter_request(struct request_queue *q, struct request *rq) in elv_latter_request() argument
402 struct elevator_queue *e = q->elevator; in elv_latter_request()
405 return e->type->ops.next_request(q, rq); in elv_latter_request()
410 struct request *elv_former_request(struct request_queue *q, struct request *rq) in elv_former_request() argument
412 struct elevator_queue *e = q->elevator; in elv_former_request()
415 return e->type->ops.former_request(q, rq); in elv_former_request()
469 static int elv_register_queue(struct request_queue *q, in elv_register_queue() argument
475 error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched"); in elv_register_queue()
492 blk_mq_sched_reg_debugfs(q); in elv_register_queue()
498 static void elv_unregister_queue(struct request_queue *q, in elv_unregister_queue() argument
506 blk_mq_sched_unreg_debugfs(q); in elv_unregister_queue()
574 static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx) in elevator_switch() argument
579 WARN_ON_ONCE(q->mq_freeze_depth == 0); in elevator_switch()
580 lockdep_assert_held(&q->elevator_lock); in elevator_switch()
588 blk_mq_quiesce_queue(q); in elevator_switch()
590 if (q->elevator) { in elevator_switch()
591 ctx->old = q->elevator; in elevator_switch()
592 elevator_exit(q); in elevator_switch()
596 ret = blk_mq_init_sched(q, new_e, ctx->et); in elevator_switch()
599 ctx->new = q->elevator; in elevator_switch()
601 blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q); in elevator_switch()
602 q->elevator = NULL; in elevator_switch()
603 q->nr_requests = q->tag_set->queue_depth; in elevator_switch()
605 blk_add_trace_msg(q, "elv switch: %s", ctx->name); in elevator_switch()
608 blk_mq_unquiesce_queue(q); in elevator_switch()
620 static void elv_exit_and_release(struct request_queue *q) in elv_exit_and_release() argument
625 memflags = blk_mq_freeze_queue(q); in elv_exit_and_release()
626 mutex_lock(&q->elevator_lock); in elv_exit_and_release()
627 e = q->elevator; in elv_exit_and_release()
628 elevator_exit(q); in elv_exit_and_release()
629 mutex_unlock(&q->elevator_lock); in elv_exit_and_release()
630 blk_mq_unfreeze_queue(q, memflags); in elv_exit_and_release()
632 blk_mq_free_sched_tags(e->et, q->tag_set); in elv_exit_and_release()
637 static int elevator_change_done(struct request_queue *q, in elevator_change_done() argument
646 elv_unregister_queue(q, ctx->old); in elevator_change_done()
647 blk_mq_free_sched_tags(ctx->old->et, q->tag_set); in elevator_change_done()
650 wbt_enable_default(q->disk); in elevator_change_done()
653 ret = elv_register_queue(q, ctx->new, !ctx->no_uevent); in elevator_change_done()
655 elv_exit_and_release(q); in elevator_change_done()
663 static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx) in elevator_change() argument
666 struct blk_mq_tag_set *set = q->tag_set; in elevator_change()
677 memflags = blk_mq_freeze_queue(q); in elevator_change()
687 blk_mq_cancel_work_sync(q); in elevator_change()
688 mutex_lock(&q->elevator_lock); in elevator_change()
689 if (!(q->elevator && elevator_match(q->elevator->type, ctx->name))) in elevator_change()
690 ret = elevator_switch(q, ctx); in elevator_change()
691 mutex_unlock(&q->elevator_lock); in elevator_change()
692 blk_mq_unfreeze_queue(q, memflags); in elevator_change()
694 ret = elevator_change_done(q, ctx); in elevator_change()
708 void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e, in elv_update_nr_hw_queues() argument
711 struct blk_mq_tag_set *set = q->tag_set; in elv_update_nr_hw_queues()
715 WARN_ON_ONCE(q->mq_freeze_depth == 0); in elv_update_nr_hw_queues()
717 if (e && !blk_queue_dying(q) && blk_queue_registered(q)) { in elv_update_nr_hw_queues()
721 mutex_lock(&q->elevator_lock); in elv_update_nr_hw_queues()
723 ret = elevator_switch(q, &ctx); in elv_update_nr_hw_queues()
724 mutex_unlock(&q->elevator_lock); in elv_update_nr_hw_queues()
726 blk_mq_unfreeze_queue_nomemrestore(q); in elv_update_nr_hw_queues()
728 WARN_ON_ONCE(elevator_change_done(q, &ctx)); in elv_update_nr_hw_queues()
740 void elevator_set_default(struct request_queue *q) in elevator_set_default() argument
750 blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q); in elevator_set_default()
752 if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) in elevator_set_default()
764 if ((q->nr_hw_queues == 1 || in elevator_set_default()
765 blk_mq_is_shared_tags(q->tag_set->flags))) { in elevator_set_default()
766 err = elevator_change(q, &ctx); in elevator_set_default()
774 void elevator_set_none(struct request_queue *q) in elevator_set_none() argument
781 err = elevator_change(q, &ctx); in elevator_set_none()
804 struct request_queue *q = disk->queue; in elv_iosched_store() local
805 struct blk_mq_tag_set *set = q->tag_set; in elv_iosched_store()
808 if (!blk_queue_registered(q)) in elv_iosched_store()
822 if (!blk_queue_no_elv_switch(q)) { in elv_iosched_store()
823 ret = elevator_change(q, &ctx); in elv_iosched_store()
835 struct request_queue *q = disk->queue; in elv_iosched_show() local
839 mutex_lock(&q->elevator_lock); in elv_iosched_show()
840 if (!q->elevator) { in elv_iosched_show()
844 cur = q->elevator->type; in elv_iosched_show()
857 mutex_unlock(&q->elevator_lock); in elv_iosched_show()
862 struct request *elv_rb_former_request(struct request_queue *q, in elv_rb_former_request() argument
874 struct request *elv_rb_latter_request(struct request_queue *q, in elv_rb_latter_request() argument