Lines matching refs:q — cross-reference hits for the request_queue pointer q in the block layer's sysfs and registration code. Each entry gives the source line number, the matching code, and the enclosing function; "local" marks the line where q is declared.

70 struct request_queue *q = disk->queue; in queue_requests_store() local
72 if (!queue_is_mq(q)) in queue_requests_store()
79 memflags = blk_mq_freeze_queue(q); in queue_requests_store()
80 mutex_lock(&q->elevator_lock); in queue_requests_store()
87 mutex_unlock(&q->elevator_lock); in queue_requests_store()
88 blk_mq_unfreeze_queue(q, memflags); in queue_requests_store()
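
The queue_requests_store() hits above show the write-side pattern shared by most blk-mq sysfs attributes: reject bio-based queues, freeze the queue, take q->elevator_lock, apply the change, then unlock and unfreeze in reverse order. A minimal sketch of that shape follows; the handler signature, the kstrtoul() parsing and the elided depth update are assumptions, only the calls listed above are taken from the source.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static ssize_t queue_requests_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	struct request_queue *q = disk->queue;
	unsigned int memflags;
	unsigned long nr;
	int err;

	if (!queue_is_mq(q))			/* queue depth only exists for blk-mq */
		return -EINVAL;

	err = kstrtoul(page, 10, &nr);		/* assumed parsing step */
	if (err)
		return err;

	memflags = blk_mq_freeze_queue(q);	/* drain in-flight I/O, block new I/O */
	mutex_lock(&q->elevator_lock);		/* serialize against scheduler switches */

	/* ... resize the tag sets / update the queue depth here (elided) ... */

	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
	return count;
}
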
109 struct request_queue *q = disk->queue; in queue_ra_store() local
118 mutex_lock(&q->limits_lock); in queue_ra_store()
119 memflags = blk_mq_freeze_queue(q); in queue_ra_store()
121 mutex_unlock(&q->limits_lock); in queue_ra_store()
122 blk_mq_unfreeze_queue(q, memflags); in queue_ra_store()
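
queue_ra_store() adds q->limits_lock to the same freeze/unfreeze bracket, and the hit order above documents the nesting: the mutex is taken before the freeze and released before the unfreeze. Roughly, with the actual read-ahead update elided:

	mutex_lock(&q->limits_lock);		/* protects readahead/limits state */
	memflags = blk_mq_freeze_queue(q);

	/* ... install the new read-ahead size (elided) ... */

	mutex_unlock(&q->limits_lock);
	blk_mq_unfreeze_queue(q, memflags);
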
341 struct request_queue *q = disk->queue; in queue_nomerges_store() local
347 memflags = blk_mq_freeze_queue(q); in queue_nomerges_store()
348 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
349 blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
351 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
353 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
354 blk_mq_unfreeze_queue(q, memflags); in queue_nomerges_store()
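
queue_nomerges_store() toggles two queue flags while frozen. The usual sysfs encoding is 0 = all merges allowed, 1 = only simple merges (extended merging off), 2 = no merging at all; nm below stands for the parsed value and is an assumption, the flag calls are the ones listed:

	memflags = blk_mq_freeze_queue(q);

	/* start from "merges allowed", then apply the requested restriction */
	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)				/* 2: disable merging entirely */
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)				/* 1: disable only extended merges */
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	blk_mq_unfreeze_queue(q, memflags);
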
372 struct request_queue *q = disk->queue; in queue_rq_affinity_store() local
386 memflags = blk_mq_freeze_queue(q); in queue_rq_affinity_store()
388 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
389 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
391 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
392 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
394 blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
395 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
397 blk_mq_unfreeze_queue(q, memflags); in queue_rq_affinity_store()
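
queue_rq_affinity_store() follows the same shape with a three-way value: 2 forces completion on the submitting CPU (both flags set), 1 settles for the submitting CPU's group (SAME_COMP only), 0 disables completion affinity. Sketch, with val as the parsed value (an assumption):

	memflags = blk_mq_freeze_queue(q);
	if (val == 2) {				/* force completion on the submitting CPU */
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {			/* same CPU group is good enough */
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else {				/* 0: no completion affinity */
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	blk_mq_unfreeze_queue(q, memflags);
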
413 struct request_queue *q = disk->queue; in queue_poll_store() local
415 memflags = blk_mq_freeze_queue(q); in queue_poll_store()
416 if (!(q->limits.features & BLK_FEAT_POLL)) { in queue_poll_store()
424 blk_mq_unfreeze_queue(q, memflags); in queue_poll_store()
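
queue_poll_store() only references the queue twice: the freeze bracket and a feature-bit test. The visible shape is below; the -EINVAL for queues without BLK_FEAT_POLL is an assumption:

	memflags = blk_mq_freeze_queue(q);
	if (!(q->limits.features & BLK_FEAT_POLL)) {
		ret = -EINVAL;			/* assumed: this queue cannot poll */
		goto out;
	}

	/* ... handle the written value (elided) ... */
out:
	blk_mq_unfreeze_queue(q, memflags);
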
439 struct request_queue *q = disk->queue; in queue_io_timeout_store() local
445 memflags = blk_mq_freeze_queue(q); in queue_io_timeout_store()
446 blk_queue_rq_timeout(q, msecs_to_jiffies(val)); in queue_io_timeout_store()
447 blk_mq_unfreeze_queue(q, memflags); in queue_io_timeout_store()
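
queue_io_timeout_store() is the simplest write path: parse a millisecond value, convert it to jiffies, and install it while frozen. The parsing and the rejection of zero below are assumptions:

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return err ?: -EINVAL;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_rq_timeout(q, msecs_to_jiffies(val));	/* sysfs value is in milliseconds */
	blk_mq_unfreeze_queue(q, memflags);
	return count;
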
589 struct request_queue *q = disk->queue; in queue_wb_lat_show() local
592 if (!wbt_rq_qos(q)) { in queue_wb_lat_show()
597 if (wbt_disabled(q)) { in queue_wb_lat_show()
602 ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000)); in queue_wb_lat_show()
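
On the read side, queue_wb_lat_show() reports the writeback-throttling latency target in microseconds; the stored value is in nanoseconds, hence the div_u64 by 1000. A sketch, assuming the block layer's private wbt header is available; the return values for the "no wbt" and "wbt disabled" branches are assumptions:

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	struct request_queue *q = disk->queue;

	if (!wbt_rq_qos(q))			/* wbt was never attached to this queue */
		return -EINVAL;			/* assumed error value */
	if (wbt_disabled(q))
		return sysfs_emit(page, "0\n");	/* assumed: report "off" */

	/* wbt_get_min_lat() returns nanoseconds; sysfs speaks microseconds */
	return sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}
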
611 struct request_queue *q = disk->queue; in queue_wb_lat_store() local
623 memflags = blk_mq_freeze_queue(q); in queue_wb_lat_store()
625 rqos = wbt_rq_qos(q); in queue_wb_lat_store()
634 val = wbt_default_latency_nsec(q); in queue_wb_lat_store()
638 if (wbt_get_min_lat(q) == val) in queue_wb_lat_store()
646 blk_mq_quiesce_queue(q); in queue_wb_lat_store()
649 wbt_set_min_lat(q, val); in queue_wb_lat_store()
652 blk_mq_unquiesce_queue(q); in queue_wb_lat_store()
654 blk_mq_unfreeze_queue(q, memflags); in queue_wb_lat_store()
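
queue_wb_lat_store() layers quiescing on top of the freeze: freezing stops new requests from entering, quiescing additionally waits out the driver's dispatch paths before the latency target is swapped. The value handling below (-1 selects the default target, positive values are microseconds scaled to nanoseconds) is a paraphrase around the listed calls:

	memflags = blk_mq_freeze_queue(q);

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		/* ... wbt not attached yet: set it up or bail out (elided) ... */
	}

	if (val == -1)				/* -1: fall back to the default target */
		val = wbt_default_latency_nsec(q);
	else
		val *= 1000ULL;			/* sysfs value is in microseconds */

	if (wbt_get_min_lat(q) == val)		/* nothing to change */
		goto out;

	blk_mq_quiesce_queue(q);		/* also wait out dispatch/completion paths */
	wbt_set_min_lat(q, val);
	blk_mq_unquiesce_queue(q);
out:
	blk_mq_unfreeze_queue(q, memflags);
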
745 struct request_queue *q = disk->queue; in queue_attr_visible() local
749 !blk_queue_is_zoned(q)) in queue_attr_visible()
759 struct request_queue *q = disk->queue; in blk_mq_queue_attr_visible() local
761 if (!queue_is_mq(q)) in blk_mq_queue_attr_visible()
764 if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout) in blk_mq_queue_attr_visible()
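
The two *_attr_visible() callbacks decide which sysfs files exist at all: zone-related attributes are hidden on non-zoned queues, and the blk-mq group (including io_timeout, which additionally needs a driver ->timeout hook) is hidden for bio-based queues. The mq variant, sketched; how the gendisk is recovered from the kobject is an assumption:

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	/* assumed: the queue directory kobject is embedded in the gendisk */
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))			/* hide the whole group for bio-based queues */
		return 0;
	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;			/* no timeout handler, no io_timeout file */

	return attr->mode;
}
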
809 struct request_queue *q = disk->queue; in queue_attr_store() local
817 struct queue_limits lim = queue_limits_start_update(q); in queue_attr_store()
821 queue_limits_cancel_update(q); in queue_attr_store()
825 res = queue_limits_commit_update_frozen(q, &lim); in queue_attr_store()
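
queue_attr_store() shows the queue_limits update API used by the limit-backed attributes: queue_limits_start_update() returns a private copy of the limits (and takes q->limits_lock), the attribute edits that copy, and the caller then either cancels or commits; the _frozen variant freezes the queue around the commit itself. The attribute dispatch is elided below, and apply_attr_to_limits() is a hypothetical stand-in for it:

	struct queue_limits lim = queue_limits_start_update(q);	/* takes limits_lock */
	ssize_t res;

	res = apply_attr_to_limits(&lim, page, count);	/* hypothetical per-attribute edit */
	if (res < 0) {
		queue_limits_cancel_update(q);	/* drop the lock, discard the copy */
		return res;
	}

	/* freezes the queue, validates and publishes the new limits, unfreezes */
	res = queue_limits_commit_update_frozen(q, &lim);
	if (res)
		return res;
	return count;
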
858 struct request_queue *q = disk->queue; in blk_debugfs_remove() local
860 mutex_lock(&q->debugfs_mutex); in blk_debugfs_remove()
861 blk_trace_shutdown(q); in blk_debugfs_remove()
862 debugfs_remove_recursive(q->debugfs_dir); in blk_debugfs_remove()
863 q->debugfs_dir = NULL; in blk_debugfs_remove()
864 q->sched_debugfs_dir = NULL; in blk_debugfs_remove()
865 q->rqos_debugfs_dir = NULL; in blk_debugfs_remove()
866 mutex_unlock(&q->debugfs_mutex); in blk_debugfs_remove()
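
blk_debugfs_remove() appears above almost in full; reassembled, it is the debugfs teardown: under debugfs_mutex, shut down any active blktrace, remove the directory tree recursively, and clear the cached dentry pointers so later users see them as gone.

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);			/* stop blktrace before its files vanish */
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}
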
875 struct request_queue *q = disk->queue; in blk_register_queue() local
882 if (queue_is_mq(q)) { in blk_register_queue()
887 mutex_lock(&q->sysfs_lock); in blk_register_queue()
889 mutex_lock(&q->debugfs_mutex); in blk_register_queue()
890 q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root); in blk_register_queue()
891 if (queue_is_mq(q)) in blk_register_queue()
892 blk_mq_debugfs_register(q); in blk_register_queue()
893 mutex_unlock(&q->debugfs_mutex); in blk_register_queue()
903 if (queue_is_mq(q)) in blk_register_queue()
904 elevator_set_default(q); in blk_register_queue()
906 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); in blk_register_queue()
911 if (q->elevator) in blk_register_queue()
912 kobject_uevent(&q->elevator->kobj, KOBJ_ADD); in blk_register_queue()
913 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
924 blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
925 percpu_ref_switch_to_percpu(&q->q_usage_counter); in blk_register_queue()
933 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
934 if (queue_is_mq(q)) in blk_register_queue()
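
blk_register_queue(), reduced to the references above, is mostly an ordering story: under sysfs_lock, create the debugfs directory (plus the blk-mq debugfs files for mq queues), attach the default elevator, mark the queue REGISTERED and announce the elevator kobject; only after sysfs_lock is dropped are INIT_DONE set and q_usage_counter switched back to percpu mode, now that the registration-time freezes are over. Condensed, with error handling and the sysfs kobject setup elided:

	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		elevator_set_default(q);	/* attach the default scheduler */

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	/* registration-time freezes are done; go back to cheap percpu counting */
	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
	percpu_ref_switch_to_percpu(&q->q_usage_counter);
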
950 struct request_queue *q = disk->queue; in blk_unregister_queue() local
952 if (WARN_ON(!q)) in blk_unregister_queue()
956 if (!blk_queue_registered(q)) in blk_unregister_queue()
964 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
965 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); in blk_unregister_queue()
966 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
972 if (queue_is_mq(q)) in blk_unregister_queue()
976 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
978 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
984 if (queue_is_mq(q)) in blk_unregister_queue()
985 elevator_set_none(q); in blk_unregister_queue()
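
blk_unregister_queue() mirrors that: bail out if registration never completed, clear REGISTERED under sysfs_lock first so new sysfs stores stop getting in, then tear down the mq sysfs/debugfs state and finally detach the elevator. Condensed to the references shown:

	if (WARN_ON(!q))
		return;
	if (!blk_queue_registered(q))		/* never made it through registration */
		return;

	/* stop accepting sysfs-driven changes before tearing anything down */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	if (queue_is_mq(q)) {
		/* ... mq sysfs/debugfs teardown (elided) ... */
	}

	mutex_lock(&q->sysfs_lock);
	/* ... elevator sysfs teardown (elided) ... */
	mutex_unlock(&q->sysfs_lock);

	if (queue_is_mq(q))
		elevator_set_none(q);		/* detach the scheduler last */
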