Lines matching refs: q (cross-reference hits for the identifier q in block/blk-sysfs.c; the trailing "argument" and "local" markers are cscope annotations noting whether q is a function parameter or a local variable at that line)

64 static ssize_t queue_requests_show(struct request_queue *q, char *page)  in queue_requests_show()  argument
66 return queue_var_show(q->nr_requests, page); in queue_requests_show()
70 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
75 if (!queue_is_mq(q)) in queue_requests_store()
85 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
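The pair above is the canonical show/store shape used for every attribute in this file. A minimal sketch of the store side, reconstructed from the hits at 70-85; the queue_var_store() helper and the BLKDEV_MIN_RQ floor are assumptions taken from the surrounding upstream file, not from the fragments themselves:

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        /* nr_requests is only meaningful for blk-mq queues. */
        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        /* Never shrink below the minimum request-pool size. */
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}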
92 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
96 if (!q->disk) in queue_ra_show()
98 ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10); in queue_ra_show()
103 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
108 if (!q->disk) in queue_ra_store()
113 q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10); in queue_ra_store()
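The read-ahead pair converts between the kilobyte value sysfs exposes and the page count stored on the disk's backing_dev_info: pages << (PAGE_SHIFT - 10) yields KB on the show side, KB >> (PAGE_SHIFT - 10) yields pages on the store side. A sketch of the store path consistent with the hits at 103-113:

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret;

        /* Read-ahead state lives on the disk's backing_dev_info. */
        if (!q->disk)
                return -EINVAL;

        ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;

        /* KB -> pages; PAGE_SHIFT - 10 == log2(PAGE_SIZE / 1024). */
        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
}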
117 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
119 int max_sectors_kb = queue_max_sectors(q) >> 1; in queue_max_sectors_show()
124 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) in queue_max_segments_show() argument
126 return queue_var_show(queue_max_segments(q), page); in queue_max_segments_show()
129 static ssize_t queue_max_discard_segments_show(struct request_queue *q, in queue_max_discard_segments_show() argument
132 return queue_var_show(queue_max_discard_segments(q), page); in queue_max_discard_segments_show()
135 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) in queue_max_integrity_segments_show() argument
137 return queue_var_show(q->limits.max_integrity_segments, page); in queue_max_integrity_segments_show()
140 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) in queue_max_segment_size_show() argument
142 return queue_var_show(queue_max_segment_size(q), page); in queue_max_segment_size_show()
145 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) in queue_logical_block_size_show() argument
147 return queue_var_show(queue_logical_block_size(q), page); in queue_logical_block_size_show()
150 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) in queue_physical_block_size_show() argument
152 return queue_var_show(queue_physical_block_size(q), page); in queue_physical_block_size_show()
155 static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) in queue_chunk_sectors_show() argument
157 return queue_var_show(q->limits.chunk_sectors, page); in queue_chunk_sectors_show()
160 static ssize_t queue_io_min_show(struct request_queue *q, char *page) in queue_io_min_show() argument
162 return queue_var_show(queue_io_min(q), page); in queue_io_min_show()
165 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) in queue_io_opt_show() argument
167 return queue_var_show(queue_io_opt(q), page); in queue_io_opt_show()
170 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) in queue_discard_granularity_show() argument
172 return queue_var_show(q->limits.discard_granularity, page); in queue_discard_granularity_show()
175 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) in queue_discard_max_hw_show() argument
179 (unsigned long long)q->limits.max_hw_discard_sectors << 9); in queue_discard_max_hw_show()
182 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) in queue_discard_max_show() argument
185 (unsigned long long)q->limits.max_discard_sectors << 9); in queue_discard_max_show()
188 static ssize_t queue_discard_max_store(struct request_queue *q, in queue_discard_max_store() argument
197 if (max_discard & (q->limits.discard_granularity - 1)) in queue_discard_max_store()
204 if (max_discard > q->limits.max_hw_discard_sectors) in queue_discard_max_store()
205 max_discard = q->limits.max_hw_discard_sectors; in queue_discard_max_store()
207 q->limits.max_discard_sectors = max_discard; in queue_discard_max_store()
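Discard limits are kept in 512-byte sectors internally but shown in bytes, hence the << 9 at 179 and 185. The store path validates alignment against discard_granularity (the mask trick assumes a power-of-two granularity) and clamps the soft limit to the hardware limit. A sketch consistent with the fragments at 188-207; the UINT_MAX overflow check is an assumption from the upstream file:

static ssize_t
queue_discard_max_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        /* The byte value must be a multiple of the discard granularity. */
        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;              /* bytes -> 512-byte sectors */
        if (max_discard > UINT_MAX)
                return -EINVAL;

        /* The user-visible limit can never exceed the hardware limit. */
        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}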
211 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) in queue_discard_zeroes_data_show() argument
216 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) in queue_write_same_max_show() argument
221 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) in queue_write_zeroes_max_show() argument
224 (unsigned long long)q->limits.max_write_zeroes_sectors << 9); in queue_write_zeroes_max_show()
227 static ssize_t queue_zone_write_granularity_show(struct request_queue *q, in queue_zone_write_granularity_show() argument
230 return queue_var_show(queue_zone_write_granularity(q), page); in queue_zone_write_granularity_show()
233 static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page) in queue_zone_append_max_show() argument
235 unsigned long long max_sectors = q->limits.max_zone_append_sectors; in queue_zone_append_max_show()
241 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
245 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
254 q->limits.max_dev_sectors >> 1); in queue_max_sectors_store()
256 q->limits.max_user_sectors = 0; in queue_max_sectors_store()
263 q->limits.max_user_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
266 spin_lock_irq(&q->queue_lock); in queue_max_sectors_store()
267 q->limits.max_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
268 if (q->disk) in queue_max_sectors_store()
269 q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); in queue_max_sectors_store()
270 spin_unlock_irq(&q->queue_lock); in queue_max_sectors_store()
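max_sectors_kb works in kilobytes at the sysfs boundary (<< 1 and >> 1 convert KB to and from 512-byte sectors) and publishes the new limit under queue_lock so readers see a consistent pair of max_sectors and the bdi's io_pages read-ahead hint. A condensed sketch of just that update step, matching 266-270:

        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;    /* KB -> sectors */
        if (q->disk)
                q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);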
275 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) in queue_max_hw_sectors_show() argument
277 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
282 static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page) in queue_virt_boundary_mask_show() argument
284 return queue_var_show(q->limits.virt_boundary_mask, page); in queue_virt_boundary_mask_show()
287 static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page) in queue_dma_alignment_show() argument
289 return queue_var_show(queue_dma_alignment(q), page); in queue_dma_alignment_show()
294 queue_##name##_show(struct request_queue *q, char *page) \
297 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
301 queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
312 blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
314 blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
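The queue_##name##_show/store fragments at 294-314 come from a macro that stamps out a show/store pair per queue flag; the macro name QUEUE_SYSFS_BIT_FNS and the neg inversion parameter below are taken from the upstream file and should be read as assumptions. A sketch:

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_##name##_show(struct request_queue *q, char *page)                \
{                                                                       \
        int bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);         \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_##name##_store(struct request_queue *q, const char *page,         \
                     size_t count)                                      \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret = queue_var_store(&val, page, count);               \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;     /* e.g. "rotational" negates NONROT */  \
        if (val)                                                        \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
}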
324 static ssize_t queue_zoned_show(struct request_queue *q, char *page) in queue_zoned_show() argument
326 switch (blk_queue_zoned_model(q)) { in queue_zoned_show()
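queue_zoned_show maps the queue's zoned model to the strings exposed in /sys/block/<dev>/queue/zoned. A sketch of the switch at 326; the BLK_ZONED_* enum cases and the exact strings are taken from upstream and are assumptions here:

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}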
336 static ssize_t queue_nr_zones_show(struct request_queue *q, char *page) in queue_nr_zones_show() argument
338 return queue_var_show(disk_nr_zones(q->disk), page); in queue_nr_zones_show()
341 static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page) in queue_max_open_zones_show() argument
343 return queue_var_show(bdev_max_open_zones(q->disk->part0), page); in queue_max_open_zones_show()
346 static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page) in queue_max_active_zones_show() argument
348 return queue_var_show(bdev_max_active_zones(q->disk->part0), page); in queue_max_active_zones_show()
351 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) in queue_nomerges_show() argument
353 return queue_var_show((blk_queue_nomerges(q) << 1) | in queue_nomerges_show()
354 blk_queue_noxmerges(q), page); in queue_nomerges_show()
357 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, in queue_nomerges_store() argument
366 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
367 blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
369 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
371 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
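nomerges packs two flags into one integer: NOMERGES supplies bit 1 and NOXMERGES bit 0 (see the show side at 353-354), so 0 enables all merging, 1 disables only the extended merge lookups, and 2 disables merging entirely. The store side clears both flags and re-sets the one the value implies, per 366-371:

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)                    /* 2: no merging at all */
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)                    /* 1: no extended merges */
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);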
376 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) in queue_rq_affinity_show() argument
378 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
379 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
385 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) in queue_rq_affinity_store() argument
396 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
397 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
399 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
400 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
402 blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
403 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
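rq_affinity uses the same idea: SAME_COMP steers a completion to the CPU group that submitted the request, and SAME_FORCE additionally forces it onto the exact submitting CPU. Writing 2 sets both flags, 1 sets only SAME_COMP, and 0 clears both, as 396-403 show:

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }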
409 static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) in queue_poll_delay_show() argument
413 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) in queue_poll_delay_show()
416 val = q->poll_nsec / 1000; in queue_poll_delay_show()
421 static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, in queue_poll_delay_store() argument
426 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_delay_store()
434 q->poll_nsec = BLK_MQ_POLL_CLASSIC; in queue_poll_delay_store()
436 q->poll_nsec = val * 1000; in queue_poll_delay_store()
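io_poll_delay keeps nanoseconds in q->poll_nsec but is read and written in microseconds; the sentinel BLK_MQ_POLL_CLASSIC (surfaced as -1) selects classic polling. A sketch of the store path consistent with 421-436; the kstrtoint() parse is an assumption:

static ssize_t
queue_poll_delay_store(struct request_queue *q, const char *page, size_t count)
{
        int err, val;

        /* Polling requires a blk-mq driver that implements ->poll. */
        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == -1)
                q->poll_nsec = BLK_MQ_POLL_CLASSIC;
        else if (val >= 0)
                q->poll_nsec = val * 1000;      /* usec -> nsec */
        else
                return -EINVAL;

        return count;
}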
443 static ssize_t queue_poll_show(struct request_queue *q, char *page) in queue_poll_show() argument
445 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); in queue_poll_show()
448 static ssize_t queue_poll_store(struct request_queue *q, const char *page, in queue_poll_store() argument
451 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) in queue_poll_store()
458 static ssize_t queue_io_timeout_show(struct request_queue *q, char *page) in queue_io_timeout_show() argument
460 return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout)); in queue_io_timeout_show()
463 static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, in queue_io_timeout_store() argument
473 blk_queue_rq_timeout(q, msecs_to_jiffies(val)); in queue_io_timeout_store()
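io_timeout is exposed in milliseconds while q->rq_timeout is kept in jiffies, so the pair converts with jiffies_to_msecs()/msecs_to_jiffies(). A sketch of the store path around the hit at 473; the kstrtou32() parse and the zero check are assumptions:

static ssize_t
queue_io_timeout_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        /* Milliseconds from userspace -> jiffies internally. */
        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}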
478 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page) in queue_wb_lat_show() argument
480 if (!wbt_rq_qos(q)) in queue_wb_lat_show()
483 if (wbt_disabled(q)) in queue_wb_lat_show()
486 return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000)); in queue_wb_lat_show()
489 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, in queue_wb_lat_store() argument
502 rqos = wbt_rq_qos(q); in queue_wb_lat_store()
504 ret = wbt_init(q->disk); in queue_wb_lat_store()
510 val = wbt_default_latency_nsec(q); in queue_wb_lat_store()
514 if (wbt_get_min_lat(q) == val) in queue_wb_lat_store()
522 blk_mq_freeze_queue(q); in queue_wb_lat_store()
523 blk_mq_quiesce_queue(q); in queue_wb_lat_store()
525 wbt_set_min_lat(q, val); in queue_wb_lat_store()
527 blk_mq_unquiesce_queue(q); in queue_wb_lat_store()
528 blk_mq_unfreeze_queue(q); in queue_wb_lat_store()
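Changing the writeback-throttling latency target must not race with requests already in flight, so the store path freezes and quiesces the queue around wbt_set_min_lat() and undoes both in reverse order (522-528); note the usec/nsec conversion on the show side at 486. Condensed:

        /* Drain in-flight requests and stop dispatch before the switch. */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);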
533 static ssize_t queue_wc_show(struct request_queue *q, char *page) in queue_wc_show() argument
535 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) in queue_wc_show()
541 static ssize_t queue_wc_store(struct request_queue *q, const char *page, in queue_wc_store() argument
556 blk_queue_flag_set(QUEUE_FLAG_WC, q); in queue_wc_store()
558 blk_queue_flag_clear(QUEUE_FLAG_WC, q); in queue_wc_store()
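write_cache reports "write back" when QUEUE_FLAG_WC is set and "write through" otherwise; writing either string sets or clears the flag (556-558). A sketch of the show side, with the exact strings taken from upstream as an assumption:

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");
        return sprintf(page, "write through\n");
}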
563 static ssize_t queue_fua_show(struct request_queue *q, char *page) in queue_fua_show() argument
565 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); in queue_fua_show()
568 static ssize_t queue_dax_show(struct request_queue *q, char *page) in queue_dax_show() argument
570 return queue_var_show(blk_queue_dax(q), page); in queue_dax_show()
697 struct request_queue *q = disk->queue; in queue_attr_visible() local
700 (!q->mq_ops || !q->mq_ops->timeout)) in queue_attr_visible()
705 !blk_queue_is_zoned(q)) in queue_attr_visible()
724 struct request_queue *q = disk->queue; in queue_attr_show() local
729 mutex_lock(&q->sysfs_lock); in queue_attr_show()
730 res = entry->show(q, page); in queue_attr_show()
731 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
741 struct request_queue *q = disk->queue; in queue_attr_store() local
747 mutex_lock(&q->sysfs_lock); in queue_attr_store()
748 res = entry->store(q, page, length); in queue_attr_store()
749 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
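Every per-attribute handler above runs under q->sysfs_lock, taken in these generic wrappers, so individual show/store bodies need no locking of their own. A sketch of the show wrapper; the to_queue() cast and recovering the gendisk via container_of() from an embedded queue_kobj are assumptions about the upstream layout:

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}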
776 struct request_queue *q = disk->queue; in blk_debugfs_remove() local
778 mutex_lock(&q->debugfs_mutex); in blk_debugfs_remove()
779 blk_trace_shutdown(q); in blk_debugfs_remove()
780 debugfs_remove_recursive(q->debugfs_dir); in blk_debugfs_remove()
781 q->debugfs_dir = NULL; in blk_debugfs_remove()
782 q->sched_debugfs_dir = NULL; in blk_debugfs_remove()
783 q->rqos_debugfs_dir = NULL; in blk_debugfs_remove()
784 mutex_unlock(&q->debugfs_mutex); in blk_debugfs_remove()
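debugfs teardown is serialized by q->debugfs_mutex: blktrace is shut down first, since its files live under q->debugfs_dir, then the whole directory is removed and the cached dentry pointers are cleared so stale references cannot be reused. Assembled from 778-784:

        mutex_lock(&q->debugfs_mutex);
        blk_trace_shutdown(q);          /* its files sit under debugfs_dir */
        debugfs_remove_recursive(q->debugfs_dir);
        q->debugfs_dir = NULL;
        q->sched_debugfs_dir = NULL;
        q->rqos_debugfs_dir = NULL;
        mutex_unlock(&q->debugfs_mutex);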
793 struct request_queue *q = disk->queue; in blk_register_queue() local
796 mutex_lock(&q->sysfs_dir_lock); in blk_register_queue()
802 if (queue_is_mq(q)) { in blk_register_queue()
807 mutex_lock(&q->sysfs_lock); in blk_register_queue()
809 mutex_lock(&q->debugfs_mutex); in blk_register_queue()
810 q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root); in blk_register_queue()
811 if (queue_is_mq(q)) in blk_register_queue()
812 blk_mq_debugfs_register(q); in blk_register_queue()
813 mutex_unlock(&q->debugfs_mutex); in blk_register_queue()
819 if (q->elevator) { in blk_register_queue()
820 ret = elv_register_queue(q, false); in blk_register_queue()
829 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); in blk_register_queue()
835 if (q->elevator) in blk_register_queue()
836 kobject_uevent(&q->elevator->kobj, KOBJ_ADD); in blk_register_queue()
837 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
838 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
849 if (!blk_queue_init_done(q)) { in blk_register_queue()
850 blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
851 percpu_ref_switch_to_percpu(&q->q_usage_counter); in blk_register_queue()
857 elv_unregister_queue(q); in blk_register_queue()
862 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
865 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
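Registration nests three locks: sysfs_dir_lock for the directory work, sysfs_lock for attributes and the elevator, and debugfs_mutex for the debugfs tree; QUEUE_FLAG_REGISTERED is set only once everything is in place (829). The first successful registration also flips the queue out of its slow init mode, per 849-851:

        /*
         * On first registration, mark init done and switch q_usage_counter
         * from atomic to percpu mode for cheap fast-path refcounting.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }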
878 struct request_queue *q = disk->queue; in blk_unregister_queue() local
880 if (WARN_ON(!q)) in blk_unregister_queue()
884 if (!blk_queue_registered(q)) in blk_unregister_queue()
892 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
893 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); in blk_unregister_queue()
894 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
896 mutex_lock(&q->sysfs_dir_lock); in blk_unregister_queue()
901 if (queue_is_mq(q)) in blk_unregister_queue()
905 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
906 elv_unregister_queue(q); in blk_unregister_queue()
908 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
913 mutex_unlock(&q->sysfs_dir_lock); in blk_unregister_queue()
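Unregistration mirrors that order: QUEUE_FLAG_REGISTERED is cleared first, under sysfs_lock, so concurrent elevator switches stop adding entries before the mq sysfs directory and the elevator kobject are torn down under sysfs_dir_lock. A condensed sketch of that ordering based on 892-913; blk_mq_sysfs_unregister() is an assumed helper name for the call guarded at 901:

        /* Stop new sysfs additions before removing anything. */
        mutex_lock(&q->sysfs_lock);
        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        mutex_lock(&q->sysfs_dir_lock);
        if (queue_is_mq(q))
                blk_mq_sysfs_unregister(disk);  /* assumed helper */

        mutex_lock(&q->sysfs_lock);
        elv_unregister_queue(q);
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);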