Lines matching references to q (struct request_queue *) in the Linux block layer core, block/blk-core.c. Each entry shows the source line number, the code, the enclosing function, and whether q is an argument or a local variable there.
80 void blk_queue_flag_set(unsigned int flag, struct request_queue *q) in blk_queue_flag_set() argument
82 set_bit(flag, &q->queue_flags); in blk_queue_flag_set()
91 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) in blk_queue_flag_clear() argument
93 clear_bit(flag, &q->queue_flags); in blk_queue_flag_clear()
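
These two helpers are thin wrappers around set_bit()/clear_bit() on q->queue_flags. A minimal caller-side sketch follows; the example_* helpers and the choice of QUEUE_FLAG_NOMERGES are illustrative assumptions, not taken from this file.

#include <linux/blkdev.h>

/* Illustrative only: disable and re-enable request merging on a queue. */
static void example_disable_merges(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);	/* set_bit() under the hood */
}

static void example_enable_merges(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);	/* clear_bit() under the hood */
}
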
220 void blk_sync_queue(struct request_queue *q) in blk_sync_queue() argument
222 timer_delete_sync(&q->timeout); in blk_sync_queue()
223 cancel_work_sync(&q->timeout_work); in blk_sync_queue()
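
blk_sync_queue() quiesces the request timeout machinery, the q->timeout timer and q->timeout_work, before a queue is torn down. A hedged sketch of where a driver would place the call; example_teardown() is hypothetical.

static void example_teardown(struct request_queue *q)
{
	/* wait until the timeout timer and its work item are idle */
	blk_sync_queue(q);
	/* ... safe to release driver state tied to request timeouts ... */
}
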
231 void blk_set_pm_only(struct request_queue *q) in blk_set_pm_only() argument
233 atomic_inc(&q->pm_only); in blk_set_pm_only()
237 void blk_clear_pm_only(struct request_queue *q) in blk_clear_pm_only() argument
241 pm_only = atomic_dec_return(&q->pm_only); in blk_clear_pm_only()
244 wake_up_all(&q->mq_freeze_wq); in blk_clear_pm_only()
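
blk_set_pm_only() and blk_clear_pm_only() raise and drop a counter that, while non-zero, lets only power-management requests enter the queue; clearing it wakes sleepers in blk_queue_enter(). A sketch loosely modeled on the runtime-PM quiesce pattern; example_quiesce() and the BLK_MQ_REQ_PM comment are illustrative assumptions.

static void example_quiesce(struct request_queue *q)
{
	blk_set_pm_only(q);
	/* only requests allocated with BLK_MQ_REQ_PM get through now */
	/* ... issue power-management commands ... */
	blk_clear_pm_only(q);	/* wakes waiters on q->mq_freeze_wq */
}
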
250 struct request_queue *q = container_of(rcu_head, in blk_free_queue_rcu() local
253 percpu_ref_exit(&q->q_usage_counter); in blk_free_queue_rcu()
254 kmem_cache_free(blk_requestq_cachep, q); in blk_free_queue_rcu()
257 static void blk_free_queue(struct request_queue *q) in blk_free_queue() argument
259 blk_free_queue_stats(q->stats); in blk_free_queue()
260 if (queue_is_mq(q)) in blk_free_queue()
261 blk_mq_release(q); in blk_free_queue()
263 ida_free(&blk_queue_ida, q->id); in blk_free_queue()
264 lockdep_unregister_key(&q->io_lock_cls_key); in blk_free_queue()
265 lockdep_unregister_key(&q->q_lock_cls_key); in blk_free_queue()
266 call_rcu(&q->rcu_head, blk_free_queue_rcu); in blk_free_queue()
276 void blk_put_queue(struct request_queue *q) in blk_put_queue() argument
278 if (refcount_dec_and_test(&q->refs)) in blk_put_queue()
279 blk_free_queue(q); in blk_put_queue()
283 bool blk_queue_start_drain(struct request_queue *q) in blk_queue_start_drain() argument
290 bool freeze = __blk_freeze_queue_start(q, current); in blk_queue_start_drain()
291 if (queue_is_mq(q)) in blk_queue_start_drain()
292 blk_mq_wake_waiters(q); in blk_queue_start_drain()
294 wake_up_all(&q->mq_freeze_wq); in blk_queue_start_drain()
304 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) in blk_queue_enter() argument
308 while (!blk_try_enter_queue(q, pm)) { in blk_queue_enter()
320 wait_event(q->mq_freeze_wq, in blk_queue_enter()
321 (!q->mq_freeze_depth && in blk_queue_enter()
322 blk_pm_resume_queue(pm, q)) || in blk_queue_enter()
323 blk_queue_dying(q)); in blk_queue_enter()
324 if (blk_queue_dying(q)) in blk_queue_enter()
328 rwsem_acquire_read(&q->q_lockdep_map, 0, 0, _RET_IP_); in blk_queue_enter()
329 rwsem_release(&q->q_lockdep_map, _RET_IP_); in blk_queue_enter()
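
blk_queue_enter() pins q->q_usage_counter, sleeping while the queue is frozen and failing once it is dying; blk_queue_exit() drops the reference again. A sketch of that contract; blk_queue_enter() is block-layer internal, and example_do_work() is hypothetical.

static int example_do_work(struct request_queue *q)
{
	int ret;

	ret = blk_queue_enter(q, 0);	/* may sleep until the queue is unfrozen */
	if (ret)
		return ret;		/* e.g. -ENODEV once blk_queue_dying(q) */

	/* ... the queue cannot be frozen or freed under us here ... */

	blk_queue_exit(q);		/* percpu_ref_put(&q->q_usage_counter) */
	return 0;
}
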
333 int __bio_queue_enter(struct request_queue *q, struct bio *bio) in __bio_queue_enter() argument
335 while (!blk_try_enter_queue(q, false)) { in __bio_queue_enter()
353 wait_event(q->mq_freeze_wq, in __bio_queue_enter()
354 (!q->mq_freeze_depth && in __bio_queue_enter()
355 blk_pm_resume_queue(false, q)) || in __bio_queue_enter()
361 rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_); in __bio_queue_enter()
362 rwsem_release(&q->io_lockdep_map, _RET_IP_); in __bio_queue_enter()
369 void blk_queue_exit(struct request_queue *q) in blk_queue_exit() argument
371 percpu_ref_put(&q->q_usage_counter); in blk_queue_exit()
376 struct request_queue *q = in blk_queue_usage_counter_release() local
379 wake_up_all(&q->mq_freeze_wq); in blk_queue_usage_counter_release()
384 struct request_queue *q = timer_container_of(q, t, timeout); in blk_rq_timed_out_timer() local
386 kblockd_schedule_work(&q->timeout_work); in blk_rq_timed_out_timer()
395 struct request_queue *q; in blk_alloc_queue() local
398 q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO, in blk_alloc_queue()
400 if (!q) in blk_alloc_queue()
403 q->last_merge = NULL; in blk_alloc_queue()
405 q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL); in blk_alloc_queue()
406 if (q->id < 0) { in blk_alloc_queue()
407 error = q->id; in blk_alloc_queue()
411 q->stats = blk_alloc_queue_stats(); in blk_alloc_queue()
412 if (!q->stats) { in blk_alloc_queue()
420 q->limits = *lim; in blk_alloc_queue()
422 q->node = node_id; in blk_alloc_queue()
424 atomic_set(&q->nr_active_requests_shared_tags, 0); in blk_alloc_queue()
426 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); in blk_alloc_queue()
427 INIT_WORK(&q->timeout_work, blk_timeout_work); in blk_alloc_queue()
428 INIT_LIST_HEAD(&q->icq_list); in blk_alloc_queue()
430 refcount_set(&q->refs, 1); in blk_alloc_queue()
431 mutex_init(&q->debugfs_mutex); in blk_alloc_queue()
432 mutex_init(&q->elevator_lock); in blk_alloc_queue()
433 mutex_init(&q->sysfs_lock); in blk_alloc_queue()
434 mutex_init(&q->limits_lock); in blk_alloc_queue()
435 mutex_init(&q->rq_qos_mutex); in blk_alloc_queue()
436 spin_lock_init(&q->queue_lock); in blk_alloc_queue()
438 init_waitqueue_head(&q->mq_freeze_wq); in blk_alloc_queue()
439 mutex_init(&q->mq_freeze_lock); in blk_alloc_queue()
441 blkg_init_queue(q); in blk_alloc_queue()
447 error = percpu_ref_init(&q->q_usage_counter, in blk_alloc_queue()
452 lockdep_register_key(&q->io_lock_cls_key); in blk_alloc_queue()
453 lockdep_register_key(&q->q_lock_cls_key); in blk_alloc_queue()
454 lockdep_init_map(&q->io_lockdep_map, "&q->q_usage_counter(io)", in blk_alloc_queue()
455 &q->io_lock_cls_key, 0); in blk_alloc_queue()
456 lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)", in blk_alloc_queue()
457 &q->q_lock_cls_key, 0); in blk_alloc_queue()
461 rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_); in blk_alloc_queue()
462 rwsem_release(&q->io_lockdep_map, _RET_IP_); in blk_alloc_queue()
465 q->nr_requests = BLKDEV_DEFAULT_RQ; in blk_alloc_queue()
467 return q; in blk_alloc_queue()
470 blk_free_queue_stats(q->stats); in blk_alloc_queue()
472 ida_free(&blk_queue_ida, q->id); in blk_alloc_queue()
474 kmem_cache_free(blk_requestq_cachep, q); in blk_alloc_queue()
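
blk_alloc_queue() itself is block-layer internal; a blk-mq driver normally reaches it through blk_mq_alloc_disk(), passing its queue limits up front. A minimal sketch, assuming an already initialised tag set; example_alloc() and the limit values are illustrative.

#include <linux/blk-mq.h>

static struct gendisk *example_alloc(struct blk_mq_tag_set *set, void *drvdata)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,	/* illustrative value */
	};

	/* allocates the request_queue (via blk_alloc_queue()) and the gendisk;
	 * returns an ERR_PTR() on failure */
	return blk_mq_alloc_disk(set, &lim, drvdata);
}
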
486 bool blk_get_queue(struct request_queue *q) in blk_get_queue() argument
488 if (unlikely(blk_queue_dying(q))) in blk_get_queue()
490 refcount_inc(&q->refs); in blk_get_queue()
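
blk_get_queue() takes a reference only while the queue is not dying; every successful get must be balanced by blk_put_queue() (shown earlier), and the final put frees the queue via RCU. A sketch of the pairing; example_ref() is hypothetical.

static void example_ref(struct request_queue *q)
{
	if (!blk_get_queue(q))
		return;			/* queue is already dying */

	/* ... q->refs holds the queue alive here ... */

	blk_put_queue(q);		/* last put -> blk_free_queue() via RCU */
}
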
594 static inline blk_status_t blk_check_zone_append(struct request_queue *q, in blk_check_zone_append() argument
612 if (nr_sectors > q->limits.chunk_sectors) in blk_check_zone_append()
616 if (nr_sectors > q->limits.max_zone_append_sectors) in blk_check_zone_append()
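
The zone-append check rejects bios that would span more than one zone (chunk_sectors) or exceed the advertised append limit. A sketch of the same constraints from the submitter's side, using only the limit fields referenced above; example_zone_append_fits() is hypothetical.

static bool example_zone_append_fits(struct request_queue *q,
				     unsigned int nr_sectors)
{
	return nr_sectors <= q->limits.chunk_sectors &&
	       nr_sectors <= q->limits.max_zone_append_sectors;
}
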
681 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in __submit_bio_noacct() local
699 if (q == bdev_get_queue(bio->bi_bdev)) in __submit_bio_noacct()
756 static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q, in blk_validate_atomic_write_op_size() argument
759 if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q)) in blk_validate_atomic_write_op_size()
762 if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q)) in blk_validate_atomic_write_op_size()
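
The atomic-write check mirrors two limits: the bio must not exceed the maximum atomic write unit and must be a multiple of the minimum unit. A sketch of the equivalent submitter-side test; example_atomic_write_ok() is hypothetical.

static bool example_atomic_write_ok(struct request_queue *q, struct bio *bio)
{
	return bio->bi_iter.bi_size <= queue_atomic_write_unit_max_bytes(q) &&
	       !(bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q));
}
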
780 struct request_queue *q = bdev_get_queue(bdev); in submit_bio_noacct() local
825 status = blk_validate_atomic_write_op_size(q, bio); in submit_bio_noacct()
845 status = blk_check_zone_append(q, bio); in submit_bio_noacct()
850 if (!q->limits.max_write_zeroes_sectors) in submit_bio_noacct()
936 struct request_queue *q; in bio_poll() local
943 q = bdev_get_queue(bdev); in bio_poll()
958 if (!percpu_ref_tryget(&q->q_usage_counter)) in bio_poll()
960 if (queue_is_mq(q)) { in bio_poll()
961 ret = blk_mq_poll(q, cookie, iob, flags); in bio_poll()
963 struct gendisk *disk = q->disk; in bio_poll()
965 if ((q->limits.features & BLK_FEAT_POLL) && disk && in bio_poll()
969 blk_queue_exit(q); in bio_poll()
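
bio_poll() drives completion of a polled bio: it takes a temporary q_usage_counter reference, dispatches to blk_mq_poll() for mq queues, and bails out if the queue lacks BLK_FEAT_POLL. A heavily simplified sketch of the submit-then-poll pattern (real users go through the iocb/direct-IO plumbing); example_poll_wait() and the done flag are illustrative assumptions.

static void example_poll_wait(struct bio *bio, bool *done)
{
	bio->bi_opf |= REQ_POLLED;
	submit_bio(bio);

	/* *done is assumed to be set from the bio's bi_end_io callback */
	while (!READ_ONCE(*done)) {
		if (!bio_poll(bio, NULL, 0))
			cpu_relax();	/* queue cannot poll; IRQ path completes it */
	}
}
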
1097 int blk_lld_busy(struct request_queue *q) in blk_lld_busy() argument
1099 if (queue_is_mq(q) && q->mq_ops->busy) in blk_lld_busy()
1100 return q->mq_ops->busy(q); in blk_lld_busy()
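
blk_lld_busy() lets a stacking driver ask whether the low-level driver considers its queue busy (via the mq_ops->busy callback). A sketch of how a stacking target such as dm-mpath might consult it; example_path_busy() is hypothetical.

static bool example_path_busy(struct block_device *bdev)
{
	return blk_lld_busy(bdev_get_queue(bdev));
}
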