Lines matching refs: mq (identifier cross-references for mq in the Linux MMC block queue code, drivers/mmc/core/queue.c)

26 static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)  in mmc_cqe_dcmd_busy()  argument
29 return mq->in_flight[MMC_ISSUE_DCMD]; in mmc_cqe_dcmd_busy()
32 void mmc_cqe_check_busy(struct mmc_queue *mq) in mmc_cqe_check_busy() argument
34 if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq)) in mmc_cqe_check_busy()
35 mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY; in mmc_cqe_check_busy()
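Taken together, the two helpers above implement DCMD (direct command) throttling for CQE: only one DCMD may be in flight at a time, and the busy flag is cleared once that slot drains. A reconstruction from the hits, with the glue between them assumed from context:

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
        /* Only one DCMD may be in flight at a time */
        return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
        /* Clear the DCMD-busy flag once the in-flight DCMD has drained */
        if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
                mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}
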
59 enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req) in mmc_issue_type() argument
61 struct mmc_host *host = mq->card->host; in mmc_issue_type()
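Only the signature and the host lookup of mmc_issue_type() appear in the hits (lines 59-61); the classification below is an assumed sketch of how the mainline function picks an issue type: CQE hosts route through the CQE classifier, reads and writes go asynchronous, everything else is synchronous.

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
        struct mmc_host *host = mq->card->host;

        /* Assumed context: CQE hosts classify via the CQE helper */
        if (host->cqe_enabled && !host->hsq_enabled)
                return mmc_cqe_issue_type(host, req);

        if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
                return MMC_ISSUE_ASYNC;

        return MMC_ISSUE_SYNC;
}
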
72 static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq) in __mmc_cqe_recovery_notifier() argument
74 if (!mq->recovery_needed) { in __mmc_cqe_recovery_notifier()
75 mq->recovery_needed = true; in __mmc_cqe_recovery_notifier()
76 schedule_work(&mq->recovery_work); in __mmc_cqe_recovery_notifier()
86 struct mmc_queue *mq = q->queuedata; in mmc_cqe_recovery_notifier() local
89 spin_lock_irqsave(&mq->lock, flags); in mmc_cqe_recovery_notifier()
90 __mmc_cqe_recovery_notifier(mq); in mmc_cqe_recovery_notifier()
91 spin_unlock_irqrestore(&mq->lock, flags); in mmc_cqe_recovery_notifier()
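The notifier pair shows the locking discipline for scheduling recovery: __mmc_cqe_recovery_notifier() expects mq->lock to be held and arms recovery_work exactly once, while the exported wrapper takes the lock itself. The mrq-to-queue plumbing in the wrapper does not reference mq directly and is assumed from context:

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
        /* Caller holds mq->lock; schedule the recovery worker only once */
        if (!mq->recovery_needed) {
                mq->recovery_needed = true;
                schedule_work(&mq->recovery_work);
        }
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
        /* Assumed: recover mq from the mmc_request embedded in mmc_queue_req */
        struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
                                                  brq.mrq);
        struct request *req = mmc_queue_req_to_req(mqrq);
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        unsigned long flags;

        spin_lock_irqsave(&mq->lock, flags);
        __mmc_cqe_recovery_notifier(mq);
        spin_unlock_irqrestore(&mq->lock, flags);
}
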
98 struct mmc_queue *mq = req->q->queuedata; in mmc_cqe_timed_out() local
99 struct mmc_host *host = mq->card->host; in mmc_cqe_timed_out()
100 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); in mmc_cqe_timed_out()
123 struct mmc_queue *mq = q->queuedata; in mmc_mq_timed_out() local
124 struct mmc_card *card = mq->card; in mmc_mq_timed_out()
129 spin_lock_irqsave(&mq->lock, flags); in mmc_mq_timed_out()
130 ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled; in mmc_mq_timed_out()
131 spin_unlock_irqrestore(&mq->lock, flags); in mmc_mq_timed_out()
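The two timeout handlers split the work: mmc_mq_timed_out() decides under mq->lock whether the timeout can be acted on at all (recovery already pending, CQE off, or HSQ on means just reset the timer), and only then hands the request to mmc_cqe_timed_out() to ask the CQE driver. A sketch assuming mainline's shape; note that older kernels pass an extra bool reserved argument to both callbacks:

static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
        struct mmc_queue *mq = req->q->queuedata;
        struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
        struct mmc_request *mrq = &mqrq->brq.mrq;
        struct mmc_host *host = mq->card->host;
        enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
        bool recovery_needed = false;

        switch (issue_type) {
        case MMC_ISSUE_ASYNC:
        case MMC_ISSUE_DCMD:
                if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
                        if (recovery_needed)
                                mmc_cqe_recovery_notifier(mrq);
                        return BLK_EH_RESET_TIMER;
                }
                /* The request has gone already */
                return BLK_EH_DONE;
        default:
                /* Timeout is handled by mmc core */
                return BLK_EH_RESET_TIMER;
        }
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
        unsigned long flags;
        bool ignore_tout;

        spin_lock_irqsave(&mq->lock, flags);
        ignore_tout = mq->recovery_needed || !host->cqe_enabled ||
                      host->hsq_enabled;
        spin_unlock_irqrestore(&mq->lock, flags);

        return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
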
138 struct mmc_queue *mq = container_of(work, struct mmc_queue, in mmc_mq_recovery_handler() local
140 struct request_queue *q = mq->queue; in mmc_mq_recovery_handler()
141 struct mmc_host *host = mq->card->host; in mmc_mq_recovery_handler()
143 mmc_get_card(mq->card, &mq->ctx); in mmc_mq_recovery_handler()
145 mq->in_recovery = true; in mmc_mq_recovery_handler()
148 mmc_blk_cqe_recovery(mq); in mmc_mq_recovery_handler()
150 mmc_blk_mq_recovery(mq); in mmc_mq_recovery_handler()
152 mq->in_recovery = false; in mmc_mq_recovery_handler()
154 spin_lock_irq(&mq->lock); in mmc_mq_recovery_handler()
155 mq->recovery_needed = false; in mmc_mq_recovery_handler()
156 spin_unlock_irq(&mq->lock); in mmc_mq_recovery_handler()
161 mmc_put_card(mq->card, &mq->ctx); in mmc_mq_recovery_handler()
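The recovery worker, reconstructed from the hits; the branch between the CQE and plain blk-mq recovery paths (lines 148 vs 150) and the trailing queue restart are assumed context. Note that in_recovery is flipped without the lock (only this worker touches it), while recovery_needed is cleared under mq->lock because the notifier tests it there:

static void mmc_mq_recovery_handler(struct work_struct *work)
{
        struct mmc_queue *mq = container_of(work, struct mmc_queue,
                                            recovery_work);
        struct request_queue *q = mq->queue;
        struct mmc_host *host = mq->card->host;

        mmc_get_card(mq->card, &mq->ctx);

        mq->in_recovery = true;

        if (host->cqe_enabled && !host->hsq_enabled)
                mmc_blk_cqe_recovery(mq);
        else
                mmc_blk_mq_recovery(mq);

        mq->in_recovery = false;

        spin_lock_irq(&mq->lock);
        mq->recovery_needed = false;
        spin_unlock_irq(&mq->lock);

        if (host->hsq_enabled)
                host->cqe_ops->cqe_recovery_finish(host);

        blk_mq_run_hw_queues(q, true);

        mmc_put_card(mq->card, &mq->ctx);
}
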
206 struct mmc_queue *mq = set->driver_data; in mmc_mq_init_request() local
207 struct mmc_card *card = mq->card; in mmc_mq_init_request()
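mmc_mq_init_request() is blk-mq's .init_request hook: it runs once per request in the tag set and preallocates the per-request scatterlist. The allocation itself does not reference mq, so it is assumed here:

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
                               unsigned int hctx_idx, unsigned int numa_node)
{
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
        struct mmc_queue *mq = set->driver_data;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;

        /* Assumed: preallocate the scatterlist used later by mmc_queue_map_sg() */
        mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
        if (!mq_rq->sg)
                return -ENOMEM;

        return 0;
}
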
231 struct mmc_queue *mq = q->queuedata; in mmc_mq_queue_rq() local
232 struct mmc_card *card = mq->card; in mmc_mq_queue_rq()
239 if (mmc_card_removed(mq->card)) { in mmc_mq_queue_rq()
244 issue_type = mmc_issue_type(mq, req); in mmc_mq_queue_rq()
246 spin_lock_irq(&mq->lock); in mmc_mq_queue_rq()
248 if (mq->recovery_needed || mq->busy) { in mmc_mq_queue_rq()
249 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
255 if (mmc_cqe_dcmd_busy(mq)) { in mmc_mq_queue_rq()
256 mq->cqe_busy |= MMC_CQE_DCMD_BUSY; in mmc_mq_queue_rq()
257 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
266 if (host->hsq_enabled && mq->in_flight[issue_type] > 2) { in mmc_mq_queue_rq()
267 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
284 mq->busy = true; in mmc_mq_queue_rq()
286 mq->in_flight[issue_type] += 1; in mmc_mq_queue_rq()
287 get_card = (mmc_tot_in_flight(mq) == 1); in mmc_mq_queue_rq()
288 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); in mmc_mq_queue_rq()
290 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
298 mmc_get_card(card, &mq->ctx); in mmc_mq_queue_rq()
307 issued = mmc_blk_mq_issue_rq(mq, req); in mmc_mq_queue_rq()
324 spin_lock_irq(&mq->lock); in mmc_mq_queue_rq()
325 mq->in_flight[issue_type] -= 1; in mmc_mq_queue_rq()
326 if (mmc_tot_in_flight(mq) == 0) in mmc_mq_queue_rq()
328 mq->busy = false; in mmc_mq_queue_rq()
329 spin_unlock_irq(&mq->lock); in mmc_mq_queue_rq()
331 mmc_put_card(card, &mq->ctx); in mmc_mq_queue_rq()
333 WRITE_ONCE(mq->busy, false); in mmc_mq_queue_rq()
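mmc_mq_queue_rq() is the .queue_rq dispatch path and accounts for most of the hits above. A condensed sketch of the flow they trace: reject requests for a removed card, back off with BLK_STS_RESOURCE while recovery is pending or another dispatch is in progress, throttle DCMD and HSQ submissions, then bump the in-flight count under mq->lock before issuing. The card claim on the first in-flight request and the unwind on a failed issue are visible at lines 287, 298 and 324-333; the switch bodies, the retune handling, and the return-code mapping are assumed:

static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
        enum mmc_issue_type issue_type;
        enum mmc_issued issued;
        bool get_card, cqe_retune_ok;
        blk_status_t ret;

        if (mmc_card_removed(mq->card)) {
                req->rq_flags |= RQF_QUIET;
                return BLK_STS_IOERR;
        }

        issue_type = mmc_issue_type(mq, req);

        spin_lock_irq(&mq->lock);

        if (mq->recovery_needed || mq->busy) {
                spin_unlock_irq(&mq->lock);
                return BLK_STS_RESOURCE;
        }

        switch (issue_type) {
        case MMC_ISSUE_DCMD:
                if (mmc_cqe_dcmd_busy(mq)) {
                        mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
                        spin_unlock_irq(&mq->lock);
                        return BLK_STS_RESOURCE;
                }
                break;
        case MMC_ISSUE_ASYNC:
                /* HSQ bounds the number of in-flight async requests */
                if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
                        spin_unlock_irq(&mq->lock);
                        return BLK_STS_RESOURCE;
                }
                break;
        default:
                break;
        }

        /* mq->busy serializes dispatch until this request has been issued */
        mq->busy = true;

        mq->in_flight[issue_type] += 1;
        get_card = (mmc_tot_in_flight(mq) == 1);
        cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

        spin_unlock_irq(&mq->lock);

        if (get_card)
                mmc_get_card(card, &mq->ctx);

        /* Assumed: retune only while a single CQE request is queued */
        if (host->cqe_enabled && !host->hsq_enabled)
                host->retune_now = host->need_retune && cqe_retune_ok &&
                                   !host->hold_retune;

        blk_mq_start_request(req);

        issued = mmc_blk_mq_issue_rq(mq, req);

        switch (issued) {
        case MMC_REQ_BUSY:
                ret = BLK_STS_RESOURCE;
                break;
        case MMC_REQ_FAILED_TO_START:
                ret = BLK_STS_IOERR;
                break;
        default:
                ret = BLK_STS_OK;
                break;
        }

        if (issued != MMC_REQ_STARTED) {
                bool put_card = false;

                /* Unwind the accounting for a request that never started */
                spin_lock_irq(&mq->lock);
                mq->in_flight[issue_type] -= 1;
                if (mmc_tot_in_flight(mq) == 0)
                        put_card = true;
                mq->busy = false;
                spin_unlock_irq(&mq->lock);
                if (put_card)
                        mmc_put_card(card, &mq->ctx);
        } else {
                WRITE_ONCE(mq->busy, false);
        }

        return ret;
}
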
347 static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) in mmc_setup_queue() argument
352 blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); in mmc_setup_queue()
353 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); in mmc_setup_queue()
355 mmc_queue_setup_discard(mq->queue, card); in mmc_setup_queue()
358 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); in mmc_setup_queue()
359 blk_queue_max_hw_sectors(mq->queue, in mmc_setup_queue()
362 WARN(!blk_queue_can_use_dma_map_merging(mq->queue, in mmc_setup_queue()
365 blk_queue_max_segments(mq->queue, mmc_get_max_segments(host)); in mmc_setup_queue()
372 blk_queue_logical_block_size(mq->queue, block_size); in mmc_setup_queue()
379 blk_queue_max_segment_size(mq->queue, in mmc_setup_queue()
382 dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); in mmc_setup_queue()
384 INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); in mmc_setup_queue()
385 INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); in mmc_setup_queue()
387 mutex_init(&mq->complete_lock); in mmc_setup_queue()
389 init_waitqueue_head(&mq->wait); in mmc_setup_queue()
391 mmc_crypto_setup_queue(mq->queue, host); in mmc_setup_queue()
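mmc_setup_queue() translates host and card capabilities into block-layer queue limits. The hits show nearly every call; what the one-hit-per-line format strips out is the guards, reconstructed (and therefore assumed) below: discard setup only when the card supports erase, highmem bouncing only without a DMA mask, and the segment-size limit skipped when DMA map merging is in use, since that path sets a virt boundary instead:

static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        unsigned int block_size = 512;

        blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

        if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
                blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
        blk_queue_max_hw_sectors(mq->queue,
                min(host->max_blk_count, host->max_req_size / 512));
        if (host->can_dma_map_merge)
                WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
                                                        mmc_dev(host)),
                     "merging was advertised but not possible");
        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

        if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
                block_size = card->ext_csd.data_sector_size;

        blk_queue_logical_block_size(mq->queue, block_size);
        /* DMA map merging sets a virt boundary instead of a segment size */
        if (!host->can_dma_map_merge)
                blk_queue_max_segment_size(mq->queue,
                        round_down(host->max_seg_size, block_size));
        dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

        INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
        INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

        mutex_init(&mq->complete_lock);

        init_waitqueue_head(&mq->wait);

        mmc_crypto_setup_queue(mq->queue, host);
}
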
409 struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) in mmc_init_queue() argument
415 mq->card = card; in mmc_init_queue()
417 spin_lock_init(&mq->lock); in mmc_init_queue()
419 memset(&mq->tag_set, 0, sizeof(mq->tag_set)); in mmc_init_queue()
420 mq->tag_set.ops = &mmc_mq_ops; in mmc_init_queue()
426 mq->tag_set.queue_depth = in mmc_init_queue()
429 mq->tag_set.queue_depth = MMC_QUEUE_DEPTH; in mmc_init_queue()
430 mq->tag_set.numa_node = NUMA_NO_NODE; in mmc_init_queue()
431 mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in mmc_init_queue()
432 mq->tag_set.nr_hw_queues = 1; in mmc_init_queue()
433 mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); in mmc_init_queue()
434 mq->tag_set.driver_data = mq; in mmc_init_queue()
448 ret = blk_mq_alloc_tag_set(&mq->tag_set); in mmc_init_queue()
453 disk = blk_mq_alloc_disk(&mq->tag_set, mq); in mmc_init_queue()
455 blk_mq_free_tag_set(&mq->tag_set); in mmc_init_queue()
458 mq->queue = disk->queue; in mmc_init_queue()
461 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); in mmc_init_queue()
462 blk_queue_rq_timeout(mq->queue, 60 * HZ); in mmc_init_queue()
464 mmc_setup_queue(mq, card); in mmc_init_queue()
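mmc_init_queue() wires the mmc_queue into blk-mq: fill in the tag set, allocate it, then allocate the gendisk against it with mq itself as queuedata. The visible hits cover almost all of this; the error handling, the CQE depth choice at line 426, and the SPI CRC guard before line 461 are completed from assumed context:

struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        struct gendisk *disk;
        int ret;

        mq->card = card;

        spin_lock_init(&mq->lock);

        memset(&mq->tag_set, 0, sizeof(mq->tag_set));
        mq->tag_set.ops = &mmc_mq_ops;
        /*
         * Assumed rationale: the CQE depth must match the hardware, since
         * the request tag is used to index the hardware queue.
         */
        if (host->cqe_enabled && !host->hsq_enabled)
                mq->tag_set.queue_depth =
                        min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
        else
                mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
        mq->tag_set.numa_node = NUMA_NO_NODE;
        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
        mq->tag_set.nr_hw_queues = 1;
        mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
        mq->tag_set.driver_data = mq;

        /* (lines 435-447 don't reference mq and are elided here) */

        ret = blk_mq_alloc_tag_set(&mq->tag_set);
        if (ret)
                return ERR_PTR(ret);

        disk = blk_mq_alloc_disk(&mq->tag_set, mq);
        if (IS_ERR(disk)) {
                blk_mq_free_tag_set(&mq->tag_set);
                return disk;
        }
        mq->queue = disk->queue;

        if (mmc_host_is_spi(host) && host->use_spi_crc)
                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
        blk_queue_rq_timeout(mq->queue, 60 * HZ);

        mmc_setup_queue(mq, card);
        return disk;
}
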
468 void mmc_queue_suspend(struct mmc_queue *mq) in mmc_queue_suspend() argument
470 blk_mq_quiesce_queue(mq->queue); in mmc_queue_suspend()
476 mmc_claim_host(mq->card->host); in mmc_queue_suspend()
477 mmc_release_host(mq->card->host); in mmc_queue_suspend()
480 void mmc_queue_resume(struct mmc_queue *mq) in mmc_queue_resume() argument
482 blk_mq_unquiesce_queue(mq->queue); in mmc_queue_resume()
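Suspend quiesces the blk-mq queue and then claims and releases the host; the claim cannot succeed until every outstanding request has put the card, so it doubles as a drain. Resume simply unquiesces. Reconstructed almost verbatim from the hits:

void mmc_queue_suspend(struct mmc_queue *mq)
{
        blk_mq_quiesce_queue(mq->queue);

        /*
         * Assumed comment: the host stays claimed while requests are in
         * flight, so claiming it here waits for them all to finish.
         */
        mmc_claim_host(mq->card->host);
        mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
        blk_mq_unquiesce_queue(mq->queue);
}
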
485 void mmc_cleanup_queue(struct mmc_queue *mq) in mmc_cleanup_queue() argument
487 struct request_queue *q = mq->queue; in mmc_cleanup_queue()
497 blk_mq_free_tag_set(&mq->tag_set); in mmc_cleanup_queue()
504 flush_work(&mq->complete_work); in mmc_cleanup_queue()
506 mq->card = NULL; in mmc_cleanup_queue()
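Teardown, reconstructed from the hits at lines 487-506. The steps in between (unquiescing a still-suspended queue and destroying the request queue itself) don't reference mq and vary across kernel versions, so they are only sketched:

void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;

        /* Assumed: a queue left suspended at removal must be unquiesced first */
        if (blk_queue_quiesced(q))
                blk_mq_unquiesce_queue(q);

        /* (request-queue teardown elided; the API differs by kernel version) */

        blk_mq_free_tag_set(&mq->tag_set);

        /*
         * A completion may still have queued complete_work with nothing left
         * to do; flush it so nothing touches mq after this point.
         */
        flush_work(&mq->complete_work);

        mq->card = NULL;
}
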
512 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) in mmc_queue_map_sg() argument
516 return blk_rq_map_sg(mq->queue, req, mqrq->sg); in mmc_queue_map_sg()
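Finally, mmc_queue_map_sg() maps a request onto the scatterlist preallocated in mmc_mq_init_request(); only the mqrq-to-request conversion is assumed here:

unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        struct request *req = mmc_queue_req_to_req(mqrq);

        /* Fill mqrq->sg from the request's bio segments */
        return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}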