Lines matching refs:queue, a cross-reference listing from an RT-Thread NVMe driver. Each match shows the source line number, the code, and the enclosing function; lines where queue is declared are additionally tagged argument or local.

103 static rt_err_t nvme_submit_cmd(struct rt_nvme_queue *queue,  in nvme_submit_cmd()  argument
109 struct rt_nvme_controller *nvme = queue->nvme; in nvme_submit_cmd()
112 level = rt_spin_lock_irqsave(&queue->lock); in nvme_submit_cmd()
114 tail = queue->sq_tail; in nvme_submit_cmd()
115 head = queue->cq_head; in nvme_submit_cmd()
120 rt_spin_unlock_irqrestore(&queue->lock, level); in nvme_submit_cmd()
128 rt_memcpy(&queue->sq_cmds[tail], cmd, sizeof(*cmd)); in nvme_submit_cmd()
132 if ((err = nvme->ops->submit_cmd(queue, cmd))) in nvme_submit_cmd()
138 if (++tail == queue->depth) in nvme_submit_cmd()
142 HWREG32(queue->doorbell) = tail; in nvme_submit_cmd()
143 queue->sq_tail = tail; in nvme_submit_cmd()
145 queue->cmd = cmd; in nvme_submit_cmd()
146 queue->err = RT_EOK; in nvme_submit_cmd()
148 rt_spin_unlock_irqrestore(&queue->lock, level); in nvme_submit_cmd()
150 err = rt_completion_wait(&queue->done, in nvme_submit_cmd()
151 rt_tick_from_millisecond(queue->qid != 0 ? RT_WAITING_FOREVER : 60)); in nvme_submit_cmd()
153 return err ? : queue->err; in nvme_submit_cmd()
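The matches above cover the whole submission path: take the queue lock, copy the command into the submission ring at sq_tail, ring the SQ tail doorbell, then sleep on a completion object until the ISR reports the result. A condensed sketch of that flow, using the struct rt_nvme_queue fields implied by the matches; the queue-full test and the NULL guard around the transport's submit_cmd hook are assumptions, not copied from the source:

static rt_err_t nvme_submit_sketch(struct rt_nvme_queue *queue,
        struct rt_nvme_command *cmd)
{
    rt_err_t err;
    rt_uint16_t tail, head;
    struct rt_nvme_controller *nvme = queue->nvme;
    rt_ubase_t level = rt_spin_lock_irqsave(&queue->lock);

    tail = queue->sq_tail;
    head = queue->cq_head;

    if (((tail + 1) % queue->depth) == head)    /* assumed full test */
    {
        rt_spin_unlock_irqrestore(&queue->lock, level);
        return -RT_EBUSY;
    }

    rt_memcpy(&queue->sq_cmds[tail], cmd, sizeof(*cmd));

    /* optional transport hook, e.g. for cache maintenance (guard assumed) */
    if (nvme->ops->submit_cmd && (err = nvme->ops->submit_cmd(queue, cmd)))
    {
        rt_spin_unlock_irqrestore(&queue->lock, level);
        return err;
    }

    if (++tail == queue->depth)
    {
        tail = 0;                       /* the ring wraps at queue->depth */
    }

    HWREG32(queue->doorbell) = tail;    /* SQ tail doorbell: new work posted */
    queue->sq_tail = tail;
    queue->cmd = cmd;                   /* one in-flight command per queue */
    queue->err = RT_EOK;

    rt_spin_unlock_irqrestore(&queue->lock, level);

    /* nvme_queue_isr() calls rt_completion_done() once the CQ entry lands */
    err = rt_completion_wait(&queue->done,
            rt_tick_from_millisecond(queue->qid != 0 ? RT_WAITING_FOREVER : 60));

    return err ? err : queue->err;
}

Storing a single queue->cmd pointer means each queue carries at most one outstanding command at a time; the completion object is what serializes submitters against the interrupt handler.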
653 struct rt_nvme_queue *queue = param; in nvme_queue_isr() local
654 struct rt_nvme_controller *nvme = queue->nvme; in nvme_queue_isr()
656 level = rt_spin_lock_irqsave(&queue->lock); in nvme_queue_isr()
658 head = queue->cq_head; in nvme_queue_isr()
659 phase = queue->cq_phase; in nvme_queue_isr()
660 status = HWREG16(&queue->cq_entry[head].status); in nvme_queue_isr()
667 queue->err = -RT_EIO; in nvme_queue_isr()
673 nvme->ops->complete_cmd(queue, queue->cmd); in nvme_queue_isr()
677 if (++head == queue->depth) in nvme_queue_isr()
683 HWREG32(queue->doorbell + nvme->doorbell_stride) = head; in nvme_queue_isr()
684 queue->cq_head = head; in nvme_queue_isr()
685 queue->cq_phase = phase; in nvme_queue_isr()
687 rt_completion_done(&queue->done); in nvme_queue_isr()
690 rt_spin_unlock_irqrestore(&queue->lock, level); in nvme_queue_isr()
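nvme_queue_isr() demonstrates the NVMe phase-bit protocol: the controller toggles bit 0 of each completion entry's status word on every pass through the ring, so an entry is fresh exactly when its phase bit matches the value the driver expects, and the expected value flips whenever head wraps. A sketch under those rules; the shape of the conditional elided from the matches is an assumption:

static void nvme_queue_isr_sketch(int irqno, void *param)
{
    struct rt_nvme_queue *queue = param;
    struct rt_nvme_controller *nvme = queue->nvme;
    rt_ubase_t level = rt_spin_lock_irqsave(&queue->lock);
    rt_uint16_t head = queue->cq_head;
    rt_uint16_t phase = queue->cq_phase;
    rt_uint16_t status = HWREG16(&queue->cq_entry[head].status);

    if ((status & 0x1) == phase)        /* entry written on this pass */
    {
        if (status >> 1)                /* non-zero status code: I/O error */
        {
            queue->err = -RT_EIO;
        }

        if (nvme->ops->complete_cmd)    /* optional transport hook */
        {
            nvme->ops->complete_cmd(queue, queue->cmd);
        }

        if (++head == queue->depth)
        {
            head = 0;
            phase ^= 1;                 /* wrapped: expect the inverted bit */
        }

        /* the CQ head doorbell sits one stride above the SQ tail doorbell */
        HWREG32(queue->doorbell + nvme->doorbell_stride) = head;
        queue->cq_head = head;
        queue->cq_phase = phase;

        rt_completion_done(&queue->done);   /* wake nvme_submit_cmd() */
    }

    rt_spin_unlock_irqrestore(&queue->lock, level);
}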
728 static rt_err_t nvme_attach_queue(struct rt_nvme_queue *queue, rt_uint8_t opcode) in nvme_attach_queue() argument
731 struct rt_nvme_controller *nvme = queue->nvme; in nvme_attach_queue()
739 cmd.create_cq.prp1 = rt_cpu_to_le64(queue->cq_entry_phy); in nvme_attach_queue()
740 cmd.create_cq.cqid = rt_cpu_to_le16(queue->qid); in nvme_attach_queue()
741 cmd.create_cq.qsize = rt_cpu_to_le16(queue->depth - 1); in nvme_attach_queue()
743 cmd.create_cq.irq_vector = rt_cpu_to_le16(nvme->irqs_nr > 1 ? queue->qid : 0); in nvme_attach_queue()
748 cmd.create_sq.prp1 = rt_cpu_to_le64(queue->sq_cmds_phy); in nvme_attach_queue()
749 cmd.create_sq.sqid = rt_cpu_to_le16(queue->qid); in nvme_attach_queue()
750 cmd.create_sq.qsize = rt_cpu_to_le16(queue->depth - 1); in nvme_attach_queue()
752 cmd.create_sq.cqid = rt_cpu_to_le16(queue->qid); in nvme_attach_queue()
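nvme_attach_queue() assembles the admin Create I/O Completion Queue or Create I/O Submission Queue command from the queue's DMA addresses. qsize is encoded minus one because the NVMe spec defines queue sizes as zero-based, and the interrupt vector only varies per queue when the controller actually granted more than one vector. A sketch of the CQ branch; the flags word (physically contiguous, interrupts enabled) is elided because its macro names are not visible in the matches:

struct rt_nvme_command cmd;

rt_memset(&cmd, 0, sizeof(cmd));
cmd.create_cq.opcode = RT_NVME_ADMIN_OPCODE_CREATE_CQ;
cmd.create_cq.prp1 = rt_cpu_to_le64(queue->cq_entry_phy);  /* ring base (PRP1) */
cmd.create_cq.cqid = rt_cpu_to_le16(queue->qid);
cmd.create_cq.qsize = rt_cpu_to_le16(queue->depth - 1);    /* zero-based size */
cmd.create_cq.irq_vector = rt_cpu_to_le16(nvme->irqs_nr > 1 ? queue->qid : 0);
/* the SQ branch mirrors this, plus create_sq.cqid binding the SQ to its CQ */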
763 rt_inline rt_err_t nvme_attach_queue_sq(struct rt_nvme_queue *queue) in nvme_attach_queue_sq() argument
765 return nvme_attach_queue(queue, RT_NVME_ADMIN_OPCODE_CREATE_SQ); in nvme_attach_queue_sq()
768 rt_inline rt_err_t nvme_attach_queue_cq(struct rt_nvme_queue *queue) in nvme_attach_queue_cq() argument
770 return nvme_attach_queue(queue, RT_NVME_ADMIN_OPCODE_CREATE_CQ); in nvme_attach_queue_cq()
773 static rt_err_t nvme_detach_queue(struct rt_nvme_queue *queue, in nvme_detach_queue() argument
777 struct rt_nvme_controller *nvme = queue->nvme; in nvme_detach_queue()
781 cmd.delete_queue.qid = rt_cpu_to_le16(queue->qid); in nvme_detach_queue()
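Deletion is the mirror image and needs only the queue ID. A minimal sketch, assuming delete_queue.opcode is filled in from the caller the same way as in the create path:

struct rt_nvme_command cmd;

rt_memset(&cmd, 0, sizeof(cmd));
cmd.delete_queue.opcode = opcode;  /* DELETE_SQ or DELETE_CQ, per caller */
cmd.delete_queue.qid = rt_cpu_to_le16(queue->qid);
/* submitted on the admin queue, just like the create commands above */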
791 static void nvme_free_queue(struct rt_nvme_queue *queue) in nvme_free_queue() argument
794 struct rt_nvme_controller *nvme = queue->nvme; in nvme_free_queue()
800 if (!(err = nvme->ops->cleanup_queue(queue))) in nvme_free_queue()
808 if (queue->sq_cmds) in nvme_free_queue()
810 rt_dma_free(nvme->dev, sizeof(*queue->sq_cmds) * queue->depth, in nvme_free_queue()
811 queue->sq_cmds, queue->sq_cmds_phy, dma_flags); in nvme_free_queue()
814 if (queue->cq_entry) in nvme_free_queue()
816 rt_dma_free(nvme->dev, sizeof(*queue->cq_entry) * queue->depth, in nvme_free_queue()
817 queue->cq_entry, queue->cq_entry_phy, dma_flags); in nvme_free_queue()
826 struct rt_nvme_queue *queue = &nvme->queue[qid]; in nvme_alloc_queue() local
828 rt_memset(queue, 0, sizeof(*queue)); in nvme_alloc_queue()
830 queue->nvme = nvme; in nvme_alloc_queue()
831 queue->doorbell = &nvme->doorbell_tbl[qid * 2 * nvme->doorbell_stride]; in nvme_alloc_queue()
832 queue->qid = qid; in nvme_alloc_queue()
833 queue->depth = depth; in nvme_alloc_queue()
834 queue->cq_head = 0; in nvme_alloc_queue()
835 queue->cq_phase = 1; in nvme_alloc_queue()
836 rt_completion_init(&queue->done); in nvme_alloc_queue()
837 rt_spin_lock_init(&queue->lock); in nvme_alloc_queue()
842 queue->sq_cmds = rt_dma_alloc(nvme->dev, in nvme_alloc_queue()
843 sizeof(*queue->sq_cmds) * depth, &queue->sq_cmds_phy, dma_flags); in nvme_alloc_queue()
845 if (!queue->sq_cmds) in nvme_alloc_queue()
852 queue->cq_entry = rt_dma_alloc(nvme->dev, in nvme_alloc_queue()
853 sizeof(*queue->cq_entry) * depth, &queue->cq_entry_phy, dma_flags); in nvme_alloc_queue()
855 if (!queue->cq_entry) in nvme_alloc_queue()
861 rt_memset(queue->sq_cmds, 0, sizeof(struct rt_nvme_command) * depth); in nvme_alloc_queue()
862 rt_memset(queue->cq_entry, 0, sizeof(struct rt_nvme_completion) * depth); in nvme_alloc_queue()
866 if (!(err = nvme->ops->setup_queue(queue))) in nvme_alloc_queue()
874 return queue; in nvme_alloc_queue()
877 nvme_free_queue(queue); in nvme_alloc_queue()
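nvme_alloc_queue() also shows the doorbell arithmetic: each queue owns a (SQ tail, CQ head) doorbell pair spaced doorbell_stride registers apart, so queue qid starts at doorbell_tbl[qid * 2 * stride]. cq_phase starts at 1 because the CQ ring is zeroed at allocation, so the controller's first pass writes entries with phase bit 1. Two hypothetical helpers (not in the source) that make the pairing explicit, assuming doorbell_tbl is an array of 32-bit registers:

rt_inline rt_uint32_t *nvme_sq_tail_db(struct rt_nvme_controller *nvme,
        rt_uint16_t qid)
{
    return &nvme->doorbell_tbl[qid * 2 * nvme->doorbell_stride];
}

rt_inline rt_uint32_t *nvme_cq_head_db(struct rt_nvme_controller *nvme,
        rt_uint16_t qid)
{
    /* matches the doorbell + doorbell_stride offset used in the ISR */
    return nvme_sq_tail_db(nvme, qid) + nvme->doorbell_stride;
}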
959 struct rt_nvme_queue *queue; in nvme_setup_io_queues() local
971 queue = nvme_alloc_queue(nvme, q_idx, nvme->queue_depth); in nvme_setup_io_queues()
973 if (!queue) in nvme_setup_io_queues()
978 if ((err = nvme_attach_queue_cq(queue)) || in nvme_setup_io_queues()
979 (err = nvme_attach_queue_sq(queue))) in nvme_setup_io_queues()
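nvme_setup_io_queues() allocates each I/O queue and then attaches it with two admin commands, CQ first, because the SQ's create command must reference an already existing CQ. A sketch of that loop; the bounds and the io_queue_nr field are assumptions:

rt_err_t err;
rt_size_t q_idx;
struct rt_nvme_queue *queue;

for (q_idx = 1; q_idx <= nvme->io_queue_nr; ++q_idx)   /* qid 0 is the admin queue */
{
    queue = nvme_alloc_queue(nvme, q_idx, nvme->queue_depth);

    if (!queue)
    {
        return -RT_ENOMEM;
    }

    /* the CQ must exist before the SQ whose create command names it */
    if ((err = nvme_attach_queue_cq(queue)) ||
        (err = nvme_attach_queue_sq(queue)))
    {
        return err;
    }
}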
1012 struct rt_nvme_queue *queue; in nvme_remove_io_queues() local
1016 queue = &nvme->io_queues[i]; in nvme_remove_io_queues()
1018 nvme_detach_queue(queue, RT_NVME_ADMIN_OPCODE_DELETE_SQ); in nvme_remove_io_queues()
1019 nvme_detach_queue(queue, RT_NVME_ADMIN_OPCODE_DELETE_CQ); in nvme_remove_io_queues()
1020 nvme_free_queue(queue); in nvme_remove_io_queues()
1025 rt_pic_detach_irq(irq, queue); in nvme_remove_io_queues()
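Teardown runs in the reverse order: delete the SQ first so the controller stops consuming commands, then the CQ, then free the rings and detach the per-queue interrupt. A sketch; the loop bounds and the irqs[] array are assumptions:

rt_size_t i;
struct rt_nvme_queue *queue;

for (i = 0; i < nvme->io_queue_nr; ++i)
{
    queue = &nvme->io_queues[i];

    /* SQ before CQ: no new completions can be generated once the SQ is gone */
    nvme_detach_queue(queue, RT_NVME_ADMIN_OPCODE_DELETE_SQ);
    nvme_detach_queue(queue, RT_NVME_ADMIN_OPCODE_DELETE_CQ);
    nvme_free_queue(queue);

    rt_pic_detach_irq(nvme->irqs[i], queue);   /* irqs[] is assumed */
}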