Lines Matching refs:iod

425 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_init_request() local
428 nvme_req(req)->cmd = &iod->cmd; in nvme_pci_init_request()
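
These references are to the per-request I/O descriptor in the NVMe PCI driver (drivers/nvme/host/pci.c); the "local" marker flags the line in each function where the variable is obtained via blk_mq_rq_to_pdu(). Every field used below belongs to that PDU. A rough sketch of its shape, limited to the fields visible in this listing (field order, and any members not referenced here, may differ from the real definition):

union nvme_descriptor {
	struct nvme_sgl_desc	*sg_list;	/* iod->list[0].sg_list (lines 557, 720) */
	__le64			*prp_list;	/* iod->list[i].prp_list (lines 534, 629) */
};

struct nvme_iod {
	struct nvme_request req;	/* nvme_req(req) aliases the start of the PDU */
	struct nvme_command cmd;	/* built in nvme_prep_rq(), copied to the SQ later */
	bool aborted;			/* set once nvme_timeout() has issued an Abort */
	s8 nr_allocations;		/* -1: none, 0: small pool, >=1: page-pool pages */
	unsigned int dma_len;		/* non-zero only on the single-bvec fast path */
	dma_addr_t first_dma;		/* descriptor page DMA address, or the bvec mapping */
	dma_addr_t meta_dma;		/* mapped integrity (metadata) buffer */
	struct sg_table sgt;		/* scatterlist for the general mapping path */
	union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
};
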
529 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_free_prps() local
530 dma_addr_t dma_addr = iod->first_dma; in nvme_free_prps()
533 for (i = 0; i < iod->nr_allocations; i++) { in nvme_free_prps()
534 __le64 *prp_list = iod->list[i].prp_list; in nvme_free_prps()
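
Read together, lines 529-534 free a chain of PRP descriptor pages: iod->list[] records each page, and (in the mainline driver) the last slot of every page holds the DMA address of the next one. A sketch of the whole function under that assumption; the last_prp chaining is the reconstructed part:

static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->nr_allocations; i++) {
		__le64 *prp_list = iod->list[i].prp_list;
		/* the last entry of each PRP page chains to the next page */
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}
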
544 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_unmap_data() local
546 if (iod->dma_len) { in nvme_unmap_data()
547 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, in nvme_unmap_data()
552 WARN_ON_ONCE(!iod->sgt.nents); in nvme_unmap_data()
554 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_unmap_data()
556 if (iod->nr_allocations == 0) in nvme_unmap_data()
557 dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, in nvme_unmap_data()
558 iod->first_dma); in nvme_unmap_data()
559 else if (iod->nr_allocations == 1) in nvme_unmap_data()
560 dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, in nvme_unmap_data()
561 iod->first_dma); in nvme_unmap_data()
564 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_unmap_data()
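
Lines 544-564 are the teardown path; the branches between them are not part of the listing and are filled in here as an approximation. dma_len distinguishes the single-bvec fast path, and nr_allocations records which pool the descriptor page came from:

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		/* fast path: one dma_map_bvec() mapping to undo, no scatterlist */
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->sgt.nents);

	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

	if (iod->nr_allocations == 0)
		dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else if (iod->nr_allocations == 1)
		dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
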
584 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_setup_prps() local
587 struct scatterlist *sg = iod->sgt.sgl; in nvme_pci_setup_prps()
597 iod->first_dma = 0; in nvme_pci_setup_prps()
611 iod->first_dma = dma_addr; in nvme_pci_setup_prps()
618 iod->nr_allocations = 0; in nvme_pci_setup_prps()
621 iod->nr_allocations = 1; in nvme_pci_setup_prps()
626 iod->nr_allocations = -1; in nvme_pci_setup_prps()
629 iod->list[0].prp_list = prp_list; in nvme_pci_setup_prps()
630 iod->first_dma = prp_dma; in nvme_pci_setup_prps()
638 iod->list[iod->nr_allocations++].prp_list = prp_list; in nvme_pci_setup_prps()
658 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); in nvme_pci_setup_prps()
659 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); in nvme_pci_setup_prps()
665 WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), in nvme_pci_setup_prps()
667 blk_rq_payload_bytes(req), iod->sgt.nents); in nvme_pci_setup_prps()
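
The nr_allocations values written at lines 618, 621 and 626 record which dma_pool the first descriptor page came from, which is exactly what nvme_unmap_data() checks above. A condensed sketch of that step of nvme_pci_setup_prps(); the local declarations (pool, nprps, length, prp_list, prp_dma) and the PRP fill loop are omitted or reconstructed:

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;	/* fits in a 256-byte chunk */
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->nr_allocations = -1;	/* nothing for unmap to free */
		return BLK_STS_RESOURCE;
	}
	iod->list[0].prp_list = prp_list;
	iod->first_dma = prp_dma;
	/*
	 * The fill loop then writes one PRP per controller page, allocating
	 * further pages via iod->list[iod->nr_allocations++].prp_list (line 638)
	 * as needed; lines 658-659 finally point prp1 at the first data segment
	 * and prp2 at the descriptor chain.
	 */
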
690 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_setup_sgls() local
693 struct scatterlist *sg = iod->sgt.sgl; in nvme_pci_setup_sgls()
694 unsigned int entries = iod->sgt.nents; in nvme_pci_setup_sgls()
708 iod->nr_allocations = 0; in nvme_pci_setup_sgls()
711 iod->nr_allocations = 1; in nvme_pci_setup_sgls()
716 iod->nr_allocations = -1; in nvme_pci_setup_sgls()
720 iod->list[0].sg_list = sg_list; in nvme_pci_setup_sgls()
721 iod->first_dma = sgl_dma; in nvme_pci_setup_sgls()
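
nvme_pci_setup_sgls() makes the same pool choice, but the page it allocates holds struct nvme_sgl_desc entries, stored through the sg_list side of the union (line 720). A sketch of the allocation step, with locals and the per-segment descriptor loop omitted:

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}
	iod->list[0].sg_list = sg_list;
	iod->first_dma = sgl_dma;
	/* the command's SGL segment descriptor points at sgl_dma, and one data
	 * descriptor is then filled in per mapped scatterlist entry */
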
736 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_setup_prp_simple() local
740 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_prp_simple()
741 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_prp_simple()
743 iod->dma_len = bv->bv_len; in nvme_setup_prp_simple()
745 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); in nvme_setup_prp_simple()
747 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); in nvme_setup_prp_simple()
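
For a request that fits in one bio_vec, no scatterlist or descriptor page is needed: the bvec is mapped directly and dma_len marks the fast path that nvme_unmap_data() undoes with dma_unmap_page(). A sketch assembling lines 736-747; the first_prp_len arithmetic and the prp2 condition are reconstructed:

static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return BLK_STS_OK;
}
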
757 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_setup_sgl_simple() local
759 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_sgl_simple()
760 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_sgl_simple()
762 iod->dma_len = bv->bv_len; in nvme_setup_sgl_simple()
765 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); in nvme_setup_sgl_simple()
766 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); in nvme_setup_sgl_simple()
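
The SGL fast path is the same idea with a single inline data descriptor instead of a PRP pair. A sketch; the flags and type assignments do not appear in the listing and are taken from the mainline driver:

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
}
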
774 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_map_data() local
794 iod->dma_len = 0; in nvme_map_data()
795 iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); in nvme_map_data()
796 if (!iod->sgt.sgl) in nvme_map_data()
798 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); in nvme_map_data()
799 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); in nvme_map_data()
800 if (!iod->sgt.orig_nents) in nvme_map_data()
803 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), in nvme_map_data()
811 if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) in nvme_map_data()
820 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_map_data()
822 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_map_data()
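
Lines 774-822 form the general path of nvme_map_data(): build a scatterlist in the mempool-backed buffer, DMA-map it as an sg_table, then hand off to the SGL or PRP builder, unwinding both steps on error. A sketch with the single-bvec fast-path checks (which dispatch to the two _simple helpers above) omitted; the error labels and the DMA_ATTR_NO_WARN flag are reconstructed:

	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int rc;

	iod->dma_len = 0;
	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sgt.sgl)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
	if (!iod->sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc) {
		if (rc == -EREMOTEIO)
			ret = BLK_STS_TARGET;
		goto out_free_sg;
	}

	if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
	return ret;
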
829 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_map_metadata() local
831 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), in nvme_map_metadata()
833 if (dma_mapping_error(dev->dev, iod->meta_dma)) in nvme_map_metadata()
835 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); in nvme_map_metadata()
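
Metadata (integrity) buffers bypass all of the above: the single integrity bvec is mapped on its own and its DMA address goes straight into the command. Roughly, with the BLK_STS_IOERR return reconstructed:

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
				     rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;
}
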
841 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_prep_rq() local
844 iod->aborted = false; in nvme_prep_rq()
845 iod->nr_allocations = -1; in nvme_prep_rq()
846 iod->sgt.nents = 0; in nvme_prep_rq()
853 ret = nvme_map_data(dev, req, &iod->cmd); in nvme_prep_rq()
859 ret = nvme_map_metadata(dev, req, &iod->cmd); in nvme_prep_rq()
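
nvme_prep_rq() is where this state machine starts: the resets at lines 844-846 put the fields the unmap path inspects into a known state before anything is mapped. A sketch of the control flow; the helpers outside the listed lines (nvme_setup_cmd, nvme_start_request, nvme_cleanup_cmd) and the error labels are reconstructed from the mainline driver:

static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	iod->aborted = false;
	iod->nr_allocations = -1;	/* nothing allocated yet */
	iod->sgt.nents = 0;

	ret = nvme_setup_cmd(req->q->queuedata, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &iod->cmd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &iod->cmd);
		if (ret)
			goto out_unmap_data;
	}

	nvme_start_request(req);
	return BLK_STS_OK;
out_unmap_data:
	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}
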
882 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_queue_rq() local
899 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_queue_rq()
910 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_submit_cmds() local
912 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_submit_cmds()
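
At submit time the command is not rebuilt; both nvme_queue_rq() and the batched nvme_submit_cmds() path simply copy iod->cmd into the hardware submission queue. The copy helper amounts to roughly the following (the doorbell write happens separately in the real driver):

static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
				    struct nvme_command *cmd)
{
	/* copy into the next free SQ slot and advance the tail index */
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), cmd,
	       sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
}
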
969 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_unmap_rq() local
971 dma_unmap_page(dev->dev, iod->meta_dma, in nvme_pci_unmap_rq()
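
Completion runs the mirror image: the integrity mapping is undone with the length taken from the integrity bvec, then nvme_unmap_data() tears down the data side. Approximately:

static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;

	if (blk_integrity_rq(req)) {
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
	}

	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
}
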
1284 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_timeout() local
1351 if (!nvmeq->qid || iod->aborted) { in nvme_timeout()
1366 iod->aborted = true; in nvme_timeout()
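
Finally, iod->aborted is a one-shot guard in the timeout handler: the first timeout of an I/O-queue request sends an NVMe Abort and sets the flag, while a second timeout (or any admin-queue timeout) escalates to a controller reset. The shape of that check; the surrounding recovery logic is omitted and the reset calls are reconstructed:

	if (!nvmeq->qid || iod->aborted) {
		/* admin queue, or an Abort was already tried: reset the controller */
		nvme_dev_disable(nvmeq->dev, false);
		nvme_reset_ctrl(&nvmeq->dev->ctrl);
		return BLK_EH_DONE;
	}

	iod->aborted = true;	/* only one Abort attempt per request */
	/* ...build an nvme_admin_abort_cmd naming this request's command ID and
	 * submission queue, send it on the admin queue, and return
	 * BLK_EH_RESET_TIMER to give the Abort time to take effect... */
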