Lines matching refs: rsp (references to struct nvmet_rdma_rsp in the NVMe-oF target RDMA transport, nvmet-rdma). Each entry shows the source line number, the matched line, and the enclosing function.

164 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
194 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_need_data_in() argument
196 return nvme_is_write(rsp->req.cmd) && in nvmet_rdma_need_data_in()
197 rsp->req.transfer_len && in nvmet_rdma_need_data_in()
198 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); in nvmet_rdma_need_data_in()
201 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_need_data_out() argument
203 return !nvme_is_write(rsp->req.cmd) && in nvmet_rdma_need_data_out()
204 rsp->req.transfer_len && in nvmet_rdma_need_data_out()
205 !rsp->req.cqe->status && in nvmet_rdma_need_data_out()
206 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); in nvmet_rdma_need_data_out()
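
The two predicates above are almost fully recoverable from the matched lines; a sketch with only the braces filled in (signatures and bodies as listed):

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
        /* host-to-controller data that was not carried inline must be
         * fetched with an RDMA READ before the command can execute */
        return nvme_is_write(rsp->req.cmd) &&
                rsp->req.transfer_len &&
                !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
        /* controller-to-host data is pushed with an RDMA WRITE, but only
         * when the command succeeded and the payload was not inline */
        return !nvme_is_write(rsp->req.cmd) &&
                rsp->req.transfer_len &&
                !rsp->req.cqe->status &&
                !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
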
212 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_get_rsp() local
216 rsp = list_first_entry_or_null(&queue->free_rsps, in nvmet_rdma_get_rsp()
218 if (likely(rsp)) in nvmet_rdma_get_rsp()
219 list_del(&rsp->free_list); in nvmet_rdma_get_rsp()
222 if (unlikely(!rsp)) { in nvmet_rdma_get_rsp()
225 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); in nvmet_rdma_get_rsp()
226 if (unlikely(!rsp)) in nvmet_rdma_get_rsp()
228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); in nvmet_rdma_get_rsp()
230 kfree(rsp); in nvmet_rdma_get_rsp()
234 rsp->allocated = true; in nvmet_rdma_get_rsp()
237 return rsp; in nvmet_rdma_get_rsp()
241 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_put_rsp() argument
245 if (unlikely(rsp->allocated)) { in nvmet_rdma_put_rsp()
246 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
247 kfree(rsp); in nvmet_rdma_put_rsp()
251 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
252 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); in nvmet_rdma_put_rsp()
253 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
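
A sketch of the rsp allocation pair reconstructed from the matched lines 212-253. The irqsave locking around the free-list pop in nvmet_rdma_get_rsp() and the NULL returns on the failure paths are assumptions; everything else appears in the listing.

static struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
        struct nvmet_rdma_rsp *rsp;
        unsigned long flags;

        /* fast path: pop a pre-allocated rsp off the per-queue free list
         * (the irqsave locking here is assumed, mirroring the put path) */
        spin_lock_irqsave(&queue->rsps_lock, flags);
        rsp = list_first_entry_or_null(&queue->free_rsps,
                                       struct nvmet_rdma_rsp, free_list);
        if (likely(rsp))
                list_del(&rsp->free_list);
        spin_unlock_irqrestore(&queue->rsps_lock, flags);

        if (unlikely(!rsp)) {
                int ret;

                /* free list exhausted: fall back to a dynamic allocation */
                rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
                if (unlikely(!rsp))
                        return NULL;
                ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
                if (unlikely(ret)) {
                        kfree(rsp);
                        return NULL;
                }
                /* remember that this rsp must be freed, not recycled */
                rsp->allocated = true;
        }

        return rsp;
}

static void nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
        unsigned long flags;

        if (unlikely(rsp->allocated)) {
                /* dynamically allocated fallback: release it outright */
                nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
                kfree(rsp);
                return;
        }

        /* pre-allocated rsp: return it to the queue's free list */
        spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
        list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
        spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
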
464 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps() local
466 ret = nvmet_rdma_alloc_rsp(ndev, rsp); in nvmet_rdma_alloc_rsps()
470 list_add_tail(&rsp->free_list, &queue->free_rsps); in nvmet_rdma_alloc_rsps()
477 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps() local
479 list_del(&rsp->free_list); in nvmet_rdma_alloc_rsps()
480 nvmet_rdma_free_rsp(ndev, rsp); in nvmet_rdma_alloc_rsps()
493 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_free_rsps() local
495 list_del(&rsp->free_list); in nvmet_rdma_free_rsps()
496 nvmet_rdma_free_rsp(ndev, rsp); in nvmet_rdma_free_rsps()
525 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_process_wr_wait_list() local
528 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
530 list_del(&rsp->wait_list); in nvmet_rdma_process_wr_wait_list()
533 ret = nvmet_rdma_execute_command(rsp); in nvmet_rdma_process_wr_wait_list()
537 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
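
A sketch of the wait-list drain built from lines 525-537. The spinlock name rsp_wr_wait_lock and the drop/retake around command execution are assumptions; the list handling and the re-queue on failure are the matched lines.

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
        /* the lock name and the unlock/relock around execution are assumed;
         * only the list handling below appears in the matched lines */
        spin_lock(&queue->rsp_wr_wait_lock);
        while (!list_empty(&queue->rsp_wr_wait_list)) {
                struct nvmet_rdma_rsp *rsp;
                bool ret;

                rsp = list_entry(queue->rsp_wr_wait_list.next,
                                 struct nvmet_rdma_rsp, wait_list);
                list_del(&rsp->wait_list);

                spin_unlock(&queue->rsp_wr_wait_lock);
                ret = nvmet_rdma_execute_command(rsp);
                spin_lock(&queue->rsp_wr_wait_lock);

                if (!ret) {
                        /* still no send-queue room: put it back and stop */
                        list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
                        break;
                }
        }
        spin_unlock(&queue->rsp_wr_wait_lock);
}
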
633 static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key, in nvmet_rdma_rw_ctx_init() argument
636 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init()
637 struct nvmet_req *req = &rsp->req; in nvmet_rdma_rw_ctx_init()
641 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp, in nvmet_rdma_rw_ctx_init()
646 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_init()
653 static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_rw_ctx_destroy() argument
655 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy()
656 struct nvmet_req *req = &rsp->req; in nvmet_rdma_rw_ctx_destroy()
659 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp, in nvmet_rdma_rw_ctx_destroy()
664 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_destroy()
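
A sketch of the rw-context helpers from lines 633-664, filled out with the rdma_rw_ctx_init()/rdma_rw_ctx_signature_init()/rdma_rw_ctx_destroy*() argument lists of the in-kernel rdma/rw.h API; the metadata_len branch and the use of nvmet_data_dir() are assumptions not shown in the listing.

static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
                struct ib_sig_attrs *sig_attrs)
{
        struct rdma_cm_id *cm_id = rsp->queue->cm_id;
        struct nvmet_req *req = &rsp->req;
        int ret;

        if (req->metadata_len)
                /* T10-PI: map data plus metadata through a signature MR */
                ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
                                cm_id->port_num, req->sg, req->sg_cnt,
                                req->metadata_sg, req->metadata_sg_cnt,
                                sig_attrs, addr, key, nvmet_data_dir(req));
        else
                ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
                                req->sg, req->sg_cnt, 0, addr, key,
                                nvmet_data_dir(req));

        return ret;
}

static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
{
        struct rdma_cm_id *cm_id = rsp->queue->cm_id;
        struct nvmet_req *req = &rsp->req;

        if (req->metadata_len)
                rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
                                cm_id->port_num, req->sg, req->sg_cnt,
                                req->metadata_sg, req->metadata_sg_cnt,
                                nvmet_data_dir(req));
        else
                rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
                                req->sg, req->sg_cnt, nvmet_data_dir(req));
}
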
668 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_release_rsp() argument
670 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp()
672 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
674 if (rsp->n_rdma) in nvmet_rdma_release_rsp()
675 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_release_rsp()
677 if (rsp->req.sg != rsp->cmd->inline_sg) in nvmet_rdma_release_rsp()
678 nvmet_req_free_sgls(&rsp->req); in nvmet_rdma_release_rsp()
683 nvmet_rdma_put_rsp(rsp); in nvmet_rdma_release_rsp()
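
A sketch of the release path from lines 668-683. The retry of commands parked for lack of send-queue room is an assumption; the WR accounting, rw-context teardown, SGL freeing and final put are the matched lines.

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
        struct nvmet_rdma_queue *queue = rsp->queue;

        /* give back the send WR plus any RDMA R/W WRs this command used */
        atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

        if (rsp->n_rdma)
                nvmet_rdma_rw_ctx_destroy(rsp);

        /* only free SGLs we allocated; inline data reuses the recv buffer */
        if (rsp->req.sg != rsp->cmd->inline_sg)
                nvmet_req_free_sgls(&rsp->req);

        /* retrying commands parked for lack of WR space is assumed here;
         * it is not part of the matched lines */
        if (unlikely(!list_empty(&queue->rsp_wr_wait_list)))
                nvmet_rdma_process_wr_wait_list(queue);

        nvmet_rdma_put_rsp(rsp);
}
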
702 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_send_done() local
706 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_send_done()
718 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_queue_response() local
720 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
723 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) { in nvmet_rdma_queue_response()
724 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV; in nvmet_rdma_queue_response()
725 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey; in nvmet_rdma_queue_response()
727 rsp->send_wr.opcode = IB_WR_SEND; in nvmet_rdma_queue_response()
730 if (nvmet_rdma_need_data_out(rsp)) { in nvmet_rdma_queue_response()
731 if (rsp->req.metadata_len) in nvmet_rdma_queue_response()
732 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, in nvmet_rdma_queue_response()
733 cm_id->port_num, &rsp->write_cqe, NULL); in nvmet_rdma_queue_response()
735 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, in nvmet_rdma_queue_response()
736 cm_id->port_num, NULL, &rsp->send_wr); in nvmet_rdma_queue_response()
738 first_wr = &rsp->send_wr; in nvmet_rdma_queue_response()
741 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
743 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
744 rsp->send_sge.addr, rsp->send_sge.length, in nvmet_rdma_queue_response()
749 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_queue_response()
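
A sketch of the response path from lines 718-749: remote-invalidate selection, chaining of an RDMA WRITE (or a separate PI write completion) in front of the response send, recv re-post, DMA sync, and post. The final ib_post_send() call and its error message are assumptions; the release on failure is line 749.

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
        struct nvmet_rdma_rsp *rsp =
                container_of(req, struct nvmet_rdma_rsp, req);
        struct rdma_cm_id *cm_id = rsp->queue->cm_id;
        struct ib_send_wr *first_wr;

        if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
                /* piggy-back the remote invalidate on the response send */
                rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
                rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
        } else {
                rsp->send_wr.opcode = IB_WR_SEND;
        }

        if (nvmet_rdma_need_data_out(rsp)) {
                if (rsp->req.metadata_len)
                        /* PI case: the WRITE completes on its own CQE */
                        first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
                                        cm_id->port_num, &rsp->write_cqe, NULL);
                else
                        /* chain the RDMA WRITE in front of the send WR */
                        first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
                                        cm_id->port_num, NULL, &rsp->send_wr);
        } else {
                first_wr = &rsp->send_wr;
        }

        nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

        ib_dma_sync_single_for_device(rsp->queue->dev->device,
                        rsp->send_sge.addr, rsp->send_sge.length,
                        DMA_TO_DEVICE);

        /* the post and the error message are assumed; the release on
         * failure is what matched line 749 shows */
        if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
                pr_err("sending cmd response failed\n");
                nvmet_rdma_release_rsp(rsp);
        }
}
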
755 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_read_data_done() local
760 WARN_ON(rsp->n_rdma <= 0); in nvmet_rdma_read_data_done()
761 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
762 rsp->n_rdma = 0; in nvmet_rdma_read_data_done()
765 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_read_data_done()
766 nvmet_req_uninit(&rsp->req); in nvmet_rdma_read_data_done()
767 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_read_data_done()
776 if (rsp->req.metadata_len) in nvmet_rdma_read_data_done()
777 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); in nvmet_rdma_read_data_done()
778 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_read_data_done()
781 nvmet_req_complete(&rsp->req, status); in nvmet_rdma_read_data_done()
783 rsp->req.execute(&rsp->req); in nvmet_rdma_read_data_done()
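
A sketch of the RDMA READ completion handler from lines 755-783 (the WRITE completion handler at lines 788-825 follows the same pattern). Recovering the queue from the work completion and the abbreviated error path are assumptions.

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct nvmet_rdma_rsp *rsp =
                container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
        struct nvmet_rdma_queue *queue = wc->qp->qp_context;
        u16 status = 0;

        /* the READ consumed RDMA WRs; give them back before anything else */
        WARN_ON(rsp->n_rdma <= 0);
        atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
        rsp->n_rdma = 0;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                /* failed RDMA READ: tear everything down without executing
                 * (error logging in the real handler is omitted here) */
                nvmet_rdma_rw_ctx_destroy(rsp);
                nvmet_req_uninit(&rsp->req);
                nvmet_rdma_release_rsp(rsp);
                return;
        }

        /* verify T10-PI on the just-transferred data, if any */
        if (rsp->req.metadata_len)
                status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
        nvmet_rdma_rw_ctx_destroy(rsp);

        if (unlikely(status))
                nvmet_req_complete(&rsp->req, status);
        else
                rsp->req.execute(&rsp->req);
}
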
788 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_write_data_done() local
791 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_write_data_done()
797 WARN_ON(rsp->n_rdma <= 0); in nvmet_rdma_write_data_done()
798 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_write_data_done()
799 rsp->n_rdma = 0; in nvmet_rdma_write_data_done()
802 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_write_data_done()
803 nvmet_req_uninit(&rsp->req); in nvmet_rdma_write_data_done()
804 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_write_data_done()
818 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); in nvmet_rdma_write_data_done()
820 rsp->req.cqe->status = cpu_to_le16(status << 1); in nvmet_rdma_write_data_done()
821 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_write_data_done()
823 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) { in nvmet_rdma_write_data_done()
825 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_write_data_done()
829 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len, in nvmet_rdma_use_inline_sg() argument
836 sg = rsp->cmd->inline_sg; in nvmet_rdma_use_inline_sg()
849 rsp->req.sg = rsp->cmd->inline_sg; in nvmet_rdma_use_inline_sg()
850 rsp->req.sg_cnt = sg_count; in nvmet_rdma_use_inline_sg()
853 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_map_sgl_inline() argument
855 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl; in nvmet_rdma_map_sgl_inline()
859 if (!nvme_is_write(rsp->req.cmd)) { in nvmet_rdma_map_sgl_inline()
860 rsp->req.error_loc = in nvmet_rdma_map_sgl_inline()
865 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
874 nvmet_rdma_use_inline_sg(rsp, len, off); in nvmet_rdma_map_sgl_inline()
875 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA; in nvmet_rdma_map_sgl_inline()
876 rsp->req.transfer_len += len; in nvmet_rdma_map_sgl_inline()
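
A sketch of the inline-SGL mapping from lines 853-876. The offset/length extraction from the SGL descriptor and the exact NVMe status codes returned on the two error paths are assumptions.

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
        struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
        u64 off = le64_to_cpu(sgl->addr);
        u32 len = le32_to_cpu(sgl->length);

        /* inline data only makes sense for host-to-controller transfers */
        if (!nvme_is_write(rsp->req.cmd)) {
                rsp->req.error_loc =
                        offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        /* the inline payload must fit the advertised inline data size */
        if (off + len > rsp->queue->dev->inline_data_size)
                return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;

        /* no-data command: nothing to map */
        if (!len)
                return 0;

        nvmet_rdma_use_inline_sg(rsp, len, off);
        rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
        rsp->req.transfer_len += len;
        return 0;
}
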
880 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, in nvmet_rdma_map_sgl_keyed() argument
888 rsp->req.transfer_len = get_unaligned_le24(sgl->length); in nvmet_rdma_map_sgl_keyed()
891 if (!rsp->req.transfer_len) in nvmet_rdma_map_sgl_keyed()
894 if (rsp->req.metadata_len) in nvmet_rdma_map_sgl_keyed()
895 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs); in nvmet_rdma_map_sgl_keyed()
897 ret = nvmet_req_alloc_sgls(&rsp->req); in nvmet_rdma_map_sgl_keyed()
901 ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs); in nvmet_rdma_map_sgl_keyed()
904 rsp->n_rdma += ret; in nvmet_rdma_map_sgl_keyed()
907 rsp->invalidate_rkey = key; in nvmet_rdma_map_sgl_keyed()
908 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY; in nvmet_rdma_map_sgl_keyed()
914 rsp->req.transfer_len = 0; in nvmet_rdma_map_sgl_keyed()
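
A sketch of the keyed-SGL mapping from lines 880-914. The address/key extraction, the goto-style error unwind and the NVME_SC_INTERNAL status are assumptions; the transfer-length setup, signature attributes, SGL allocation, rw-context init and invalidate-rkey handling are the matched lines.

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
                struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
        u64 addr = le64_to_cpu(sgl->addr);
        u32 key = get_unaligned_le32(sgl->key);
        struct ib_sig_attrs sig_attrs;
        int ret;

        rsp->req.transfer_len = get_unaligned_le24(sgl->length);

        /* no-data command: nothing to set up */
        if (!rsp->req.transfer_len)
                return 0;

        if (rsp->req.metadata_len)
                nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);

        ret = nvmet_req_alloc_sgls(&rsp->req);
        if (unlikely(ret < 0))
                goto error_out;

        ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
        if (unlikely(ret < 0))
                goto error_out;
        rsp->n_rdma += ret;     /* WRs the rw context will consume */

        if (invalidate) {
                /* ask the send path to remotely invalidate the host's rkey */
                rsp->invalidate_rkey = key;
                rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
        }

        return 0;

error_out:
        rsp->req.transfer_len = 0;
        return NVME_SC_INTERNAL;
}
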
918 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_map_sgl() argument
920 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl; in nvmet_rdma_map_sgl()
926 return nvmet_rdma_map_sgl_inline(rsp); in nvmet_rdma_map_sgl()
929 rsp->req.error_loc = in nvmet_rdma_map_sgl()
936 return nvmet_rdma_map_sgl_keyed(rsp, sgl, true); in nvmet_rdma_map_sgl()
938 return nvmet_rdma_map_sgl_keyed(rsp, sgl, false); in nvmet_rdma_map_sgl()
941 rsp->req.error_loc = in nvmet_rdma_map_sgl()
947 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_rdma_map_sgl()
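
A sketch of the SGL dispatcher from lines 918-947. The nested switch over the NVMe SGL descriptor type/subtype constants is an assumption; the three outcomes (inline mapping, keyed mapping with or without remote invalidate, and error_loc pointing at dptr on invalid descriptors) are the matched lines.

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
        struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

        switch (sgl->type >> 4) {
        case NVME_SGL_FMT_DATA_DESC:
                /* plain data descriptor with an offset: inline data */
                if ((sgl->type & 0xf) == NVME_SGL_FMT_OFFSET)
                        return nvmet_rdma_map_sgl_inline(rsp);
                rsp->req.error_loc =
                        offsetof(struct nvme_common_command, dptr);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        case NVME_KEY_SGL_FMT_DATA_DESC:
                /* keyed descriptor: host memory reached via RDMA R/W */
                if ((sgl->type & 0xf) == NVME_SGL_FMT_INVALIDATE)
                        return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
                if ((sgl->type & 0xf) == NVME_SGL_FMT_ADDRESS)
                        return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
                rsp->req.error_loc =
                        offsetof(struct nvme_common_command, dptr);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        default:
                rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }
}
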
952 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_execute_command() argument
954 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command()
956 if (unlikely(atomic_sub_return(1 + rsp->n_rdma, in nvmet_rdma_execute_command()
959 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
961 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
965 if (nvmet_rdma_need_data_in(rsp)) { in nvmet_rdma_execute_command()
966 if (rdma_rw_ctx_post(&rsp->rw, queue->qp, in nvmet_rdma_execute_command()
967 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
968 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); in nvmet_rdma_execute_command()
970 rsp->req.execute(&rsp->req); in nvmet_rdma_execute_command()
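
A sketch of command execution from lines 952-970. The pr_debug() wording and the bool return values are partly assumed; the WR-slot reservation, the rollback when the send queue is full, and the data-in versus immediate-execute split are the matched lines.

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
        struct nvmet_rdma_queue *queue = rsp->queue;

        /* reserve send-queue slots: one send WR plus the RDMA R/W WRs */
        if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
                        &queue->sq_wr_avail) < 0)) {
                pr_debug("IB send queue full (needed %d): queue %d\n",
                         1 + rsp->n_rdma, queue->idx);
                atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
                return false;   /* caller parks the rsp on the wait list */
        }

        if (nvmet_rdma_need_data_in(rsp)) {
                /* pull the write payload from the host first; execution
                 * continues in the READ completion handler */
                if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
                                queue->cm_id->port_num, &rsp->read_cqe, NULL))
                        nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
        } else {
                rsp->req.execute(&rsp->req);
        }

        return true;
}
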
1013 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_recv_done() local
1032 rsp = nvmet_rdma_get_rsp(queue); in nvmet_rdma_recv_done()
1033 if (unlikely(!rsp)) { in nvmet_rdma_recv_done()
1042 rsp->queue = queue; in nvmet_rdma_recv_done()
1043 rsp->cmd = cmd; in nvmet_rdma_recv_done()
1044 rsp->flags = 0; in nvmet_rdma_recv_done()
1045 rsp->req.cmd = cmd->nvme_cmd; in nvmet_rdma_recv_done()
1046 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
1047 rsp->n_rdma = 0; in nvmet_rdma_recv_done()
1054 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_done()
1056 nvmet_rdma_put_rsp(rsp); in nvmet_rdma_recv_done()
1061 nvmet_rdma_handle_command(queue, rsp); in nvmet_rdma_recv_done()
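
An excerpt sketch of the tail of nvmet_rdma_recv_done() around lines 1042-1061. The queue state names (NVMET_RDMA_Q_LIVE, NVMET_RDMA_IN_DEVICE_REMOVAL) and the state_lock are assumptions; the rsp initialisation, the park on rsp_wait_list, the put_rsp fallback and the final dispatch are the matched lines.

        /* bind the freshly received command to the rsp */
        rsp->queue = queue;
        rsp->cmd = cmd;
        rsp->flags = 0;
        rsp->req.cmd = cmd->nvme_cmd;
        rsp->req.port = queue->port;
        rsp->n_rdma = 0;

        if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
                unsigned long flags;

                /* assumed: on a queue that is not live, either park the
                 * command for later processing or give the rsp back */
                spin_lock_irqsave(&queue->state_lock, flags);
                if (queue->state == NVMET_RDMA_IN_DEVICE_REMOVAL)
                        list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
                else
                        nvmet_rdma_put_rsp(rsp);
                spin_unlock_irqrestore(&queue->state_lock, flags);
                return;
        }

        nvmet_rdma_handle_command(queue, rsp);
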
1652 struct nvmet_rdma_rsp *rsp; in __nvmet_rdma_queue_disconnect() local
1654 rsp = list_first_entry(&queue->rsp_wait_list, in __nvmet_rdma_queue_disconnect()
1657 list_del(&rsp->wait_list); in __nvmet_rdma_queue_disconnect()
1658 nvmet_rdma_put_rsp(rsp); in __nvmet_rdma_queue_disconnect()
1985 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_disc_port_addr() local
1987 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()