Lines matching refs:cmd
Cross-reference hits for the identifier cmd in the Linux NVMe-over-TCP target driver (drivers/nvme/target/tcp.c). Each entry gives the source line number, the matching line, and how cmd is used there (struct member, function argument, or local variable).

123 struct nvmet_tcp_cmd *cmd; member
168 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
169 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
170 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
173 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_cmd_tag() argument
180 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
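The tag helper shows how the driver encodes a command's transfer tag (ttag): it is simply the command's index in the queue's preallocated cmds[] array, which is why nvmet_tcp_handle_h2c_data_pdu() further down can index queue->cmds[data->ttag] directly. A minimal reconstruction from the matched lines (the u16 return type is an assumption):

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	/* ttag == index of the command in the queue's cmds[] array */
	return cmd - queue->cmds;
}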
183 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_data_in() argument
185 return nvme_is_write(cmd->req.cmd) && in nvmet_tcp_has_data_in()
186 cmd->rbytes_done < cmd->req.transfer_len; in nvmet_tcp_has_data_in()
189 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_in() argument
191 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; in nvmet_tcp_need_data_in()
194 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_out() argument
196 return !nvme_is_write(cmd->req.cmd) && in nvmet_tcp_need_data_out()
197 cmd->req.transfer_len > 0 && in nvmet_tcp_need_data_out()
198 !cmd->req.cqe->status; in nvmet_tcp_need_data_out()
201 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_inline_data() argument
203 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len && in nvmet_tcp_has_inline_data()
204 !cmd->rbytes_done; in nvmet_tcp_has_inline_data()
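These four predicates drive both the receive and send paths: a write with bytes still outstanding "has data in"; it "needs data in" only while no error status has been set on the CQE; a read with a nonzero transfer length and no error "needs data out" (C2H data); and a write whose pdu_len is set before any payload bytes were received carries in-capsule (inline) data. The matched lines give the bodies essentially verbatim:

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}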
210 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_get_cmd() local
212 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
214 if (!cmd) in nvmet_tcp_get_cmd()
216 list_del_init(&cmd->entry); in nvmet_tcp_get_cmd()
218 cmd->rbytes_done = cmd->wbytes_done = 0; in nvmet_tcp_get_cmd()
219 cmd->pdu_len = 0; in nvmet_tcp_get_cmd()
220 cmd->pdu_recv = 0; in nvmet_tcp_get_cmd()
221 cmd->iov = NULL; in nvmet_tcp_get_cmd()
222 cmd->flags = 0; in nvmet_tcp_get_cmd()
223 return cmd; in nvmet_tcp_get_cmd()
226 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_put_cmd() argument
228 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
231 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
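Command allocation is a simple free-list pop plus per-I/O state reset; release pushes back onto the free list, except for the dedicated connect command, which is embedded in the queue and never recycled. Reassembled from the matched lines; the NULL return and the early return for the connect command fill the elided lines and are the obvious completions:

static struct nvmet_tcp_cmd *nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	/* reset per-I/O state before handing the slot out again */
	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	/* the connect command lives in the queue itself, not in cmds[] */
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}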
302 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_free_cmd_buffers() argument
304 WARN_ON(unlikely(cmd->nr_mapped > 0)); in nvmet_tcp_free_cmd_buffers()
306 kfree(cmd->iov); in nvmet_tcp_free_cmd_buffers()
307 sgl_free(cmd->req.sg); in nvmet_tcp_free_cmd_buffers()
308 cmd->iov = NULL; in nvmet_tcp_free_cmd_buffers()
309 cmd->req.sg = NULL; in nvmet_tcp_free_cmd_buffers()
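Buffer teardown frees the kvec array and the scatterlist together; the WARN_ON documents the ordering invariant that nvmet_tcp_unmap_pdu_iovec() must have dropped all kmap'ed pages first. The matched lines give the body almost verbatim:

static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
	/* caller must have unmapped the PDU iovec (nr_mapped back to 0) */
	WARN_ON(unlikely(cmd->nr_mapped > 0));

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->iov = NULL;
	cmd->req.sg = NULL;
}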
312 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_unmap_pdu_iovec() argument
317 sg = &cmd->req.sg[cmd->sg_idx]; in nvmet_tcp_unmap_pdu_iovec()
319 for (i = 0; i < cmd->nr_mapped; i++) in nvmet_tcp_unmap_pdu_iovec()
322 cmd->nr_mapped = 0; in nvmet_tcp_unmap_pdu_iovec()
325 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_map_pdu_iovec() argument
327 struct kvec *iov = cmd->iov; in nvmet_tcp_map_pdu_iovec()
331 length = cmd->pdu_len; in nvmet_tcp_map_pdu_iovec()
332 cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); in nvmet_tcp_map_pdu_iovec()
333 offset = cmd->rbytes_done; in nvmet_tcp_map_pdu_iovec()
334 cmd->sg_idx = offset / PAGE_SIZE; in nvmet_tcp_map_pdu_iovec()
336 sg = &cmd->req.sg[cmd->sg_idx]; in nvmet_tcp_map_pdu_iovec()
350 iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, in nvmet_tcp_map_pdu_iovec()
351 cmd->nr_mapped, cmd->pdu_len); in nvmet_tcp_map_pdu_iovec()
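The map/unmap pair turns the section of the scatterlist starting at rbytes_done into a kvec array that sock_recvmsg() can fill: sg_idx is the page-sized stride into the sg list, and one kvec is built per mapped page. The loop body between lines 336 and 350 is not in the match output; the version below fills it with the standard kmap-per-entry pattern and is a reconstruction, not a verbatim copy:

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));

	cmd->nr_mapped = 0;
}

static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;		/* assumed; elided in the match */
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		/* reconstructed loop body: one kvec per kmap'ed sg page */
		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}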
371 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_map_data() argument
373 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_map_data()
381 if (!nvme_is_write(cmd->req.cmd)) in nvmet_tcp_map_data()
384 if (len > cmd->req.port->inline_data_size) in nvmet_tcp_map_data()
386 cmd->pdu_len = len; in nvmet_tcp_map_data()
388 cmd->req.transfer_len += len; in nvmet_tcp_map_data()
390 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); in nvmet_tcp_map_data()
391 if (!cmd->req.sg) in nvmet_tcp_map_data()
393 cmd->cur_sg = cmd->req.sg; in nvmet_tcp_map_data()
395 if (nvmet_tcp_has_data_in(cmd)) { in nvmet_tcp_map_data()
396 cmd->iov = kmalloc_array(cmd->req.sg_cnt, in nvmet_tcp_map_data()
397 sizeof(*cmd->iov), GFP_KERNEL); in nvmet_tcp_map_data()
398 if (!cmd->iov) in nvmet_tcp_map_data()
404 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_map_data()
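nvmet_tcp_map_data() sizes the command's buffers from the in-capsule SGL descriptor: for a write carrying inline data it caps the length at the port's inline_data_size and records it in pdu_len, then allocates the scatterlist, and for host-to-controller data additionally allocates the kvec array used by the receive path. A reconstruction; the SGL-type dispatch and the specific NVMe status codes on the error paths are assumptions filling the elided lines:

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	/* assumed: in-capsule data uses the offset SGL descriptor format */
	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	nvmet_tcp_free_cmd_buffers(cmd);
	return NVME_SC_INTERNAL;
}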
409 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_send_ddgst() argument
411 ahash_request_set_crypt(hash, cmd->req.sg, in nvmet_tcp_send_ddgst()
412 (void *)&cmd->exp_ddgst, cmd->req.transfer_len); in nvmet_tcp_send_ddgst()
417 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_recv_ddgst() argument
424 for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) { in nvmet_tcp_recv_ddgst()
429 ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0); in nvmet_tcp_recv_ddgst()
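Both digest helpers compute the NVMe/TCP data digest (CRC32C, driven through the crypto ahash API) into exp_ddgst. The send side can digest the whole scatterlist in one call; the receive side digests the kmap'ed kvec entries one at a time because the received pages are only reachable through cmd->iov. The crypto_ahash_init/update/final calls shown are the standard ahash pattern filling the elided lines:

static inline void nvmet_tcp_send_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static inline void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist sg;
	struct kvec *iov;
	int i;

	crypto_ahash_init(hash);
	for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
		sg_init_one(&sg, iov->iov_base, iov->iov_len);
		ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
		crypto_ahash_update(hash);
	}
	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
	crypto_ahash_final(hash);
}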
433 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_c2h_data_pdu() argument
435 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; in nvmet_setup_c2h_data_pdu()
436 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_c2h_data_pdu()
437 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
438 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
440 cmd->offset = 0; in nvmet_setup_c2h_data_pdu()
441 cmd->state = NVMET_TCP_SEND_DATA_PDU; in nvmet_setup_c2h_data_pdu()
450 cmd->req.transfer_len + ddgst); in nvmet_setup_c2h_data_pdu()
451 pdu->command_id = cmd->req.cqe->command_id; in nvmet_setup_c2h_data_pdu()
452 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); in nvmet_setup_c2h_data_pdu()
453 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); in nvmet_setup_c2h_data_pdu()
457 nvmet_tcp_send_ddgst(queue->snd_hash, cmd); in nvmet_setup_c2h_data_pdu()
460 if (cmd->queue->hdr_digest) { in nvmet_setup_c2h_data_pdu()
466 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_r2t_pdu() argument
468 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; in nvmet_setup_r2t_pdu()
469 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_r2t_pdu()
470 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_r2t_pdu()
472 cmd->offset = 0; in nvmet_setup_r2t_pdu()
473 cmd->state = NVMET_TCP_SEND_R2T; in nvmet_setup_r2t_pdu()
481 pdu->command_id = cmd->req.cmd->common.command_id; in nvmet_setup_r2t_pdu()
482 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
483 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); in nvmet_setup_r2t_pdu()
484 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); in nvmet_setup_r2t_pdu()
485 if (cmd->queue->hdr_digest) { in nvmet_setup_r2t_pdu()
491 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_response_pdu() argument
493 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; in nvmet_setup_response_pdu()
494 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_response_pdu()
495 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_response_pdu()
497 cmd->offset = 0; in nvmet_setup_response_pdu()
498 cmd->state = NVMET_TCP_SEND_RESPONSE; in nvmet_setup_response_pdu()
505 if (cmd->queue->hdr_digest) { in nvmet_setup_response_pdu()
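All three setup helpers follow the same shape: reset cmd->offset, set the next send state, and fill in the wire PDU (type, lengths, digest flag). The R2T variant is the most instructive because it carries the flow-control fields: the ttag the host must echo back, and the offset/length window computed from rbytes_done. A reconstruction; the hdr field assignments and the nvmet_tcp_hdgst() header-digest helper fill elided lines and follow the NVMe/TCP PDU layout:

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	/* reconstructed header fields, per the NVMe/TCP R2T PDU format */
	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}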
514 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_process_resp_list() local
517 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); in nvmet_tcp_process_resp_list()
518 list_add(&cmd->entry, &queue->resp_send_list); in nvmet_tcp_process_resp_list()
551 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_queue_response() local
553 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_queue_response()
557 if (unlikely(cmd == queue->cmd)) { in nvmet_tcp_queue_response()
558 sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_queue_response()
567 len && len <= cmd->req.port->inline_data_size && in nvmet_tcp_queue_response()
568 nvme_is_write(cmd->req.cmd)) in nvmet_tcp_queue_response()
572 llist_add(&cmd->lentry, &queue->resp_list); in nvmet_tcp_queue_response()
573 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); in nvmet_tcp_queue_response()
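Response queueing is lock-free: completions from any context are pushed onto the llist resp_list, and io_work later drains that into the ordered resp_send_list (the nvmet_tcp_process_resp_list() lines above). The special case guards a write command still receiving its inline data, so the response is not sent before the remaining in-capsule bytes are consumed off the wire. Reconstructed; the rcv_state condition fills an elided line:

static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response:
		 * the command currently being received must not complete
		 * while its in-capsule payload is still on the wire.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}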
576 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_execute_request() argument
578 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) in nvmet_tcp_execute_request()
579 nvmet_tcp_queue_response(&cmd->req); in nvmet_tcp_execute_request()
581 cmd->req.execute(&cmd->req); in nvmet_tcp_execute_request()
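Execution is gated on the init-failure flag set by nvmet_tcp_handle_req_failure(): a command that failed nvmet_req_init() but still had to absorb its inline data goes straight to the response path instead of being executed. The else branch fills the one elided line:

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}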
584 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_try_send_data_pdu() argument
586 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_data_pdu()
587 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; in nvmet_try_send_data_pdu()
590 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu), in nvmet_try_send_data_pdu()
591 offset_in_page(cmd->data_pdu) + cmd->offset, in nvmet_try_send_data_pdu()
596 cmd->offset += ret; in nvmet_try_send_data_pdu()
602 cmd->state = NVMET_TCP_SEND_DATA; in nvmet_try_send_data_pdu()
603 cmd->offset = 0; in nvmet_try_send_data_pdu()
607 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_data() argument
609 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_data()
612 while (cmd->cur_sg) { in nvmet_try_send_data()
613 struct page *page = sg_page(cmd->cur_sg); in nvmet_try_send_data()
614 u32 left = cmd->cur_sg->length - cmd->offset; in nvmet_try_send_data()
617 if ((!last_in_batch && cmd->queue->send_list_len) || in nvmet_try_send_data()
618 cmd->wbytes_done + left < cmd->req.transfer_len || in nvmet_try_send_data()
622 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset, in nvmet_try_send_data()
627 cmd->offset += ret; in nvmet_try_send_data()
628 cmd->wbytes_done += ret; in nvmet_try_send_data()
631 if (cmd->offset == cmd->cur_sg->length) { in nvmet_try_send_data()
632 cmd->cur_sg = sg_next(cmd->cur_sg); in nvmet_try_send_data()
633 cmd->offset = 0; in nvmet_try_send_data()
638 cmd->state = NVMET_TCP_SEND_DDGST; in nvmet_try_send_data()
639 cmd->offset = 0; in nvmet_try_send_data()
642 cmd->queue->snd_cmd = NULL; in nvmet_try_send_data()
643 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_data()
645 nvmet_setup_response_pdu(cmd); in nvmet_try_send_data()
650 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_try_send_data()
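The data send loop walks the scatterlist page by page with kernel_sendpage(), keeping MSG_MORE/MSG_SENDPAGE_NOTLAST set while more bytes will follow (more commands batched, more of this transfer left, or a trailing data digest) so the TCP stack can coalesce frames. On completion it either moves to the DDGST state or builds the response PDU; the flags expression and the sqhd_disabled branches (where no response capsule follows and the command is recycled immediately) are the reconstruction's best reading of the elided lines around 619 and 642-650:

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* finished this sg entry, move to the next page */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled)
		nvmet_tcp_free_cmd_buffers(cmd);

	return 1;
}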
656 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, in nvmet_try_send_response() argument
659 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_response()
660 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; in nvmet_try_send_response()
664 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_response()
669 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu), in nvmet_try_send_response()
670 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags); in nvmet_try_send_response()
673 cmd->offset += ret; in nvmet_try_send_response()
679 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_try_send_response()
680 cmd->queue->snd_cmd = NULL; in nvmet_try_send_response()
681 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_response()
685 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_r2t() argument
687 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_r2t()
688 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; in nvmet_try_send_r2t()
692 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_r2t()
697 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu), in nvmet_try_send_r2t()
698 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags); in nvmet_try_send_r2t()
701 cmd->offset += ret; in nvmet_try_send_r2t()
707 cmd->queue->snd_cmd = NULL; in nvmet_try_send_r2t()
711 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_ddgst() argument
713 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_ddgst()
714 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; in nvmet_try_send_ddgst()
717 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, in nvmet_try_send_ddgst()
722 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_ddgst()
731 cmd->offset += ret; in nvmet_try_send_ddgst()
738 cmd->queue->snd_cmd = NULL; in nvmet_try_send_ddgst()
739 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_ddgst()
741 nvmet_setup_response_pdu(cmd); in nvmet_try_send_ddgst()
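All four senders share the pattern visible here: compute bytes left from cmd->offset, pick MSG_MORE vs MSG_EOR depending on whether more sends are batched, write, then advance offset and only change state once the element is fully on the wire. The digest sender is the smallest complete example; the partial-send handling and the sqhd_disabled branch fill elided lines and are assumptions:

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, left);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;
	if (left)
		return -EAGAIN;	/* partial send; retry from the new offset */

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}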
749 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; in nvmet_tcp_try_send_one() local
752 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_try_send_one()
753 cmd = nvmet_tcp_fetch_cmd(queue); in nvmet_tcp_try_send_one()
754 if (unlikely(!cmd)) in nvmet_tcp_try_send_one()
758 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { in nvmet_tcp_try_send_one()
759 ret = nvmet_try_send_data_pdu(cmd); in nvmet_tcp_try_send_one()
764 if (cmd->state == NVMET_TCP_SEND_DATA) { in nvmet_tcp_try_send_one()
765 ret = nvmet_try_send_data(cmd, last_in_batch); in nvmet_tcp_try_send_one()
770 if (cmd->state == NVMET_TCP_SEND_DDGST) { in nvmet_tcp_try_send_one()
771 ret = nvmet_try_send_ddgst(cmd, last_in_batch); in nvmet_tcp_try_send_one()
776 if (cmd->state == NVMET_TCP_SEND_R2T) { in nvmet_tcp_try_send_one()
777 ret = nvmet_try_send_r2t(cmd, last_in_batch); in nvmet_tcp_try_send_one()
782 if (cmd->state == NVMET_TCP_SEND_RESPONSE) in nvmet_tcp_try_send_one()
783 ret = nvmet_try_send_response(cmd, last_in_batch); in nvmet_tcp_try_send_one()
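The send side is a flat state machine: fetch the current (or next) command, then fall through the states in wire order, with each stage returning <= 0 to stop the pass. A condensed reconstruction of the dispatch; the done_send tail, which maps -EAGAIN to "no progress, try again later", fills elided lines and is an assumption:

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}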
818 queue->cmd = NULL; in nvmet_prepare_receive_pdu()
920 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) in nvmet_tcp_handle_req_failure() argument
922 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); in nvmet_tcp_handle_req_failure()
932 if (!nvme_is_write(cmd->req.cmd) || !data_len || in nvmet_tcp_handle_req_failure()
933 data_len > cmd->req.port->inline_data_size) { in nvmet_tcp_handle_req_failure()
938 ret = nvmet_tcp_map_data(cmd); in nvmet_tcp_handle_req_failure()
946 nvmet_tcp_map_pdu_iovec(cmd); in nvmet_tcp_handle_req_failure()
947 cmd->flags |= NVMET_TCP_F_INIT_FAILED; in nvmet_tcp_handle_req_failure()
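When nvmet_req_init() fails for a write that carries inline data, the payload is already in flight on the socket and must still be drained, so the failure path maps buffers, switches the queue into data-receive state, and marks the command with NVMET_TCP_F_INIT_FAILED so that nvmet_tcp_execute_request() later queues the error response instead of executing. A reconstruction; the early-exit branch and the error print fill elided lines:

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	/* no inline data to drain: just go back to expecting a PDU */
	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_map_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}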
953 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_handle_h2c_data_pdu() local
956 cmd = &queue->cmds[data->ttag]; in nvmet_tcp_handle_h2c_data_pdu()
958 cmd = &queue->connect; in nvmet_tcp_handle_h2c_data_pdu()
960 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { in nvmet_tcp_handle_h2c_data_pdu()
963 cmd->rbytes_done); in nvmet_tcp_handle_h2c_data_pdu()
965 nvmet_req_complete(&cmd->req, in nvmet_tcp_handle_h2c_data_pdu()
970 cmd->pdu_len = le32_to_cpu(data->data_length); in nvmet_tcp_handle_h2c_data_pdu()
971 cmd->pdu_recv = 0; in nvmet_tcp_handle_h2c_data_pdu()
972 nvmet_tcp_map_pdu_iovec(cmd); in nvmet_tcp_handle_h2c_data_pdu()
973 queue->cmd = cmd; in nvmet_tcp_handle_h2c_data_pdu()
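An H2CData PDU is matched back to its command by indexing cmds[] with the ttag issued in the R2T (or the connect command while the queue has no cmds array yet), and its data_offset must line up exactly with the bytes already received. Reconstructed; the ttag selection condition and the return values fill elided lines:

static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	if (likely(queue->nr_cmds))
		cmd = &queue->cmds[data->ttag];
	else
		cmd = &queue->connect;

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_map_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

Note that nothing in the matched lines bounds-checks data->ttag against nr_cmds before indexing.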
981 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
982 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
1003 queue->cmd = nvmet_tcp_get_cmd(queue); in nvmet_tcp_done_recv_pdu()
1004 if (unlikely(!queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1013 req = &queue->cmd->req; in nvmet_tcp_done_recv_pdu()
1014 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); in nvmet_tcp_done_recv_pdu()
1019 req->cmd, req->cmd->common.command_id, in nvmet_tcp_done_recv_pdu()
1020 req->cmd->common.opcode, in nvmet_tcp_done_recv_pdu()
1021 le32_to_cpu(req->cmd->common.dptr.sgl.length)); in nvmet_tcp_done_recv_pdu()
1023 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); in nvmet_tcp_done_recv_pdu()
1027 ret = nvmet_tcp_map_data(queue->cmd); in nvmet_tcp_done_recv_pdu()
1030 if (nvmet_tcp_has_inline_data(queue->cmd)) in nvmet_tcp_done_recv_pdu()
1038 if (nvmet_tcp_need_data_in(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1039 if (nvmet_tcp_has_inline_data(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1041 nvmet_tcp_map_pdu_iovec(queue->cmd); in nvmet_tcp_done_recv_pdu()
1045 nvmet_tcp_queue_response(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1049 queue->cmd->req.execute(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
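The tail of nvmet_tcp_done_recv_pdu() is where a freshly received command capsule is dispatched: copy the wire command into the request, let nvmet_req_init() validate it (routing failures through nvmet_tcp_handle_req_failure() so inline data is still drained), map buffers, then choose between consuming inline data, asking for the data with an R2T (via nvmet_tcp_queue_response(), since R2T goes out through the send state machine), or executing immediately. A condensed reconstruction of that tail; the nvmet_req_init() call, the error-handling details, and the out label are assumptions filling elided lines:

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_map_pdu_iovec(queue->cmd);
			return 0;
		}
		/* no inline data: request it from the host with an R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;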
1085 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1136 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_prep_recv_ddgst() argument
1138 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_prep_recv_ddgst()
1140 nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd); in nvmet_tcp_prep_recv_ddgst()
1148 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_data() local
1151 while (msg_data_left(&cmd->recv_msg)) { in nvmet_tcp_try_recv_data()
1152 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, in nvmet_tcp_try_recv_data()
1153 cmd->recv_msg.msg_flags); in nvmet_tcp_try_recv_data()
1157 cmd->pdu_recv += ret; in nvmet_tcp_try_recv_data()
1158 cmd->rbytes_done += ret; in nvmet_tcp_try_recv_data()
1161 nvmet_tcp_unmap_pdu_iovec(cmd); in nvmet_tcp_try_recv_data()
1163 nvmet_tcp_prep_recv_ddgst(cmd); in nvmet_tcp_try_recv_data()
1167 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_data()
1168 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_data()
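Data reception loops sock_recvmsg() over the iov_iter prepared by nvmet_tcp_map_pdu_iovec() until the PDU's payload is consumed, then unmaps and either collects the trailing data digest or, if the full transfer has arrived, executes the request. Reassembled from the matched lines; the data_digest branch and the final nvmet_prepare_receive_pdu() fill elided lines:

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	nvmet_tcp_unmap_pdu_iovec(cmd);
	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}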
1176 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_ddgst() local
1180 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, in nvmet_tcp_try_recv_ddgst()
1194 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { in nvmet_tcp_try_recv_ddgst()
1196 queue->idx, cmd->req.cmd->common.command_id, in nvmet_tcp_try_recv_ddgst()
1197 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1198 le32_to_cpu(cmd->exp_ddgst)); in nvmet_tcp_try_recv_ddgst()
1199 nvmet_tcp_finish_cmd(cmd); in nvmet_tcp_try_recv_ddgst()
1205 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_ddgst()
1206 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_ddgst()
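Digest verification reads the remaining digest bytes into recv_ddgst and compares against the exp_ddgst computed over the received pages by nvmet_tcp_recv_ddgst(); a mismatch tears the command down and kills the queue, since a corrupt byte stream cannot be resynchronized. Reconstructed; the kernel_recvmsg() call, the -EAGAIN partial-read path, and the fatal-error tail fill elided lines:

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret, left = NVME_TCP_DIGEST_LENGTH - queue->offset;
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = left
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	left -= ret;
	if (left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_tcp_finish_cmd(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}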
1338 c->req.cmd = &c->cmd_pdu->cmd; in nvmet_tcp_alloc_cmd()
1428 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_finish_cmd() argument
1430 nvmet_req_uninit(&cmd->req); in nvmet_tcp_finish_cmd()
1431 nvmet_tcp_unmap_pdu_iovec(cmd); in nvmet_tcp_finish_cmd()
1432 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_finish_cmd()
1437 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_uninit_data_in_cmds() local
1440 for (i = 0; i < queue->nr_cmds; i++, cmd++) { in nvmet_tcp_uninit_data_in_cmds()
1441 if (nvmet_tcp_need_data_in(cmd)) in nvmet_tcp_uninit_data_in_cmds()
1442 nvmet_req_uninit(&cmd->req); in nvmet_tcp_uninit_data_in_cmds()
1444 nvmet_tcp_unmap_pdu_iovec(cmd); in nvmet_tcp_uninit_data_in_cmds()
1445 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_uninit_data_in_cmds()
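Teardown mirrors setup: nvmet_tcp_finish_cmd() uninitializes the request and releases mappings and buffers for one command, and the queue-level walk does the same for every slot, calling nvmet_req_uninit() only for commands that were still mid-transfer (need data in) and so still owned by the core. Reassembled from the matched lines; the trailing handling of the embedded connect command is an assumption:

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
	nvmet_req_uninit(&cmd->req);
	nvmet_tcp_unmap_pdu_iovec(cmd);
	nvmet_tcp_free_cmd_buffers(cmd);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_req_uninit(&cmd->req);

		nvmet_tcp_unmap_pdu_iovec(cmd);
		nvmet_tcp_free_cmd_buffers(cmd);
	}

	/* assumed: the embedded connect command gets the same treatment */
	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
		nvmet_tcp_finish_cmd(&queue->connect);
}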
1833 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_disc_port_addr() local
1835 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_disc_port_addr()