Lines Matching refs:cmd (cross references to the identifier `cmd`; the nvmet_tcp_* function names suggest the Linux NVMe/TCP target driver, drivers/nvme/target/tcp.c). Each entry gives the source line number, the matching code, the enclosing function, and, where noted, whether `cmd` is a struct member, a function argument, or a local variable.

113 	struct nvmet_tcp_cmd	*cmd;  member
156 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
159 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_cmd_tag() argument
166 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
169 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_data_in() argument
171 return nvme_is_write(cmd->req.cmd) && in nvmet_tcp_has_data_in()
172 cmd->rbytes_done < cmd->req.transfer_len; in nvmet_tcp_has_data_in()
175 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_in() argument
177 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; in nvmet_tcp_need_data_in()
180 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_out() argument
182 return !nvme_is_write(cmd->req.cmd) && in nvmet_tcp_need_data_out()
183 cmd->req.transfer_len > 0 && in nvmet_tcp_need_data_out()
184 !cmd->req.cqe->status; in nvmet_tcp_need_data_out()
187 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_inline_data() argument
189 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len && in nvmet_tcp_has_inline_data()
190 !cmd->rbytes_done; in nvmet_tcp_has_inline_data()
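
The four helpers above classify a command by direction and progress: a write that still expects bytes from the host (has_data_in / need_data_in), a read that must send data back (need_data_out), and a write whose payload arrived inline in the command PDU (has_inline_data). Below is a minimal userspace sketch of the same predicate logic; the stand-in struct and field names are illustrative, not the kernel layout.

#include <stdbool.h>
#include <stdio.h>

struct cmd {
	bool	is_write;	/* stands in for nvme_is_write(cmd->req.cmd) */
	int	transfer_len;	/* cmd->req.transfer_len */
	int	rbytes_done;	/* bytes already received from the host */
	int	pdu_len;	/* inline data carried in the command PDU */
	int	status;		/* cmd->req.cqe->status */
};

static bool has_data_in(const struct cmd *c)
{
	return c->is_write && c->rbytes_done < c->transfer_len;
}

static bool need_data_in(const struct cmd *c)
{
	return has_data_in(c) && !c->status;
}

static bool need_data_out(const struct cmd *c)
{
	return !c->is_write && c->transfer_len > 0 && !c->status;
}

static bool has_inline_data(const struct cmd *c)
{
	return c->is_write && c->pdu_len && !c->rbytes_done;
}

int main(void)
{
	struct cmd write_cmd = { .is_write = true, .transfer_len = 4096,
				 .pdu_len = 4096 };

	printf("needs host data: %d, inline: %d, sends data out: %d\n",
	       need_data_in(&write_cmd), has_inline_data(&write_cmd),
	       need_data_out(&write_cmd));
	return 0;
}
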
196 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_get_cmd() local
198 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
200 if (!cmd) in nvmet_tcp_get_cmd()
202 list_del_init(&cmd->entry); in nvmet_tcp_get_cmd()
204 cmd->rbytes_done = cmd->wbytes_done = 0; in nvmet_tcp_get_cmd()
205 cmd->pdu_len = 0; in nvmet_tcp_get_cmd()
206 cmd->pdu_recv = 0; in nvmet_tcp_get_cmd()
207 cmd->iov = NULL; in nvmet_tcp_get_cmd()
208 cmd->flags = 0; in nvmet_tcp_get_cmd()
209 return cmd; in nvmet_tcp_get_cmd()
212 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_put_cmd() argument
214 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
217 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
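
nvmet_tcp_get_cmd()/nvmet_tcp_put_cmd() above implement a per-queue pool of preallocated commands: get pulls one off the free list and resets its per-I/O state, put returns it, and the special connect command is never recycled. A simplified sketch of that pattern follows; it uses a plain LIFO linked list instead of the kernel's list_head/list_add_tail, and all names are illustrative.

#include <stddef.h>
#include <stdio.h>

#define NR_CMDS 4

struct cmd {
	int		tag;
	struct cmd	*next;			/* free-list linkage */
	int		rbytes_done, wbytes_done, pdu_len;
};

struct queue {
	struct cmd	cmds[NR_CMDS];
	struct cmd	*free_list;
	struct cmd	connect;		/* never goes on the free list */
};

static struct cmd *get_cmd(struct queue *q)
{
	struct cmd *c = q->free_list;

	if (!c)
		return NULL;
	q->free_list = c->next;
	c->rbytes_done = c->wbytes_done = c->pdu_len = 0;	/* reset per-I/O state */
	return c;
}

static void put_cmd(struct queue *q, struct cmd *c)
{
	if (c == &q->connect)			/* the connect command is static */
		return;
	c->next = q->free_list;
	q->free_list = c;
}

int main(void)
{
	struct queue q = { .free_list = NULL };

	for (int i = NR_CMDS - 1; i >= 0; i--) {	/* seed the free list */
		q.cmds[i].tag = i;
		put_cmd(&q, &q.cmds[i]);
	}

	struct cmd *c = get_cmd(&q);
	printf("got tag %d\n", c ? c->tag : -1);
	put_cmd(&q, c);
	return 0;
}
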
288 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_unmap_pdu_iovec() argument
293 sg = &cmd->req.sg[cmd->sg_idx]; in nvmet_tcp_unmap_pdu_iovec()
295 for (i = 0; i < cmd->nr_mapped; i++) in nvmet_tcp_unmap_pdu_iovec()
299 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_map_pdu_iovec() argument
301 struct kvec *iov = cmd->iov; in nvmet_tcp_map_pdu_iovec()
305 length = cmd->pdu_len; in nvmet_tcp_map_pdu_iovec()
306 cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); in nvmet_tcp_map_pdu_iovec()
307 offset = cmd->rbytes_done; in nvmet_tcp_map_pdu_iovec()
308 cmd->sg_idx = offset / PAGE_SIZE; in nvmet_tcp_map_pdu_iovec()
310 sg = &cmd->req.sg[cmd->sg_idx]; in nvmet_tcp_map_pdu_iovec()
324 iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, in nvmet_tcp_map_pdu_iovec()
325 cmd->nr_mapped, cmd->pdu_len); in nvmet_tcp_map_pdu_iovec()
345 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_map_data() argument
347 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_map_data()
355 if (!nvme_is_write(cmd->req.cmd)) in nvmet_tcp_map_data()
358 if (len > cmd->req.port->inline_data_size) in nvmet_tcp_map_data()
360 cmd->pdu_len = len; in nvmet_tcp_map_data()
362 cmd->req.transfer_len += len; in nvmet_tcp_map_data()
364 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); in nvmet_tcp_map_data()
365 if (!cmd->req.sg) in nvmet_tcp_map_data()
367 cmd->cur_sg = cmd->req.sg; in nvmet_tcp_map_data()
369 if (nvmet_tcp_has_data_in(cmd)) { in nvmet_tcp_map_data()
370 cmd->iov = kmalloc_array(cmd->req.sg_cnt, in nvmet_tcp_map_data()
371 sizeof(*cmd->iov), GFP_KERNEL); in nvmet_tcp_map_data()
372 if (!cmd->iov) in nvmet_tcp_map_data()
378 sgl_free(cmd->req.sg); in nvmet_tcp_map_data()
383 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_send_ddgst() argument
385 ahash_request_set_crypt(hash, cmd->req.sg, in nvmet_tcp_send_ddgst()
386 (void *)&cmd->exp_ddgst, cmd->req.transfer_len); in nvmet_tcp_send_ddgst()
391 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_recv_ddgst() argument
398 for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) { in nvmet_tcp_recv_ddgst()
403 ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0); in nvmet_tcp_recv_ddgst()
407 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_c2h_data_pdu() argument
409 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; in nvmet_setup_c2h_data_pdu()
410 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_c2h_data_pdu()
411 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
412 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
414 cmd->offset = 0; in nvmet_setup_c2h_data_pdu()
415 cmd->state = NVMET_TCP_SEND_DATA_PDU; in nvmet_setup_c2h_data_pdu()
424 cmd->req.transfer_len + ddgst); in nvmet_setup_c2h_data_pdu()
425 pdu->command_id = cmd->req.cqe->command_id; in nvmet_setup_c2h_data_pdu()
426 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); in nvmet_setup_c2h_data_pdu()
427 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); in nvmet_setup_c2h_data_pdu()
431 nvmet_tcp_send_ddgst(queue->snd_hash, cmd); in nvmet_setup_c2h_data_pdu()
434 if (cmd->queue->hdr_digest) { in nvmet_setup_c2h_data_pdu()
440 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_r2t_pdu() argument
442 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; in nvmet_setup_r2t_pdu()
443 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_r2t_pdu()
444 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_r2t_pdu()
446 cmd->offset = 0; in nvmet_setup_r2t_pdu()
447 cmd->state = NVMET_TCP_SEND_R2T; in nvmet_setup_r2t_pdu()
455 pdu->command_id = cmd->req.cmd->common.command_id; in nvmet_setup_r2t_pdu()
456 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
457 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); in nvmet_setup_r2t_pdu()
458 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); in nvmet_setup_r2t_pdu()
459 if (cmd->queue->hdr_digest) { in nvmet_setup_r2t_pdu()
465 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_response_pdu() argument
467 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; in nvmet_setup_response_pdu()
468 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_response_pdu()
469 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_response_pdu()
471 cmd->offset = 0; in nvmet_setup_response_pdu()
472 cmd->state = NVMET_TCP_SEND_RESPONSE; in nvmet_setup_response_pdu()
479 if (cmd->queue->hdr_digest) { in nvmet_setup_response_pdu()
488 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_process_resp_list() local
491 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); in nvmet_tcp_process_resp_list()
492 list_add(&cmd->entry, &queue->resp_send_list); in nvmet_tcp_process_resp_list()
525 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_queue_response() local
527 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_queue_response()
531 if (unlikely(cmd == queue->cmd)) { in nvmet_tcp_queue_response()
532 sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_queue_response()
541 len && len <= cmd->req.port->inline_data_size && in nvmet_tcp_queue_response()
542 nvme_is_write(cmd->req.cmd)) in nvmet_tcp_queue_response()
546 llist_add(&cmd->lentry, &queue->resp_list); in nvmet_tcp_queue_response()
547 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); in nvmet_tcp_queue_response()
550 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_execute_request() argument
552 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) in nvmet_tcp_execute_request()
553 nvmet_tcp_queue_response(&cmd->req); in nvmet_tcp_execute_request()
555 cmd->req.execute(&cmd->req); in nvmet_tcp_execute_request()
558 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_try_send_data_pdu() argument
560 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_data_pdu()
561 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; in nvmet_try_send_data_pdu()
564 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu), in nvmet_try_send_data_pdu()
565 offset_in_page(cmd->data_pdu) + cmd->offset, in nvmet_try_send_data_pdu()
570 cmd->offset += ret; in nvmet_try_send_data_pdu()
576 cmd->state = NVMET_TCP_SEND_DATA; in nvmet_try_send_data_pdu()
577 cmd->offset = 0; in nvmet_try_send_data_pdu()
581 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_data() argument
583 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_data()
586 while (cmd->cur_sg) { in nvmet_try_send_data()
587 struct page *page = sg_page(cmd->cur_sg); in nvmet_try_send_data()
588 u32 left = cmd->cur_sg->length - cmd->offset; in nvmet_try_send_data()
591 if ((!last_in_batch && cmd->queue->send_list_len) || in nvmet_try_send_data()
592 cmd->wbytes_done + left < cmd->req.transfer_len || in nvmet_try_send_data()
596 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset, in nvmet_try_send_data()
601 cmd->offset += ret; in nvmet_try_send_data()
602 cmd->wbytes_done += ret; in nvmet_try_send_data()
605 if (cmd->offset == cmd->cur_sg->length) { in nvmet_try_send_data()
606 cmd->cur_sg = sg_next(cmd->cur_sg); in nvmet_try_send_data()
607 cmd->offset = 0; in nvmet_try_send_data()
612 cmd->state = NVMET_TCP_SEND_DDGST; in nvmet_try_send_data()
613 cmd->offset = 0; in nvmet_try_send_data()
616 cmd->queue->snd_cmd = NULL; in nvmet_try_send_data()
617 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_data()
619 nvmet_setup_response_pdu(cmd); in nvmet_try_send_data()
624 kfree(cmd->iov); in nvmet_try_send_data()
625 sgl_free(cmd->req.sg); in nvmet_try_send_data()
632 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, in nvmet_try_send_response() argument
635 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_response()
636 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; in nvmet_try_send_response()
640 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_response()
645 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu), in nvmet_try_send_response()
646 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags); in nvmet_try_send_response()
649 cmd->offset += ret; in nvmet_try_send_response()
655 kfree(cmd->iov); in nvmet_try_send_response()
656 sgl_free(cmd->req.sg); in nvmet_try_send_response()
657 cmd->queue->snd_cmd = NULL; in nvmet_try_send_response()
658 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_response()
662 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_r2t() argument
664 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_r2t()
665 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; in nvmet_try_send_r2t()
669 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_r2t()
674 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu), in nvmet_try_send_r2t()
675 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags); in nvmet_try_send_r2t()
678 cmd->offset += ret; in nvmet_try_send_r2t()
684 cmd->queue->snd_cmd = NULL; in nvmet_try_send_r2t()
688 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_ddgst() argument
690 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_ddgst()
691 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; in nvmet_try_send_ddgst()
694 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, in nvmet_try_send_ddgst()
699 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_ddgst()
708 cmd->offset += ret; in nvmet_try_send_ddgst()
715 cmd->queue->snd_cmd = NULL; in nvmet_try_send_ddgst()
716 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_ddgst()
718 nvmet_setup_response_pdu(cmd); in nvmet_try_send_ddgst()
726 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; in nvmet_tcp_try_send_one() local
729 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_try_send_one()
730 cmd = nvmet_tcp_fetch_cmd(queue); in nvmet_tcp_try_send_one()
731 if (unlikely(!cmd)) in nvmet_tcp_try_send_one()
735 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { in nvmet_tcp_try_send_one()
736 ret = nvmet_try_send_data_pdu(cmd); in nvmet_tcp_try_send_one()
741 if (cmd->state == NVMET_TCP_SEND_DATA) { in nvmet_tcp_try_send_one()
742 ret = nvmet_try_send_data(cmd, last_in_batch); in nvmet_tcp_try_send_one()
747 if (cmd->state == NVMET_TCP_SEND_DDGST) { in nvmet_tcp_try_send_one()
748 ret = nvmet_try_send_ddgst(cmd, last_in_batch); in nvmet_tcp_try_send_one()
753 if (cmd->state == NVMET_TCP_SEND_R2T) { in nvmet_tcp_try_send_one()
754 ret = nvmet_try_send_r2t(cmd, last_in_batch); in nvmet_tcp_try_send_one()
759 if (cmd->state == NVMET_TCP_SEND_RESPONSE) in nvmet_tcp_try_send_one()
760 ret = nvmet_try_send_response(cmd, last_in_batch); in nvmet_tcp_try_send_one()
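
The hits above are the per-command send dispatch in nvmet_tcp_try_send_one(): a command advances through SEND_DATA_PDU, SEND_DATA and SEND_DDGST and finishes with SEND_R2T or SEND_RESPONSE, each helper returning <= 0 when the socket cannot take more. Below is a stripped-down sketch of that dispatch shape with stub send functions and no socket I/O; it is purely illustrative, not the kernel logic.

#include <stdio.h>

enum send_state {
	SEND_DATA_PDU,
	SEND_DATA,
	SEND_DDGST,
	SEND_R2T,		/* R2T path omitted from this sketch */
	SEND_RESPONSE,
	SEND_DONE,
};

struct cmd { enum send_state state; };

/* each stub pretends the whole piece went out and advances the state */
static int send_data_pdu(struct cmd *c) { c->state = SEND_DATA;     return 1; }
static int send_data(struct cmd *c)     { c->state = SEND_DDGST;    return 1; }
static int send_ddgst(struct cmd *c)    { c->state = SEND_RESPONSE; return 1; }
static int send_response(struct cmd *c) { c->state = SEND_DONE;     return 1; }

static int try_send_one(struct cmd *c)
{
	int ret;

	if (c->state == SEND_DATA_PDU) {
		ret = send_data_pdu(c);
		if (ret <= 0)
			return ret;
	}
	if (c->state == SEND_DATA) {
		ret = send_data(c);
		if (ret <= 0)
			return ret;
	}
	if (c->state == SEND_DDGST) {
		ret = send_ddgst(c);
		if (ret <= 0)
			return ret;
	}
	if (c->state == SEND_RESPONSE)
		return send_response(c);
	return 1;
}

int main(void)
{
	struct cmd c = { .state = SEND_DATA_PDU };

	while (c.state != SEND_DONE && try_send_one(&c) > 0)
		;
	printf("final state: %d\n", c.state);
	return 0;
}
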
795 queue->cmd = NULL; in nvmet_prepare_receive_pdu()
897 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) in nvmet_tcp_handle_req_failure() argument
899 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); in nvmet_tcp_handle_req_failure()
902 if (!nvme_is_write(cmd->req.cmd) || in nvmet_tcp_handle_req_failure()
903 data_len > cmd->req.port->inline_data_size) { in nvmet_tcp_handle_req_failure()
908 ret = nvmet_tcp_map_data(cmd); in nvmet_tcp_handle_req_failure()
916 nvmet_tcp_map_pdu_iovec(cmd); in nvmet_tcp_handle_req_failure()
917 cmd->flags |= NVMET_TCP_F_INIT_FAILED; in nvmet_tcp_handle_req_failure()
923 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_handle_h2c_data_pdu() local
932 cmd = &queue->cmds[data->ttag]; in nvmet_tcp_handle_h2c_data_pdu()
934 cmd = &queue->connect; in nvmet_tcp_handle_h2c_data_pdu()
937 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { in nvmet_tcp_handle_h2c_data_pdu()
940 cmd->rbytes_done); in nvmet_tcp_handle_h2c_data_pdu()
942 nvmet_req_complete(&cmd->req, in nvmet_tcp_handle_h2c_data_pdu()
947 cmd->pdu_len = le32_to_cpu(data->data_length); in nvmet_tcp_handle_h2c_data_pdu()
948 cmd->pdu_recv = 0; in nvmet_tcp_handle_h2c_data_pdu()
949 nvmet_tcp_map_pdu_iovec(cmd); in nvmet_tcp_handle_h2c_data_pdu()
950 queue->cmd = cmd; in nvmet_tcp_handle_h2c_data_pdu()
958 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
959 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
980 queue->cmd = nvmet_tcp_get_cmd(queue); in nvmet_tcp_done_recv_pdu()
981 if (unlikely(!queue->cmd)) { in nvmet_tcp_done_recv_pdu()
990 req = &queue->cmd->req; in nvmet_tcp_done_recv_pdu()
991 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); in nvmet_tcp_done_recv_pdu()
996 req->cmd, req->cmd->common.command_id, in nvmet_tcp_done_recv_pdu()
997 req->cmd->common.opcode, in nvmet_tcp_done_recv_pdu()
998 le32_to_cpu(req->cmd->common.dptr.sgl.length)); in nvmet_tcp_done_recv_pdu()
1000 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); in nvmet_tcp_done_recv_pdu()
1004 ret = nvmet_tcp_map_data(queue->cmd); in nvmet_tcp_done_recv_pdu()
1007 if (nvmet_tcp_has_inline_data(queue->cmd)) in nvmet_tcp_done_recv_pdu()
1015 if (nvmet_tcp_need_data_in(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1016 if (nvmet_tcp_has_inline_data(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1018 nvmet_tcp_map_pdu_iovec(queue->cmd); in nvmet_tcp_done_recv_pdu()
1022 nvmet_tcp_queue_response(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1026 queue->cmd->req.execute(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
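
After a command PDU has been parsed (nvmet_tcp_done_recv_pdu above), the decision is: a write that still needs host data either starts receiving its inline payload or has an R2T queued, and anything else executes immediately. A hedged sketch of that decision flow follows; the helpers are placeholders for the real queue-state handling, not kernel functions.

#include <stdbool.h>
#include <stdio.h>

struct cmd {
	bool	needs_host_data;	/* write with transfer bytes outstanding */
	bool	has_inline_data;	/* payload arrived inside the command PDU */
};

/* placeholder actions; the real driver adjusts queue->rcv_state etc. */
static void start_receiving_inline(void)	{ puts("receive inline data"); }
static void send_r2t(void)			{ puts("queue an R2T"); }
static void execute_cmd(void)			{ puts("execute the request"); }

static void handle_cmd_pdu(const struct cmd *c)
{
	if (c->needs_host_data) {
		if (c->has_inline_data) {
			start_receiving_inline();
			return;
		}
		send_r2t();		/* host answers with H2C data PDUs */
		return;
	}
	execute_cmd();			/* read or no-data command runs now */
}

int main(void)
{
	struct cmd inline_write = { true, true };
	struct cmd large_write  = { true, false };
	struct cmd read_cmd     = { false, false };

	handle_cmd_pdu(&inline_write);
	handle_cmd_pdu(&large_write);
	handle_cmd_pdu(&read_cmd);
	return 0;
}
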
1062 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1113 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_prep_recv_ddgst() argument
1115 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_prep_recv_ddgst()
1117 nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd); in nvmet_tcp_prep_recv_ddgst()
1125 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_data() local
1128 while (msg_data_left(&cmd->recv_msg)) { in nvmet_tcp_try_recv_data()
1129 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, in nvmet_tcp_try_recv_data()
1130 cmd->recv_msg.msg_flags); in nvmet_tcp_try_recv_data()
1134 cmd->pdu_recv += ret; in nvmet_tcp_try_recv_data()
1135 cmd->rbytes_done += ret; in nvmet_tcp_try_recv_data()
1138 nvmet_tcp_unmap_pdu_iovec(cmd); in nvmet_tcp_try_recv_data()
1140 nvmet_tcp_prep_recv_ddgst(cmd); in nvmet_tcp_try_recv_data()
1144 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_data()
1145 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_data()
1153 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_ddgst() local
1157 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, in nvmet_tcp_try_recv_ddgst()
1171 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { in nvmet_tcp_try_recv_ddgst()
1173 queue->idx, cmd->req.cmd->common.command_id, in nvmet_tcp_try_recv_ddgst()
1174 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1175 le32_to_cpu(cmd->exp_ddgst)); in nvmet_tcp_try_recv_ddgst()
1176 nvmet_tcp_finish_cmd(cmd); in nvmet_tcp_try_recv_ddgst()
1182 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_ddgst()
1183 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_ddgst()
1297 c->req.cmd = &c->cmd_pdu->cmd; in nvmet_tcp_alloc_cmd()
1387 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_finish_cmd() argument
1389 nvmet_req_uninit(&cmd->req); in nvmet_tcp_finish_cmd()
1390 nvmet_tcp_unmap_pdu_iovec(cmd); in nvmet_tcp_finish_cmd()
1391 kfree(cmd->iov); in nvmet_tcp_finish_cmd()
1392 sgl_free(cmd->req.sg); in nvmet_tcp_finish_cmd()
1397 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_uninit_data_in_cmds() local
1400 for (i = 0; i < queue->nr_cmds; i++, cmd++) { in nvmet_tcp_uninit_data_in_cmds()
1401 if (nvmet_tcp_need_data_in(cmd)) in nvmet_tcp_uninit_data_in_cmds()
1402 nvmet_tcp_finish_cmd(cmd); in nvmet_tcp_uninit_data_in_cmds()
1789 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_disc_port_addr() local
1791 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_disc_port_addr()