Lines matching refs: rsp — references to struct nvmet_rdma_rsp in the NVMe-oF target RDMA transport (nvmet_rdma)
163 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
193 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_need_data_in() argument
195 return nvme_is_write(rsp->req.cmd) && in nvmet_rdma_need_data_in()
196 rsp->req.transfer_len && in nvmet_rdma_need_data_in()
197 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); in nvmet_rdma_need_data_in()
200 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_need_data_out() argument
202 return !nvme_is_write(rsp->req.cmd) && in nvmet_rdma_need_data_out()
203 rsp->req.transfer_len && in nvmet_rdma_need_data_out()
204 !rsp->req.cqe->status && in nvmet_rdma_need_data_out()
205 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); in nvmet_rdma_need_data_out()
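The two predicates above decide the RDMA data direction for a command: nvmet_rdma_need_data_in() is true for a write command whose payload still has to be pulled from the host with an RDMA READ, and nvmet_rdma_need_data_out() for a read command whose payload must be pushed to the host with an RDMA WRITE. Both skip zero-length and in-capsule (inline) payloads, and data-out is also skipped once the command has already failed (cqe->status set). A minimal userspace restatement with stand-in types, not the kernel structs:

```c
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel structures; illustrative only. */
struct fake_req {
	bool     is_write;     /* models nvme_is_write(cmd)           */
	uint32_t transfer_len; /* total payload length in bytes       */
	uint16_t cqe_status;   /* non-zero once the command failed    */
	bool     inline_data;  /* payload arrived in-capsule (inline) */
};

/* Write command with a non-inline payload: the data still has to be
 * pulled from the host with an RDMA READ before execution. */
static bool need_data_in(const struct fake_req *req)
{
	return req->is_write && req->transfer_len && !req->inline_data;
}

/* Read command that has not failed and has a non-inline payload: the
 * data has to be pushed to the host with an RDMA WRITE before the
 * completion is sent. */
static bool need_data_out(const struct fake_req *req)
{
	return !req->is_write && req->transfer_len &&
	       !req->cqe_status && !req->inline_data;
}
```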
211 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_get_rsp() local
215 rsp = list_first_entry_or_null(&queue->free_rsps, in nvmet_rdma_get_rsp()
217 if (likely(rsp)) in nvmet_rdma_get_rsp()
218 list_del(&rsp->free_list); in nvmet_rdma_get_rsp()
221 if (unlikely(!rsp)) { in nvmet_rdma_get_rsp()
224 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); in nvmet_rdma_get_rsp()
225 if (unlikely(!rsp)) in nvmet_rdma_get_rsp()
227 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); in nvmet_rdma_get_rsp()
229 kfree(rsp); in nvmet_rdma_get_rsp()
233 rsp->allocated = true; in nvmet_rdma_get_rsp()
236 return rsp; in nvmet_rdma_get_rsp()
240 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_put_rsp() argument
244 if (unlikely(rsp->allocated)) { in nvmet_rdma_put_rsp()
245 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
246 kfree(rsp); in nvmet_rdma_put_rsp()
250 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
251 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); in nvmet_rdma_put_rsp()
252 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
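nvmet_rdma_get_rsp() first tries to take a pre-allocated response from the queue's free list under rsps_lock and only falls back to kzalloc() when the pool is empty, marking the fallback with rsp->allocated = true; nvmet_rdma_put_rsp() frees such one-off responses immediately and returns pooled ones to free_rsps. A userspace analogue of the pattern, with a mutex standing in for the spinlock and calloc for kzalloc (types and names are illustrative):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct rsp {
	struct rsp *next;      /* free-list linkage          */
	bool        allocated; /* true if not from the pool  */
	/* ... per-command state ... */
};

static struct rsp     *free_list;  /* pre-allocated pool */
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rsp *get_rsp(void)
{
	struct rsp *r;

	pthread_mutex_lock(&free_lock);
	r = free_list;
	if (r)
		free_list = r->next;
	pthread_mutex_unlock(&free_lock);

	if (r)
		return r;

	/* Pool exhausted: fall back to a one-off heap allocation and
	 * remember that this response must be freed, not pooled. */
	r = calloc(1, sizeof(*r));
	if (r)
		r->allocated = true;
	return r;
}

static void put_rsp(struct rsp *r)
{
	if (r->allocated) {
		free(r);             /* one-off allocation: release it */
		return;
	}
	pthread_mutex_lock(&free_lock);
	r->next = free_list;         /* pooled entry: back on the list */
	free_list = r;
	pthread_mutex_unlock(&free_lock);
}
```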
463 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps() local
465 ret = nvmet_rdma_alloc_rsp(ndev, rsp); in nvmet_rdma_alloc_rsps()
469 list_add_tail(&rsp->free_list, &queue->free_rsps); in nvmet_rdma_alloc_rsps()
476 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps() local
478 list_del(&rsp->free_list); in nvmet_rdma_alloc_rsps()
479 nvmet_rdma_free_rsp(ndev, rsp); in nvmet_rdma_alloc_rsps()
492 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_free_rsps() local
494 list_del(&rsp->free_list); in nvmet_rdma_free_rsps()
495 nvmet_rdma_free_rsp(ndev, rsp); in nvmet_rdma_free_rsps()
524 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_process_wr_wait_list() local
527 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
529 list_del(&rsp->wait_list); in nvmet_rdma_process_wr_wait_list()
532 ret = nvmet_rdma_execute_command(rsp); in nvmet_rdma_process_wr_wait_list()
536 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
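nvmet_rdma_process_wr_wait_list() drains commands that were parked because the send queue had no free work-request slots: each waiter is taken off rsp_wr_wait_list in FIFO order and retried via nvmet_rdma_execute_command(); if it still cannot get credits it is put back at the head of the list (the list_add() at line 536 above). A small sketch of that pop/retry/requeue loop over a plain singly linked list (stand-in types and a stub credit counter, not the kernel list API):

```c
#include <stdbool.h>

struct waiter {
	struct waiter *next;
	/* ... deferred command state ... */
};

static int credits = 1; /* free send-queue slots, for illustration */

/* Stand-in for nvmet_rdma_execute_command(): fails when no credits
 * are available, succeeds (and consumes one) otherwise. */
static bool try_execute(struct waiter *w)
{
	(void)w;
	if (credits <= 0)
		return false;
	credits--;
	return true;
}

/* Pop deferred commands in FIFO order; if one still cannot get
 * credits, push it back to the front and stop until more work
 * requests complete. */
static void process_wait_list(struct waiter **head)
{
	while (*head) {
		struct waiter *w = *head;

		*head = w->next;          /* take the first waiter off */
		if (!try_execute(w)) {
			w->next = *head;  /* requeue it at the head    */
			*head = w;
			break;
		}
	}
}
```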
632 static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key, in nvmet_rdma_rw_ctx_init() argument
635 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init()
636 struct nvmet_req *req = &rsp->req; in nvmet_rdma_rw_ctx_init()
640 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp, in nvmet_rdma_rw_ctx_init()
645 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_init()
652 static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_rw_ctx_destroy() argument
654 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy()
655 struct nvmet_req *req = &rsp->req; in nvmet_rdma_rw_ctx_destroy()
658 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp, in nvmet_rdma_rw_ctx_destroy()
663 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_destroy()
667 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_release_rsp() argument
669 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp()
671 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
673 if (rsp->n_rdma) in nvmet_rdma_release_rsp()
674 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_release_rsp()
676 if (rsp->req.sg != rsp->cmd->inline_sg) in nvmet_rdma_release_rsp()
677 nvmet_req_free_sgls(&rsp->req); in nvmet_rdma_release_rsp()
682 nvmet_rdma_put_rsp(rsp); in nvmet_rdma_release_rsp()
701 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_send_done() local
705 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_send_done()
717 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_queue_response() local
719 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
722 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) { in nvmet_rdma_queue_response()
723 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV; in nvmet_rdma_queue_response()
724 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey; in nvmet_rdma_queue_response()
726 rsp->send_wr.opcode = IB_WR_SEND; in nvmet_rdma_queue_response()
729 if (nvmet_rdma_need_data_out(rsp)) { in nvmet_rdma_queue_response()
730 if (rsp->req.metadata_len) in nvmet_rdma_queue_response()
731 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, in nvmet_rdma_queue_response()
732 cm_id->port_num, &rsp->write_cqe, NULL); in nvmet_rdma_queue_response()
734 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, in nvmet_rdma_queue_response()
735 cm_id->port_num, NULL, &rsp->send_wr); in nvmet_rdma_queue_response()
737 first_wr = &rsp->send_wr; in nvmet_rdma_queue_response()
740 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
742 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
743 rsp->send_sge.addr, rsp->send_sge.length, in nvmet_rdma_queue_response()
748 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_queue_response()
754 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_read_data_done() local
759 WARN_ON(rsp->n_rdma <= 0); in nvmet_rdma_read_data_done()
760 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
761 rsp->n_rdma = 0; in nvmet_rdma_read_data_done()
764 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_read_data_done()
765 nvmet_req_uninit(&rsp->req); in nvmet_rdma_read_data_done()
766 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_read_data_done()
775 if (rsp->req.metadata_len) in nvmet_rdma_read_data_done()
776 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); in nvmet_rdma_read_data_done()
777 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_read_data_done()
780 nvmet_req_complete(&rsp->req, status); in nvmet_rdma_read_data_done()
782 rsp->req.execute(&rsp->req); in nvmet_rdma_read_data_done()
787 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_write_data_done() local
790 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_write_data_done()
796 WARN_ON(rsp->n_rdma <= 0); in nvmet_rdma_write_data_done()
797 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_write_data_done()
798 rsp->n_rdma = 0; in nvmet_rdma_write_data_done()
801 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_write_data_done()
802 nvmet_req_uninit(&rsp->req); in nvmet_rdma_write_data_done()
803 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_write_data_done()
817 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); in nvmet_rdma_write_data_done()
819 rsp->req.cqe->status = cpu_to_le16(status << 1); in nvmet_rdma_write_data_done()
820 nvmet_rdma_rw_ctx_destroy(rsp); in nvmet_rdma_write_data_done()
822 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) { in nvmet_rdma_write_data_done()
824 nvmet_rdma_release_rsp(rsp); in nvmet_rdma_write_data_done()
828 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len, in nvmet_rdma_use_inline_sg() argument
835 sg = rsp->cmd->inline_sg; in nvmet_rdma_use_inline_sg()
848 rsp->req.sg = rsp->cmd->inline_sg; in nvmet_rdma_use_inline_sg()
849 rsp->req.sg_cnt = sg_count; in nvmet_rdma_use_inline_sg()
852 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_map_sgl_inline() argument
854 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl; in nvmet_rdma_map_sgl_inline()
858 if (!nvme_is_write(rsp->req.cmd)) { in nvmet_rdma_map_sgl_inline()
859 rsp->req.error_loc = in nvmet_rdma_map_sgl_inline()
864 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
873 nvmet_rdma_use_inline_sg(rsp, len, off); in nvmet_rdma_map_sgl_inline()
874 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA; in nvmet_rdma_map_sgl_inline()
875 rsp->req.transfer_len += len; in nvmet_rdma_map_sgl_inline()
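In nvmet_rdma_map_sgl_inline() the command's data lives in-capsule: the inline format is rejected for anything that is not a write command, the descriptor's offset plus length is checked against the inline data size the queue advertised, and then req.sg is pointed at the pre-posted inline buffer and the length added to transfer_len. A minimal sketch of just the validation step (illustrative helper, with the off + len bound written in an overflow-safe form):

```c
#include <stdbool.h>
#include <stdint.h>

/* Validate an in-capsule (inline) data descriptor: the offset/length
 * pair must lie entirely inside the inline buffer advertised to the
 * host at connect time. Illustrative only. */
static bool inline_sgl_valid(bool is_write, uint64_t off, uint32_t len,
			     uint32_t inline_data_size)
{
	if (!is_write)  /* only host-to-controller data can be in-capsule */
		return false;
	if (off > inline_data_size || len > inline_data_size - off)
		return false;
	return true;
}
```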
879 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, in nvmet_rdma_map_sgl_keyed() argument
887 rsp->req.transfer_len = get_unaligned_le24(sgl->length); in nvmet_rdma_map_sgl_keyed()
890 if (!rsp->req.transfer_len) in nvmet_rdma_map_sgl_keyed()
893 if (rsp->req.metadata_len) in nvmet_rdma_map_sgl_keyed()
894 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs); in nvmet_rdma_map_sgl_keyed()
896 ret = nvmet_req_alloc_sgls(&rsp->req); in nvmet_rdma_map_sgl_keyed()
900 ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs); in nvmet_rdma_map_sgl_keyed()
903 rsp->n_rdma += ret; in nvmet_rdma_map_sgl_keyed()
906 rsp->invalidate_rkey = key; in nvmet_rdma_map_sgl_keyed()
907 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY; in nvmet_rdma_map_sgl_keyed()
913 rsp->req.transfer_len = 0; in nvmet_rdma_map_sgl_keyed()
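nvmet_rdma_map_sgl_keyed() pulls the host buffer's address, 24-bit length, and remote key (rkey) out of the command's keyed SGL descriptor, allocates scatterlists for the transfer, and sets up an rdma-rw context; when the host asked for remote invalidation, the rkey is also stashed so the completion can be sent with SEND_WITH_INV. The 16-byte keyed SGL data block layout comes from the NVMe specification; a standalone parse of it might look like this (helper names are illustrative, not the kernel's get_unaligned_le* accessors):

```c
#include <stdint.h>

/* Little-endian field extraction (analogues of get_unaligned_leXX). */
static uint32_t le24(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16);
}

static uint32_t le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[3] << 24);
}

static uint64_t le64(const uint8_t *p)
{
	return (uint64_t)le32(p) | ((uint64_t)le32(p + 4) << 32);
}

/* A keyed SGL data block descriptor is 16 bytes:
 *   bytes  0..7  remote address (little endian)
 *   bytes  8..10 length in bytes (24-bit little endian)
 *   bytes 11..14 remote key (rkey) for the RDMA operation
 *   byte  15     SGL descriptor type/subtype
 */
struct keyed_sgl {
	uint64_t addr;
	uint32_t len;
	uint32_t key;
	uint8_t  type;
};

static void parse_keyed_sgl(const uint8_t desc[16], struct keyed_sgl *out)
{
	out->addr = le64(desc);
	out->len  = le24(desc + 8);
	out->key  = le32(desc + 11);
	out->type = desc[15];
}
```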
917 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_map_sgl() argument
919 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl; in nvmet_rdma_map_sgl()
925 return nvmet_rdma_map_sgl_inline(rsp); in nvmet_rdma_map_sgl()
928 rsp->req.error_loc = in nvmet_rdma_map_sgl()
935 return nvmet_rdma_map_sgl_keyed(rsp, sgl, true); in nvmet_rdma_map_sgl()
937 return nvmet_rdma_map_sgl_keyed(rsp, sgl, false); in nvmet_rdma_map_sgl()
940 rsp->req.error_loc = in nvmet_rdma_map_sgl()
946 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_rdma_map_sgl()
951 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) in nvmet_rdma_execute_command() argument
953 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command()
955 if (unlikely(atomic_sub_return(1 + rsp->n_rdma, in nvmet_rdma_execute_command()
958 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
960 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
964 if (nvmet_rdma_need_data_in(rsp)) { in nvmet_rdma_execute_command()
965 if (rdma_rw_ctx_post(&rsp->rw, queue->qp, in nvmet_rdma_execute_command()
966 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
967 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); in nvmet_rdma_execute_command()
969 rsp->req.execute(&rsp->req); in nvmet_rdma_execute_command()
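nvmet_rdma_execute_command() charges the send queue for 1 + n_rdma work requests up front (one SEND for the completion plus the RDMA READ/WRITE requests for the payload); if that would overdraw sq_wr_avail it immediately credits the amount back so the command can stay on the wait list, and nvmet_rdma_release_rsp() (earlier in the listing) returns the same 1 + n_rdma once everything has completed. A userspace model of the accounting with C11 atomics, where the new value is computed explicitly because atomic_fetch_sub returns the old value (mirroring the kernel's atomic_sub_return):

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Free send-queue slots; e.g. initialized to the SQ depth. */
static atomic_int sq_wr_avail = 64;

/* Reserve 1 send WR plus n_rdma data-transfer WRs; roll the
 * reservation back if the queue is already saturated so the caller
 * can defer the command to the wait list. */
static bool reserve_sq_credits(int n_rdma)
{
	int need = 1 + n_rdma;

	if (atomic_fetch_sub(&sq_wr_avail, need) - need < 0) {
		atomic_fetch_add(&sq_wr_avail, need);
		return false;
	}
	return true;
}

/* Give the credits back once the send (and any RDMA work) completed,
 * as nvmet_rdma_release_rsp() does. */
static void release_sq_credits(int n_rdma)
{
	atomic_fetch_add(&sq_wr_avail, 1 + n_rdma);
}
```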
1012 struct nvmet_rdma_rsp *rsp; in nvmet_rdma_recv_done() local
1031 rsp = nvmet_rdma_get_rsp(queue); in nvmet_rdma_recv_done()
1032 if (unlikely(!rsp)) { in nvmet_rdma_recv_done()
1041 rsp->queue = queue; in nvmet_rdma_recv_done()
1042 rsp->cmd = cmd; in nvmet_rdma_recv_done()
1043 rsp->flags = 0; in nvmet_rdma_recv_done()
1044 rsp->req.cmd = cmd->nvme_cmd; in nvmet_rdma_recv_done()
1045 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
1046 rsp->n_rdma = 0; in nvmet_rdma_recv_done()
1053 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_done()
1055 nvmet_rdma_put_rsp(rsp); in nvmet_rdma_recv_done()
1060 nvmet_rdma_handle_command(queue, rsp); in nvmet_rdma_recv_done()
1652 struct nvmet_rdma_rsp *rsp; in __nvmet_rdma_queue_disconnect() local
1654 rsp = list_first_entry(&queue->rsp_wait_list, in __nvmet_rdma_queue_disconnect()
1657 list_del(&rsp->wait_list); in __nvmet_rdma_queue_disconnect()
1658 nvmet_rdma_put_rsp(rsp); in __nvmet_rdma_queue_disconnect()
1985 struct nvmet_rdma_rsp *rsp = in nvmet_rdma_disc_port_addr() local
1987 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()