Lines Matching refs:sqe

65 struct nvme_rdma_qe sqe; member
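For context: `sqe` is the send-queue entry embedded in each nvme_rdma_request. The sketch below reconstructs the queue-entry type implied by the accesses in this listing (a completion entry, a CPU pointer to the command buffer, and its DMA handle); the field names cqe, data, and dma are taken from the references that follow, but this is an illustrative reconstruction, not a verbatim copy of the driver.

/* Send-queue entry as implied by the cqe/data/dma accesses in this
 * listing; an illustrative reconstruction, not the driver's exact text. */
#include <linux/types.h>
#include <rdma/ib_verbs.h>

struct nvme_rdma_qe {
	struct ib_cqe	cqe;	/* completion callback slot (ib_cqe.done) */
	void		*data;	/* CPU address of the struct nvme_command */
	u64		dma;	/* DMA handle from ib_dma_map_single() */
};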
293 kfree(req->sqe.data); in nvme_rdma_exit_request()
306 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); in nvme_rdma_init_request()
307 if (!req->sqe.data) in nvme_rdma_init_request()
1639 container_of(qe, struct nvme_rdma_request, sqe); in nvme_rdma_send_done()
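Lines 306-307 and 293 pair the allocation and release of the per-request command buffer, and line 1639 shows the completion handler recovering the owning request from the embedded entry via container_of(). A sketch of that pattern, assuming the nvme_rdma_qe layout sketched above; the surrounding request fields are elided.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/nvme.h>		/* struct nvme_command */
#include <linux/slab.h>

struct nvme_rdma_request {
	struct nvme_rdma_qe	sqe;	/* embedded entry from line 65 */
	/* ... other members elided ... */
};

/* One zeroed NVMe command per request (lines 306-307). */
static int init_request_sketch(struct nvme_rdma_request *req)
{
	req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
	return req->sqe.data ? 0 : -ENOMEM;
}

/* Matching release (line 293). */
static void exit_request_sketch(struct nvme_rdma_request *req)
{
	kfree(req->sqe.data);
}

/* The send completion path only sees the nvme_rdma_qe; container_of()
 * walks back to the request that embeds it (line 1639). */
static struct nvme_rdma_request *req_from_qe(struct nvme_rdma_qe *qe)
{
	return container_of(qe, struct nvme_rdma_request, sqe);
}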
1724 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; in nvme_rdma_submit_async_event() local
1725 struct nvme_command *cmd = sqe->data; in nvme_rdma_submit_async_event()
1729 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); in nvme_rdma_submit_async_event()
1737 sqe->cqe.done = nvme_rdma_async_done; in nvme_rdma_submit_async_event()
1739 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), in nvme_rdma_submit_async_event()
1742 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); in nvme_rdma_submit_async_event()
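Lines 1724-1742 show the DMA ownership handoff around posting the async-event command: sync the buffer toward the CPU, write the command, install the completion callback, sync back toward the device, then post a single-SGE send. A sketch of that sequence; nvme_rdma_post_send() and nvme_rdma_async_done() are the driver-internal symbols visible in the listing, declared here with signatures inferred from their call sites, and the lkey parameter stands in for the queue's local key.

#include <linux/nvme.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

struct nvme_rdma_queue;		/* opaque here; the real type is driver-internal */

/* Driver-internal helpers; signatures inferred from this listing. */
int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
			struct nvme_rdma_qe *qe, struct ib_sge *sge,
			u32 num_sge, struct ib_send_wr *first);
void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc);

static int submit_async_event_sketch(struct nvme_rdma_queue *queue,
				     struct ib_device *dev,
				     struct nvme_rdma_qe *sqe, u32 lkey)
{
	struct nvme_command *cmd = sqe->data;
	struct ib_sge sge;

	/* Hand the buffer to the CPU before writing the command (line 1729). */
	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);

	memset(cmd, 0, sizeof(*cmd));
	cmd->common.opcode = nvme_admin_async_event;
	/* AER tag and flags elided. */

	sqe->cqe.done = nvme_rdma_async_done;	/* line 1737 */

	/* Hand it back to the device before posting (line 1739). */
	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
				      DMA_TO_DEVICE);

	sge.addr   = sqe->dma;
	sge.length = sizeof(*cmd);
	sge.lkey   = lkey;

	return nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);	/* line 1742 */
}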
2053 struct nvme_rdma_qe *sqe = &req->sqe; in nvme_rdma_queue_rq() local
2054 struct nvme_command *c = sqe->data; in nvme_rdma_queue_rq()
2067 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data, in nvme_rdma_queue_rq()
2070 err = ib_dma_mapping_error(dev, req->sqe.dma); in nvme_rdma_queue_rq()
2074 ib_dma_sync_single_for_cpu(dev, sqe->dma, in nvme_rdma_queue_rq()
2099 sqe->cqe.done = nvme_rdma_send_done; in nvme_rdma_queue_rq()
2101 ib_dma_sync_single_for_device(dev, sqe->dma, in nvme_rdma_queue_rq()
2104 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, in nvme_rdma_queue_rq()
2120 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_queue_rq()
2173 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_complete_rq()
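Finally, the queue_rq/complete_rq pair brackets each command's DMA mapping: map and check the handle at submission (lines 2067-2070), sync around filling the command (2074, 2101), and unmap when the request completes (2173) or when submission fails (2120). A sketch of that lifecycle under the types sketched above, with the block-layer plumbing and data SGE setup elided.

#include <linux/errno.h>
#include <linux/nvme.h>
#include <rdma/ib_verbs.h>

void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);	/* driver-internal */

static int queue_rq_mapping_sketch(struct ib_device *dev,
				   struct nvme_rdma_request *req)
{
	struct nvme_rdma_qe *sqe = &req->sqe;

	/* Map the command buffer and verify the handle before using it
	 * (lines 2067-2070). */
	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
					 sizeof(struct nvme_command),
					 DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, req->sqe.dma))
		return -EIO;

	/* CPU ownership while the command is built (line 2074). */
	ib_dma_sync_single_for_cpu(dev, sqe->dma,
				   sizeof(struct nvme_command), DMA_TO_DEVICE);
	/* ... build the struct nvme_command in sqe->data here ... */
	sqe->cqe.done = nvme_rdma_send_done;	/* line 2099 */

	/* Device ownership restored before posting (line 2101). */
	ib_dma_sync_single_for_device(dev, sqe->dma,
				      sizeof(struct nvme_command),
				      DMA_TO_DEVICE);

	/* nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, ...)
	 * follows here; on failure the buffer is unmapped immediately
	 * (line 2120), exactly as in the completion path below. */
	return 0;
}

/* Matching unmap once the request is done (line 2173). */
static void complete_rq_unmap_sketch(struct ib_device *ibdev,
				     struct nvme_rdma_request *req)
{
	ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
}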