Lines Matching +full:no +full:- +full:memory +full:- +full:wc
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
14 #include <linux/blk-mq.h>
15 #include <linux/blk-mq-rdma.h>
25 #include <linux/nvme-rdma.h>
144 * allows read and write access to all physical memory.
149 "Use memory registration even for contiguous memory regions");
153 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
173 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_rdma_inline_data_size()
179 ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir); in nvme_rdma_free_qe()
180 kfree(qe->data); in nvme_rdma_free_qe()
186 qe->data = kzalloc(capsule_size, GFP_KERNEL); in nvme_rdma_alloc_qe()
187 if (!qe->data) in nvme_rdma_alloc_qe()
188 return -ENOMEM; in nvme_rdma_alloc_qe()
190 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); in nvme_rdma_alloc_qe()
191 if (ib_dma_mapping_error(ibdev, qe->dma)) { in nvme_rdma_alloc_qe()
192 kfree(qe->data); in nvme_rdma_alloc_qe()
193 qe->data = NULL; in nvme_rdma_alloc_qe()
194 return -ENOMEM; in nvme_rdma_alloc_qe()
225 * will issue error recovery and queue re-creation. in nvme_rdma_alloc_ring()
242 ib_event_msg(event->event), event->event); in nvme_rdma_qp_event()
250 ret = wait_for_completion_interruptible_timeout(&queue->cm_done, in nvme_rdma_wait_for_cm()
255 return -ETIMEDOUT; in nvme_rdma_wait_for_cm()
256 WARN_ON_ONCE(queue->cm_error > 0); in nvme_rdma_wait_for_cm()
257 return queue->cm_error; in nvme_rdma_wait_for_cm()
262 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_create_qp()
269 init_attr.cap.max_send_wr = factor * queue->queue_size + 1; in nvme_rdma_create_qp()
271 init_attr.cap.max_recv_wr = queue->queue_size + 1; in nvme_rdma_create_qp()
273 init_attr.cap.max_send_sge = 1 + dev->num_inline_segments; in nvme_rdma_create_qp()
276 init_attr.send_cq = queue->ib_cq; in nvme_rdma_create_qp()
277 init_attr.recv_cq = queue->ib_cq; in nvme_rdma_create_qp()
278 if (queue->pi_support) in nvme_rdma_create_qp()
282 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); in nvme_rdma_create_qp()
284 queue->qp = queue->cm_id->qp; in nvme_rdma_create_qp()
293 kfree(req->sqe.data); in nvme_rdma_exit_request()
300 struct nvme_rdma_ctrl *ctrl = set->driver_data; in nvme_rdma_init_request()
302 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_rdma_init_request()
303 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request()
305 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_rdma_init_request()
306 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); in nvme_rdma_init_request()
307 if (!req->sqe.data) in nvme_rdma_init_request()
308 return -ENOMEM; in nvme_rdma_init_request()
311 if (queue->pi_support) in nvme_rdma_init_request()
312 req->metadata_sgl = (void *)nvme_req(rq) + in nvme_rdma_init_request()
316 req->queue = queue; in nvme_rdma_init_request()
325 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx()
327 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); in nvme_rdma_init_hctx()
329 hctx->driver_data = queue; in nvme_rdma_init_hctx()
337 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx()
341 hctx->driver_data = queue; in nvme_rdma_init_admin_hctx()
351 list_del(&ndev->entry); in nvme_rdma_free_dev()
354 ib_dealloc_pd(ndev->pd); in nvme_rdma_free_dev()
360 kref_put(&dev->ref, nvme_rdma_free_dev); in nvme_rdma_dev_put()
365 return kref_get_unless_zero(&dev->ref); in nvme_rdma_dev_get()
375 if (ndev->dev->node_guid == cm_id->device->node_guid && in nvme_rdma_find_get_device()
384 ndev->dev = cm_id->device; in nvme_rdma_find_get_device()
385 kref_init(&ndev->ref); in nvme_rdma_find_get_device()
387 ndev->pd = ib_alloc_pd(ndev->dev, in nvme_rdma_find_get_device()
389 if (IS_ERR(ndev->pd)) in nvme_rdma_find_get_device()
392 if (!(ndev->dev->attrs.device_cap_flags & in nvme_rdma_find_get_device()
394 dev_err(&ndev->dev->dev, in nvme_rdma_find_get_device()
395 "Memory registrations not supported.\n"); in nvme_rdma_find_get_device()
399 ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS, in nvme_rdma_find_get_device()
400 ndev->dev->attrs.max_send_sge - 1); in nvme_rdma_find_get_device()
401 list_add(&ndev->entry, &device_list); in nvme_rdma_find_get_device()
407 ib_dealloc_pd(ndev->pd); in nvme_rdma_find_get_device()
418 ib_free_cq(queue->ib_cq); in nvme_rdma_free_cq()
420 ib_cq_pool_put(queue->ib_cq, queue->cq_size); in nvme_rdma_free_cq()
428 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags)) in nvme_rdma_destroy_queue_ib()
431 dev = queue->device; in nvme_rdma_destroy_queue_ib()
432 ibdev = dev->dev; in nvme_rdma_destroy_queue_ib()
434 if (queue->pi_support) in nvme_rdma_destroy_queue_ib()
435 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_destroy_queue_ib()
436 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_destroy_queue_ib()
443 ib_destroy_qp(queue->qp); in nvme_rdma_destroy_queue_ib()
446 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_destroy_queue_ib()
457 max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len; in nvme_rdma_get_max_fr_pages()
459 max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len; in nvme_rdma_get_max_fr_pages()
461 return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1); in nvme_rdma_get_max_fr_pages()
474 comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; in nvme_rdma_create_cq()
479 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, in nvme_rdma_create_cq()
483 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, in nvme_rdma_create_cq()
487 if (IS_ERR(queue->ib_cq)) { in nvme_rdma_create_cq()
488 ret = PTR_ERR(queue->ib_cq); in nvme_rdma_create_cq()
502 queue->device = nvme_rdma_find_get_device(queue->cm_id); in nvme_rdma_create_queue_ib()
503 if (!queue->device) { in nvme_rdma_create_queue_ib()
504 dev_err(queue->cm_id->device->dev.parent, in nvme_rdma_create_queue_ib()
505 "no client data found!\n"); in nvme_rdma_create_queue_ib()
506 return -ECONNREFUSED; in nvme_rdma_create_queue_ib()
508 ibdev = queue->device->dev; in nvme_rdma_create_queue_ib()
511 queue->cq_size = cq_factor * queue->queue_size + 1; in nvme_rdma_create_queue_ib()
521 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, in nvme_rdma_create_queue_ib()
523 if (!queue->rsp_ring) { in nvme_rdma_create_queue_ib()
524 ret = -ENOMEM; in nvme_rdma_create_queue_ib()
533 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; in nvme_rdma_create_queue_ib()
534 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, in nvme_rdma_create_queue_ib()
535 queue->queue_size, in nvme_rdma_create_queue_ib()
539 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
541 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
545 if (queue->pi_support) { in nvme_rdma_create_queue_ib()
546 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, in nvme_rdma_create_queue_ib()
547 queue->queue_size, IB_MR_TYPE_INTEGRITY, in nvme_rdma_create_queue_ib()
550 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
552 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
557 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); in nvme_rdma_create_queue_ib()
562 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_create_queue_ib()
564 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_create_queue_ib()
567 rdma_destroy_qp(queue->cm_id); in nvme_rdma_create_queue_ib()
571 nvme_rdma_dev_put(queue->device); in nvme_rdma_create_queue_ib()
582 queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
583 mutex_init(&queue->queue_lock); in nvme_rdma_alloc_queue()
584 queue->ctrl = ctrl; in nvme_rdma_alloc_queue()
585 if (idx && ctrl->ctrl.max_integrity_segments) in nvme_rdma_alloc_queue()
586 queue->pi_support = true; in nvme_rdma_alloc_queue()
588 queue->pi_support = false; in nvme_rdma_alloc_queue()
589 init_completion(&queue->cm_done); in nvme_rdma_alloc_queue()
592 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_rdma_alloc_queue()
594 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_rdma_alloc_queue()
596 queue->queue_size = queue_size; in nvme_rdma_alloc_queue()
598 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, in nvme_rdma_alloc_queue()
600 if (IS_ERR(queue->cm_id)) { in nvme_rdma_alloc_queue()
601 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
602 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); in nvme_rdma_alloc_queue()
603 ret = PTR_ERR(queue->cm_id); in nvme_rdma_alloc_queue()
607 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) in nvme_rdma_alloc_queue()
608 src_addr = (struct sockaddr *)&ctrl->src_addr; in nvme_rdma_alloc_queue()
610 queue->cm_error = -ETIMEDOUT; in nvme_rdma_alloc_queue()
611 ret = rdma_resolve_addr(queue->cm_id, src_addr, in nvme_rdma_alloc_queue()
612 (struct sockaddr *)&ctrl->addr, in nvme_rdma_alloc_queue()
615 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
622 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
627 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); in nvme_rdma_alloc_queue()
632 rdma_destroy_id(queue->cm_id); in nvme_rdma_alloc_queue()
635 mutex_destroy(&queue->queue_lock); in nvme_rdma_alloc_queue()
641 rdma_disconnect(queue->cm_id); in __nvme_rdma_stop_queue()
642 ib_drain_qp(queue->qp); in __nvme_rdma_stop_queue()
647 mutex_lock(&queue->queue_lock); in nvme_rdma_stop_queue()
648 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) in nvme_rdma_stop_queue()
650 mutex_unlock(&queue->queue_lock); in nvme_rdma_stop_queue()
655 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_free_queue()
658 rdma_destroy_id(queue->cm_id); in nvme_rdma_free_queue()
660 mutex_destroy(&queue->queue_lock); in nvme_rdma_free_queue()
667 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_free_io_queues()
668 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_free_io_queues()
675 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_stop_io_queues()
676 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_stop_io_queues()
681 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; in nvme_rdma_start_queue()
686 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll); in nvme_rdma_start_queue()
688 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_rdma_start_queue()
691 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_start_queue()
693 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_start_queue()
695 dev_info(ctrl->ctrl.device, in nvme_rdma_start_queue()
705 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_rdma_start_io_queues()
714 for (i--; i >= 1; i--) in nvme_rdma_start_io_queues()
715 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_start_io_queues()
721 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_rdma_alloc_io_queues()
722 struct ib_device *ibdev = ctrl->device->dev; in nvme_rdma_alloc_io_queues()
727 nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors, in nvme_rdma_alloc_io_queues()
728 min(opts->nr_io_queues, num_online_cpus())); in nvme_rdma_alloc_io_queues()
729 nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors, in nvme_rdma_alloc_io_queues()
730 min(opts->nr_write_queues, num_online_cpus())); in nvme_rdma_alloc_io_queues()
731 nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus()); in nvme_rdma_alloc_io_queues()
734 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_rdma_alloc_io_queues()
739 dev_err(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
741 return -ENOMEM; in nvme_rdma_alloc_io_queues()
744 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_rdma_alloc_io_queues()
745 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
748 if (opts->nr_write_queues && nr_read_queues < nr_io_queues) { in nvme_rdma_alloc_io_queues()
754 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues; in nvme_rdma_alloc_io_queues()
755 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_alloc_io_queues()
756 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_rdma_alloc_io_queues()
758 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_alloc_io_queues()
762 * either no write queues were requested, or we don't have in nvme_rdma_alloc_io_queues()
765 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_rdma_alloc_io_queues()
767 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_alloc_io_queues()
770 if (opts->nr_poll_queues && nr_io_queues) { in nvme_rdma_alloc_io_queues()
772 ctrl->io_queues[HCTX_TYPE_POLL] = in nvme_rdma_alloc_io_queues()
776 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_rdma_alloc_io_queues()
778 ctrl->ctrl.sqsize + 1); in nvme_rdma_alloc_io_queues()
786 for (i--; i >= 1; i--) in nvme_rdma_alloc_io_queues()
787 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_alloc_io_queues()
800 set = &ctrl->admin_tag_set; in nvme_rdma_alloc_tagset()
802 set->ops = &nvme_rdma_admin_mq_ops; in nvme_rdma_alloc_tagset()
803 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_rdma_alloc_tagset()
804 set->reserved_tags = 2; /* connect + keep-alive */ in nvme_rdma_alloc_tagset()
805 set->numa_node = nctrl->numa_node; in nvme_rdma_alloc_tagset()
806 set->cmd_size = sizeof(struct nvme_rdma_request) + in nvme_rdma_alloc_tagset()
808 set->driver_data = ctrl; in nvme_rdma_alloc_tagset()
809 set->nr_hw_queues = 1; in nvme_rdma_alloc_tagset()
810 set->timeout = ADMIN_TIMEOUT; in nvme_rdma_alloc_tagset()
811 set->flags = BLK_MQ_F_NO_SCHED; in nvme_rdma_alloc_tagset()
813 set = &ctrl->tag_set; in nvme_rdma_alloc_tagset()
815 set->ops = &nvme_rdma_mq_ops; in nvme_rdma_alloc_tagset()
816 set->queue_depth = nctrl->sqsize + 1; in nvme_rdma_alloc_tagset()
817 set->reserved_tags = 1; /* fabric connect */ in nvme_rdma_alloc_tagset()
818 set->numa_node = nctrl->numa_node; in nvme_rdma_alloc_tagset()
819 set->flags = BLK_MQ_F_SHOULD_MERGE; in nvme_rdma_alloc_tagset()
820 set->cmd_size = sizeof(struct nvme_rdma_request) + in nvme_rdma_alloc_tagset()
822 if (nctrl->max_integrity_segments) in nvme_rdma_alloc_tagset()
823 set->cmd_size += sizeof(struct nvme_rdma_sgl) + in nvme_rdma_alloc_tagset()
825 set->driver_data = ctrl; in nvme_rdma_alloc_tagset()
826 set->nr_hw_queues = nctrl->queue_count - 1; in nvme_rdma_alloc_tagset()
827 set->timeout = NVME_IO_TIMEOUT; in nvme_rdma_alloc_tagset()
828 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; in nvme_rdma_alloc_tagset()
842 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_rdma_destroy_admin_queue()
843 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_rdma_destroy_admin_queue()
844 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); in nvme_rdma_destroy_admin_queue()
846 if (ctrl->async_event_sqe.data) { in nvme_rdma_destroy_admin_queue()
847 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_rdma_destroy_admin_queue()
848 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_destroy_admin_queue()
850 ctrl->async_event_sqe.data = NULL; in nvme_rdma_destroy_admin_queue()
852 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_destroy_admin_queue()
865 ctrl->device = ctrl->queues[0].device; in nvme_rdma_configure_admin_queue()
866 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev); in nvme_rdma_configure_admin_queue()
868 /* T10-PI support */ in nvme_rdma_configure_admin_queue()
869 if (ctrl->device->dev->attrs.device_cap_flags & in nvme_rdma_configure_admin_queue()
873 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev, in nvme_rdma_configure_admin_queue()
879 * error recovery and queue re-creation. in nvme_rdma_configure_admin_queue()
881 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
887 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); in nvme_rdma_configure_admin_queue()
888 if (IS_ERR(ctrl->ctrl.admin_tagset)) { in nvme_rdma_configure_admin_queue()
889 error = PTR_ERR(ctrl->ctrl.admin_tagset); in nvme_rdma_configure_admin_queue()
893 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_rdma_configure_admin_queue()
894 if (IS_ERR(ctrl->ctrl.fabrics_q)) { in nvme_rdma_configure_admin_queue()
895 error = PTR_ERR(ctrl->ctrl.fabrics_q); in nvme_rdma_configure_admin_queue()
899 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_rdma_configure_admin_queue()
900 if (IS_ERR(ctrl->ctrl.admin_q)) { in nvme_rdma_configure_admin_queue()
901 error = PTR_ERR(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
910 error = nvme_enable_ctrl(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
914 ctrl->ctrl.max_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
915 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); in nvme_rdma_configure_admin_queue()
917 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
919 ctrl->ctrl.max_integrity_segments = 0; in nvme_rdma_configure_admin_queue()
921 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
923 error = nvme_init_identify(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
930 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
931 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
933 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
934 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
937 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
940 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_rdma_configure_admin_queue()
943 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); in nvme_rdma_configure_admin_queue()
945 if (ctrl->async_event_sqe.data) { in nvme_rdma_configure_admin_queue()
946 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
948 ctrl->async_event_sqe.data = NULL; in nvme_rdma_configure_admin_queue()
951 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
959 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_rdma_destroy_io_queues()
960 blk_mq_free_tag_set(ctrl->ctrl.tagset); in nvme_rdma_destroy_io_queues()
974 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); in nvme_rdma_configure_io_queues()
975 if (IS_ERR(ctrl->ctrl.tagset)) { in nvme_rdma_configure_io_queues()
976 ret = PTR_ERR(ctrl->ctrl.tagset); in nvme_rdma_configure_io_queues()
980 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); in nvme_rdma_configure_io_queues()
981 if (IS_ERR(ctrl->ctrl.connect_q)) { in nvme_rdma_configure_io_queues()
982 ret = PTR_ERR(ctrl->ctrl.connect_q); in nvme_rdma_configure_io_queues()
992 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
993 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { in nvme_rdma_configure_io_queues()
999 ret = -ENODEV; in nvme_rdma_configure_io_queues()
1002 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, in nvme_rdma_configure_io_queues()
1003 ctrl->ctrl.queue_count - 1); in nvme_rdma_configure_io_queues()
1004 nvme_unfreeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1010 nvme_stop_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1011 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1014 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1016 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_rdma_configure_io_queues()
1019 blk_mq_free_tag_set(ctrl->ctrl.tagset); in nvme_rdma_configure_io_queues()
1028 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
1029 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
1030 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_teardown_admin_queue()
1031 if (ctrl->ctrl.admin_tagset) { in nvme_rdma_teardown_admin_queue()
1032 blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, in nvme_rdma_teardown_admin_queue()
1033 nvme_cancel_request, &ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
1034 blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset); in nvme_rdma_teardown_admin_queue()
1037 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
1044 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_teardown_io_queues()
1045 nvme_start_freeze(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1046 nvme_stop_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1047 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1049 if (ctrl->ctrl.tagset) { in nvme_rdma_teardown_io_queues()
1050 blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, in nvme_rdma_teardown_io_queues()
1051 nvme_cancel_request, &ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1052 blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset); in nvme_rdma_teardown_io_queues()
1055 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1064 cancel_work_sync(&ctrl->err_work); in nvme_rdma_stop_ctrl()
1065 cancel_delayed_work_sync(&ctrl->reconnect_work); in nvme_rdma_stop_ctrl()
1072 if (list_empty(&ctrl->list)) in nvme_rdma_free_ctrl()
1076 list_del(&ctrl->list); in nvme_rdma_free_ctrl()
1079 nvmf_free_options(nctrl->opts); in nvme_rdma_free_ctrl()
1081 kfree(ctrl->queues); in nvme_rdma_free_ctrl()
1088 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { in nvme_rdma_reconnect_or_remove()
1089 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || in nvme_rdma_reconnect_or_remove()
1090 ctrl->ctrl.state == NVME_CTRL_LIVE); in nvme_rdma_reconnect_or_remove()
1094 if (nvmf_should_reconnect(&ctrl->ctrl)) { in nvme_rdma_reconnect_or_remove()
1095 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", in nvme_rdma_reconnect_or_remove()
1096 ctrl->ctrl.opts->reconnect_delay); in nvme_rdma_reconnect_or_remove()
1097 queue_delayed_work(nvme_wq, &ctrl->reconnect_work, in nvme_rdma_reconnect_or_remove()
1098 ctrl->ctrl.opts->reconnect_delay * HZ); in nvme_rdma_reconnect_or_remove()
1100 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_reconnect_or_remove()
1106 int ret = -EINVAL; in nvme_rdma_setup_ctrl()
1113 if (ctrl->ctrl.icdoff) { in nvme_rdma_setup_ctrl()
1114 ret = -EOPNOTSUPP; in nvme_rdma_setup_ctrl()
1115 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); in nvme_rdma_setup_ctrl()
1119 if (!(ctrl->ctrl.sgls & (1 << 2))) { in nvme_rdma_setup_ctrl()
1120 ret = -EOPNOTSUPP; in nvme_rdma_setup_ctrl()
1121 dev_err(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1126 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { in nvme_rdma_setup_ctrl()
1127 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1129 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); in nvme_rdma_setup_ctrl()
1132 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { in nvme_rdma_setup_ctrl()
1133 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1135 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); in nvme_rdma_setup_ctrl()
1136 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; in nvme_rdma_setup_ctrl()
1139 if (ctrl->ctrl.sgls & (1 << 20)) in nvme_rdma_setup_ctrl()
1140 ctrl->use_inline_data = true; in nvme_rdma_setup_ctrl()
1142 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1148 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_rdma_setup_ctrl()
1155 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && in nvme_rdma_setup_ctrl()
1156 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); in nvme_rdma_setup_ctrl()
1158 ret = -EINVAL; in nvme_rdma_setup_ctrl()
1162 nvme_start_ctrl(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1166 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1167 nvme_stop_queues(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1168 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1170 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1174 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_setup_ctrl()
1175 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_setup_ctrl()
1176 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_setup_ctrl()
1177 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1187 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reconnect_ctrl_work()
1192 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", in nvme_rdma_reconnect_ctrl_work()
1193 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1195 ctrl->ctrl.nr_reconnects = 0; in nvme_rdma_reconnect_ctrl_work()
1200 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", in nvme_rdma_reconnect_ctrl_work()
1201 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1210 nvme_stop_keep_alive(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1211 flush_work(&ctrl->ctrl.async_event_work); in nvme_rdma_error_recovery_work()
1213 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1215 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_error_recovery_work()
1217 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_error_recovery_work()
1219 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && in nvme_rdma_error_recovery_work()
1220 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); in nvme_rdma_error_recovery_work()
1229 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) in nvme_rdma_error_recovery()
1232 dev_warn(ctrl->ctrl.device, "starting error recovery\n"); in nvme_rdma_error_recovery()
1233 queue_work(nvme_reset_wq, &ctrl->err_work); in nvme_rdma_error_recovery()
1240 if (!refcount_dec_and_test(&req->ref)) in nvme_rdma_end_request()
1242 if (!nvme_try_complete_req(rq, req->status, req->result)) in nvme_rdma_end_request()
1246 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, in nvme_rdma_wr_error() argument
1249 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_wr_error()
1250 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_wr_error()
1252 if (ctrl->ctrl.state == NVME_CTRL_LIVE) in nvme_rdma_wr_error()
1253 dev_info(ctrl->ctrl.device, in nvme_rdma_wr_error()
1255 op, wc->wr_cqe, in nvme_rdma_wr_error()
1256 ib_wc_status_msg(wc->status), wc->status); in nvme_rdma_wr_error()
1260 static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_memreg_done() argument
1262 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_memreg_done()
1263 nvme_rdma_wr_error(cq, wc, "MEMREG"); in nvme_rdma_memreg_done()
1266 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_inv_rkey_done() argument
1269 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); in nvme_rdma_inv_rkey_done()
1271 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_inv_rkey_done()
1272 nvme_rdma_wr_error(cq, wc, "LOCAL_INV"); in nvme_rdma_inv_rkey_done()
1285 .ex.invalidate_rkey = req->mr->rkey, in nvme_rdma_inv_rkey()
1288 req->reg_cqe.done = nvme_rdma_inv_rkey_done; in nvme_rdma_inv_rkey()
1289 wr.wr_cqe = &req->reg_cqe; in nvme_rdma_inv_rkey()
1291 return ib_post_send(queue->qp, &wr, NULL); in nvme_rdma_inv_rkey()
1298 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_unmap_data()
1299 struct ib_device *ibdev = dev->dev; in nvme_rdma_unmap_data()
1300 struct list_head *pool = &queue->qp->rdma_mrs; in nvme_rdma_unmap_data()
1306 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, in nvme_rdma_unmap_data()
1307 req->metadata_sgl->nents, rq_dma_dir(rq)); in nvme_rdma_unmap_data()
1308 sg_free_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_unmap_data()
1312 if (req->use_sig_mr) in nvme_rdma_unmap_data()
1313 pool = &queue->qp->sig_mrs; in nvme_rdma_unmap_data()
1315 if (req->mr) { in nvme_rdma_unmap_data()
1316 ib_mr_pool_put(queue->qp, pool, req->mr); in nvme_rdma_unmap_data()
1317 req->mr = NULL; in nvme_rdma_unmap_data()
1320 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, in nvme_rdma_unmap_data()
1322 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); in nvme_rdma_unmap_data()
1327 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_set_sg_null()
1329 sg->addr = 0; in nvme_rdma_set_sg_null()
1330 put_unaligned_le24(0, sg->length); in nvme_rdma_set_sg_null()
1331 put_unaligned_le32(0, sg->key); in nvme_rdma_set_sg_null()
1332 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_set_sg_null()
1340 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_rdma_map_sg_inline()
1341 struct ib_sge *sge = &req->sge[1]; in nvme_rdma_map_sg_inline()
1346 for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { in nvme_rdma_map_sg_inline()
1347 sge->addr = sg_dma_address(sgl); in nvme_rdma_map_sg_inline()
1348 sge->length = sg_dma_len(sgl); in nvme_rdma_map_sg_inline()
1349 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_map_sg_inline()
1350 len += sge->length; in nvme_rdma_map_sg_inline()
1354 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_rdma_map_sg_inline()
1355 sg->length = cpu_to_le32(len); in nvme_rdma_map_sg_inline()
1356 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_rdma_map_sg_inline()
1358 req->num_sge += count; in nvme_rdma_map_sg_inline()
1365 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_single()
1367 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl)); in nvme_rdma_map_sg_single()
1368 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length); in nvme_rdma_map_sg_single()
1369 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); in nvme_rdma_map_sg_single()
1370 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_map_sg_single()
1378 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_fr()
1381 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_map_sg_fr()
1382 if (WARN_ON_ONCE(!req->mr)) in nvme_rdma_map_sg_fr()
1383 return -EAGAIN; in nvme_rdma_map_sg_fr()
1389 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL, in nvme_rdma_map_sg_fr()
1392 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); in nvme_rdma_map_sg_fr()
1393 req->mr = NULL; in nvme_rdma_map_sg_fr()
1396 return -EINVAL; in nvme_rdma_map_sg_fr()
1399 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); in nvme_rdma_map_sg_fr()
1401 req->reg_cqe.done = nvme_rdma_memreg_done; in nvme_rdma_map_sg_fr()
1402 memset(&req->reg_wr, 0, sizeof(req->reg_wr)); in nvme_rdma_map_sg_fr()
1403 req->reg_wr.wr.opcode = IB_WR_REG_MR; in nvme_rdma_map_sg_fr()
1404 req->reg_wr.wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_fr()
1405 req->reg_wr.wr.num_sge = 0; in nvme_rdma_map_sg_fr()
1406 req->reg_wr.mr = req->mr; in nvme_rdma_map_sg_fr()
1407 req->reg_wr.key = req->mr->rkey; in nvme_rdma_map_sg_fr()
1408 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE | in nvme_rdma_map_sg_fr()
1412 sg->addr = cpu_to_le64(req->mr->iova); in nvme_rdma_map_sg_fr()
1413 put_unaligned_le24(req->mr->length, sg->length); in nvme_rdma_map_sg_fr()
1414 put_unaligned_le32(req->mr->rkey, sg->key); in nvme_rdma_map_sg_fr()
1415 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | in nvme_rdma_map_sg_fr()
1425 domain->sig_type = IB_SIG_TYPE_T10_DIF; in nvme_rdma_set_sig_domain()
1426 domain->sig.dif.bg_type = IB_T10DIF_CRC; in nvme_rdma_set_sig_domain()
1427 domain->sig.dif.pi_interval = 1 << bi->interval_exp; in nvme_rdma_set_sig_domain()
1428 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); in nvme_rdma_set_sig_domain()
1430 domain->sig.dif.ref_remap = true; in nvme_rdma_set_sig_domain()
1432 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); in nvme_rdma_set_sig_domain()
1433 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); in nvme_rdma_set_sig_domain()
1434 domain->sig.dif.app_escape = true; in nvme_rdma_set_sig_domain()
1436 domain->sig.dif.ref_escape = true; in nvme_rdma_set_sig_domain()
1443 u16 control = le16_to_cpu(cmd->rw.control); in nvme_rdma_set_sig_attrs()
1447 /* for WRITE_INSERT/READ_STRIP no memory domain */ in nvme_rdma_set_sig_attrs()
1448 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; in nvme_rdma_set_sig_attrs()
1449 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, in nvme_rdma_set_sig_attrs()
1453 cmd->rw.control = cpu_to_le16(control); in nvme_rdma_set_sig_attrs()
1455 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ in nvme_rdma_set_sig_attrs()
1456 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, in nvme_rdma_set_sig_attrs()
1458 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, in nvme_rdma_set_sig_attrs()
1466 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF) in nvme_rdma_set_prot_checks()
1468 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD) in nvme_rdma_set_prot_checks()
1472 static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_sig_done() argument
1474 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_sig_done()
1475 nvme_rdma_wr_error(cq, wc, "SIG"); in nvme_rdma_sig_done()
1482 struct nvme_rdma_sgl *sgl = &req->data_sgl; in nvme_rdma_map_sg_pi()
1483 struct ib_reg_wr *wr = &req->reg_wr; in nvme_rdma_map_sg_pi()
1485 struct nvme_ns *ns = rq->q->queuedata; in nvme_rdma_map_sg_pi()
1486 struct bio *bio = rq->bio; in nvme_rdma_map_sg_pi()
1487 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_pi()
1490 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_map_sg_pi()
1491 if (WARN_ON_ONCE(!req->mr)) in nvme_rdma_map_sg_pi()
1492 return -EAGAIN; in nvme_rdma_map_sg_pi()
1494 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL, in nvme_rdma_map_sg_pi()
1495 req->metadata_sgl->sg_table.sgl, pi_count, NULL, in nvme_rdma_map_sg_pi()
1500 nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c, in nvme_rdma_map_sg_pi()
1501 req->mr->sig_attrs, ns->pi_type); in nvme_rdma_map_sg_pi()
1502 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); in nvme_rdma_map_sg_pi()
1504 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); in nvme_rdma_map_sg_pi()
1506 req->reg_cqe.done = nvme_rdma_sig_done; in nvme_rdma_map_sg_pi()
1508 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; in nvme_rdma_map_sg_pi()
1509 wr->wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_pi()
1510 wr->wr.num_sge = 0; in nvme_rdma_map_sg_pi()
1511 wr->wr.send_flags = 0; in nvme_rdma_map_sg_pi()
1512 wr->mr = req->mr; in nvme_rdma_map_sg_pi()
1513 wr->key = req->mr->rkey; in nvme_rdma_map_sg_pi()
1514 wr->access = IB_ACCESS_LOCAL_WRITE | in nvme_rdma_map_sg_pi()
1518 sg->addr = cpu_to_le64(req->mr->iova); in nvme_rdma_map_sg_pi()
1519 put_unaligned_le24(req->mr->length, sg->length); in nvme_rdma_map_sg_pi()
1520 put_unaligned_le32(req->mr->rkey, sg->key); in nvme_rdma_map_sg_pi()
1521 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_map_sg_pi()
1526 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); in nvme_rdma_map_sg_pi()
1527 req->mr = NULL; in nvme_rdma_map_sg_pi()
1530 return -EINVAL; in nvme_rdma_map_sg_pi()
1537 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_map_data()
1538 struct ib_device *ibdev = dev->dev; in nvme_rdma_map_data()
1542 req->num_sge = 1; in nvme_rdma_map_data()
1543 refcount_set(&req->ref, 2); /* send and recv completions */ in nvme_rdma_map_data()
1545 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_rdma_map_data()
1550 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1); in nvme_rdma_map_data()
1551 ret = sg_alloc_table_chained(&req->data_sgl.sg_table, in nvme_rdma_map_data()
1552 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl, in nvme_rdma_map_data()
1555 return -ENOMEM; in nvme_rdma_map_data()
1557 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, in nvme_rdma_map_data()
1558 req->data_sgl.sg_table.sgl); in nvme_rdma_map_data()
1560 count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, in nvme_rdma_map_data()
1561 req->data_sgl.nents, rq_dma_dir(rq)); in nvme_rdma_map_data()
1563 ret = -EIO; in nvme_rdma_map_data()
1568 req->metadata_sgl->sg_table.sgl = in nvme_rdma_map_data()
1569 (struct scatterlist *)(req->metadata_sgl + 1); in nvme_rdma_map_data()
1570 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_map_data()
1571 blk_rq_count_integrity_sg(rq->q, rq->bio), in nvme_rdma_map_data()
1572 req->metadata_sgl->sg_table.sgl, in nvme_rdma_map_data()
1575 ret = -ENOMEM; in nvme_rdma_map_data()
1579 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, in nvme_rdma_map_data()
1580 rq->bio, req->metadata_sgl->sg_table.sgl); in nvme_rdma_map_data()
1582 req->metadata_sgl->sg_table.sgl, in nvme_rdma_map_data()
1583 req->metadata_sgl->nents, in nvme_rdma_map_data()
1586 ret = -EIO; in nvme_rdma_map_data()
1591 if (req->use_sig_mr) { in nvme_rdma_map_data()
1596 if (count <= dev->num_inline_segments) { in nvme_rdma_map_data()
1598 queue->ctrl->use_inline_data && in nvme_rdma_map_data()
1605 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { in nvme_rdma_map_data()
1620 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, in nvme_rdma_map_data()
1621 req->metadata_sgl->nents, rq_dma_dir(rq)); in nvme_rdma_map_data()
1624 sg_free_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_map_data()
1627 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, in nvme_rdma_map_data()
1630 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); in nvme_rdma_map_data()
1634 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_send_done() argument
1637 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_send_done()
1641 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_send_done()
1642 nvme_rdma_wr_error(cq, wc, "SEND"); in nvme_rdma_send_done()
1654 sge->addr = qe->dma; in nvme_rdma_post_send()
1655 sge->length = sizeof(struct nvme_command); in nvme_rdma_post_send()
1656 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_send()
1659 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_send()
1666 first->next = &wr; in nvme_rdma_post_send()
1670 ret = ib_post_send(queue->qp, first, NULL); in nvme_rdma_post_send()
1672 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_send()
1685 list.addr = qe->dma; in nvme_rdma_post_recv()
1687 list.lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_recv()
1689 qe->cqe.done = nvme_rdma_recv_done; in nvme_rdma_post_recv()
1692 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_recv()
1696 ret = ib_post_recv(queue->qp, &wr, NULL); in nvme_rdma_post_recv()
1698 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_recv()
1709 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_rdma_tagset()
1710 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
1713 static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_async_done() argument
1715 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_async_done()
1716 nvme_rdma_wr_error(cq, wc, "ASYNC"); in nvme_rdma_async_done()
1722 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_submit_async_event()
1723 struct ib_device *dev = queue->device->dev; in nvme_rdma_submit_async_event()
1724 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; in nvme_rdma_submit_async_event()
1725 struct nvme_command *cmd = sqe->data; in nvme_rdma_submit_async_event()
1729 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); in nvme_rdma_submit_async_event()
1732 cmd->common.opcode = nvme_admin_async_event; in nvme_rdma_submit_async_event()
1733 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_rdma_submit_async_event()
1734 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_rdma_submit_async_event()
1737 sqe->cqe.done = nvme_rdma_async_done; in nvme_rdma_submit_async_event()
1739 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), in nvme_rdma_submit_async_event()
1747 struct nvme_completion *cqe, struct ib_wc *wc) in nvme_rdma_process_nvme_rsp() argument
1752 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id); in nvme_rdma_process_nvme_rsp()
1754 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1756 cqe->command_id, queue->qp->qp_num); in nvme_rdma_process_nvme_rsp()
1757 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1762 req->status = cqe->status; in nvme_rdma_process_nvme_rsp()
1763 req->result = cqe->result; in nvme_rdma_process_nvme_rsp()
1765 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { in nvme_rdma_process_nvme_rsp()
1766 if (unlikely(!req->mr || in nvme_rdma_process_nvme_rsp()
1767 wc->ex.invalidate_rkey != req->mr->rkey)) { in nvme_rdma_process_nvme_rsp()
1768 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1770 req->mr ? req->mr->rkey : 0); in nvme_rdma_process_nvme_rsp()
1771 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1773 } else if (req->mr) { in nvme_rdma_process_nvme_rsp()
1778 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1780 req->mr->rkey, ret); in nvme_rdma_process_nvme_rsp()
1781 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1790 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_recv_done() argument
1793 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_recv_done()
1794 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_recv_done()
1795 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_recv_done()
1796 struct nvme_completion *cqe = qe->data; in nvme_rdma_recv_done()
1799 if (unlikely(wc->status != IB_WC_SUCCESS)) { in nvme_rdma_recv_done()
1800 nvme_rdma_wr_error(cq, wc, "RECV"); in nvme_rdma_recv_done()
1805 if (unlikely(wc->byte_len < len)) { in nvme_rdma_recv_done()
1806 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_recv_done()
1807 "Unexpected nvme completion length(%d)\n", wc->byte_len); in nvme_rdma_recv_done()
1808 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_recv_done()
1812 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); in nvme_rdma_recv_done()
1820 cqe->command_id))) in nvme_rdma_recv_done()
1821 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_rdma_recv_done()
1822 &cqe->result); in nvme_rdma_recv_done()
1824 nvme_rdma_process_nvme_rsp(queue, cqe, wc); in nvme_rdma_recv_done()
1825 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); in nvme_rdma_recv_done()
1834 for (i = 0; i < queue->queue_size; i++) { in nvme_rdma_conn_established()
1835 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); in nvme_rdma_conn_established()
1846 struct rdma_cm_id *cm_id = queue->cm_id; in nvme_rdma_conn_rejected()
1847 int status = ev->status; in nvme_rdma_conn_rejected()
1856 u16 sts = le16_to_cpu(rej_data->sts); in nvme_rdma_conn_rejected()
1858 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1862 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1866 return -ECONNRESET; in nvme_rdma_conn_rejected()
1871 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; in nvme_rdma_addr_resolved()
1878 if (ctrl->opts->tos >= 0) in nvme_rdma_addr_resolved()
1879 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); in nvme_rdma_addr_resolved()
1880 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS); in nvme_rdma_addr_resolved()
1882 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", in nvme_rdma_addr_resolved()
1883 queue->cm_error); in nvme_rdma_addr_resolved()
1896 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_route_resolved()
1901 param.qp_num = queue->qp->qp_num; in nvme_rdma_route_resolved()
1904 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; in nvme_rdma_route_resolved()
1919 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); in nvme_rdma_route_resolved()
1926 priv.hrqsize = cpu_to_le16(queue->queue_size); in nvme_rdma_route_resolved()
1927 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
1930 ret = rdma_connect_locked(queue->cm_id, &param); in nvme_rdma_route_resolved()
1932 dev_err(ctrl->ctrl.device, in nvme_rdma_route_resolved()
1943 struct nvme_rdma_queue *queue = cm_id->context; in nvme_rdma_cm_handler()
1946 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", in nvme_rdma_cm_handler()
1947 rdma_event_msg(ev->event), ev->event, in nvme_rdma_cm_handler()
1948 ev->status, cm_id); in nvme_rdma_cm_handler()
1950 switch (ev->event) { in nvme_rdma_cm_handler()
1958 queue->cm_error = nvme_rdma_conn_established(queue); in nvme_rdma_cm_handler()
1960 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1969 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1970 "CM error event %d\n", ev->event); in nvme_rdma_cm_handler()
1971 cm_error = -ECONNRESET; in nvme_rdma_cm_handler()
1976 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1977 "disconnect received - connection closed\n"); in nvme_rdma_cm_handler()
1978 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1984 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1985 "Unexpected RDMA CM event (%d)\n", ev->event); in nvme_rdma_cm_handler()
1986 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1991 queue->cm_error = cm_error; in nvme_rdma_cm_handler()
1992 complete(&queue->cm_done); in nvme_rdma_cm_handler()
2001 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_timed_out()
2005 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; in nvme_rdma_complete_timed_out()
2014 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_timeout()
2015 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_timeout()
2017 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", in nvme_rdma_timeout()
2018 rq->tag, nvme_rdma_queue_idx(queue)); in nvme_rdma_timeout()
2020 if (ctrl->ctrl.state != NVME_CTRL_LIVE) { in nvme_rdma_timeout()
2025 * - ctrl disable/shutdown fabrics requests in nvme_rdma_timeout()
2026 * - connect requests in nvme_rdma_timeout()
2027 * - initialization admin requests in nvme_rdma_timeout()
2028 * - I/O requests that entered after unquiescing and in nvme_rdma_timeout()
2049 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_rdma_queue_rq()
2050 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_queue_rq()
2051 struct request *rq = bd->rq; in nvme_rdma_queue_rq()
2053 struct nvme_rdma_qe *sqe = &req->sqe; in nvme_rdma_queue_rq()
2054 struct nvme_command *c = sqe->data; in nvme_rdma_queue_rq()
2056 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_queue_rq()
2060 WARN_ON_ONCE(rq->tag < 0); in nvme_rdma_queue_rq()
2062 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_rdma_queue_rq()
2063 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_rdma_queue_rq()
2065 dev = queue->device->dev; in nvme_rdma_queue_rq()
2067 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data, in nvme_rdma_queue_rq()
2070 err = ib_dma_mapping_error(dev, req->sqe.dma); in nvme_rdma_queue_rq()
2074 ib_dma_sync_single_for_cpu(dev, sqe->dma, in nvme_rdma_queue_rq()
2084 queue->pi_support && in nvme_rdma_queue_rq()
2085 (c->common.opcode == nvme_cmd_write || in nvme_rdma_queue_rq()
2086 c->common.opcode == nvme_cmd_read) && in nvme_rdma_queue_rq()
2088 req->use_sig_mr = true; in nvme_rdma_queue_rq()
2090 req->use_sig_mr = false; in nvme_rdma_queue_rq()
2094 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_queue_rq()
2099 sqe->cqe.done = nvme_rdma_send_done; in nvme_rdma_queue_rq()
2101 ib_dma_sync_single_for_device(dev, sqe->dma, in nvme_rdma_queue_rq()
2104 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, in nvme_rdma_queue_rq()
2105 req->mr ? &req->reg_wr.wr : NULL); in nvme_rdma_queue_rq()
2114 if (err == -ENOMEM || err == -EAGAIN) in nvme_rdma_queue_rq()
2120 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_queue_rq()
2127 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_poll()
2129 return ib_process_cq_direct(queue->ib_cq, -1); in nvme_rdma_poll()
2138 ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status); in nvme_rdma_check_pi_status()
2141 nvme_req(rq)->status = NVME_SC_INVALID_PI; in nvme_rdma_check_pi_status()
2148 nvme_req(rq)->status = NVME_SC_GUARD_CHECK; in nvme_rdma_check_pi_status()
2151 nvme_req(rq)->status = NVME_SC_REFTAG_CHECK; in nvme_rdma_check_pi_status()
2154 nvme_req(rq)->status = NVME_SC_APPTAG_CHECK; in nvme_rdma_check_pi_status()
2166 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_rq()
2167 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_complete_rq()
2169 if (req->use_sig_mr) in nvme_rdma_complete_rq()
2173 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_complete_rq()
2180 struct nvme_rdma_ctrl *ctrl = set->driver_data; in nvme_rdma_map_queues()
2181 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_rdma_map_queues()
2183 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { in nvme_rdma_map_queues()
2185 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_rdma_map_queues()
2186 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2187 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in nvme_rdma_map_queues()
2188 set->map[HCTX_TYPE_READ].nr_queues = in nvme_rdma_map_queues()
2189 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_map_queues()
2190 set->map[HCTX_TYPE_READ].queue_offset = in nvme_rdma_map_queues()
2191 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2194 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_rdma_map_queues()
2195 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2196 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in nvme_rdma_map_queues()
2197 set->map[HCTX_TYPE_READ].nr_queues = in nvme_rdma_map_queues()
2198 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2199 set->map[HCTX_TYPE_READ].queue_offset = 0; in nvme_rdma_map_queues()
2201 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], in nvme_rdma_map_queues()
2202 ctrl->device->dev, 0); in nvme_rdma_map_queues()
2203 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ], in nvme_rdma_map_queues()
2204 ctrl->device->dev, 0); in nvme_rdma_map_queues()
2206 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { in nvme_rdma_map_queues()
2208 set->map[HCTX_TYPE_POLL].nr_queues = in nvme_rdma_map_queues()
2209 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_rdma_map_queues()
2210 set->map[HCTX_TYPE_POLL].queue_offset = in nvme_rdma_map_queues()
2211 ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_map_queues()
2212 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_map_queues()
2213 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); in nvme_rdma_map_queues()
2216 dev_info(ctrl->ctrl.device, in nvme_rdma_map_queues()
2218 ctrl->io_queues[HCTX_TYPE_DEFAULT], in nvme_rdma_map_queues()
2219 ctrl->io_queues[HCTX_TYPE_READ], in nvme_rdma_map_queues()
2220 ctrl->io_queues[HCTX_TYPE_POLL]); in nvme_rdma_map_queues()
2248 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_shutdown_ctrl()
2250 nvme_shutdown_ctrl(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2252 nvme_disable_ctrl(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2266 nvme_stop_ctrl(&ctrl->ctrl); in nvme_rdma_reset_ctrl_work()
2269 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_reset_ctrl_work()
2281 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reset_ctrl_work()
2305 * existing controller with all the other parameters the same and no
2319 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_rdma_existing_controller()
2337 return ERR_PTR(-ENOMEM); in nvme_rdma_create_ctrl()
2338 ctrl->ctrl.opts = opts; in nvme_rdma_create_ctrl()
2339 INIT_LIST_HEAD(&ctrl->list); in nvme_rdma_create_ctrl()
2341 if (!(opts->mask & NVMF_OPT_TRSVCID)) { in nvme_rdma_create_ctrl()
2342 opts->trsvcid = in nvme_rdma_create_ctrl()
2344 if (!opts->trsvcid) { in nvme_rdma_create_ctrl()
2345 ret = -ENOMEM; in nvme_rdma_create_ctrl()
2348 opts->mask |= NVMF_OPT_TRSVCID; in nvme_rdma_create_ctrl()
2352 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_rdma_create_ctrl()
2355 opts->traddr, opts->trsvcid); in nvme_rdma_create_ctrl()
2359 if (opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_rdma_create_ctrl()
2361 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_rdma_create_ctrl()
2364 opts->host_traddr); in nvme_rdma_create_ctrl()
2369 if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) { in nvme_rdma_create_ctrl()
2370 ret = -EALREADY; in nvme_rdma_create_ctrl()
2374 INIT_DELAYED_WORK(&ctrl->reconnect_work, in nvme_rdma_create_ctrl()
2376 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); in nvme_rdma_create_ctrl()
2377 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); in nvme_rdma_create_ctrl()
2379 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_rdma_create_ctrl()
2380 opts->nr_poll_queues + 1; in nvme_rdma_create_ctrl()
2381 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_rdma_create_ctrl()
2382 ctrl->ctrl.kato = opts->kato; in nvme_rdma_create_ctrl()
2384 ret = -ENOMEM; in nvme_rdma_create_ctrl()
2385 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_rdma_create_ctrl()
2387 if (!ctrl->queues) in nvme_rdma_create_ctrl()
2390 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, in nvme_rdma_create_ctrl()
2391 0 /* no quirks, we're perfect! */); in nvme_rdma_create_ctrl()
2395 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); in nvme_rdma_create_ctrl()
2402 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", in nvme_rdma_create_ctrl()
2403 ctrl->ctrl.opts->subsysnqn, &ctrl->addr); in nvme_rdma_create_ctrl()
2406 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); in nvme_rdma_create_ctrl()
2409 return &ctrl->ctrl; in nvme_rdma_create_ctrl()
2412 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2413 nvme_put_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2415 ret = -EIO; in nvme_rdma_create_ctrl()
2418 kfree(ctrl->queues); in nvme_rdma_create_ctrl()
2443 if (ndev->dev == ib_device) { in nvme_rdma_remove_one()
2456 if (ctrl->device->dev != ib_device) in nvme_rdma_remove_one()
2458 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_remove_one()
2498 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_cleanup_module()