// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
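/*
 * This driver implements both ends of an NVMe over Fabrics association
 * inside one kernel: commands queued by the host stack are handed
 * directly to the nvmet core.  A rough usage sketch (configfs paths and
 * tooling vary by kernel and nvme-cli version): configure a subsystem,
 * namespace and a port with trtype "loop" under /sys/kernel/config/nvmet,
 * then connect from the host side with:
 *
 *	nvme connect -t loop -n <subsysnqn>
 */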
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

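/*
 * Per-command state for the loopback transport.  Each command carries
 * both the host-side nvme_request and the target-side nvmet_req, so the
 * target core can execute it in place, with data mapped through the
 * shared scatterlist rather than copied.
 */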
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

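/*
 * Queue 0 is the admin queue and uses the admin tagset; I/O queues are
 * numbered from 1 but index the I/O tagset from 0, hence the "- 1".
 */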
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

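/*
 * Target-side completion path: called by the nvmet core when a command
 * finishes.  Look up the originating host request by command_id and
 * complete it, short-circuiting what a real transport would send back
 * over the wire.
 */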
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"got bad command_id %#x on queue %d\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

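/*
 * Host-side ->queue_rq(): instead of posting the command to hardware,
 * initialize the target-side nvmet_req that shares this iod, map the
 * request's data into a scatterlist for the target to consume directly,
 * and execute the command from workqueue context.  A failed
 * nvmet_req_init() still returns BLK_STS_OK because the target core has
 * already completed the request with an error status.
 */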
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

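/*
 * Submit the host-side Asynchronous Event Request.  No struct request
 * backs it; the preallocated async_event_iod is reused, and the reserved
 * command_id NVME_AQ_BLK_MQ_DEPTH identifies it on completion.
 */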
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

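/*
 * Tear down the admin queue.  The NVME_LOOP_Q_LIVE test-and-clear makes
 * this safe to call more than once on the same controller.
 */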
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
}

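/*
 * Create the target-side submission queues for the I/O queues.  The
 * requested count is capped at the number of online CPUs and then
 * negotiated with the controller via nvme_set_queue_count().
 */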
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

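/*
 * Send a Fabrics Connect command on each I/O queue and mark it live so
 * that nvme_loop_queue_rq() will accept regular I/O on it.
 */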
static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

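/*
 * Bring up the admin queue: allocate the admin tagset and the fabrics
 * and admin request queues, connect to the target, enable the
 * controller and read its identify data.
 */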
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		error = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_tagset;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

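/*
 * Quiesce and drain all queues, cancel outstanding requests and tear
 * the queues down.  Used both for controller deletion and as the first
 * half of a reset.
 */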
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		nvme_loop_destroy_io_queues(ctrl);
	}

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

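/*
 * Controller reset: shut the controller down, then reconnect the admin
 * and I/O queues.  On failure the controller is uninitialized and
 * removed instead of being retried.
 */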
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

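/*
 * Allocate the I/O tagset and connect queue, then connect all I/O
 * queues.  Only called at controller creation; a reset reuses
 * nvme_loop_init_io_queues() and nvme_loop_connect_io_queues() with the
 * existing tagset instead.
 */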
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

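/*
 * Match a host controller to a target port.  If the connect options
 * include a transport address (traddr), it must match the port's
 * discovery address; otherwise the first registered port is used.
 */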
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

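/*
 * Connect entry point for the loop transport: allocate and initialize a
 * host controller, bind it to a target port, bring up the admin and I/O
 * queues and move the controller to LIVE.
 */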
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port.  This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */