// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	int max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of
	 * segments which depends on the host's memory fragmentation.
	 * To solve this, cap mdts so that a maximum-sized transfer
	 * never needs more pages than the controller has segments.
	 */
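	/*
	 * Worked example (illustrative numbers, not from the spec): with
	 * 4K pages PAGE_SHIFT - 9 == 3, so a passthru controller
	 * reporting max_segments = 128 allows 128 << 3 == 1024 sectors
	 * (512K), assuming the worst case of one page per segment.
	 */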
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	/*
	 * nvmet_passthru_map_sg is limited to using a single bio, so
	 * limit the mdts based on BIO_MAX_PAGES as well.
	 */
	max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
				      max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
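	/*
	 * e.g. (illustrative): max_hw_sectors = 1024 with MPSMIN = 0
	 * gives page_shift = 12 and mdts = 10 + 9 - 12 = 7, i.e. a
	 * maximum transfer of 2^7 * 4K = 512K.
	 */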

	id->acl = 3;
	/*
	 * We export the fabrics controller's AERL limit (a 0's based
	 * value, hence the - 1); update this when passthru-based AERL
	 * support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas since most PCIe ctrls don't support it */
	id->kas = cpu_to_le16(NVMET_KAS);
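	/*
	 * Note: KAS is expressed in 100ms units per the spec, so a
	 * NVMET_KAS of 10 (its value in nvmet.h at the time of writing)
	 * advertises a one second keep-alive granularity.
	 */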

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;

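	/*
	 * In sqes/cqes the low nibble is the required entry size and the
	 * high nibble the maximum, both as powers of two: 0x66 pins SQEs
	 * to 2^6 = 64 bytes and 0x44 pins CQEs to 2^4 = 16 bytes, the
	 * only sizes the fabrics transports use.
	 */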
	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fused commands */
	id->fuses = 0;

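	/*
	 * SGLS bit assignments per the NVMe base spec: bits 1:0
	 * advertise SGL support, bit 2 keyed SGL Data Block descriptors,
	 * and bit 20 offsets in SGL descriptors as used for in-capsule
	 * data.
	 */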
	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When a passthru controller is set up using the nvme-loop
	 * transport it will export the passthru ctrl's subsysnqn (i.e.
	 * the PCIe NVMe ctrl's) and will fail in nvme/host/core.c in the
	 * nvme_init_subsystem()->nvme_active_ctrls() code path with a
	 * duplicate ctrl subsysnqn. To prevent that we mask the passthru
	 * ctrl's subsysnqn with the target ctrl's subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
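	/*
	 * Both fields are in 16-byte units; e.g. a 64-byte SQE plus 8K
	 * of inline data would give ioccsz = (64 + 8192) / 16 = 516,
	 * while iorcsz is 16 / 16 = 1.
	 */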

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}

static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

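	/*
	 * FLBAS bit 4 selects extended LBAs (metadata transferred inline
	 * with the data); clear it since every metadata-capable format
	 * was zeroed out above.
	 */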
	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
	id->mc = 0;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}

static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	u16 status;

	nvme_execute_passthru_rq(rq);

	status = nvme_req(rq)->status;
	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		}
	}

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);
}

static void nvmet_passthru_req_done(struct request *rq,
				    blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
}

static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	struct scatterlist *sg;
	int op_flags = 0;
	struct bio *bio;
	int i, ret;

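	/*
	 * A single bio can hold at most BIO_MAX_PAGES segments;
	 * nvmet_passthru_override_id_ctrl() caps mdts to match, so this
	 * check should not normally trip.
	 */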
	if (req->sg_cnt > BIO_MAX_PAGES)
		return -EINVAL;

	if (req->cmd->common.opcode == nvme_cmd_flush)
		op_flags = REQ_FUA;
	else if (nvme_is_write(req->cmd))
		op_flags = REQ_SYNC | REQ_IDLE;

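	/* bio_alloc() will not fail when it is allowed to sleep (GFP_KERNEL) */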
	bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
	bio->bi_end_io = bio_put;
	bio->bi_opf = req_op(rq) | op_flags;

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			bio_put(bio);
			return -EINVAL;
		}
	}

	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		bio_put(bio);
		return ret;
	}

	return 0;
}

static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	u32 effects;
	u16 status;
	int ret;

	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
	}

	rq = nvme_alloc_request(q, req->cmd, 0);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If the command we are about to execute has effects, or a
	 * workqueue was requested, run nvme_execute_passthru_rq()
	 * synchronously in a work item: nvme_passthru_end() may sleep,
	 * so it cannot be called from the request done callback, which
	 * typically runs in interrupt context.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue || effects) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		schedule_work(&req->p.work);
	} else {
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
				      nvmet_passthru_req_done);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * We need to emulate Set Features (Host Behavior Support) to ensure
 * that the behaviour the target's host requests matches what the
 * device's host has already requested, and to fail the command
 * otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

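	/*
	 * Two copies: host[0] receives the passthru device's current
	 * setting, host[1] the value the target's host is requesting;
	 * the two are memcmp()'d below.
	 */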
	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-SGL flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}


/*
 * Only features that are emulated or specifically allowed in the
 * allowlist below are passed down to the controller. This function
 * implements the allowlist for both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-SGL flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Pass through all vendor-specific commands (admin opcodes 0xC0
	 * and above).
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep-alive cmd, so we
		 * route keep-alive to non-passthru mode. Change this once
		 * PCIe ctrls with keep-alive support become available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_CTRL:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_NS:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	struct file *file;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
		goto out_unlock;
	}

	file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_unlock;
	}

	ctrl = nvme_ctrl_from_file(file);
	if (!ctrl) {
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);

		goto out_put_file;
	}

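	/*
	 * xa_cmpxchg() installs the entry only if no subsystem has
	 * already claimed this cntlid, enforcing the one passthru
	 * subsystem per controller rule declared at the top of this
	 * file.
	 */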
	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_file;
	}

	if (old)
		goto out_put_file;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

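	/*
	 * NVME_VS() packs the version as (major << 16) | (minor << 8) |
	 * tertiary, so NVME_VS(1, 2, 1) == 0x10201; 1.2.1 is the minimum
	 * version the NVMe-oF specification is written against.
	 */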
	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}
	nvme_get_ctrl(ctrl);
	__module_get(subsys->passthru_ctrl->ops->module);
	ret = 0;

out_put_file:
	filp_close(file, NULL);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		module_put(subsys->passthru_ctrl->ops->module);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
	kfree(subsys->passthru_ctrl_path);
}