xref: /OK3568_Linux_fs/kernel/drivers/nvme/target/admin-cmd.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

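/*
 * The Get Log Page dword count (NUMD) is split across two command fields:
 * NUMDU carries the upper 16 bits and NUMDL the lower 16 bits.  NUMD is a
 * 0's based dword count, so e.g. NUMDU = 0 and NUMDL = 0x3ff describe a
 * (0x3ff + 1) * 4 = 4096 byte transfer.
 */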
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

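	/*
	 * Return the entries newest first: start at the slot that holds the
	 * most recent error and walk backwards through the circular buffer,
	 * wrapping from slot 0 back to the last slot.
	 */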
	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id: %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

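	/*
	 * Data Units Read/Written are reported in units of 1000 512-byte
	 * blocks, and the block layer accounts I/O in 512-byte sectors, so
	 * divide the sector counts by 1000, rounding up so partial units
	 * still show activity.
	 */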
	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

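	/*
	 * Bit 0 of each Commands Supported and Effects entry is CSUPP: the
	 * command is supported but has no further reported effects.  Only
	 * the admin and I/O opcodes the target actually implements are set.
	 */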
	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
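	/*
	 * nr_changed_ns is set to U32_MAX elsewhere in the target core when
	 * the changed namespace list overflows; in that case only a single
	 * entry is returned and the rest of the log is zero filled.
	 */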
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

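	/*
	 * If the host set the RGO (Return Groups Only) bit in LSP it only
	 * wants the group descriptors, so skip listing the NSIDs that
	 * belong to each group.
	 */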
	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

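	/*
	 * Allocate one descriptor sized for the worst case (every namespace
	 * in a single group) and reuse it for each enabled group, copying
	 * it out to the SGL as we go.
	 */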
	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
				      struct nvmet_subsys *subsys)
{
	const char *model = NVMET_DEFAULT_CTRL_MODEL;
	struct nvmet_subsys_model *subsys_model;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
	rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

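	/*
	 * The subsystem serial is stored as binary; bin2hex() expands each
	 * byte to two ASCII hex characters, which is why only
	 * sizeof(id->sn) / 2 bytes of it fit in the identify field.
	 */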
	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	nvmet_id_set_model_number(id, ctrl->subsys);
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest thing is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

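	/*
	 * SQES/CQES hold the maximum and minimum queue entry sizes as
	 * powers of two: 2^6 = 64 bytes for submission queue entries and
	 * 2^4 = 16 bytes for completion queue entries, i.e. only the
	 * standard sizes.
	 */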
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
	if (!req->ns) {
		status = 0;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
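	/*
	 * Leave nuse at zero when the namespace's ANA group is inaccessible
	 * or in persistent loss through this port, so the capacity is not
	 * reported as in use via this path.
	 */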
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

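	/*
	 * The Active Namespace ID list reports NSIDs strictly greater than
	 * the NSID in the command, in ascending order, up to 1024 entries;
	 * the remainder of the zeroed buffer terminates the list.
	 */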
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed and return immediately, reporting that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
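		/*
		 * Mark the namespace read-only first so no new writes are
		 * accepted, then flush so previously written data is
		 * durable; if the flush fails, back out the write protect
		 * state.
		 */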
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

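	/*
	 * The host specifies KATO in milliseconds in cdw11; the target
	 * keeps it in seconds, rounded up so a non-zero request never
	 * truncates to zero.
	 */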
	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
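		/*
		 * The completion result encodes the number of queues
		 * actually allocated: submission queues in bits 15:0 and
		 * completion queues in bits 31:16, both as 0's based counts.
		 */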
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}