xref: /OK3568_Linux_fs/kernel/drivers/nvme/target/nvmet.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which indicates that the
 * offending offset lies in the data section of the Connect command
 * (rather than in its SQE).
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
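
/*
 * Illustrative usage (a sketch, not part of this header): a connect handler
 * rejecting a bad cntlid would typically report the offending offset like
 * this, leaving the exact status composition to the fabrics code:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */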

struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
};

struct nvmet_subsys_model {
	struct rcu_head		rcuhead;
	char			number[];
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	u64			serial;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	struct nvmet_subsys_model	__rcu *model;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
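
/*
 * For reference (assuming a typical 4 KiB PAGE_SIZE): the inline limit works
 * out to 8 * 4096 = 32 KiB. Larger or more fragmented transfers cannot use
 * the inline_bvec array in struct nvmet_req; see nvmet_use_inline_bvec()
 * near the end of this header.
 */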

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio      inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb            iocb;
			struct bio_vec          *bvec;
			struct work_struct      work;
		} f;
		struct {
			struct request		*rq;
			struct work_struct      work;
			bool			use_workqueue;
		} p;
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes are actually DMA reads for us on the target side:
 * the payload of a host Write flows into the target, and vice versa.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}
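
/*
 * Note: bit 15 of CDW10 is the Retain Asynchronous Event (RAE) flag of the
 * Get Log Page command. When the host reads a log page with RAE cleared,
 * nvmet_clear_aen_bit() above re-arms the corresponding AEN so the event can
 * be reported again.
 */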

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS		120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
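
/*
 * Worked example: rw.length is a 0's based block count, so a Read of eight
 * blocks on a 512-byte LBA namespace arrives as length == 7, and
 * nvmet_rw_data_len() returns (7 + 1) << 9 == 4096 bytes.
 */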

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
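
/*
 * Example: dsm.nr is also 0's based, so a Dataset Management command that
 * covers a single range has nr == 0 and transfers exactly one
 * struct nvme_dsm_range (16 bytes).
 */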

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
{
	return NULL;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline struct nvme_ctrl *
nvmet_req_passthru_ctrl(struct nvmet_req *req)
{
	return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
}

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
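
/*
 * Example: to0based(1) == 0 and to0based(65536) == 0xffff; out-of-range
 * inputs are clamped, so to0based(0) == 0 and to0based(100000) == 0xffff.
 */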

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
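
/*
 * Example: for a namespace formatted with 4096-byte blocks (blksize_shift ==
 * 12) and the kernel's 512-byte sectors (SECTOR_SHIFT == 9), LBA 1 maps to
 * sector 8 and sector 8 maps back to LBA 1.
 */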

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

#endif /* _NVMET_H */