xref: /OK3568_Linux_fs/kernel/drivers/nvme/host/fc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>

/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay, in seconds, between
						 * reconnect attempts after a
						 * connection failure.
						 */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport		*rport;
	struct nvmefc_ls_rsp		*lsrsp;
	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	bool				handled;
	dma_addr_t			rspdma;
	struct list_head		lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_port_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t                        act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t                        act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}



/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);


static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}


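/*
 * Look for a localport with the same WWNN/WWPN that was previously
 * deregistered but whose references have not yet expired. If one is
 * found (and its state allows it), take a reference and resume it.
 */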
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed.  If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME are fixed format:
 *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being specified:
 *    "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 *  19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64

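/*
 * Emit an FC_EVENT=nvmediscovery uevent on the fc_udev_device so that
 * userspace (e.g. udev rules or nvme-cli autoconnect scripts) can kick
 * off discovery and connect to the remoteport.
 */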
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
			localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

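/*
 * Called when connectivity to a remoteport has been re-established.
 * If the controller was waiting to reconnect, schedule the connect
 * work immediately; an in-progress reset will reconnect on its own.
 */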
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

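/*
 * Look for a remoteport on this lport with the same WWNN/WWPN that is
 * sitting in dev_loss (deregistered but not yet torn down). If found,
 * mark it online again and kick reconnects on all of its controllers.
 */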
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

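/*
 * Abort all LS requests still outstanding on the remoteport. The scan
 * restarts from the top each time the rport lock is dropped to issue
 * an abort, so every op is terminated exactly once.
 */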
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

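/*
 * Remoteport connectivity has been lost for this controller. For a live
 * controller, schedule a reset so the association is torn down and the
 * reconnect timer takes over; if the reset can't be scheduled, delete
 * the controller. Controllers already resetting or connecting need no
 * further action.
 */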
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer.  Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects.  No need to do anything
		 * further.  Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association.  No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow the rport to be torn
	 * down once all controllers go away (which should only occur
	 * after dev_loss_tmo expires).
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                              LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

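/*
 * Tear down a completed LS request op: unlink it from the rport's
 * ls_req_list, unmap its request/response DMA buffer, and drop the
 * rport reference taken when the request was sent.
 */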
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

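/*
 * Common LS send path: take an rport reference, DMA-map the combined
 * request/response buffer, queue the op on the rport's ls_req_list,
 * then hand it to the LLDD's ls_req() entry point. On error the op is
 * unlinked and unmapped before returning.
 */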
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

1132*4882a593Smuzhiyun static int
1133*4882a593Smuzhiyun nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
1134*4882a593Smuzhiyun {
1135*4882a593Smuzhiyun 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1136*4882a593Smuzhiyun 	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
1137*4882a593Smuzhiyun 	int ret;
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	if (!ret) {
1142*4882a593Smuzhiyun 		/*
1143*4882a593Smuzhiyun 		 * No timeout and not interruptible, as we need the struct
1144*4882a593Smuzhiyun 		 * to exist until the lldd calls us back. Thus mandate the
1145*4882a593Smuzhiyun 		 * wait until the driver calls back; the lldd is responsible
1146*4882a593Smuzhiyun 		 * for the timeout action.
1147*4882a593Smuzhiyun 		 */
1148*4882a593Smuzhiyun 		wait_for_completion(&lsop->ls_done);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 		__nvme_fc_finish_ls_req(lsop);
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 		ret = lsop->ls_error;
1153*4882a593Smuzhiyun 	}
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	if (ret)
1156*4882a593Smuzhiyun 		return ret;
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 	/* ACC or RJT payload ? */
1159*4882a593Smuzhiyun 	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
1160*4882a593Smuzhiyun 		return -ENXIO;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	return 0;
1163*4882a593Smuzhiyun }
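
/*
 * Illustrative sketch (hypothetical LLDD-side code): when the LS exchange
 * finishes, the LLDD invokes the ->done handler installed by
 * __nvme_fc_send_ls_req(). For the synchronous path above that handler is
 * nvme_fc_send_ls_req_done(), which records the status and completes
 * lsop->ls_done so the waiter can proceed.
 */
static void example_lldd_ls_complete(struct nvmefc_ls_req *lsreq, int hw_err)
{
	/* translate a hypothetical hardware error indication into an errno */
	lsreq->done(lsreq, hw_err ? -EIO : 0);
}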
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun static int
1166*4882a593Smuzhiyun nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
1167*4882a593Smuzhiyun 		struct nvmefc_ls_req_op *lsop,
1168*4882a593Smuzhiyun 		void (*done)(struct nvmefc_ls_req *req, int status))
1169*4882a593Smuzhiyun {
1170*4882a593Smuzhiyun 	/* don't wait for completion */
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	return __nvme_fc_send_ls_req(rport, lsop, done);
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun static int
1176*4882a593Smuzhiyun nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1177*4882a593Smuzhiyun 	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun 	struct nvmefc_ls_req_op *lsop;
1180*4882a593Smuzhiyun 	struct nvmefc_ls_req *lsreq;
1181*4882a593Smuzhiyun 	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
1182*4882a593Smuzhiyun 	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
1183*4882a593Smuzhiyun 	unsigned long flags;
1184*4882a593Smuzhiyun 	int ret, fcret = 0;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	lsop = kzalloc((sizeof(*lsop) +
1187*4882a593Smuzhiyun 			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
1188*4882a593Smuzhiyun 			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1189*4882a593Smuzhiyun 	if (!lsop) {
1190*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
1191*4882a593Smuzhiyun 			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
1192*4882a593Smuzhiyun 			ctrl->cnum);
1193*4882a593Smuzhiyun 		ret = -ENOMEM;
1194*4882a593Smuzhiyun 		goto out_no_memory;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
1198*4882a593Smuzhiyun 	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
1199*4882a593Smuzhiyun 	lsreq = &lsop->ls_req;
1200*4882a593Smuzhiyun 	if (ctrl->lport->ops->lsrqst_priv_sz)
1201*4882a593Smuzhiyun 		lsreq->private = &assoc_acc[1];
1202*4882a593Smuzhiyun 	else
1203*4882a593Smuzhiyun 		lsreq->private = NULL;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
1206*4882a593Smuzhiyun 	assoc_rqst->desc_list_len =
1207*4882a593Smuzhiyun 			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	assoc_rqst->assoc_cmd.desc_tag =
1210*4882a593Smuzhiyun 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
1211*4882a593Smuzhiyun 	assoc_rqst->assoc_cmd.desc_len =
1212*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1213*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1216*4882a593Smuzhiyun 	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
1217*4882a593Smuzhiyun 	/* Linux supports only Dynamic controllers */
1218*4882a593Smuzhiyun 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
1219*4882a593Smuzhiyun 	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1220*4882a593Smuzhiyun 	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1221*4882a593Smuzhiyun 		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
1222*4882a593Smuzhiyun 	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1223*4882a593Smuzhiyun 		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	lsop->queue = queue;
1226*4882a593Smuzhiyun 	lsreq->rqstaddr = assoc_rqst;
1227*4882a593Smuzhiyun 	lsreq->rqstlen = sizeof(*assoc_rqst);
1228*4882a593Smuzhiyun 	lsreq->rspaddr = assoc_acc;
1229*4882a593Smuzhiyun 	lsreq->rsplen = sizeof(*assoc_acc);
1230*4882a593Smuzhiyun 	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1233*4882a593Smuzhiyun 	if (ret)
1234*4882a593Smuzhiyun 		goto out_free_buffer;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	/* process connect LS completion */
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 	/* validate the ACC response */
1239*4882a593Smuzhiyun 	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1240*4882a593Smuzhiyun 		fcret = VERR_LSACC;
1241*4882a593Smuzhiyun 	else if (assoc_acc->hdr.desc_list_len !=
1242*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1243*4882a593Smuzhiyun 				sizeof(struct fcnvme_ls_cr_assoc_acc)))
1244*4882a593Smuzhiyun 		fcret = VERR_CR_ASSOC_ACC_LEN;
1245*4882a593Smuzhiyun 	else if (assoc_acc->hdr.rqst.desc_tag !=
1246*4882a593Smuzhiyun 			cpu_to_be32(FCNVME_LSDESC_RQST))
1247*4882a593Smuzhiyun 		fcret = VERR_LSDESC_RQST;
1248*4882a593Smuzhiyun 	else if (assoc_acc->hdr.rqst.desc_len !=
1249*4882a593Smuzhiyun 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1250*4882a593Smuzhiyun 		fcret = VERR_LSDESC_RQST_LEN;
1251*4882a593Smuzhiyun 	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
1252*4882a593Smuzhiyun 		fcret = VERR_CR_ASSOC;
1253*4882a593Smuzhiyun 	else if (assoc_acc->associd.desc_tag !=
1254*4882a593Smuzhiyun 			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1255*4882a593Smuzhiyun 		fcret = VERR_ASSOC_ID;
1256*4882a593Smuzhiyun 	else if (assoc_acc->associd.desc_len !=
1257*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1258*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1259*4882a593Smuzhiyun 		fcret = VERR_ASSOC_ID_LEN;
1260*4882a593Smuzhiyun 	else if (assoc_acc->connectid.desc_tag !=
1261*4882a593Smuzhiyun 			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1262*4882a593Smuzhiyun 		fcret = VERR_CONN_ID;
1263*4882a593Smuzhiyun 	else if (assoc_acc->connectid.desc_len !=
1264*4882a593Smuzhiyun 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1265*4882a593Smuzhiyun 		fcret = VERR_CONN_ID_LEN;
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	if (fcret) {
1268*4882a593Smuzhiyun 		ret = -EBADF;
1269*4882a593Smuzhiyun 		dev_err(ctrl->dev,
1270*4882a593Smuzhiyun 			"q %d Create Association LS failed: %s\n",
1271*4882a593Smuzhiyun 			queue->qnum, validation_errors[fcret]);
1272*4882a593Smuzhiyun 	} else {
1273*4882a593Smuzhiyun 		spin_lock_irqsave(&ctrl->lock, flags);
1274*4882a593Smuzhiyun 		ctrl->association_id =
1275*4882a593Smuzhiyun 			be64_to_cpu(assoc_acc->associd.association_id);
1276*4882a593Smuzhiyun 		queue->connection_id =
1277*4882a593Smuzhiyun 			be64_to_cpu(assoc_acc->connectid.connection_id);
1278*4882a593Smuzhiyun 		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1279*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctrl->lock, flags);
1280*4882a593Smuzhiyun 	}
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun out_free_buffer:
1283*4882a593Smuzhiyun 	kfree(lsop);
1284*4882a593Smuzhiyun out_no_memory:
1285*4882a593Smuzhiyun 	if (ret)
1286*4882a593Smuzhiyun 		dev_err(ctrl->dev,
1287*4882a593Smuzhiyun 			"queue %d connect admin queue failed (%d).\n",
1288*4882a593Smuzhiyun 			queue->qnum, ret);
1289*4882a593Smuzhiyun 	return ret;
1290*4882a593Smuzhiyun }
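
/*
 * Illustrative sketch (not part of the original driver): the Create
 * Association LS above carves a single kzalloc'd region into the op
 * structure, the request payload, the response payload and, optionally,
 * the LLDD's private area:
 *
 *   [ nvmefc_ls_req_op ][ cr_assoc_rqst ][ cr_assoc_acc ][ lsrqst_priv ]
 *    &lsop[0]            &lsop[1]         &assoc_rqst[1]  &assoc_acc[1]
 *
 * A minimal, hypothetical helper showing the same carving pattern:
 */
static struct nvmefc_ls_req_op *
example_alloc_ls_op(size_t rqst_sz, size_t rsp_sz, size_t priv_sz,
		    void **rqst, void **rsp, void **priv)
{
	struct nvmefc_ls_req_op *lsop;

	lsop = kzalloc(sizeof(*lsop) + rqst_sz + rsp_sz + priv_sz, GFP_KERNEL);
	if (!lsop)
		return NULL;

	*rqst = &lsop[1];
	*rsp  = (char *)*rqst + rqst_sz;
	*priv = priv_sz ? (char *)*rsp + rsp_sz : NULL;
	return lsop;
}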
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun static int
1293*4882a593Smuzhiyun nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1294*4882a593Smuzhiyun 			u16 qsize, u16 ersp_ratio)
1295*4882a593Smuzhiyun {
1296*4882a593Smuzhiyun 	struct nvmefc_ls_req_op *lsop;
1297*4882a593Smuzhiyun 	struct nvmefc_ls_req *lsreq;
1298*4882a593Smuzhiyun 	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1299*4882a593Smuzhiyun 	struct fcnvme_ls_cr_conn_acc *conn_acc;
1300*4882a593Smuzhiyun 	int ret, fcret = 0;
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	lsop = kzalloc((sizeof(*lsop) +
1303*4882a593Smuzhiyun 			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
1304*4882a593Smuzhiyun 			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1305*4882a593Smuzhiyun 	if (!lsop) {
1306*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
1307*4882a593Smuzhiyun 			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
1308*4882a593Smuzhiyun 			ctrl->cnum);
1309*4882a593Smuzhiyun 		ret = -ENOMEM;
1310*4882a593Smuzhiyun 		goto out_no_memory;
1311*4882a593Smuzhiyun 	}
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
1314*4882a593Smuzhiyun 	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1315*4882a593Smuzhiyun 	lsreq = &lsop->ls_req;
1316*4882a593Smuzhiyun 	if (ctrl->lport->ops->lsrqst_priv_sz)
1317*4882a593Smuzhiyun 		lsreq->private = (void *)&conn_acc[1];
1318*4882a593Smuzhiyun 	else
1319*4882a593Smuzhiyun 		lsreq->private = NULL;
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1322*4882a593Smuzhiyun 	conn_rqst->desc_list_len = cpu_to_be32(
1323*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_assoc_id) +
1324*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1327*4882a593Smuzhiyun 	conn_rqst->associd.desc_len =
1328*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1329*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_assoc_id));
1330*4882a593Smuzhiyun 	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1331*4882a593Smuzhiyun 	conn_rqst->connect_cmd.desc_tag =
1332*4882a593Smuzhiyun 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1333*4882a593Smuzhiyun 	conn_rqst->connect_cmd.desc_len =
1334*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1335*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1336*4882a593Smuzhiyun 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1337*4882a593Smuzhiyun 	conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
1338*4882a593Smuzhiyun 	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	lsop->queue = queue;
1341*4882a593Smuzhiyun 	lsreq->rqstaddr = conn_rqst;
1342*4882a593Smuzhiyun 	lsreq->rqstlen = sizeof(*conn_rqst);
1343*4882a593Smuzhiyun 	lsreq->rspaddr = conn_acc;
1344*4882a593Smuzhiyun 	lsreq->rsplen = sizeof(*conn_acc);
1345*4882a593Smuzhiyun 	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1348*4882a593Smuzhiyun 	if (ret)
1349*4882a593Smuzhiyun 		goto out_free_buffer;
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 	/* process connect LS completion */
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	/* validate the ACC response */
1354*4882a593Smuzhiyun 	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1355*4882a593Smuzhiyun 		fcret = VERR_LSACC;
1356*4882a593Smuzhiyun 	else if (conn_acc->hdr.desc_list_len !=
1357*4882a593Smuzhiyun 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1358*4882a593Smuzhiyun 		fcret = VERR_CR_CONN_ACC_LEN;
1359*4882a593Smuzhiyun 	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1360*4882a593Smuzhiyun 		fcret = VERR_LSDESC_RQST;
1361*4882a593Smuzhiyun 	else if (conn_acc->hdr.rqst.desc_len !=
1362*4882a593Smuzhiyun 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1363*4882a593Smuzhiyun 		fcret = VERR_LSDESC_RQST_LEN;
1364*4882a593Smuzhiyun 	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1365*4882a593Smuzhiyun 		fcret = VERR_CR_CONN;
1366*4882a593Smuzhiyun 	else if (conn_acc->connectid.desc_tag !=
1367*4882a593Smuzhiyun 			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1368*4882a593Smuzhiyun 		fcret = VERR_CONN_ID;
1369*4882a593Smuzhiyun 	else if (conn_acc->connectid.desc_len !=
1370*4882a593Smuzhiyun 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1371*4882a593Smuzhiyun 		fcret = VERR_CONN_ID_LEN;
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 	if (fcret) {
1374*4882a593Smuzhiyun 		ret = -EBADF;
1375*4882a593Smuzhiyun 		dev_err(ctrl->dev,
1376*4882a593Smuzhiyun 			"q %d Create I/O Connection LS failed: %s\n",
1377*4882a593Smuzhiyun 			queue->qnum, validation_errors[fcret]);
1378*4882a593Smuzhiyun 	} else {
1379*4882a593Smuzhiyun 		queue->connection_id =
1380*4882a593Smuzhiyun 			be64_to_cpu(conn_acc->connectid.connection_id);
1381*4882a593Smuzhiyun 		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1382*4882a593Smuzhiyun 	}
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun out_free_buffer:
1385*4882a593Smuzhiyun 	kfree(lsop);
1386*4882a593Smuzhiyun out_no_memory:
1387*4882a593Smuzhiyun 	if (ret)
1388*4882a593Smuzhiyun 		dev_err(ctrl->dev,
1389*4882a593Smuzhiyun 			"queue %d connect I/O queue failed (%d).\n",
1390*4882a593Smuzhiyun 			queue->qnum, ret);
1391*4882a593Smuzhiyun 	return ret;
1392*4882a593Smuzhiyun }
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun static void
1395*4882a593Smuzhiyun nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun 	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	__nvme_fc_finish_ls_req(lsop);
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	/* fc-nvme initiator doesn't care about success or failure of cmd */
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	kfree(lsop);
1404*4882a593Smuzhiyun }
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun /*
1407*4882a593Smuzhiyun  * This routine sends a FC-NVME LS to disconnect (aka terminate)
1408*4882a593Smuzhiyun  * the FC-NVME Association.  Terminating the association also
1409*4882a593Smuzhiyun  * terminates the FC-NVME connections (per queue, both admin and io
1410*4882a593Smuzhiyun  * queues) that are part of the association. That is, things are torn
1411*4882a593Smuzhiyun  * down, and the related FC-NVME Association ID and Connection IDs
1412*4882a593Smuzhiyun  * become invalid.
1413*4882a593Smuzhiyun  *
1414*4882a593Smuzhiyun  * The behavior of the fc-nvme initiator is such that its
1415*4882a593Smuzhiyun  * understanding of the association and connections will implicitly
1416*4882a593Smuzhiyun  * be torn down. The action is implicit as it may be due to a loss of
1417*4882a593Smuzhiyun  * connectivity with the fc-nvme target, so you may never get a
1418*4882a593Smuzhiyun  * response even if you tried.  As such, the action of this routine
1419*4882a593Smuzhiyun  * is to asynchronously send the LS, ignore any results of the LS, and
1420*4882a593Smuzhiyun  * continue on with terminating the association. If the fc-nvme target
1421*4882a593Smuzhiyun  * is present and receives the LS, it too can tear down.
1422*4882a593Smuzhiyun  */
1423*4882a593Smuzhiyun static void
1424*4882a593Smuzhiyun nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1425*4882a593Smuzhiyun {
1426*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
1427*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
1428*4882a593Smuzhiyun 	struct nvmefc_ls_req_op *lsop;
1429*4882a593Smuzhiyun 	struct nvmefc_ls_req *lsreq;
1430*4882a593Smuzhiyun 	int ret;
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	lsop = kzalloc((sizeof(*lsop) +
1433*4882a593Smuzhiyun 			sizeof(*discon_rqst) + sizeof(*discon_acc) +
1434*4882a593Smuzhiyun 			ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1435*4882a593Smuzhiyun 	if (!lsop) {
1436*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
1437*4882a593Smuzhiyun 			"NVME-FC{%d}: send Disconnect Association "
1438*4882a593Smuzhiyun 			"failed: ENOMEM\n",
1439*4882a593Smuzhiyun 			ctrl->cnum);
1440*4882a593Smuzhiyun 		return;
1441*4882a593Smuzhiyun 	}
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
1444*4882a593Smuzhiyun 	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
1445*4882a593Smuzhiyun 	lsreq = &lsop->ls_req;
1446*4882a593Smuzhiyun 	if (ctrl->lport->ops->lsrqst_priv_sz)
1447*4882a593Smuzhiyun 		lsreq->private = (void *)&discon_acc[1];
1448*4882a593Smuzhiyun 	else
1449*4882a593Smuzhiyun 		lsreq->private = NULL;
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
1452*4882a593Smuzhiyun 				ctrl->association_id);
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1455*4882a593Smuzhiyun 				nvme_fc_disconnect_assoc_done);
1456*4882a593Smuzhiyun 	if (ret)
1457*4882a593Smuzhiyun 		kfree(lsop);
1458*4882a593Smuzhiyun }
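
/*
 * Illustrative sketch of the ownership rule used above (the example_* names
 * are hypothetical): for a fire-and-forget LS the completion callback owns
 * and frees the op; the sender only frees it inline when the submit itself
 * fails, since in that case the callback will never run.
 */
static void example_async_ls_done(struct nvmefc_ls_req *lsreq, int status)
{
	/* the real callback also unlinks/unmaps via __nvme_fc_finish_ls_req() */
	kfree(ls_req_to_lsop(lsreq));		/* callback owns the op */
}

static void example_send_fire_and_forget(struct nvme_fc_rport *rport,
					 struct nvmefc_ls_req_op *lsop)
{
	if (nvme_fc_send_ls_req_async(rport, lsop, example_async_ls_done))
		kfree(lsop);			/* submit failed: free inline */
}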
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun static void
1461*4882a593Smuzhiyun nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1462*4882a593Smuzhiyun {
1463*4882a593Smuzhiyun 	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
1464*4882a593Smuzhiyun 	struct nvme_fc_rport *rport = lsop->rport;
1465*4882a593Smuzhiyun 	struct nvme_fc_lport *lport = rport->lport;
1466*4882a593Smuzhiyun 	unsigned long flags;
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	spin_lock_irqsave(&rport->lock, flags);
1469*4882a593Smuzhiyun 	list_del(&lsop->lsrcv_list);
1470*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rport->lock, flags);
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
1473*4882a593Smuzhiyun 				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1474*4882a593Smuzhiyun 	fc_dma_unmap_single(lport->dev, lsop->rspdma,
1475*4882a593Smuzhiyun 			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	kfree(lsop);
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	nvme_fc_rport_put(rport);
1480*4882a593Smuzhiyun }
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun static void
1483*4882a593Smuzhiyun nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
1484*4882a593Smuzhiyun {
1485*4882a593Smuzhiyun 	struct nvme_fc_rport *rport = lsop->rport;
1486*4882a593Smuzhiyun 	struct nvme_fc_lport *lport = rport->lport;
1487*4882a593Smuzhiyun 	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
1488*4882a593Smuzhiyun 	int ret;
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
1491*4882a593Smuzhiyun 				  sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
1494*4882a593Smuzhiyun 				     lsop->lsrsp);
1495*4882a593Smuzhiyun 	if (ret) {
1496*4882a593Smuzhiyun 		dev_warn(lport->dev,
1497*4882a593Smuzhiyun 			"LLDD rejected LS RSP xmt: LS %d status %d\n",
1498*4882a593Smuzhiyun 			w0->ls_cmd, ret);
1499*4882a593Smuzhiyun 		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
1500*4882a593Smuzhiyun 		return;
1501*4882a593Smuzhiyun 	}
1502*4882a593Smuzhiyun }
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun static struct nvme_fc_ctrl *
1505*4882a593Smuzhiyun nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
1506*4882a593Smuzhiyun 		      struct nvmefc_ls_rcv_op *lsop)
1507*4882a593Smuzhiyun {
1508*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1509*4882a593Smuzhiyun 					&lsop->rqstbuf->rq_dis_assoc;
1510*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl, *ret = NULL;
1511*4882a593Smuzhiyun 	struct nvmefc_ls_rcv_op *oldls = NULL;
1512*4882a593Smuzhiyun 	u64 association_id = be64_to_cpu(rqst->associd.association_id);
1513*4882a593Smuzhiyun 	unsigned long flags;
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 	spin_lock_irqsave(&rport->lock, flags);
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
1518*4882a593Smuzhiyun 		if (!nvme_fc_ctrl_get(ctrl))
1519*4882a593Smuzhiyun 			continue;
1520*4882a593Smuzhiyun 		spin_lock(&ctrl->lock);
1521*4882a593Smuzhiyun 		if (association_id == ctrl->association_id) {
1522*4882a593Smuzhiyun 			oldls = ctrl->rcv_disconn;
1523*4882a593Smuzhiyun 			ctrl->rcv_disconn = lsop;
1524*4882a593Smuzhiyun 			ret = ctrl;
1525*4882a593Smuzhiyun 		}
1526*4882a593Smuzhiyun 		spin_unlock(&ctrl->lock);
1527*4882a593Smuzhiyun 		if (ret)
1528*4882a593Smuzhiyun 			/* leave the ctrl get reference */
1529*4882a593Smuzhiyun 			break;
1530*4882a593Smuzhiyun 		nvme_fc_ctrl_put(ctrl);
1531*4882a593Smuzhiyun 	}
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rport->lock, flags);
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	/* transmit a response for anything that was pending */
1536*4882a593Smuzhiyun 	if (oldls) {
1537*4882a593Smuzhiyun 		dev_info(rport->lport->dev,
1538*4882a593Smuzhiyun 			"NVME-FC{%d}: Multiple Disconnect Association "
1539*4882a593Smuzhiyun 			"LS's received\n", ctrl->cnum);
1540*4882a593Smuzhiyun 		/* overwrite good response with bogus failure */
1541*4882a593Smuzhiyun 		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
1542*4882a593Smuzhiyun 						sizeof(*oldls->rspbuf),
1543*4882a593Smuzhiyun 						rqst->w0.ls_cmd,
1544*4882a593Smuzhiyun 						FCNVME_RJT_RC_UNAB,
1545*4882a593Smuzhiyun 						FCNVME_RJT_EXP_NONE, 0);
1546*4882a593Smuzhiyun 		nvme_fc_xmt_ls_rsp(oldls);
1547*4882a593Smuzhiyun 	}
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	return ret;
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun /*
1553*4882a593Smuzhiyun  * returns true to mean LS handled and ls_rsp can be sent
1554*4882a593Smuzhiyun  * returns false to defer ls_rsp xmt (will be done as part of
1555*4882a593Smuzhiyun  *     association termination)
1556*4882a593Smuzhiyun  */
1557*4882a593Smuzhiyun static bool
1558*4882a593Smuzhiyun nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
1559*4882a593Smuzhiyun {
1560*4882a593Smuzhiyun 	struct nvme_fc_rport *rport = lsop->rport;
1561*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1562*4882a593Smuzhiyun 					&lsop->rqstbuf->rq_dis_assoc;
1563*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_acc *acc =
1564*4882a593Smuzhiyun 					&lsop->rspbuf->rsp_dis_assoc;
1565*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = NULL;
1566*4882a593Smuzhiyun 	int ret = 0;
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	memset(acc, 0, sizeof(*acc));
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
1571*4882a593Smuzhiyun 	if (!ret) {
1572*4882a593Smuzhiyun 		/* match an active association */
1573*4882a593Smuzhiyun 		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
1574*4882a593Smuzhiyun 		if (!ctrl)
1575*4882a593Smuzhiyun 			ret = VERR_NO_ASSOC;
1576*4882a593Smuzhiyun 	}
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	if (ret) {
1579*4882a593Smuzhiyun 		dev_info(rport->lport->dev,
1580*4882a593Smuzhiyun 			"Disconnect LS failed: %s\n",
1581*4882a593Smuzhiyun 			validation_errors[ret]);
1582*4882a593Smuzhiyun 		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1583*4882a593Smuzhiyun 					sizeof(*acc), rqst->w0.ls_cmd,
1584*4882a593Smuzhiyun 					(ret == VERR_NO_ASSOC) ?
1585*4882a593Smuzhiyun 						FCNVME_RJT_RC_INV_ASSOC :
1586*4882a593Smuzhiyun 						FCNVME_RJT_RC_LOGIC,
1587*4882a593Smuzhiyun 					FCNVME_RJT_EXP_NONE, 0);
1588*4882a593Smuzhiyun 		return true;
1589*4882a593Smuzhiyun 	}
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	/* format an ACCept response */
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	lsop->lsrsp->rsplen = sizeof(*acc);
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1596*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1597*4882a593Smuzhiyun 				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
1598*4882a593Smuzhiyun 			FCNVME_LS_DISCONNECT_ASSOC);
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	/*
1601*4882a593Smuzhiyun 	 * the transmit of the response will occur after the exchanges
1602*4882a593Smuzhiyun 	 * for the association have been ABTS'd by
1603*4882a593Smuzhiyun 	 * nvme_fc_delete_association().
1604*4882a593Smuzhiyun 	 */
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	/* fail the association */
1607*4882a593Smuzhiyun 	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	/* release the reference taken by nvme_fc_match_disconn_ls() */
1610*4882a593Smuzhiyun 	nvme_fc_ctrl_put(ctrl);
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	return false;
1613*4882a593Smuzhiyun }
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun /*
1616*4882a593Smuzhiyun  * Actual Processing routine for received FC-NVME LS Requests from the LLD
1617*4882a593Smuzhiyun  * returns true if a response should be sent afterward, false if rsp will
1618*4882a593Smuzhiyun  * be sent asynchronously.
1619*4882a593Smuzhiyun  */
1620*4882a593Smuzhiyun static bool
1621*4882a593Smuzhiyun nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun 	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
1624*4882a593Smuzhiyun 	bool ret = true;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	lsop->lsrsp->nvme_fc_private = lsop;
1627*4882a593Smuzhiyun 	lsop->lsrsp->rspbuf = lsop->rspbuf;
1628*4882a593Smuzhiyun 	lsop->lsrsp->rspdma = lsop->rspdma;
1629*4882a593Smuzhiyun 	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
1630*4882a593Smuzhiyun 	/* Be preventive: handlers will later set the valid length */
1631*4882a593Smuzhiyun 	lsop->lsrsp->rsplen = 0;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	/*
1634*4882a593Smuzhiyun 	 * handlers:
1635*4882a593Smuzhiyun 	 *   parse request input, execute the request, and format the
1636*4882a593Smuzhiyun 	 *   LS response
1637*4882a593Smuzhiyun 	 */
1638*4882a593Smuzhiyun 	switch (w0->ls_cmd) {
1639*4882a593Smuzhiyun 	case FCNVME_LS_DISCONNECT_ASSOC:
1640*4882a593Smuzhiyun 		ret = nvme_fc_ls_disconnect_assoc(lsop);
1641*4882a593Smuzhiyun 		break;
1642*4882a593Smuzhiyun 	case FCNVME_LS_DISCONNECT_CONN:
1643*4882a593Smuzhiyun 		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1644*4882a593Smuzhiyun 				sizeof(*lsop->rspbuf), w0->ls_cmd,
1645*4882a593Smuzhiyun 				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
1646*4882a593Smuzhiyun 		break;
1647*4882a593Smuzhiyun 	case FCNVME_LS_CREATE_ASSOCIATION:
1648*4882a593Smuzhiyun 	case FCNVME_LS_CREATE_CONNECTION:
1649*4882a593Smuzhiyun 		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1650*4882a593Smuzhiyun 				sizeof(*lsop->rspbuf), w0->ls_cmd,
1651*4882a593Smuzhiyun 				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
1652*4882a593Smuzhiyun 		break;
1653*4882a593Smuzhiyun 	default:
1654*4882a593Smuzhiyun 		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1655*4882a593Smuzhiyun 				sizeof(*lsop->rspbuf), w0->ls_cmd,
1656*4882a593Smuzhiyun 				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1657*4882a593Smuzhiyun 		break;
1658*4882a593Smuzhiyun 	}
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	return ret;
1661*4882a593Smuzhiyun }
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun static void
1664*4882a593Smuzhiyun nvme_fc_handle_ls_rqst_work(struct work_struct *work)
1665*4882a593Smuzhiyun {
1666*4882a593Smuzhiyun 	struct nvme_fc_rport *rport =
1667*4882a593Smuzhiyun 		container_of(work, struct nvme_fc_rport, lsrcv_work);
1668*4882a593Smuzhiyun 	struct fcnvme_ls_rqst_w0 *w0;
1669*4882a593Smuzhiyun 	struct nvmefc_ls_rcv_op *lsop;
1670*4882a593Smuzhiyun 	unsigned long flags;
1671*4882a593Smuzhiyun 	bool sendrsp;
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun restart:
1674*4882a593Smuzhiyun 	sendrsp = true;
1675*4882a593Smuzhiyun 	spin_lock_irqsave(&rport->lock, flags);
1676*4882a593Smuzhiyun 	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
1677*4882a593Smuzhiyun 		if (lsop->handled)
1678*4882a593Smuzhiyun 			continue;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 		lsop->handled = true;
1681*4882a593Smuzhiyun 		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
1682*4882a593Smuzhiyun 			spin_unlock_irqrestore(&rport->lock, flags);
1683*4882a593Smuzhiyun 			sendrsp = nvme_fc_handle_ls_rqst(lsop);
1684*4882a593Smuzhiyun 		} else {
1685*4882a593Smuzhiyun 			spin_unlock_irqrestore(&rport->lock, flags);
1686*4882a593Smuzhiyun 			w0 = &lsop->rqstbuf->w0;
1687*4882a593Smuzhiyun 			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
1688*4882a593Smuzhiyun 						lsop->rspbuf,
1689*4882a593Smuzhiyun 						sizeof(*lsop->rspbuf),
1690*4882a593Smuzhiyun 						w0->ls_cmd,
1691*4882a593Smuzhiyun 						FCNVME_RJT_RC_UNAB,
1692*4882a593Smuzhiyun 						FCNVME_RJT_EXP_NONE, 0);
1693*4882a593Smuzhiyun 		}
1694*4882a593Smuzhiyun 		if (sendrsp)
1695*4882a593Smuzhiyun 			nvme_fc_xmt_ls_rsp(lsop);
1696*4882a593Smuzhiyun 		goto restart;
1697*4882a593Smuzhiyun 	}
1698*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rport->lock, flags);
1699*4882a593Smuzhiyun }
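
/*
 * Illustrative sketch (hypothetical item type) of the claim-then-restart
 * list walk used above: an entry is claimed under the lock, the lock is
 * dropped for the actual processing, and the scan restarts from the head
 * because the list may have changed while the lock was released.
 */
struct example_pending_item {
	struct list_head list;
	bool handled;
};

static void example_drain_pending(spinlock_t *lock, struct list_head *head,
				  void (*process)(struct example_pending_item *))
{
	struct example_pending_item *item;
	unsigned long flags;

restart:
	spin_lock_irqsave(lock, flags);
	list_for_each_entry(item, head, list) {
		if (item->handled)
			continue;
		item->handled = true;
		spin_unlock_irqrestore(lock, flags);
		process(item);
		goto restart;
	}
	spin_unlock_irqrestore(lock, flags);
}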
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun /**
1702*4882a593Smuzhiyun  * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
1703*4882a593Smuzhiyun  *                       upon the reception of an NVME LS request.
1704*4882a593Smuzhiyun  *
1705*4882a593Smuzhiyun  * The nvme-fc layer will copy payload to an internal structure for
1706*4882a593Smuzhiyun  * processing.  As such, upon completion of the routine, the LLDD may
1707*4882a593Smuzhiyun  * immediately free/reuse the LS request buffer passed in the call.
1708*4882a593Smuzhiyun  *
1709*4882a593Smuzhiyun  * If this routine returns error, the LLDD should abort the exchange.
1710*4882a593Smuzhiyun  *
1711*4882a593Smuzhiyun  * @portptr:    pointer to the (registered) remote port that the LS
1712*4882a593Smuzhiyun  *              was received from. The remoteport is associated with
1713*4882a593Smuzhiyun  *              a specific localport.
1714*4882a593Smuzhiyun  * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
1715*4882a593Smuzhiyun  *              used to reference the exchange corresponding to the LS
1716*4882a593Smuzhiyun  *              when issuing an ls response.
1717*4882a593Smuzhiyun  * @lsreqbuf:   pointer to the buffer containing the LS Request
1718*4882a593Smuzhiyun  * @lsreqbuf_len: length, in bytes, of the received LS request
1719*4882a593Smuzhiyun  */
1720*4882a593Smuzhiyun int
1721*4882a593Smuzhiyun nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
1722*4882a593Smuzhiyun 			struct nvmefc_ls_rsp *lsrsp,
1723*4882a593Smuzhiyun 			void *lsreqbuf, u32 lsreqbuf_len)
1724*4882a593Smuzhiyun {
1725*4882a593Smuzhiyun 	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
1726*4882a593Smuzhiyun 	struct nvme_fc_lport *lport = rport->lport;
1727*4882a593Smuzhiyun 	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
1728*4882a593Smuzhiyun 	struct nvmefc_ls_rcv_op *lsop;
1729*4882a593Smuzhiyun 	unsigned long flags;
1730*4882a593Smuzhiyun 	int ret;
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	nvme_fc_rport_get(rport);
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	/* validate there's a routine to transmit a response */
1735*4882a593Smuzhiyun 	if (!lport->ops->xmt_ls_rsp) {
1736*4882a593Smuzhiyun 		dev_info(lport->dev,
1737*4882a593Smuzhiyun 			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
1738*4882a593Smuzhiyun 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1739*4882a593Smuzhiyun 				nvmefc_ls_names[w0->ls_cmd] : "");
1740*4882a593Smuzhiyun 		ret = -EINVAL;
1741*4882a593Smuzhiyun 		goto out_put;
1742*4882a593Smuzhiyun 	}
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
1745*4882a593Smuzhiyun 		dev_info(lport->dev,
1746*4882a593Smuzhiyun 			"RCV %s LS failed: payload too large\n",
1747*4882a593Smuzhiyun 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1748*4882a593Smuzhiyun 				nvmefc_ls_names[w0->ls_cmd] : "");
1749*4882a593Smuzhiyun 		ret = -E2BIG;
1750*4882a593Smuzhiyun 		goto out_put;
1751*4882a593Smuzhiyun 	}
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun 	lsop = kzalloc(sizeof(*lsop) +
1754*4882a593Smuzhiyun 			sizeof(union nvmefc_ls_requests) +
1755*4882a593Smuzhiyun 			sizeof(union nvmefc_ls_responses),
1756*4882a593Smuzhiyun 			GFP_KERNEL);
1757*4882a593Smuzhiyun 	if (!lsop) {
1758*4882a593Smuzhiyun 		dev_info(lport->dev,
1759*4882a593Smuzhiyun 			"RCV %s LS failed: No memory\n",
1760*4882a593Smuzhiyun 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1761*4882a593Smuzhiyun 				nvmefc_ls_names[w0->ls_cmd] : "");
1762*4882a593Smuzhiyun 		ret = -ENOMEM;
1763*4882a593Smuzhiyun 		goto out_put;
1764*4882a593Smuzhiyun 	}
1765*4882a593Smuzhiyun 	lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
1766*4882a593Smuzhiyun 	lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
1769*4882a593Smuzhiyun 					sizeof(*lsop->rspbuf),
1770*4882a593Smuzhiyun 					DMA_TO_DEVICE);
1771*4882a593Smuzhiyun 	if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
1772*4882a593Smuzhiyun 		dev_info(lport->dev,
1773*4882a593Smuzhiyun 			"RCV %s LS failed: DMA mapping failure\n",
1774*4882a593Smuzhiyun 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1775*4882a593Smuzhiyun 				nvmefc_ls_names[w0->ls_cmd] : "");
1776*4882a593Smuzhiyun 		ret = -EFAULT;
1777*4882a593Smuzhiyun 		goto out_free;
1778*4882a593Smuzhiyun 	}
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 	lsop->rport = rport;
1781*4882a593Smuzhiyun 	lsop->lsrsp = lsrsp;
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
1784*4882a593Smuzhiyun 	lsop->rqstdatalen = lsreqbuf_len;
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	spin_lock_irqsave(&rport->lock, flags);
1787*4882a593Smuzhiyun 	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
1788*4882a593Smuzhiyun 		spin_unlock_irqrestore(&rport->lock, flags);
1789*4882a593Smuzhiyun 		ret = -ENOTCONN;
1790*4882a593Smuzhiyun 		goto out_unmap;
1791*4882a593Smuzhiyun 	}
1792*4882a593Smuzhiyun 	list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
1793*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rport->lock, flags);
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	schedule_work(&rport->lsrcv_work);
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	return 0;
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun out_unmap:
1800*4882a593Smuzhiyun 	fc_dma_unmap_single(lport->dev, lsop->rspdma,
1801*4882a593Smuzhiyun 			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1802*4882a593Smuzhiyun out_free:
1803*4882a593Smuzhiyun 	kfree(lsop);
1804*4882a593Smuzhiyun out_put:
1805*4882a593Smuzhiyun 	nvme_fc_rport_put(rport);
1806*4882a593Smuzhiyun 	return ret;
1807*4882a593Smuzhiyun }
1808*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
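
/*
 * Illustrative sketch (hypothetical LLDD code, not part of this driver):
 * a low-level driver's LS-receive path passes the raw payload to
 * nvme_fc_rcv_ls_req() along with an nvmefc_ls_rsp it owns. The payload
 * buffer may be reused as soon as the call returns; a non-zero return
 * means the LLDD should abort the exchange.
 */
static void example_lldd_recv_nvme_ls(struct nvme_fc_remote_port *remoteport,
				      struct nvmefc_ls_rsp *lsrsp,
				      void *payload, u32 payload_len)
{
	int ret = nvme_fc_rcv_ls_req(remoteport, lsrsp, payload, payload_len);

	if (ret)
		pr_warn("nvme LS rejected (%d); abort the FC exchange\n", ret);
}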
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun /* *********************** NVME Ctrl Routines **************************** */
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun static void
1814*4882a593Smuzhiyun __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1815*4882a593Smuzhiyun 		struct nvme_fc_fcp_op *op)
1816*4882a593Smuzhiyun {
1817*4882a593Smuzhiyun 	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1818*4882a593Smuzhiyun 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1819*4882a593Smuzhiyun 	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1820*4882a593Smuzhiyun 				sizeof(op->cmd_iu), DMA_TO_DEVICE);
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	atomic_set(&op->state, FCPOP_STATE_UNINIT);
1823*4882a593Smuzhiyun }
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun static void
1826*4882a593Smuzhiyun nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1827*4882a593Smuzhiyun 		unsigned int hctx_idx)
1828*4882a593Smuzhiyun {
1829*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 	return __nvme_fc_exit_request(set->driver_data, op);
1832*4882a593Smuzhiyun }
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun static int
1835*4882a593Smuzhiyun __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1836*4882a593Smuzhiyun {
1837*4882a593Smuzhiyun 	unsigned long flags;
1838*4882a593Smuzhiyun 	int opstate;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	spin_lock_irqsave(&ctrl->lock, flags);
1841*4882a593Smuzhiyun 	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1842*4882a593Smuzhiyun 	if (opstate != FCPOP_STATE_ACTIVE)
1843*4882a593Smuzhiyun 		atomic_set(&op->state, opstate);
1844*4882a593Smuzhiyun 	else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
1845*4882a593Smuzhiyun 		op->flags |= FCOP_FLAGS_TERMIO;
1846*4882a593Smuzhiyun 		ctrl->iocnt++;
1847*4882a593Smuzhiyun 	}
1848*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctrl->lock, flags);
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	if (opstate != FCPOP_STATE_ACTIVE)
1851*4882a593Smuzhiyun 		return -ECANCELED;
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1854*4882a593Smuzhiyun 					&ctrl->rport->remoteport,
1855*4882a593Smuzhiyun 					op->queue->lldd_handle,
1856*4882a593Smuzhiyun 					&op->fcp_req);
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	return 0;
1859*4882a593Smuzhiyun }
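
/*
 * Illustrative sketch of the atomic claim used above (the example_* name is
 * hypothetical): the op state is swapped to ABORTED in one atomic step and
 * only the caller that observed ACTIVE as the previous value owns the
 * abort; any other previous state is put back untouched.
 */
static bool example_claim_op_for_abort(atomic_t *state)
{
	int prev = atomic_xchg(state, FCPOP_STATE_ABORTED);

	if (prev != FCPOP_STATE_ACTIVE) {
		atomic_set(state, prev);	/* not active: nothing to abort */
		return false;
	}
	return true;
}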
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun static void
1862*4882a593Smuzhiyun nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1865*4882a593Smuzhiyun 	int i;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	/* ensure we've initialized the ops once */
1868*4882a593Smuzhiyun 	if (!(aen_op->flags & FCOP_FLAGS_AEN))
1869*4882a593Smuzhiyun 		return;
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1872*4882a593Smuzhiyun 		__nvme_fc_abort_op(ctrl, aen_op);
1873*4882a593Smuzhiyun }
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun static inline void
1876*4882a593Smuzhiyun __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1877*4882a593Smuzhiyun 		struct nvme_fc_fcp_op *op, int opstate)
1878*4882a593Smuzhiyun {
1879*4882a593Smuzhiyun 	unsigned long flags;
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 	if (opstate == FCPOP_STATE_ABORTED) {
1882*4882a593Smuzhiyun 		spin_lock_irqsave(&ctrl->lock, flags);
1883*4882a593Smuzhiyun 		if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
1884*4882a593Smuzhiyun 		    op->flags & FCOP_FLAGS_TERMIO) {
1885*4882a593Smuzhiyun 			if (!--ctrl->iocnt)
1886*4882a593Smuzhiyun 				wake_up(&ctrl->ioabort_wait);
1887*4882a593Smuzhiyun 		}
1888*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctrl->lock, flags);
1889*4882a593Smuzhiyun 	}
1890*4882a593Smuzhiyun }
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun static void
1893*4882a593Smuzhiyun nvme_fc_ctrl_ioerr_work(struct work_struct *work)
1894*4882a593Smuzhiyun {
1895*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl =
1896*4882a593Smuzhiyun 			container_of(work, struct nvme_fc_ctrl, ioerr_work);
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 	nvme_fc_error_recovery(ctrl, "transport detected io error");
1899*4882a593Smuzhiyun }
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun static void
1902*4882a593Smuzhiyun nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1903*4882a593Smuzhiyun {
1904*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1905*4882a593Smuzhiyun 	struct request *rq = op->rq;
1906*4882a593Smuzhiyun 	struct nvmefc_fcp_req *freq = &op->fcp_req;
1907*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = op->ctrl;
1908*4882a593Smuzhiyun 	struct nvme_fc_queue *queue = op->queue;
1909*4882a593Smuzhiyun 	struct nvme_completion *cqe = &op->rsp_iu.cqe;
1910*4882a593Smuzhiyun 	struct nvme_command *sqe = &op->cmd_iu.sqe;
1911*4882a593Smuzhiyun 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1912*4882a593Smuzhiyun 	union nvme_result result;
1913*4882a593Smuzhiyun 	bool terminate_assoc = true;
1914*4882a593Smuzhiyun 	int opstate;
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	/*
1917*4882a593Smuzhiyun 	 * WARNING:
1918*4882a593Smuzhiyun 	 * The current linux implementation of a nvme controller
1919*4882a593Smuzhiyun 	 * The current linux implementation of an nvme controller
1920*4882a593Smuzhiyun 	 * the io queues to fully hold all possible tags. Thus, the
1921*4882a593Smuzhiyun 	 * implementation does not reference or care about the sqhd
1922*4882a593Smuzhiyun 	 * value as it never needs to use the sqhd/sqtail pointers
1923*4882a593Smuzhiyun 	 * for submission pacing.
1924*4882a593Smuzhiyun 	 *
1925*4882a593Smuzhiyun 	 * This affects the FC-NVME implementation in two ways:
1926*4882a593Smuzhiyun 	 * 1) As the value doesn't matter, we don't need to waste
1927*4882a593Smuzhiyun 	 *    cycles extracting it from ERSPs and stamping it in the
1928*4882a593Smuzhiyun 	 *    cases where the transport fabricates CQEs on successful
1929*4882a593Smuzhiyun 	 *    completions.
1930*4882a593Smuzhiyun 	 * 2) The FC-NVME implementation requires that delivery of
1931*4882a593Smuzhiyun 	 *    ERSP completions are to go back to the nvme layer in order
1932*4882a593Smuzhiyun 	 *    relative to the rsn, such that the sqhd value will always
1933*4882a593Smuzhiyun 	 *    be "in order" for the nvme layer. As the nvme layer in
1934*4882a593Smuzhiyun 	 *    linux doesn't care about sqhd, there's no need to return
1935*4882a593Smuzhiyun 	 *    them in order.
1936*4882a593Smuzhiyun 	 *
1937*4882a593Smuzhiyun 	 * Additionally:
1938*4882a593Smuzhiyun 	 * As the core nvme layer in linux currently does not look at
1939*4882a593Smuzhiyun 	 * every field in the cqe - in cases where the FC transport must
1940*4882a593Smuzhiyun 	 * fabricate a CQE, the following fields will not be set as they
1941*4882a593Smuzhiyun 	 * are not referenced:
1942*4882a593Smuzhiyun 	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
1943*4882a593Smuzhiyun 	 *
1944*4882a593Smuzhiyun 	 * Failure or error of an individual i/o, in a transport
1945*4882a593Smuzhiyun 	 * detected fashion unrelated to the nvme completion status,
1946*4882a593Smuzhiyun 	 * potentially cause the initiator and target sides to get out
1947*4882a593Smuzhiyun 	 * can potentially cause the initiator and target sides to get out
1948*4882a593Smuzhiyun 	 * Per FC-NVME spec, failure of an individual command requires
1949*4882a593Smuzhiyun 	 * the connection to be terminated, which in turn requires the
1950*4882a593Smuzhiyun 	 * association to be terminated.
1951*4882a593Smuzhiyun 	 */
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun 	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1956*4882a593Smuzhiyun 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	if (opstate == FCPOP_STATE_ABORTED)
1959*4882a593Smuzhiyun 		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
1960*4882a593Smuzhiyun 	else if (freq->status) {
1961*4882a593Smuzhiyun 		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1962*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
1963*4882a593Smuzhiyun 			"NVME-FC{%d}: io failed due to lldd error %d\n",
1964*4882a593Smuzhiyun 			ctrl->cnum, freq->status);
1965*4882a593Smuzhiyun 	}
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	/*
1968*4882a593Smuzhiyun 	 * For the linux implementation, if we have an unsuccessful
1969*4882a593Smuzhiyun 	 * status, the blk-mq layer can typically be called with the
1970*4882a593Smuzhiyun 	 * non-zero status and the content of the cqe isn't important.
1971*4882a593Smuzhiyun 	 */
1972*4882a593Smuzhiyun 	if (status)
1973*4882a593Smuzhiyun 		goto done;
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	/*
1976*4882a593Smuzhiyun 	 * command completed successfully relative to the wire
1977*4882a593Smuzhiyun 	 * protocol. However, validate anything received and
1978*4882a593Smuzhiyun 	 * extract the status and result from the cqe (create it
1979*4882a593Smuzhiyun 	 * where necessary).
1980*4882a593Smuzhiyun 	 */
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 	switch (freq->rcv_rsplen) {
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	case 0:
1985*4882a593Smuzhiyun 	case NVME_FC_SIZEOF_ZEROS_RSP:
1986*4882a593Smuzhiyun 		/*
1987*4882a593Smuzhiyun 		 * No response payload, or 12 bytes of payload (which
1988*4882a593Smuzhiyun 		 * should all be zeros), is treated as a successful
1989*4882a593Smuzhiyun 		 * completion; the transport fabricates the CQE content.
1990*4882a593Smuzhiyun 		 */
1991*4882a593Smuzhiyun 		if (freq->transferred_length !=
1992*4882a593Smuzhiyun 		    be32_to_cpu(op->cmd_iu.data_len)) {
1993*4882a593Smuzhiyun 			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1994*4882a593Smuzhiyun 			dev_info(ctrl->ctrl.device,
1995*4882a593Smuzhiyun 				"NVME-FC{%d}: io failed due to bad transfer "
1996*4882a593Smuzhiyun 				"length: %d vs expected %d\n",
1997*4882a593Smuzhiyun 				ctrl->cnum, freq->transferred_length,
1998*4882a593Smuzhiyun 				be32_to_cpu(op->cmd_iu.data_len));
1999*4882a593Smuzhiyun 			goto done;
2000*4882a593Smuzhiyun 		}
2001*4882a593Smuzhiyun 		result.u64 = 0;
2002*4882a593Smuzhiyun 		break;
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	case sizeof(struct nvme_fc_ersp_iu):
2005*4882a593Smuzhiyun 		/*
2006*4882a593Smuzhiyun 		 * The ERSP IU contains a full completion with CQE.
2007*4882a593Smuzhiyun 		 * Validate ERSP IU and look at cqe.
2008*4882a593Smuzhiyun 		 */
2009*4882a593Smuzhiyun 		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
2010*4882a593Smuzhiyun 					(freq->rcv_rsplen / 4) ||
2011*4882a593Smuzhiyun 			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
2012*4882a593Smuzhiyun 					freq->transferred_length ||
2013*4882a593Smuzhiyun 			     op->rsp_iu.ersp_result ||
2014*4882a593Smuzhiyun 			     sqe->common.command_id != cqe->command_id)) {
2015*4882a593Smuzhiyun 			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
2016*4882a593Smuzhiyun 			dev_info(ctrl->ctrl.device,
2017*4882a593Smuzhiyun 				"NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
2018*4882a593Smuzhiyun 				"iu len %d, xfr len %d vs %d, status code "
2019*4882a593Smuzhiyun 				"%d, cmdid %d vs %d\n",
2020*4882a593Smuzhiyun 				ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
2021*4882a593Smuzhiyun 				be32_to_cpu(op->rsp_iu.xfrd_len),
2022*4882a593Smuzhiyun 				freq->transferred_length,
2023*4882a593Smuzhiyun 				op->rsp_iu.ersp_result,
2024*4882a593Smuzhiyun 				sqe->common.command_id,
2025*4882a593Smuzhiyun 				cqe->command_id);
2026*4882a593Smuzhiyun 			goto done;
2027*4882a593Smuzhiyun 		}
2028*4882a593Smuzhiyun 		result = cqe->result;
2029*4882a593Smuzhiyun 		status = cqe->status;
2030*4882a593Smuzhiyun 		break;
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun 	default:
2033*4882a593Smuzhiyun 		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
2034*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
2035*4882a593Smuzhiyun 			"NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
2036*4882a593Smuzhiyun 			"len %d\n",
2037*4882a593Smuzhiyun 			ctrl->cnum, freq->rcv_rsplen);
2038*4882a593Smuzhiyun 		goto done;
2039*4882a593Smuzhiyun 	}
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 	terminate_assoc = false;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun done:
2044*4882a593Smuzhiyun 	if (op->flags & FCOP_FLAGS_AEN) {
2045*4882a593Smuzhiyun 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
2046*4882a593Smuzhiyun 		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2047*4882a593Smuzhiyun 		atomic_set(&op->state, FCPOP_STATE_IDLE);
2048*4882a593Smuzhiyun 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
2049*4882a593Smuzhiyun 		nvme_fc_ctrl_put(ctrl);
2050*4882a593Smuzhiyun 		goto check_error;
2051*4882a593Smuzhiyun 	}
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2054*4882a593Smuzhiyun 	if (!nvme_try_complete_req(rq, status, result))
2055*4882a593Smuzhiyun 		nvme_fc_complete_rq(rq);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun check_error:
2058*4882a593Smuzhiyun 	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
2059*4882a593Smuzhiyun 		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
2060*4882a593Smuzhiyun }
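
/*
 * Illustrative note on the status encoding used in nvme_fc_fcpio_done():
 * in an NVMe completion the low bit of the 16-bit status word is the phase
 * tag, so a bare status code such as NVME_SC_HOST_PATH_ERROR is shifted
 * left by one before being handed to the core. A hypothetical helper:
 */
static inline __le16 example_fabricated_cqe_status(u16 status_code)
{
	return cpu_to_le16(status_code << 1);	/* bit 0 is the phase tag */
}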
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun static int
2063*4882a593Smuzhiyun __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
2064*4882a593Smuzhiyun 		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
2065*4882a593Smuzhiyun 		struct request *rq, u32 rqno)
2066*4882a593Smuzhiyun {
2067*4882a593Smuzhiyun 	struct nvme_fcp_op_w_sgl *op_w_sgl =
2068*4882a593Smuzhiyun 		container_of(op, typeof(*op_w_sgl), op);
2069*4882a593Smuzhiyun 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2070*4882a593Smuzhiyun 	int ret = 0;
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	memset(op, 0, sizeof(*op));
2073*4882a593Smuzhiyun 	op->fcp_req.cmdaddr = &op->cmd_iu;
2074*4882a593Smuzhiyun 	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
2075*4882a593Smuzhiyun 	op->fcp_req.rspaddr = &op->rsp_iu;
2076*4882a593Smuzhiyun 	op->fcp_req.rsplen = sizeof(op->rsp_iu);
2077*4882a593Smuzhiyun 	op->fcp_req.done = nvme_fc_fcpio_done;
2078*4882a593Smuzhiyun 	op->ctrl = ctrl;
2079*4882a593Smuzhiyun 	op->queue = queue;
2080*4882a593Smuzhiyun 	op->rq = rq;
2081*4882a593Smuzhiyun 	op->rqno = rqno;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	cmdiu->format_id = NVME_CMD_FORMAT_ID;
2084*4882a593Smuzhiyun 	cmdiu->fc_id = NVME_CMD_FC_ID;
2085*4882a593Smuzhiyun 	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
2086*4882a593Smuzhiyun 	if (queue->qnum)
2087*4882a593Smuzhiyun 		cmdiu->rsv_cat = fccmnd_set_cat_css(0,
2088*4882a593Smuzhiyun 					(NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
2089*4882a593Smuzhiyun 	else
2090*4882a593Smuzhiyun 		cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
2093*4882a593Smuzhiyun 				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
2094*4882a593Smuzhiyun 	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
2095*4882a593Smuzhiyun 		dev_err(ctrl->dev,
2096*4882a593Smuzhiyun 			"FCP Op failed - cmdiu dma mapping failed.\n");
2097*4882a593Smuzhiyun 		ret = -EFAULT;
2098*4882a593Smuzhiyun 		goto out_on_error;
2099*4882a593Smuzhiyun 	}
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
2102*4882a593Smuzhiyun 				&op->rsp_iu, sizeof(op->rsp_iu),
2103*4882a593Smuzhiyun 				DMA_FROM_DEVICE);
2104*4882a593Smuzhiyun 	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
2105*4882a593Smuzhiyun 		dev_err(ctrl->dev,
2106*4882a593Smuzhiyun 			"FCP Op failed - rspiu dma mapping failed.\n");
2107*4882a593Smuzhiyun 		ret = -EFAULT;
2108*4882a593Smuzhiyun 	}
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 	atomic_set(&op->state, FCPOP_STATE_IDLE);
2111*4882a593Smuzhiyun out_on_error:
2112*4882a593Smuzhiyun 	return ret;
2113*4882a593Smuzhiyun }
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun static int
2116*4882a593Smuzhiyun nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
2117*4882a593Smuzhiyun 		unsigned int hctx_idx, unsigned int numa_node)
2118*4882a593Smuzhiyun {
2119*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = set->driver_data;
2120*4882a593Smuzhiyun 	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
2121*4882a593Smuzhiyun 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
2122*4882a593Smuzhiyun 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
2123*4882a593Smuzhiyun 	int res;
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
2126*4882a593Smuzhiyun 	if (res)
2127*4882a593Smuzhiyun 		return res;
2128*4882a593Smuzhiyun 	op->op.fcp_req.first_sgl = op->sgl;
2129*4882a593Smuzhiyun 	op->op.fcp_req.private = &op->priv[0];
2130*4882a593Smuzhiyun 	nvme_req(rq)->ctrl = &ctrl->ctrl;
2131*4882a593Smuzhiyun 	return res;
2132*4882a593Smuzhiyun }
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun static int
2135*4882a593Smuzhiyun nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
2136*4882a593Smuzhiyun {
2137*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *aen_op;
2138*4882a593Smuzhiyun 	struct nvme_fc_cmd_iu *cmdiu;
2139*4882a593Smuzhiyun 	struct nvme_command *sqe;
2140*4882a593Smuzhiyun 	void *private = NULL;
2141*4882a593Smuzhiyun 	int i, ret;
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 	aen_op = ctrl->aen_ops;
2144*4882a593Smuzhiyun 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
2145*4882a593Smuzhiyun 		if (ctrl->lport->ops->fcprqst_priv_sz) {
2146*4882a593Smuzhiyun 			private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
2147*4882a593Smuzhiyun 						GFP_KERNEL);
2148*4882a593Smuzhiyun 			if (!private)
2149*4882a593Smuzhiyun 				return -ENOMEM;
2150*4882a593Smuzhiyun 		}
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 		cmdiu = &aen_op->cmd_iu;
2153*4882a593Smuzhiyun 		sqe = &cmdiu->sqe;
2154*4882a593Smuzhiyun 		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
2155*4882a593Smuzhiyun 				aen_op, (struct request *)NULL,
2156*4882a593Smuzhiyun 				(NVME_AQ_BLK_MQ_DEPTH + i));
2157*4882a593Smuzhiyun 		if (ret) {
2158*4882a593Smuzhiyun 			kfree(private);
2159*4882a593Smuzhiyun 			return ret;
2160*4882a593Smuzhiyun 		}
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun 		aen_op->flags = FCOP_FLAGS_AEN;
2163*4882a593Smuzhiyun 		aen_op->fcp_req.private = private;
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 		memset(sqe, 0, sizeof(*sqe));
2166*4882a593Smuzhiyun 		sqe->common.opcode = nvme_admin_async_event;
2167*4882a593Smuzhiyun 		/* Note: core layer may overwrite the sqe.command_id value */
2168*4882a593Smuzhiyun 		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
2169*4882a593Smuzhiyun 	}
2170*4882a593Smuzhiyun 	return 0;
2171*4882a593Smuzhiyun }
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun static void
2174*4882a593Smuzhiyun nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
2175*4882a593Smuzhiyun {
2176*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *aen_op;
2177*4882a593Smuzhiyun 	int i;
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	cancel_work_sync(&ctrl->ctrl.async_event_work);
2180*4882a593Smuzhiyun 	aen_op = ctrl->aen_ops;
2181*4882a593Smuzhiyun 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
2182*4882a593Smuzhiyun 		__nvme_fc_exit_request(ctrl, aen_op);
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun 		kfree(aen_op->fcp_req.private);
2185*4882a593Smuzhiyun 		aen_op->fcp_req.private = NULL;
2186*4882a593Smuzhiyun 	}
2187*4882a593Smuzhiyun }
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun static inline void
2190*4882a593Smuzhiyun __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
2191*4882a593Smuzhiyun 		unsigned int qidx)
2192*4882a593Smuzhiyun {
2193*4882a593Smuzhiyun 	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	hctx->driver_data = queue;
2196*4882a593Smuzhiyun 	queue->hctx = hctx;
2197*4882a593Smuzhiyun }
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun static int
2200*4882a593Smuzhiyun nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
2201*4882a593Smuzhiyun 		unsigned int hctx_idx)
2202*4882a593Smuzhiyun {
2203*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = data;
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	return 0;
2208*4882a593Smuzhiyun }
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun static int
2211*4882a593Smuzhiyun nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
2212*4882a593Smuzhiyun 		unsigned int hctx_idx)
2213*4882a593Smuzhiyun {
2214*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = data;
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 	return 0;
2219*4882a593Smuzhiyun }
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun static void
2222*4882a593Smuzhiyun nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
2223*4882a593Smuzhiyun {
2224*4882a593Smuzhiyun 	struct nvme_fc_queue *queue;
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	queue = &ctrl->queues[idx];
2227*4882a593Smuzhiyun 	memset(queue, 0, sizeof(*queue));
2228*4882a593Smuzhiyun 	queue->ctrl = ctrl;
2229*4882a593Smuzhiyun 	queue->qnum = idx;
2230*4882a593Smuzhiyun 	atomic_set(&queue->csn, 0);
2231*4882a593Smuzhiyun 	queue->dev = ctrl->dev;
2232*4882a593Smuzhiyun 
2233*4882a593Smuzhiyun 	if (idx > 0)
2234*4882a593Smuzhiyun 		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
2235*4882a593Smuzhiyun 	else
2236*4882a593Smuzhiyun 		queue->cmnd_capsule_len = sizeof(struct nvme_command);
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 	/*
2239*4882a593Smuzhiyun 	 * We considered allocating buffers for all SQEs and CQEs and
2240*4882a593Smuzhiyun 	 * dma-mapping them - recording their respective entries in the
2241*4882a593Smuzhiyun 	 * request structures (kernel vm addr and dma address) so the
2242*4882a593Smuzhiyun 	 * driver could use the buffers/mappings directly. That only
2243*4882a593Smuzhiyun 	 * makes sense if the LLDD would use them for its messaging API,
2244*4882a593Smuzhiyun 	 * and it's very unlikely most adapter APIs would consume a
2245*4882a593Smuzhiyun 	 * native NVME sqe/cqe. FC-NVME IU payload structures would be
2246*4882a593Smuzhiyun 	 * the more reasonable choice.
2247*4882a593Smuzhiyun 	 */
2248*4882a593Smuzhiyun }
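/*
 * Hypothetical helper, not in the original source, shown only to make the
 * capsule-length rule above concrete: ioccsz is advertised by the target
 * in 16-byte units, so the spec-minimum value of 4 yields 4 * 16 = 64
 * bytes, which is exactly sizeof(struct nvme_command) - the same length
 * the admin queue (idx 0) always uses.
 */
static inline size_t nvme_fc_capsule_len(struct nvme_fc_ctrl *ctrl, int qnum)
{
	/* I/O queues use the advertised capsule size; admin uses a bare SQE */
	return qnum ? ctrl->ctrl.ioccsz * 16 : sizeof(struct nvme_command);
}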
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun /*
2251*4882a593Smuzhiyun  * This routine terminates a queue at the transport level.
2252*4882a593Smuzhiyun  * The transport has already ensured that all outstanding ios on
2253*4882a593Smuzhiyun  * the queue have been terminated.
2254*4882a593Smuzhiyun  * The transport will send a Disconnect LS request to terminate
2255*4882a593Smuzhiyun  * the queue's connection. Termination of the admin queue will also
2256*4882a593Smuzhiyun  * terminate the association at the target.
2257*4882a593Smuzhiyun  */
2258*4882a593Smuzhiyun static void
2259*4882a593Smuzhiyun nvme_fc_free_queue(struct nvme_fc_queue *queue)
2260*4882a593Smuzhiyun {
2261*4882a593Smuzhiyun 	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
2262*4882a593Smuzhiyun 		return;
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun 	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
2265*4882a593Smuzhiyun 	/*
2266*4882a593Smuzhiyun 	 * Current implementation never disconnects a single queue.
2267*4882a593Smuzhiyun 	 * It always terminates a whole association. So there is never
2268*4882a593Smuzhiyun 	 * a disconnect(queue) LS sent to the target.
2269*4882a593Smuzhiyun 	 */
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	queue->connection_id = 0;
2272*4882a593Smuzhiyun 	atomic_set(&queue->csn, 0);
2273*4882a593Smuzhiyun }
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun static void
2276*4882a593Smuzhiyun __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2277*4882a593Smuzhiyun 	struct nvme_fc_queue *queue, unsigned int qidx)
2278*4882a593Smuzhiyun {
2279*4882a593Smuzhiyun 	if (ctrl->lport->ops->delete_queue)
2280*4882a593Smuzhiyun 		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2281*4882a593Smuzhiyun 				queue->lldd_handle);
2282*4882a593Smuzhiyun 	queue->lldd_handle = NULL;
2283*4882a593Smuzhiyun }
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun static void
2286*4882a593Smuzhiyun nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2287*4882a593Smuzhiyun {
2288*4882a593Smuzhiyun 	int i;
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 	for (i = 1; i < ctrl->ctrl.queue_count; i++)
2291*4882a593Smuzhiyun 		nvme_fc_free_queue(&ctrl->queues[i]);
2292*4882a593Smuzhiyun }
2293*4882a593Smuzhiyun 
2294*4882a593Smuzhiyun static int
2295*4882a593Smuzhiyun __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2296*4882a593Smuzhiyun 	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
2297*4882a593Smuzhiyun {
2298*4882a593Smuzhiyun 	int ret = 0;
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	queue->lldd_handle = NULL;
2301*4882a593Smuzhiyun 	if (ctrl->lport->ops->create_queue)
2302*4882a593Smuzhiyun 		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2303*4882a593Smuzhiyun 				qidx, qsize, &queue->lldd_handle);
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun 	return ret;
2306*4882a593Smuzhiyun }
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun static void
2309*4882a593Smuzhiyun nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2310*4882a593Smuzhiyun {
2311*4882a593Smuzhiyun 	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2312*4882a593Smuzhiyun 	int i;
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2315*4882a593Smuzhiyun 		__nvme_fc_delete_hw_queue(ctrl, queue, i);
2316*4882a593Smuzhiyun }
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun static int
2319*4882a593Smuzhiyun nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2320*4882a593Smuzhiyun {
2321*4882a593Smuzhiyun 	struct nvme_fc_queue *queue = &ctrl->queues[1];
2322*4882a593Smuzhiyun 	int i, ret;
2323*4882a593Smuzhiyun 
2324*4882a593Smuzhiyun 	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2325*4882a593Smuzhiyun 		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2326*4882a593Smuzhiyun 		if (ret)
2327*4882a593Smuzhiyun 			goto delete_queues;
2328*4882a593Smuzhiyun 	}
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	return 0;
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun delete_queues:
2333*4882a593Smuzhiyun 	for (; i > 0; i--)
2334*4882a593Smuzhiyun 		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2335*4882a593Smuzhiyun 	return ret;
2336*4882a593Smuzhiyun }
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun static int
2339*4882a593Smuzhiyun nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2340*4882a593Smuzhiyun {
2341*4882a593Smuzhiyun 	int i, ret = 0;
2342*4882a593Smuzhiyun 
2343*4882a593Smuzhiyun 	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2344*4882a593Smuzhiyun 		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2345*4882a593Smuzhiyun 					(qsize / 5));
2346*4882a593Smuzhiyun 		if (ret)
2347*4882a593Smuzhiyun 			break;
2348*4882a593Smuzhiyun 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
2349*4882a593Smuzhiyun 		if (ret)
2350*4882a593Smuzhiyun 			break;
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2353*4882a593Smuzhiyun 	}
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 	return ret;
2356*4882a593Smuzhiyun }
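/*
 * Annotation (not part of the original source): the (qsize / 5) passed to
 * nvme_fc_connect_queue() above appears to be the ERSP ratio carried in
 * the FC-NVME Create Connection LS - roughly "return a full extended
 * response at least this often" so the host can track SQ head updates.
 * The 1-in-5 figure is a transport heuristic, not a requirement of the
 * NVMe specification.
 */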
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun static void
2359*4882a593Smuzhiyun nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2360*4882a593Smuzhiyun {
2361*4882a593Smuzhiyun 	int i;
2362*4882a593Smuzhiyun 
2363*4882a593Smuzhiyun 	for (i = 1; i < ctrl->ctrl.queue_count; i++)
2364*4882a593Smuzhiyun 		nvme_fc_init_queue(ctrl, i);
2365*4882a593Smuzhiyun }
2366*4882a593Smuzhiyun 
2367*4882a593Smuzhiyun static void
2368*4882a593Smuzhiyun nvme_fc_ctrl_free(struct kref *ref)
2369*4882a593Smuzhiyun {
2370*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl =
2371*4882a593Smuzhiyun 		container_of(ref, struct nvme_fc_ctrl, ref);
2372*4882a593Smuzhiyun 	unsigned long flags;
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 	if (ctrl->ctrl.tagset) {
2375*4882a593Smuzhiyun 		blk_cleanup_queue(ctrl->ctrl.connect_q);
2376*4882a593Smuzhiyun 		blk_mq_free_tag_set(&ctrl->tag_set);
2377*4882a593Smuzhiyun 	}
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	/* remove from rport list */
2380*4882a593Smuzhiyun 	spin_lock_irqsave(&ctrl->rport->lock, flags);
2381*4882a593Smuzhiyun 	list_del(&ctrl->ctrl_list);
2382*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2385*4882a593Smuzhiyun 	blk_cleanup_queue(ctrl->ctrl.admin_q);
2386*4882a593Smuzhiyun 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
2387*4882a593Smuzhiyun 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
2388*4882a593Smuzhiyun 
2389*4882a593Smuzhiyun 	kfree(ctrl->queues);
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 	put_device(ctrl->dev);
2392*4882a593Smuzhiyun 	nvme_fc_rport_put(ctrl->rport);
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2395*4882a593Smuzhiyun 	if (ctrl->ctrl.opts)
2396*4882a593Smuzhiyun 		nvmf_free_options(ctrl->ctrl.opts);
2397*4882a593Smuzhiyun 	kfree(ctrl);
2398*4882a593Smuzhiyun }
2399*4882a593Smuzhiyun 
2400*4882a593Smuzhiyun static void
2401*4882a593Smuzhiyun nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2402*4882a593Smuzhiyun {
2403*4882a593Smuzhiyun 	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2404*4882a593Smuzhiyun }
2405*4882a593Smuzhiyun 
2406*4882a593Smuzhiyun static int
2407*4882a593Smuzhiyun nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2408*4882a593Smuzhiyun {
2409*4882a593Smuzhiyun 	return kref_get_unless_zero(&ctrl->ref);
2410*4882a593Smuzhiyun }
2411*4882a593Smuzhiyun 
2412*4882a593Smuzhiyun /*
2413*4882a593Smuzhiyun  * All accesses from nvme core layer done - can now free the
2414*4882a593Smuzhiyun  * controller. Called after last nvme_put_ctrl() call
2415*4882a593Smuzhiyun  */
2416*4882a593Smuzhiyun static void
2417*4882a593Smuzhiyun nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2418*4882a593Smuzhiyun {
2419*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 	WARN_ON(nctrl != &ctrl->ctrl);
2422*4882a593Smuzhiyun 
2423*4882a593Smuzhiyun 	nvme_fc_ctrl_put(ctrl);
2424*4882a593Smuzhiyun }
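/*
 * Annotation (not part of the original source): there are two reference
 * layers at play - the nvme core keeps its own counts on the nvme_ctrl,
 * and only after the final nvme_put_ctrl() does this callback run and
 * drop the transport-level kref, which eventually lands in
 * nvme_fc_ctrl_free() above.
 */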
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun /*
2427*4882a593Smuzhiyun  * This routine is used by the transport when it needs to find active
2428*4882a593Smuzhiyun  * io on a queue that is to be terminated. The transport uses
2429*4882a593Smuzhiyun  * blk_mq_tagset_busy_iter() to find the busy requests, which then invokes
2430*4882a593Smuzhiyun  * this routine to kill them on a one-by-one basis.
2431*4882a593Smuzhiyun  *
2432*4882a593Smuzhiyun  * As FC allocates FC exchange for each io, the transport must contact
2433*4882a593Smuzhiyun  * the LLDD to terminate the exchange, thus releasing the FC exchange.
2434*4882a593Smuzhiyun  * After terminating the exchange the LLDD will call the transport's
2435*4882a593Smuzhiyun  * normal io done path for the request, but it will have an aborted
2436*4882a593Smuzhiyun  * status. The done path will return the io request back to the block
2437*4882a593Smuzhiyun  * layer with an error status.
2438*4882a593Smuzhiyun  */
2439*4882a593Smuzhiyun static bool
2440*4882a593Smuzhiyun nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2441*4882a593Smuzhiyun {
2442*4882a593Smuzhiyun 	struct nvme_ctrl *nctrl = data;
2443*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2444*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun 	op->nreq.flags |= NVME_REQ_CANCELLED;
2447*4882a593Smuzhiyun 	__nvme_fc_abort_op(ctrl, op);
2448*4882a593Smuzhiyun 	return true;
2449*4882a593Smuzhiyun }
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun /*
2452*4882a593Smuzhiyun  * This routine runs through all outstanding commands on the association
2453*4882a593Smuzhiyun  * and aborts them.  This routine is typically called by the
2454*4882a593Smuzhiyun  * delete_association routine. It is also called due to an error during
2455*4882a593Smuzhiyun  * reconnect. In that scenario, it is most likely a command that initializes
2456*4882a593Smuzhiyun  * the controller, including fabric Connect commands on io queues, that
2457*4882a593Smuzhiyun  * may have timed out or failed, so the io must be killed for the connect
2458*4882a593Smuzhiyun  * thread to see the error.
2459*4882a593Smuzhiyun  */
2460*4882a593Smuzhiyun static void
2461*4882a593Smuzhiyun __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
2462*4882a593Smuzhiyun {
2463*4882a593Smuzhiyun 	int q;
2464*4882a593Smuzhiyun 
2465*4882a593Smuzhiyun 	/*
2466*4882a593Smuzhiyun 	 * if aborting io, the queues are no longer good, mark them
2467*4882a593Smuzhiyun 	 * all as not live.
2468*4882a593Smuzhiyun 	 */
2469*4882a593Smuzhiyun 	if (ctrl->ctrl.queue_count > 1) {
2470*4882a593Smuzhiyun 		for (q = 1; q < ctrl->ctrl.queue_count; q++)
2471*4882a593Smuzhiyun 			clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
2472*4882a593Smuzhiyun 	}
2473*4882a593Smuzhiyun 	clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 	/*
2476*4882a593Smuzhiyun 	 * If io queues are present, stop them and terminate all outstanding
2477*4882a593Smuzhiyun 	 * ios on them. As FC allocates FC exchange for each io, the
2478*4882a593Smuzhiyun 	 * transport must contact the LLDD to terminate the exchange,
2479*4882a593Smuzhiyun 	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2480*4882a593Smuzhiyun 	 * to tell us what io's are busy and invoke a transport routine
2481*4882a593Smuzhiyun 	 * to kill them with the LLDD.  After terminating the exchange
2482*4882a593Smuzhiyun 	 * the LLDD will call the transport's normal io done path, but it
2483*4882a593Smuzhiyun 	 * will have an aborted status. The done path will return the
2484*4882a593Smuzhiyun 	 * io requests back to the block layer as part of normal completions
2485*4882a593Smuzhiyun 	 * (but with error status).
2486*4882a593Smuzhiyun 	 */
2487*4882a593Smuzhiyun 	if (ctrl->ctrl.queue_count > 1) {
2488*4882a593Smuzhiyun 		nvme_stop_queues(&ctrl->ctrl);
2489*4882a593Smuzhiyun 		nvme_sync_io_queues(&ctrl->ctrl);
2490*4882a593Smuzhiyun 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
2491*4882a593Smuzhiyun 				nvme_fc_terminate_exchange, &ctrl->ctrl);
2492*4882a593Smuzhiyun 		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2493*4882a593Smuzhiyun 		if (start_queues)
2494*4882a593Smuzhiyun 			nvme_start_queues(&ctrl->ctrl);
2495*4882a593Smuzhiyun 	}
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 	/*
2498*4882a593Smuzhiyun 	 * Other transports, which don't have link-level contexts bound
2499*4882a593Smuzhiyun 	 * to sqe's, would try to gracefully shutdown the controller by
2500*4882a593Smuzhiyun 	 * writing the registers for shutdown and polling (call
2501*4882a593Smuzhiyun 	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2502*4882a593Smuzhiyun 	 * just aborted and we will wait on those contexts, and given
2503*4882a593Smuzhiyun 	 * there was no indication of how live the controller is on the
2504*4882a593Smuzhiyun 	 * link, don't send more io to create more contexts for the
2505*4882a593Smuzhiyun 	 * shutdown. Let the controller fail via keepalive failure if
2506*4882a593Smuzhiyun 	 * its still present.
2507*4882a593Smuzhiyun 	 * it's still present.
2508*4882a593Smuzhiyun 
2509*4882a593Smuzhiyun 	/*
2510*4882a593Smuzhiyun 	 * clean up the admin queue. Same thing as above.
2511*4882a593Smuzhiyun 	 */
2512*4882a593Smuzhiyun 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2513*4882a593Smuzhiyun 	blk_sync_queue(ctrl->ctrl.admin_q);
2514*4882a593Smuzhiyun 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2515*4882a593Smuzhiyun 				nvme_fc_terminate_exchange, &ctrl->ctrl);
2516*4882a593Smuzhiyun 	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2517*4882a593Smuzhiyun }
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun static void
2520*4882a593Smuzhiyun nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2521*4882a593Smuzhiyun {
2522*4882a593Smuzhiyun 	/*
2523*4882a593Smuzhiyun 	 * Either an error (io timeout, etc) occurred while (re)connecting,
2524*4882a593Smuzhiyun 	 * the remote port requested termination of the association
2525*4882a593Smuzhiyun 	 * (disconnect_ls), or an error (timeout or abort) occurred on an io
2526*4882a593Smuzhiyun 	 * while creating the controller.  Abort any ios on the association
2527*4882a593Smuzhiyun 	 * and let the create_association error path resolve things.
2528*4882a593Smuzhiyun 	 */
2529*4882a593Smuzhiyun 	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2530*4882a593Smuzhiyun 		__nvme_fc_abort_outstanding_ios(ctrl, true);
2531*4882a593Smuzhiyun 		set_bit(ASSOC_FAILED, &ctrl->flags);
2532*4882a593Smuzhiyun 		return;
2533*4882a593Smuzhiyun 	}
2534*4882a593Smuzhiyun 
2535*4882a593Smuzhiyun 	/* Otherwise, only proceed if in LIVE state - e.g. on first error */
2536*4882a593Smuzhiyun 	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2537*4882a593Smuzhiyun 		return;
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 	dev_warn(ctrl->ctrl.device,
2540*4882a593Smuzhiyun 		"NVME-FC{%d}: transport association event: %s\n",
2541*4882a593Smuzhiyun 		ctrl->cnum, errmsg);
2542*4882a593Smuzhiyun 	dev_warn(ctrl->ctrl.device,
2543*4882a593Smuzhiyun 		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2544*4882a593Smuzhiyun 
2545*4882a593Smuzhiyun 	nvme_reset_ctrl(&ctrl->ctrl);
2546*4882a593Smuzhiyun }
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun static enum blk_eh_timer_return
2549*4882a593Smuzhiyun nvme_fc_timeout(struct request *rq, bool reserved)
2550*4882a593Smuzhiyun {
2551*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2552*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = op->ctrl;
2553*4882a593Smuzhiyun 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2554*4882a593Smuzhiyun 	struct nvme_command *sqe = &cmdiu->sqe;
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun 	/*
2557*4882a593Smuzhiyun 	 * Attempt to abort the offending command. Command completion
2558*4882a593Smuzhiyun 	 * will detect the aborted io and will fail the connection.
2559*4882a593Smuzhiyun 	 */
2560*4882a593Smuzhiyun 	dev_info(ctrl->ctrl.device,
2561*4882a593Smuzhiyun 		"NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
2562*4882a593Smuzhiyun 		"x%08x/x%08x\n",
2563*4882a593Smuzhiyun 		ctrl->cnum, op->queue->qnum, sqe->common.opcode,
2564*4882a593Smuzhiyun 		sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
2565*4882a593Smuzhiyun 	if (__nvme_fc_abort_op(ctrl, op))
2566*4882a593Smuzhiyun 		nvme_fc_error_recovery(ctrl, "io timeout abort failed");
2567*4882a593Smuzhiyun 
2568*4882a593Smuzhiyun 	/*
2569*4882a593Smuzhiyun 	 * the io abort has been initiated. Have the reset timer
2570*4882a593Smuzhiyun 	 * restarted and the abort completion will complete the io
2571*4882a593Smuzhiyun 	 * shortly. Avoids a synchronous wait while the abort finishes.
2572*4882a593Smuzhiyun 	 */
2573*4882a593Smuzhiyun 	return BLK_EH_RESET_TIMER;
2574*4882a593Smuzhiyun }
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun static int
2577*4882a593Smuzhiyun nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2578*4882a593Smuzhiyun 		struct nvme_fc_fcp_op *op)
2579*4882a593Smuzhiyun {
2580*4882a593Smuzhiyun 	struct nvmefc_fcp_req *freq = &op->fcp_req;
2581*4882a593Smuzhiyun 	int ret;
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun 	freq->sg_cnt = 0;
2584*4882a593Smuzhiyun 
2585*4882a593Smuzhiyun 	if (!blk_rq_nr_phys_segments(rq))
2586*4882a593Smuzhiyun 		return 0;
2587*4882a593Smuzhiyun 
2588*4882a593Smuzhiyun 	freq->sg_table.sgl = freq->first_sgl;
2589*4882a593Smuzhiyun 	ret = sg_alloc_table_chained(&freq->sg_table,
2590*4882a593Smuzhiyun 			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2591*4882a593Smuzhiyun 			NVME_INLINE_SG_CNT);
2592*4882a593Smuzhiyun 	if (ret)
2593*4882a593Smuzhiyun 		return -ENOMEM;
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2596*4882a593Smuzhiyun 	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2597*4882a593Smuzhiyun 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2598*4882a593Smuzhiyun 				op->nents, rq_dma_dir(rq));
2599*4882a593Smuzhiyun 	if (unlikely(freq->sg_cnt <= 0)) {
2600*4882a593Smuzhiyun 		sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2601*4882a593Smuzhiyun 		freq->sg_cnt = 0;
2602*4882a593Smuzhiyun 		return -EFAULT;
2603*4882a593Smuzhiyun 	}
2604*4882a593Smuzhiyun 
2605*4882a593Smuzhiyun 	/*
2606*4882a593Smuzhiyun 	 * TODO: blk_integrity_rq(rq)  for DIF
2607*4882a593Smuzhiyun 	 */
2608*4882a593Smuzhiyun 	return 0;
2609*4882a593Smuzhiyun }
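/*
 * Annotation (not part of the original source): sg_alloc_table_chained()
 * is seeded with freq->first_sgl, which nvme_fc_init_request() pointed at
 * the op->sgl area embedded in the request PDU, so I/Os with up to
 * NVME_INLINE_SG_CNT segments need no extra scatterlist allocation;
 * larger I/Os chain additional entries on demand.
 */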
2610*4882a593Smuzhiyun 
2611*4882a593Smuzhiyun static void
2612*4882a593Smuzhiyun nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2613*4882a593Smuzhiyun 		struct nvme_fc_fcp_op *op)
2614*4882a593Smuzhiyun {
2615*4882a593Smuzhiyun 	struct nvmefc_fcp_req *freq = &op->fcp_req;
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun 	if (!freq->sg_cnt)
2618*4882a593Smuzhiyun 		return;
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun 	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2621*4882a593Smuzhiyun 			rq_dma_dir(rq));
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 	sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 	freq->sg_cnt = 0;
2626*4882a593Smuzhiyun }
2627*4882a593Smuzhiyun 
2628*4882a593Smuzhiyun /*
2629*4882a593Smuzhiyun  * In FC, the queue is a logical thing. At transport connect, the target
2630*4882a593Smuzhiyun  * creates its "queue" and returns a handle that is to be given to the
2631*4882a593Smuzhiyun  * target whenever it posts something to the corresponding SQ.  When an
2632*4882a593Smuzhiyun  * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
2633*4882a593Smuzhiyun  * command contained within the SQE, an io, and assigns a FC exchange
2634*4882a593Smuzhiyun  * to it. The SQE and the associated SQ handle are sent in the initial
2635*4882a593Smuzhiyun  * CMD IU sent on the exchange. All transfers relative to the io occur
2636*4882a593Smuzhiyun  * as part of the exchange.  The CQE is the last thing for the io,
2637*4882a593Smuzhiyun  * which is transferred (explicitly or implicitly) with the RSP IU
2638*4882a593Smuzhiyun  * sent on the exchange. After the CQE is received, the FC exchange is
2639*4882a593Smuzhiyun  * terminated and the exchange may be used on a different io.
2640*4882a593Smuzhiyun  *
2641*4882a593Smuzhiyun  * The transport to LLDD api has the transport making a request for a
2642*4882a593Smuzhiyun  * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
2643*4882a593Smuzhiyun  * resource and transfers the command. The LLDD will then process all
2644*4882a593Smuzhiyun  * steps to complete the io. Upon completion, the transport done routine
2645*4882a593Smuzhiyun  * is called.
2646*4882a593Smuzhiyun  *
2647*4882a593Smuzhiyun  * So - while the operation is outstanding to the LLDD, there is a link
2648*4882a593Smuzhiyun  * level FC exchange resource that is also outstanding. This must be
2649*4882a593Smuzhiyun  * considered in all cleanup operations.
2650*4882a593Smuzhiyun  */
2651*4882a593Smuzhiyun static blk_status_t
2652*4882a593Smuzhiyun nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2653*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *op, u32 data_len,
2654*4882a593Smuzhiyun 	enum nvmefc_fcp_datadir	io_dir)
2655*4882a593Smuzhiyun {
2656*4882a593Smuzhiyun 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2657*4882a593Smuzhiyun 	struct nvme_command *sqe = &cmdiu->sqe;
2658*4882a593Smuzhiyun 	int ret, opstate;
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun 	/*
2661*4882a593Smuzhiyun 	 * before attempting to send the io, check to see if we believe
2662*4882a593Smuzhiyun 	 * the target device is present
2663*4882a593Smuzhiyun 	 */
2664*4882a593Smuzhiyun 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2665*4882a593Smuzhiyun 		return BLK_STS_RESOURCE;
2666*4882a593Smuzhiyun 
2667*4882a593Smuzhiyun 	if (!nvme_fc_ctrl_get(ctrl))
2668*4882a593Smuzhiyun 		return BLK_STS_IOERR;
2669*4882a593Smuzhiyun 
2670*4882a593Smuzhiyun 	/* format the FC-NVME CMD IU and fcp_req */
2671*4882a593Smuzhiyun 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2672*4882a593Smuzhiyun 	cmdiu->data_len = cpu_to_be32(data_len);
2673*4882a593Smuzhiyun 	switch (io_dir) {
2674*4882a593Smuzhiyun 	case NVMEFC_FCP_WRITE:
2675*4882a593Smuzhiyun 		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2676*4882a593Smuzhiyun 		break;
2677*4882a593Smuzhiyun 	case NVMEFC_FCP_READ:
2678*4882a593Smuzhiyun 		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2679*4882a593Smuzhiyun 		break;
2680*4882a593Smuzhiyun 	case NVMEFC_FCP_NODATA:
2681*4882a593Smuzhiyun 		cmdiu->flags = 0;
2682*4882a593Smuzhiyun 		break;
2683*4882a593Smuzhiyun 	}
2684*4882a593Smuzhiyun 	op->fcp_req.payload_length = data_len;
2685*4882a593Smuzhiyun 	op->fcp_req.io_dir = io_dir;
2686*4882a593Smuzhiyun 	op->fcp_req.transferred_length = 0;
2687*4882a593Smuzhiyun 	op->fcp_req.rcv_rsplen = 0;
2688*4882a593Smuzhiyun 	op->fcp_req.status = NVME_SC_SUCCESS;
2689*4882a593Smuzhiyun 	op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2690*4882a593Smuzhiyun 
2691*4882a593Smuzhiyun 	/*
2692*4882a593Smuzhiyun 	 * validate per fabric rules, set fields mandated by fabric spec
2693*4882a593Smuzhiyun 	 * as well as those by FC-NVME spec.
2694*4882a593Smuzhiyun 	 */
2695*4882a593Smuzhiyun 	WARN_ON_ONCE(sqe->common.metadata);
2696*4882a593Smuzhiyun 	sqe->common.flags |= NVME_CMD_SGL_METABUF;
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 	/*
2699*4882a593Smuzhiyun 	 * format SQE DPTR field per FC-NVME rules:
2700*4882a593Smuzhiyun 	 *    type=0x5     Transport SGL Data Block Descriptor
2701*4882a593Smuzhiyun 	 *    subtype=0xA  Transport-specific value
2702*4882a593Smuzhiyun 	 *    address=0
2703*4882a593Smuzhiyun 	 *    length=length of the data series
2704*4882a593Smuzhiyun 	 */
2705*4882a593Smuzhiyun 	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2706*4882a593Smuzhiyun 					NVME_SGL_FMT_TRANSPORT_A;
2707*4882a593Smuzhiyun 	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2708*4882a593Smuzhiyun 	sqe->rw.dptr.sgl.addr = 0;
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun 	if (!(op->flags & FCOP_FLAGS_AEN)) {
2711*4882a593Smuzhiyun 		ret = nvme_fc_map_data(ctrl, op->rq, op);
2712*4882a593Smuzhiyun 		if (ret < 0) {
2713*4882a593Smuzhiyun 			nvme_cleanup_cmd(op->rq);
2714*4882a593Smuzhiyun 			nvme_fc_ctrl_put(ctrl);
2715*4882a593Smuzhiyun 			if (ret == -ENOMEM || ret == -EAGAIN)
2716*4882a593Smuzhiyun 				return BLK_STS_RESOURCE;
2717*4882a593Smuzhiyun 			return BLK_STS_IOERR;
2718*4882a593Smuzhiyun 		}
2719*4882a593Smuzhiyun 	}
2720*4882a593Smuzhiyun 
2721*4882a593Smuzhiyun 	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2722*4882a593Smuzhiyun 				  sizeof(op->cmd_iu), DMA_TO_DEVICE);
2723*4882a593Smuzhiyun 
2724*4882a593Smuzhiyun 	atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 	if (!(op->flags & FCOP_FLAGS_AEN))
2727*4882a593Smuzhiyun 		blk_mq_start_request(op->rq);
2728*4882a593Smuzhiyun 
2729*4882a593Smuzhiyun 	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2730*4882a593Smuzhiyun 	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2731*4882a593Smuzhiyun 					&ctrl->rport->remoteport,
2732*4882a593Smuzhiyun 					queue->lldd_handle, &op->fcp_req);
2733*4882a593Smuzhiyun 
2734*4882a593Smuzhiyun 	if (ret) {
2735*4882a593Smuzhiyun 		/*
2736*4882a593Smuzhiyun 		 * If the lld fails to send the command, is there an issue with
2737*4882a593Smuzhiyun 		 * the csn value?  If the command that fails is the Connect,
2738*4882a593Smuzhiyun 		 * no - as the connection won't be live.  If it is a command
2739*4882a593Smuzhiyun 		 * post-connect, it's possible a gap in csn may be created.
2740*4882a593Smuzhiyun 		 * Does this matter?  As Linux initiators don't send fused
2741*4882a593Smuzhiyun 		 * commands, no.  The gap would exist, but as there's nothing
2742*4882a593Smuzhiyun 		 * that depends on csn order to be delivered on the target
2743*4882a593Smuzhiyun 		 * side, it shouldn't hurt.  It would be difficult for a
2744*4882a593Smuzhiyun 		 * target to even detect the csn gap as it has no idea when the
2745*4882a593Smuzhiyun 		 * cmd with the csn was supposed to arrive.
2746*4882a593Smuzhiyun 		 */
2747*4882a593Smuzhiyun 		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2748*4882a593Smuzhiyun 		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 		if (!(op->flags & FCOP_FLAGS_AEN)) {
2751*4882a593Smuzhiyun 			nvme_fc_unmap_data(ctrl, op->rq, op);
2752*4882a593Smuzhiyun 			nvme_cleanup_cmd(op->rq);
2753*4882a593Smuzhiyun 		}
2754*4882a593Smuzhiyun 
2755*4882a593Smuzhiyun 		nvme_fc_ctrl_put(ctrl);
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun 		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2758*4882a593Smuzhiyun 				ret != -EBUSY)
2759*4882a593Smuzhiyun 			return BLK_STS_IOERR;
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun 		return BLK_STS_RESOURCE;
2762*4882a593Smuzhiyun 	}
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun 	return BLK_STS_OK;
2765*4882a593Smuzhiyun }
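/*
 * Annotation (not part of the original source): with
 * NVME_TRANSPORT_SGL_DATA_DESC = 0x5 and NVME_SGL_FMT_TRANSPORT_A = 0xA
 * (values taken from the comment above), the descriptor-type byte ends
 * up as (0x5 << 4) | 0xA = 0x5A.  A 4 KiB read would therefore carry
 * dptr.sgl = { .addr = 0, .length = cpu_to_le32(4096), .type = 0x5A };
 * the data itself moves on the FC exchange, never through the SGL address.
 */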
2766*4882a593Smuzhiyun 
2767*4882a593Smuzhiyun static blk_status_t
2768*4882a593Smuzhiyun nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2769*4882a593Smuzhiyun 			const struct blk_mq_queue_data *bd)
2770*4882a593Smuzhiyun {
2771*4882a593Smuzhiyun 	struct nvme_ns *ns = hctx->queue->queuedata;
2772*4882a593Smuzhiyun 	struct nvme_fc_queue *queue = hctx->driver_data;
2773*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = queue->ctrl;
2774*4882a593Smuzhiyun 	struct request *rq = bd->rq;
2775*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2776*4882a593Smuzhiyun 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2777*4882a593Smuzhiyun 	struct nvme_command *sqe = &cmdiu->sqe;
2778*4882a593Smuzhiyun 	enum nvmefc_fcp_datadir	io_dir;
2779*4882a593Smuzhiyun 	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2780*4882a593Smuzhiyun 	u32 data_len;
2781*4882a593Smuzhiyun 	blk_status_t ret;
2782*4882a593Smuzhiyun 
2783*4882a593Smuzhiyun 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2784*4882a593Smuzhiyun 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2785*4882a593Smuzhiyun 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun 	ret = nvme_setup_cmd(ns, rq, sqe);
2788*4882a593Smuzhiyun 	if (ret)
2789*4882a593Smuzhiyun 		return ret;
2790*4882a593Smuzhiyun 
2791*4882a593Smuzhiyun 	/*
2792*4882a593Smuzhiyun 	 * nvme core doesn't quite treat the rq opaquely. Commands such
2793*4882a593Smuzhiyun 	 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2794*4882a593Smuzhiyun 	 * there is no actual payload to be transferred.
2795*4882a593Smuzhiyun 	 * To get it right, key data transmission on there being 1 or
2796*4882a593Smuzhiyun 	 * more physical segments in the sg list. If there is no
2797*4882a593Smuzhiyun 	 * physical segments, there is no payload.
2798*4882a593Smuzhiyun 	 */
2799*4882a593Smuzhiyun 	if (blk_rq_nr_phys_segments(rq)) {
2800*4882a593Smuzhiyun 		data_len = blk_rq_payload_bytes(rq);
2801*4882a593Smuzhiyun 		io_dir = ((rq_data_dir(rq) == WRITE) ?
2802*4882a593Smuzhiyun 					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2803*4882a593Smuzhiyun 	} else {
2804*4882a593Smuzhiyun 		data_len = 0;
2805*4882a593Smuzhiyun 		io_dir = NVMEFC_FCP_NODATA;
2806*4882a593Smuzhiyun 	}
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun 
2809*4882a593Smuzhiyun 	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2810*4882a593Smuzhiyun }
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun static void
2813*4882a593Smuzhiyun nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2814*4882a593Smuzhiyun {
2815*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2816*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *aen_op;
2817*4882a593Smuzhiyun 	blk_status_t ret;
2818*4882a593Smuzhiyun 
2819*4882a593Smuzhiyun 	if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2820*4882a593Smuzhiyun 		return;
2821*4882a593Smuzhiyun 
2822*4882a593Smuzhiyun 	aen_op = &ctrl->aen_ops[0];
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2825*4882a593Smuzhiyun 					NVMEFC_FCP_NODATA);
2826*4882a593Smuzhiyun 	if (ret)
2827*4882a593Smuzhiyun 		dev_err(ctrl->ctrl.device,
2828*4882a593Smuzhiyun 			"failed async event work\n");
2829*4882a593Smuzhiyun }
2830*4882a593Smuzhiyun 
2831*4882a593Smuzhiyun static void
2832*4882a593Smuzhiyun nvme_fc_complete_rq(struct request *rq)
2833*4882a593Smuzhiyun {
2834*4882a593Smuzhiyun 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2835*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = op->ctrl;
2836*4882a593Smuzhiyun 
2837*4882a593Smuzhiyun 	atomic_set(&op->state, FCPOP_STATE_IDLE);
2838*4882a593Smuzhiyun 	op->flags &= ~FCOP_FLAGS_TERMIO;
2839*4882a593Smuzhiyun 
2840*4882a593Smuzhiyun 	nvme_fc_unmap_data(ctrl, rq, op);
2841*4882a593Smuzhiyun 	nvme_complete_rq(rq);
2842*4882a593Smuzhiyun 	nvme_fc_ctrl_put(ctrl);
2843*4882a593Smuzhiyun }
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun static const struct blk_mq_ops nvme_fc_mq_ops = {
2847*4882a593Smuzhiyun 	.queue_rq	= nvme_fc_queue_rq,
2848*4882a593Smuzhiyun 	.complete	= nvme_fc_complete_rq,
2849*4882a593Smuzhiyun 	.init_request	= nvme_fc_init_request,
2850*4882a593Smuzhiyun 	.exit_request	= nvme_fc_exit_request,
2851*4882a593Smuzhiyun 	.init_hctx	= nvme_fc_init_hctx,
2852*4882a593Smuzhiyun 	.timeout	= nvme_fc_timeout,
2853*4882a593Smuzhiyun };
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun static int
2856*4882a593Smuzhiyun nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2857*4882a593Smuzhiyun {
2858*4882a593Smuzhiyun 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2859*4882a593Smuzhiyun 	unsigned int nr_io_queues;
2860*4882a593Smuzhiyun 	int ret;
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2863*4882a593Smuzhiyun 				ctrl->lport->ops->max_hw_queues);
2864*4882a593Smuzhiyun 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2865*4882a593Smuzhiyun 	if (ret) {
2866*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
2867*4882a593Smuzhiyun 			"set_queue_count failed: %d\n", ret);
2868*4882a593Smuzhiyun 		return ret;
2869*4882a593Smuzhiyun 	}
2870*4882a593Smuzhiyun 
2871*4882a593Smuzhiyun 	ctrl->ctrl.queue_count = nr_io_queues + 1;
2872*4882a593Smuzhiyun 	if (!nr_io_queues)
2873*4882a593Smuzhiyun 		return 0;
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun 	nvme_fc_init_io_queues(ctrl);
2876*4882a593Smuzhiyun 
2877*4882a593Smuzhiyun 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2878*4882a593Smuzhiyun 	ctrl->tag_set.ops = &nvme_fc_mq_ops;
2879*4882a593Smuzhiyun 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2880*4882a593Smuzhiyun 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2881*4882a593Smuzhiyun 	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2882*4882a593Smuzhiyun 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2883*4882a593Smuzhiyun 	ctrl->tag_set.cmd_size =
2884*4882a593Smuzhiyun 		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2885*4882a593Smuzhiyun 			    ctrl->lport->ops->fcprqst_priv_sz);
2886*4882a593Smuzhiyun 	ctrl->tag_set.driver_data = ctrl;
2887*4882a593Smuzhiyun 	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2888*4882a593Smuzhiyun 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2889*4882a593Smuzhiyun 
2890*4882a593Smuzhiyun 	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2891*4882a593Smuzhiyun 	if (ret)
2892*4882a593Smuzhiyun 		return ret;
2893*4882a593Smuzhiyun 
2894*4882a593Smuzhiyun 	ctrl->ctrl.tagset = &ctrl->tag_set;
2895*4882a593Smuzhiyun 
2896*4882a593Smuzhiyun 	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2897*4882a593Smuzhiyun 	if (IS_ERR(ctrl->ctrl.connect_q)) {
2898*4882a593Smuzhiyun 		ret = PTR_ERR(ctrl->ctrl.connect_q);
2899*4882a593Smuzhiyun 		goto out_free_tag_set;
2900*4882a593Smuzhiyun 	}
2901*4882a593Smuzhiyun 
2902*4882a593Smuzhiyun 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2903*4882a593Smuzhiyun 	if (ret)
2904*4882a593Smuzhiyun 		goto out_cleanup_blk_queue;
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2907*4882a593Smuzhiyun 	if (ret)
2908*4882a593Smuzhiyun 		goto out_delete_hw_queues;
2909*4882a593Smuzhiyun 
2910*4882a593Smuzhiyun 	ctrl->ioq_live = true;
2911*4882a593Smuzhiyun 
2912*4882a593Smuzhiyun 	return 0;
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun out_delete_hw_queues:
2915*4882a593Smuzhiyun 	nvme_fc_delete_hw_io_queues(ctrl);
2916*4882a593Smuzhiyun out_cleanup_blk_queue:
2917*4882a593Smuzhiyun 	blk_cleanup_queue(ctrl->ctrl.connect_q);
2918*4882a593Smuzhiyun out_free_tag_set:
2919*4882a593Smuzhiyun 	blk_mq_free_tag_set(&ctrl->tag_set);
2920*4882a593Smuzhiyun 	nvme_fc_free_io_queues(ctrl);
2921*4882a593Smuzhiyun 
2922*4882a593Smuzhiyun 	/* force put free routine to ignore io queues */
2923*4882a593Smuzhiyun 	ctrl->ctrl.tagset = NULL;
2924*4882a593Smuzhiyun 
2925*4882a593Smuzhiyun 	return ret;
2926*4882a593Smuzhiyun }
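/*
 * Annotation (not part of the original source): the struct_size() based
 * cmd_size above asks blk-mq to reserve, per request,
 * sizeof(struct nvme_fcp_op_w_sgl) plus room for fcprqst_priv_sz entries
 * of its priv[] flexible array (the LLDD's private area) - the same
 * layout that nvme_fc_init_request() later wires up via op->sgl and
 * op->priv[0].
 */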
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun static int
2929*4882a593Smuzhiyun nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2930*4882a593Smuzhiyun {
2931*4882a593Smuzhiyun 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2932*4882a593Smuzhiyun 	u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2933*4882a593Smuzhiyun 	unsigned int nr_io_queues;
2934*4882a593Smuzhiyun 	int ret;
2935*4882a593Smuzhiyun 
2936*4882a593Smuzhiyun 	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2937*4882a593Smuzhiyun 				ctrl->lport->ops->max_hw_queues);
2938*4882a593Smuzhiyun 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2939*4882a593Smuzhiyun 	if (ret) {
2940*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
2941*4882a593Smuzhiyun 			"set_queue_count failed: %d\n", ret);
2942*4882a593Smuzhiyun 		return ret;
2943*4882a593Smuzhiyun 	}
2944*4882a593Smuzhiyun 
2945*4882a593Smuzhiyun 	if (!nr_io_queues && prior_ioq_cnt) {
2946*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
2947*4882a593Smuzhiyun 			"Fail Reconnect: At least 1 io queue "
2948*4882a593Smuzhiyun 			"required (was %d)\n", prior_ioq_cnt);
2949*4882a593Smuzhiyun 		return -ENOSPC;
2950*4882a593Smuzhiyun 	}
2951*4882a593Smuzhiyun 
2952*4882a593Smuzhiyun 	ctrl->ctrl.queue_count = nr_io_queues + 1;
2953*4882a593Smuzhiyun 	/* check for io queues existing */
2954*4882a593Smuzhiyun 	if (ctrl->ctrl.queue_count == 1)
2955*4882a593Smuzhiyun 		return 0;
2956*4882a593Smuzhiyun 
2957*4882a593Smuzhiyun 	if (prior_ioq_cnt != nr_io_queues) {
2958*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
2959*4882a593Smuzhiyun 			"reconnect: revising io queue count from %d to %d\n",
2960*4882a593Smuzhiyun 			prior_ioq_cnt, nr_io_queues);
2961*4882a593Smuzhiyun 		nvme_wait_freeze(&ctrl->ctrl);
2962*4882a593Smuzhiyun 		blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2963*4882a593Smuzhiyun 		nvme_unfreeze(&ctrl->ctrl);
2964*4882a593Smuzhiyun 	}
2965*4882a593Smuzhiyun 
2966*4882a593Smuzhiyun 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2967*4882a593Smuzhiyun 	if (ret)
2968*4882a593Smuzhiyun 		goto out_free_io_queues;
2969*4882a593Smuzhiyun 
2970*4882a593Smuzhiyun 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2971*4882a593Smuzhiyun 	if (ret)
2972*4882a593Smuzhiyun 		goto out_delete_hw_queues;
2973*4882a593Smuzhiyun 
2974*4882a593Smuzhiyun 	return 0;
2975*4882a593Smuzhiyun 
2976*4882a593Smuzhiyun out_delete_hw_queues:
2977*4882a593Smuzhiyun 	nvme_fc_delete_hw_io_queues(ctrl);
2978*4882a593Smuzhiyun out_free_io_queues:
2979*4882a593Smuzhiyun 	nvme_fc_free_io_queues(ctrl);
2980*4882a593Smuzhiyun 	return ret;
2981*4882a593Smuzhiyun }
2982*4882a593Smuzhiyun 
2983*4882a593Smuzhiyun static void
2984*4882a593Smuzhiyun nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2985*4882a593Smuzhiyun {
2986*4882a593Smuzhiyun 	struct nvme_fc_lport *lport = rport->lport;
2987*4882a593Smuzhiyun 
2988*4882a593Smuzhiyun 	atomic_inc(&lport->act_rport_cnt);
2989*4882a593Smuzhiyun }
2990*4882a593Smuzhiyun 
2991*4882a593Smuzhiyun static void
2992*4882a593Smuzhiyun nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2993*4882a593Smuzhiyun {
2994*4882a593Smuzhiyun 	struct nvme_fc_lport *lport = rport->lport;
2995*4882a593Smuzhiyun 	u32 cnt;
2996*4882a593Smuzhiyun 
2997*4882a593Smuzhiyun 	cnt = atomic_dec_return(&lport->act_rport_cnt);
2998*4882a593Smuzhiyun 	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2999*4882a593Smuzhiyun 		lport->ops->localport_delete(&lport->localport);
3000*4882a593Smuzhiyun }
3001*4882a593Smuzhiyun 
3002*4882a593Smuzhiyun static int
3003*4882a593Smuzhiyun nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
3004*4882a593Smuzhiyun {
3005*4882a593Smuzhiyun 	struct nvme_fc_rport *rport = ctrl->rport;
3006*4882a593Smuzhiyun 	u32 cnt;
3007*4882a593Smuzhiyun 
3008*4882a593Smuzhiyun 	if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
3009*4882a593Smuzhiyun 		return 1;
3010*4882a593Smuzhiyun 
3011*4882a593Smuzhiyun 	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
3012*4882a593Smuzhiyun 	if (cnt == 1)
3013*4882a593Smuzhiyun 		nvme_fc_rport_active_on_lport(rport);
3014*4882a593Smuzhiyun 
3015*4882a593Smuzhiyun 	return 0;
3016*4882a593Smuzhiyun }
3017*4882a593Smuzhiyun 
3018*4882a593Smuzhiyun static int
3019*4882a593Smuzhiyun nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
3020*4882a593Smuzhiyun {
3021*4882a593Smuzhiyun 	struct nvme_fc_rport *rport = ctrl->rport;
3022*4882a593Smuzhiyun 	struct nvme_fc_lport *lport = rport->lport;
3023*4882a593Smuzhiyun 	u32 cnt;
3024*4882a593Smuzhiyun 
3025*4882a593Smuzhiyun 	/* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
3026*4882a593Smuzhiyun 
3027*4882a593Smuzhiyun 	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
3028*4882a593Smuzhiyun 	if (cnt == 0) {
3029*4882a593Smuzhiyun 		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
3030*4882a593Smuzhiyun 			lport->ops->remoteport_delete(&rport->remoteport);
3031*4882a593Smuzhiyun 		nvme_fc_rport_inactive_on_lport(rport);
3032*4882a593Smuzhiyun 	}
3033*4882a593Smuzhiyun 
3034*4882a593Smuzhiyun 	return 0;
3035*4882a593Smuzhiyun }
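/*
 * Annotation (not part of the original source): the active counts cascade
 * upward - the first active controller on an rport bumps the lport's
 * act_rport_cnt, and when the last controller on an rport goes inactive
 * the remoteport_delete()/localport_delete() LLDD callbacks are invoked
 * only if the corresponding port had already been marked
 * FC_OBJSTATE_DELETED.
 */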
3036*4882a593Smuzhiyun 
3037*4882a593Smuzhiyun /*
3038*4882a593Smuzhiyun  * This routine restarts the controller on the host side, and
3039*4882a593Smuzhiyun  * on the link side, recreates the controller association.
3040*4882a593Smuzhiyun  */
3041*4882a593Smuzhiyun static int
3042*4882a593Smuzhiyun nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
3043*4882a593Smuzhiyun {
3044*4882a593Smuzhiyun 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
3045*4882a593Smuzhiyun 	struct nvmefc_ls_rcv_op *disls = NULL;
3046*4882a593Smuzhiyun 	unsigned long flags;
3047*4882a593Smuzhiyun 	int ret;
3048*4882a593Smuzhiyun 	bool changed;
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun 	++ctrl->ctrl.nr_reconnects;
3051*4882a593Smuzhiyun 
3052*4882a593Smuzhiyun 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3053*4882a593Smuzhiyun 		return -ENODEV;
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 	if (nvme_fc_ctlr_active_on_rport(ctrl))
3056*4882a593Smuzhiyun 		return -ENOTUNIQ;
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun 	dev_info(ctrl->ctrl.device,
3059*4882a593Smuzhiyun 		"NVME-FC{%d}: create association : host wwpn 0x%016llx "
3060*4882a593Smuzhiyun 		" rport wwpn 0x%016llx: NQN \"%s\"\n",
3061*4882a593Smuzhiyun 		ctrl->cnum, ctrl->lport->localport.port_name,
3062*4882a593Smuzhiyun 		ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
3063*4882a593Smuzhiyun 
3064*4882a593Smuzhiyun 	clear_bit(ASSOC_FAILED, &ctrl->flags);
3065*4882a593Smuzhiyun 
3066*4882a593Smuzhiyun 	/*
3067*4882a593Smuzhiyun 	 * Create the admin queue
3068*4882a593Smuzhiyun 	 */
3069*4882a593Smuzhiyun 
3070*4882a593Smuzhiyun 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
3071*4882a593Smuzhiyun 				NVME_AQ_DEPTH);
3072*4882a593Smuzhiyun 	if (ret)
3073*4882a593Smuzhiyun 		goto out_free_queue;
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
3076*4882a593Smuzhiyun 				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
3077*4882a593Smuzhiyun 	if (ret)
3078*4882a593Smuzhiyun 		goto out_delete_hw_queue;
3079*4882a593Smuzhiyun 
3080*4882a593Smuzhiyun 	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3081*4882a593Smuzhiyun 	if (ret)
3082*4882a593Smuzhiyun 		goto out_disconnect_admin_queue;
3083*4882a593Smuzhiyun 
3084*4882a593Smuzhiyun 	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun 	/*
3087*4882a593Smuzhiyun 	 * Check controller capabilities
3088*4882a593Smuzhiyun 	 *
3089*4882a593Smuzhiyun 	 * todo:- add code to check if ctrl attributes changed from
3090*4882a593Smuzhiyun 	 * prior connection values
3091*4882a593Smuzhiyun 	 */
3092*4882a593Smuzhiyun 
3093*4882a593Smuzhiyun 	ret = nvme_enable_ctrl(&ctrl->ctrl);
3094*4882a593Smuzhiyun 	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3095*4882a593Smuzhiyun 		goto out_disconnect_admin_queue;
3096*4882a593Smuzhiyun 
3097*4882a593Smuzhiyun 	ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3098*4882a593Smuzhiyun 	ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
3099*4882a593Smuzhiyun 						(ilog2(SZ_4K) - 9);
3100*4882a593Smuzhiyun 
3101*4882a593Smuzhiyun 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun 	ret = nvme_init_identify(&ctrl->ctrl);
3104*4882a593Smuzhiyun 	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3105*4882a593Smuzhiyun 		goto out_disconnect_admin_queue;
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun 	/* sanity checks */
3108*4882a593Smuzhiyun 
3109*4882a593Smuzhiyun 	/* FC-NVME does not have other data in the capsule */
3110*4882a593Smuzhiyun 	if (ctrl->ctrl.icdoff) {
3111*4882a593Smuzhiyun 		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
3112*4882a593Smuzhiyun 				ctrl->ctrl.icdoff);
3113*4882a593Smuzhiyun 		goto out_disconnect_admin_queue;
3114*4882a593Smuzhiyun 	}
3115*4882a593Smuzhiyun 
3116*4882a593Smuzhiyun 	/* FC-NVME supports normal SGL Data Block Descriptors */
3117*4882a593Smuzhiyun 
3118*4882a593Smuzhiyun 	if (opts->queue_size > ctrl->ctrl.maxcmd) {
3119*4882a593Smuzhiyun 		/* warn if maxcmd is lower than queue_size */
3120*4882a593Smuzhiyun 		dev_warn(ctrl->ctrl.device,
3121*4882a593Smuzhiyun 			"queue_size %zu > ctrl maxcmd %u, reducing "
3122*4882a593Smuzhiyun 			"to maxcmd\n",
3123*4882a593Smuzhiyun 			opts->queue_size, ctrl->ctrl.maxcmd);
3124*4882a593Smuzhiyun 		opts->queue_size = ctrl->ctrl.maxcmd;
3125*4882a593Smuzhiyun 	}
3126*4882a593Smuzhiyun 
3127*4882a593Smuzhiyun 	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
3128*4882a593Smuzhiyun 		/* warn if sqsize is lower than queue_size */
3129*4882a593Smuzhiyun 		dev_warn(ctrl->ctrl.device,
3130*4882a593Smuzhiyun 			"queue_size %zu > ctrl sqsize %u, reducing "
3131*4882a593Smuzhiyun 			"to sqsize\n",
3132*4882a593Smuzhiyun 			opts->queue_size, ctrl->ctrl.sqsize + 1);
3133*4882a593Smuzhiyun 		opts->queue_size = ctrl->ctrl.sqsize + 1;
3134*4882a593Smuzhiyun 	}
3135*4882a593Smuzhiyun 
3136*4882a593Smuzhiyun 	ret = nvme_fc_init_aen_ops(ctrl);
3137*4882a593Smuzhiyun 	if (ret)
3138*4882a593Smuzhiyun 		goto out_term_aen_ops;
3139*4882a593Smuzhiyun 
3140*4882a593Smuzhiyun 	/*
3141*4882a593Smuzhiyun 	 * Create the io queues
3142*4882a593Smuzhiyun 	 */
3143*4882a593Smuzhiyun 
3144*4882a593Smuzhiyun 	if (ctrl->ctrl.queue_count > 1) {
3145*4882a593Smuzhiyun 		if (!ctrl->ioq_live)
3146*4882a593Smuzhiyun 			ret = nvme_fc_create_io_queues(ctrl);
3147*4882a593Smuzhiyun 		else
3148*4882a593Smuzhiyun 			ret = nvme_fc_recreate_io_queues(ctrl);
3149*4882a593Smuzhiyun 	}
3150*4882a593Smuzhiyun 	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3151*4882a593Smuzhiyun 		goto out_term_aen_ops;
3152*4882a593Smuzhiyun 
3153*4882a593Smuzhiyun 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3154*4882a593Smuzhiyun 
3155*4882a593Smuzhiyun 	ctrl->ctrl.nr_reconnects = 0;
3156*4882a593Smuzhiyun 
3157*4882a593Smuzhiyun 	if (changed)
3158*4882a593Smuzhiyun 		nvme_start_ctrl(&ctrl->ctrl);
3159*4882a593Smuzhiyun 
3160*4882a593Smuzhiyun 	return 0;	/* Success */
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun out_term_aen_ops:
3163*4882a593Smuzhiyun 	nvme_fc_term_aen_ops(ctrl);
3164*4882a593Smuzhiyun out_disconnect_admin_queue:
3165*4882a593Smuzhiyun 	/* send a Disconnect(association) LS to fc-nvme target */
3166*4882a593Smuzhiyun 	nvme_fc_xmt_disconnect_assoc(ctrl);
3167*4882a593Smuzhiyun 	spin_lock_irqsave(&ctrl->lock, flags);
3168*4882a593Smuzhiyun 	ctrl->association_id = 0;
3169*4882a593Smuzhiyun 	disls = ctrl->rcv_disconn;
3170*4882a593Smuzhiyun 	ctrl->rcv_disconn = NULL;
3171*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctrl->lock, flags);
3172*4882a593Smuzhiyun 	if (disls)
3173*4882a593Smuzhiyun 		nvme_fc_xmt_ls_rsp(disls);
3174*4882a593Smuzhiyun out_delete_hw_queue:
3175*4882a593Smuzhiyun 	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3176*4882a593Smuzhiyun out_free_queue:
3177*4882a593Smuzhiyun 	nvme_fc_free_queue(&ctrl->queues[0]);
3178*4882a593Smuzhiyun 	clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3179*4882a593Smuzhiyun 	nvme_fc_ctlr_inactive_on_rport(ctrl);
3180*4882a593Smuzhiyun 
3181*4882a593Smuzhiyun 	return ret;
3182*4882a593Smuzhiyun }
3183*4882a593Smuzhiyun 
3184*4882a593Smuzhiyun 
3185*4882a593Smuzhiyun /*
3186*4882a593Smuzhiyun  * This routine stops operation of the controller on the host side.
3187*4882a593Smuzhiyun  * On the host os stack side: Admin and IO queues are stopped,
3188*4882a593Smuzhiyun  *   outstanding ios on them terminated via FC ABTS.
3189*4882a593Smuzhiyun  * On the link side: the association is terminated.
3190*4882a593Smuzhiyun  */
3191*4882a593Smuzhiyun static void
3192*4882a593Smuzhiyun nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3193*4882a593Smuzhiyun {
3194*4882a593Smuzhiyun 	struct nvmefc_ls_rcv_op *disls = NULL;
3195*4882a593Smuzhiyun 	unsigned long flags;
3196*4882a593Smuzhiyun 
3197*4882a593Smuzhiyun 	if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3198*4882a593Smuzhiyun 		return;
3199*4882a593Smuzhiyun 
3200*4882a593Smuzhiyun 	spin_lock_irqsave(&ctrl->lock, flags);
3201*4882a593Smuzhiyun 	set_bit(FCCTRL_TERMIO, &ctrl->flags);
3202*4882a593Smuzhiyun 	ctrl->iocnt = 0;
3203*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctrl->lock, flags);
3204*4882a593Smuzhiyun 
3205*4882a593Smuzhiyun 	__nvme_fc_abort_outstanding_ios(ctrl, false);
3206*4882a593Smuzhiyun 
3207*4882a593Smuzhiyun 	/* kill the aens as they are a separate path */
3208*4882a593Smuzhiyun 	nvme_fc_abort_aen_ops(ctrl);
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 	/* wait for all io that had to be aborted */
3211*4882a593Smuzhiyun 	spin_lock_irq(&ctrl->lock);
3212*4882a593Smuzhiyun 	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3213*4882a593Smuzhiyun 	clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3214*4882a593Smuzhiyun 	spin_unlock_irq(&ctrl->lock);
3215*4882a593Smuzhiyun 
3216*4882a593Smuzhiyun 	nvme_fc_term_aen_ops(ctrl);
3217*4882a593Smuzhiyun 
3218*4882a593Smuzhiyun 	/*
3219*4882a593Smuzhiyun 	 * send a Disconnect(association) LS to fc-nvme target
3220*4882a593Smuzhiyun 	 * Note: could have been sent at top of process, but
3221*4882a593Smuzhiyun 	 * cleaner on link traffic if after the aborts complete.
3222*4882a593Smuzhiyun 	 * Note: if association doesn't exist, association_id will be 0
3223*4882a593Smuzhiyun 	 */
3224*4882a593Smuzhiyun 	if (ctrl->association_id)
3225*4882a593Smuzhiyun 		nvme_fc_xmt_disconnect_assoc(ctrl);
3226*4882a593Smuzhiyun 
3227*4882a593Smuzhiyun 	spin_lock_irqsave(&ctrl->lock, flags);
3228*4882a593Smuzhiyun 	ctrl->association_id = 0;
3229*4882a593Smuzhiyun 	disls = ctrl->rcv_disconn;
3230*4882a593Smuzhiyun 	ctrl->rcv_disconn = NULL;
3231*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctrl->lock, flags);
3232*4882a593Smuzhiyun 	if (disls)
3233*4882a593Smuzhiyun 		/*
3234*4882a593Smuzhiyun 		 * if a Disconnect Request was waiting for a response, send
3235*4882a593Smuzhiyun 		 * now that all ABTS's have been issued (and are complete).
3236*4882a593Smuzhiyun 		 */
3237*4882a593Smuzhiyun 		nvme_fc_xmt_ls_rsp(disls);
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun 	if (ctrl->ctrl.tagset) {
3240*4882a593Smuzhiyun 		nvme_fc_delete_hw_io_queues(ctrl);
3241*4882a593Smuzhiyun 		nvme_fc_free_io_queues(ctrl);
3242*4882a593Smuzhiyun 	}
3243*4882a593Smuzhiyun 
3244*4882a593Smuzhiyun 	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3245*4882a593Smuzhiyun 	nvme_fc_free_queue(&ctrl->queues[0]);
3246*4882a593Smuzhiyun 
3247*4882a593Smuzhiyun 	/* re-enable the admin_q so anything new can fast fail */
3248*4882a593Smuzhiyun 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3249*4882a593Smuzhiyun 
3250*4882a593Smuzhiyun 	/* resume the io queues so that things will fast fail */
3251*4882a593Smuzhiyun 	nvme_start_queues(&ctrl->ctrl);
3252*4882a593Smuzhiyun 
3253*4882a593Smuzhiyun 	nvme_fc_ctlr_inactive_on_rport(ctrl);
3254*4882a593Smuzhiyun }
3255*4882a593Smuzhiyun 
3256*4882a593Smuzhiyun static void
3257*4882a593Smuzhiyun nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
3258*4882a593Smuzhiyun {
3259*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun 	cancel_work_sync(&ctrl->ioerr_work);
3262*4882a593Smuzhiyun 	cancel_delayed_work_sync(&ctrl->connect_work);
3263*4882a593Smuzhiyun 	/*
3264*4882a593Smuzhiyun 	 * kill the association on the link side.  this will block
3265*4882a593Smuzhiyun 	 * waiting for io to terminate
3266*4882a593Smuzhiyun 	 */
3267*4882a593Smuzhiyun 	nvme_fc_delete_association(ctrl);
3268*4882a593Smuzhiyun }
3269*4882a593Smuzhiyun 
3270*4882a593Smuzhiyun static void
3271*4882a593Smuzhiyun nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3272*4882a593Smuzhiyun {
3273*4882a593Smuzhiyun 	struct nvme_fc_rport *rport = ctrl->rport;
3274*4882a593Smuzhiyun 	struct nvme_fc_remote_port *portptr = &rport->remoteport;
3275*4882a593Smuzhiyun 	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3276*4882a593Smuzhiyun 	bool recon = true;
3277*4882a593Smuzhiyun 
3278*4882a593Smuzhiyun 	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
3279*4882a593Smuzhiyun 		return;
3280*4882a593Smuzhiyun 
3281*4882a593Smuzhiyun 	if (portptr->port_state == FC_OBJSTATE_ONLINE)
3282*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
3283*4882a593Smuzhiyun 			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
3284*4882a593Smuzhiyun 			ctrl->cnum, status);
3285*4882a593Smuzhiyun 	else if (time_after_eq(jiffies, rport->dev_loss_end))
3286*4882a593Smuzhiyun 		recon = false;
3287*4882a593Smuzhiyun 
3288*4882a593Smuzhiyun 	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3289*4882a593Smuzhiyun 		if (portptr->port_state == FC_OBJSTATE_ONLINE)
3290*4882a593Smuzhiyun 			dev_info(ctrl->ctrl.device,
3291*4882a593Smuzhiyun 				"NVME-FC{%d}: Reconnect attempt in %ld "
3292*4882a593Smuzhiyun 				"seconds\n",
3293*4882a593Smuzhiyun 				ctrl->cnum, recon_delay / HZ);
3294*4882a593Smuzhiyun 		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
3295*4882a593Smuzhiyun 			recon_delay = rport->dev_loss_end - jiffies;
3296*4882a593Smuzhiyun 
3297*4882a593Smuzhiyun 		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3298*4882a593Smuzhiyun 	} else {
3299*4882a593Smuzhiyun 		if (portptr->port_state == FC_OBJSTATE_ONLINE)
3300*4882a593Smuzhiyun 			dev_warn(ctrl->ctrl.device,
3301*4882a593Smuzhiyun 				"NVME-FC{%d}: Max reconnect attempts (%d) "
3302*4882a593Smuzhiyun 				"reached.\n",
3303*4882a593Smuzhiyun 				ctrl->cnum, ctrl->ctrl.nr_reconnects);
3304*4882a593Smuzhiyun 		else
3305*4882a593Smuzhiyun 			dev_warn(ctrl->ctrl.device,
3306*4882a593Smuzhiyun 				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
3307*4882a593Smuzhiyun 				"while waiting for remoteport connectivity.\n",
3308*4882a593Smuzhiyun 				ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3309*4882a593Smuzhiyun 					(ctrl->ctrl.opts->max_reconnects *
3310*4882a593Smuzhiyun 					 ctrl->ctrl.opts->reconnect_delay)));
3311*4882a593Smuzhiyun 		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3312*4882a593Smuzhiyun 	}
3313*4882a593Smuzhiyun }
3314*4882a593Smuzhiyun 
3315*4882a593Smuzhiyun static void
3316*4882a593Smuzhiyun nvme_fc_reset_ctrl_work(struct work_struct *work)
3317*4882a593Smuzhiyun {
3318*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl =
3319*4882a593Smuzhiyun 		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3320*4882a593Smuzhiyun 
3321*4882a593Smuzhiyun 	nvme_stop_ctrl(&ctrl->ctrl);
3322*4882a593Smuzhiyun 
3323*4882a593Smuzhiyun 	/* will block while waiting for io to terminate */
3324*4882a593Smuzhiyun 	nvme_fc_delete_association(ctrl);
3325*4882a593Smuzhiyun 
3326*4882a593Smuzhiyun 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3327*4882a593Smuzhiyun 		dev_err(ctrl->ctrl.device,
3328*4882a593Smuzhiyun 			"NVME-FC{%d}: error_recovery: Couldn't change state "
3329*4882a593Smuzhiyun 			"to CONNECTING\n", ctrl->cnum);
3330*4882a593Smuzhiyun 
3331*4882a593Smuzhiyun 	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
3332*4882a593Smuzhiyun 		if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3333*4882a593Smuzhiyun 			dev_err(ctrl->ctrl.device,
3334*4882a593Smuzhiyun 				"NVME-FC{%d}: failed to schedule connect "
3335*4882a593Smuzhiyun 				"after reset\n", ctrl->cnum);
3336*4882a593Smuzhiyun 		} else {
3337*4882a593Smuzhiyun 			flush_delayed_work(&ctrl->connect_work);
3338*4882a593Smuzhiyun 		}
3339*4882a593Smuzhiyun 	} else {
3340*4882a593Smuzhiyun 		nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
3341*4882a593Smuzhiyun 	}
3342*4882a593Smuzhiyun }
3343*4882a593Smuzhiyun 
3344*4882a593Smuzhiyun 
3345*4882a593Smuzhiyun static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3346*4882a593Smuzhiyun 	.name			= "fc",
3347*4882a593Smuzhiyun 	.module			= THIS_MODULE,
3348*4882a593Smuzhiyun 	.flags			= NVME_F_FABRICS,
3349*4882a593Smuzhiyun 	.reg_read32		= nvmf_reg_read32,
3350*4882a593Smuzhiyun 	.reg_read64		= nvmf_reg_read64,
3351*4882a593Smuzhiyun 	.reg_write32		= nvmf_reg_write32,
3352*4882a593Smuzhiyun 	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
3353*4882a593Smuzhiyun 	.submit_async_event	= nvme_fc_submit_async_event,
3354*4882a593Smuzhiyun 	.delete_ctrl		= nvme_fc_delete_ctrl,
3355*4882a593Smuzhiyun 	.get_address		= nvmf_get_address,
3356*4882a593Smuzhiyun };
3357*4882a593Smuzhiyun 
3358*4882a593Smuzhiyun static void
3359*4882a593Smuzhiyun nvme_fc_connect_ctrl_work(struct work_struct *work)
3360*4882a593Smuzhiyun {
3361*4882a593Smuzhiyun 	int ret;
3362*4882a593Smuzhiyun 
3363*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl =
3364*4882a593Smuzhiyun 			container_of(to_delayed_work(work),
3365*4882a593Smuzhiyun 				struct nvme_fc_ctrl, connect_work);
3366*4882a593Smuzhiyun 
3367*4882a593Smuzhiyun 	ret = nvme_fc_create_association(ctrl);
3368*4882a593Smuzhiyun 	if (ret)
3369*4882a593Smuzhiyun 		nvme_fc_reconnect_or_delete(ctrl, ret);
3370*4882a593Smuzhiyun 	else
3371*4882a593Smuzhiyun 		dev_info(ctrl->ctrl.device,
3372*4882a593Smuzhiyun 			"NVME-FC{%d}: controller connect complete\n",
3373*4882a593Smuzhiyun 			ctrl->cnum);
3374*4882a593Smuzhiyun }
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun 
3377*4882a593Smuzhiyun static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3378*4882a593Smuzhiyun 	.queue_rq	= nvme_fc_queue_rq,
3379*4882a593Smuzhiyun 	.complete	= nvme_fc_complete_rq,
3380*4882a593Smuzhiyun 	.init_request	= nvme_fc_init_request,
3381*4882a593Smuzhiyun 	.exit_request	= nvme_fc_exit_request,
3382*4882a593Smuzhiyun 	.init_hctx	= nvme_fc_init_admin_hctx,
3383*4882a593Smuzhiyun 	.timeout	= nvme_fc_timeout,
3384*4882a593Smuzhiyun };
3385*4882a593Smuzhiyun 
3386*4882a593Smuzhiyun 
3387*4882a593Smuzhiyun /*
3388*4882a593Smuzhiyun  * Fails a controller request if it matches an existing controller
3389*4882a593Smuzhiyun  * (association) with the same tuple:
3390*4882a593Smuzhiyun  * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
3391*4882a593Smuzhiyun  *
3392*4882a593Smuzhiyun  * The ports don't need to be compared as they are intrinsically
3393*4882a593Smuzhiyun  * already matched by the port pointers supplied.
3394*4882a593Smuzhiyun  */
3395*4882a593Smuzhiyun static bool
3396*4882a593Smuzhiyun nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3397*4882a593Smuzhiyun 		struct nvmf_ctrl_options *opts)
3398*4882a593Smuzhiyun {
3399*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl;
3400*4882a593Smuzhiyun 	unsigned long flags;
3401*4882a593Smuzhiyun 	bool found = false;
3402*4882a593Smuzhiyun 
3403*4882a593Smuzhiyun 	spin_lock_irqsave(&rport->lock, flags);
3404*4882a593Smuzhiyun 	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3405*4882a593Smuzhiyun 		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3406*4882a593Smuzhiyun 		if (found)
3407*4882a593Smuzhiyun 			break;
3408*4882a593Smuzhiyun 	}
3409*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rport->lock, flags);
3410*4882a593Smuzhiyun 
3411*4882a593Smuzhiyun 	return found;
3412*4882a593Smuzhiyun }
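/*
 * Editor's note, a hedged summary rather than original source text: the
 * base-options comparison above is expected to match on the
 * <Host NQN, Host ID, SUBSYS NQN> portion of the tuple and to skip
 * controllers that are already being torn down; the local/remote FC ports
 * are implied by walking only this rport's ctrl_list.
 */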
3413*4882a593Smuzhiyun 
3414*4882a593Smuzhiyun static struct nvme_ctrl *
3415*4882a593Smuzhiyun nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3416*4882a593Smuzhiyun 	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3417*4882a593Smuzhiyun {
3418*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl;
3419*4882a593Smuzhiyun 	unsigned long flags;
3420*4882a593Smuzhiyun 	int ret, idx, ctrl_loss_tmo;
3421*4882a593Smuzhiyun 
3422*4882a593Smuzhiyun 	if (!(rport->remoteport.port_role &
3423*4882a593Smuzhiyun 	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3424*4882a593Smuzhiyun 		ret = -EBADR;
3425*4882a593Smuzhiyun 		goto out_fail;
3426*4882a593Smuzhiyun 	}
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun 	if (!opts->duplicate_connect &&
3429*4882a593Smuzhiyun 	    nvme_fc_existing_controller(rport, opts)) {
3430*4882a593Smuzhiyun 		ret = -EALREADY;
3431*4882a593Smuzhiyun 		goto out_fail;
3432*4882a593Smuzhiyun 	}
3433*4882a593Smuzhiyun 
3434*4882a593Smuzhiyun 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3435*4882a593Smuzhiyun 	if (!ctrl) {
3436*4882a593Smuzhiyun 		ret = -ENOMEM;
3437*4882a593Smuzhiyun 		goto out_fail;
3438*4882a593Smuzhiyun 	}
3439*4882a593Smuzhiyun 
3440*4882a593Smuzhiyun 	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3441*4882a593Smuzhiyun 	if (idx < 0) {
3442*4882a593Smuzhiyun 		ret = -ENOSPC;
3443*4882a593Smuzhiyun 		goto out_free_ctrl;
3444*4882a593Smuzhiyun 	}
3445*4882a593Smuzhiyun 
3446*4882a593Smuzhiyun 	/*
3447*4882a593Smuzhiyun 	 * if ctrl_loss_tmo is being enforced and the default reconnect delay
3448*4882a593Smuzhiyun 	 * is being used, change to a shorter reconnect delay for FC.
3449*4882a593Smuzhiyun 	 */
3450*4882a593Smuzhiyun 	if (opts->max_reconnects != -1 &&
3451*4882a593Smuzhiyun 	    opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
3452*4882a593Smuzhiyun 	    opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
3453*4882a593Smuzhiyun 		ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
3454*4882a593Smuzhiyun 		opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
3455*4882a593Smuzhiyun 		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3456*4882a593Smuzhiyun 						opts->reconnect_delay);
3457*4882a593Smuzhiyun 	}
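	/*
	 * Editor's note, an illustrative calculation that is not in the
	 * original source and assumes the usual fabrics defaults
	 * (reconnect_delay = 10s, ctrl_loss_tmo = 600s, so max_reconnects = 60):
	 * the block above would rewrite the options to reconnect_delay = 2s
	 * (NVME_FC_DEFAULT_RECONNECT_TMO) and
	 * max_reconnects = DIV_ROUND_UP(600, 2) = 300, preserving the overall
	 * controller-loss window while retrying more often on FC.
	 */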
3458*4882a593Smuzhiyun 
3459*4882a593Smuzhiyun 	ctrl->ctrl.opts = opts;
3460*4882a593Smuzhiyun 	ctrl->ctrl.nr_reconnects = 0;
3461*4882a593Smuzhiyun 	if (lport->dev)
3462*4882a593Smuzhiyun 		ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3463*4882a593Smuzhiyun 	else
3464*4882a593Smuzhiyun 		ctrl->ctrl.numa_node = NUMA_NO_NODE;
3465*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ctrl->ctrl_list);
3466*4882a593Smuzhiyun 	ctrl->lport = lport;
3467*4882a593Smuzhiyun 	ctrl->rport = rport;
3468*4882a593Smuzhiyun 	ctrl->dev = lport->dev;
3469*4882a593Smuzhiyun 	ctrl->cnum = idx;
3470*4882a593Smuzhiyun 	ctrl->ioq_live = false;
3471*4882a593Smuzhiyun 	init_waitqueue_head(&ctrl->ioabort_wait);
3472*4882a593Smuzhiyun 
3473*4882a593Smuzhiyun 	get_device(ctrl->dev);
3474*4882a593Smuzhiyun 	kref_init(&ctrl->ref);
3475*4882a593Smuzhiyun 
3476*4882a593Smuzhiyun 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3477*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3478*4882a593Smuzhiyun 	INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
3479*4882a593Smuzhiyun 	spin_lock_init(&ctrl->lock);
3480*4882a593Smuzhiyun 
3481*4882a593Smuzhiyun 	/* io queue count */
3482*4882a593Smuzhiyun 	ctrl->ctrl.queue_count = min_t(unsigned int,
3483*4882a593Smuzhiyun 				opts->nr_io_queues,
3484*4882a593Smuzhiyun 				lport->ops->max_hw_queues);
3485*4882a593Smuzhiyun 	ctrl->ctrl.queue_count++;	/* +1 for admin queue */
3486*4882a593Smuzhiyun 
3487*4882a593Smuzhiyun 	ctrl->ctrl.sqsize = opts->queue_size - 1;
3488*4882a593Smuzhiyun 	ctrl->ctrl.kato = opts->kato;
3489*4882a593Smuzhiyun 	ctrl->ctrl.cntlid = 0xffff;
3490*4882a593Smuzhiyun 
3491*4882a593Smuzhiyun 	ret = -ENOMEM;
3492*4882a593Smuzhiyun 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3493*4882a593Smuzhiyun 				sizeof(struct nvme_fc_queue), GFP_KERNEL);
3494*4882a593Smuzhiyun 	if (!ctrl->queues)
3495*4882a593Smuzhiyun 		goto out_free_ida;
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 	nvme_fc_init_queue(ctrl, 0);
3498*4882a593Smuzhiyun 
3499*4882a593Smuzhiyun 	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3500*4882a593Smuzhiyun 	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3501*4882a593Smuzhiyun 	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3502*4882a593Smuzhiyun 	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
3503*4882a593Smuzhiyun 	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3504*4882a593Smuzhiyun 	ctrl->admin_tag_set.cmd_size =
3505*4882a593Smuzhiyun 		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3506*4882a593Smuzhiyun 			    ctrl->lport->ops->fcprqst_priv_sz);
3507*4882a593Smuzhiyun 	ctrl->admin_tag_set.driver_data = ctrl;
3508*4882a593Smuzhiyun 	ctrl->admin_tag_set.nr_hw_queues = 1;
3509*4882a593Smuzhiyun 	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3510*4882a593Smuzhiyun 	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3511*4882a593Smuzhiyun 
3512*4882a593Smuzhiyun 	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3513*4882a593Smuzhiyun 	if (ret)
3514*4882a593Smuzhiyun 		goto out_free_queues;
3515*4882a593Smuzhiyun 	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3516*4882a593Smuzhiyun 
3517*4882a593Smuzhiyun 	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3518*4882a593Smuzhiyun 	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3519*4882a593Smuzhiyun 		ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3520*4882a593Smuzhiyun 		goto out_free_admin_tag_set;
3521*4882a593Smuzhiyun 	}
3522*4882a593Smuzhiyun 
3523*4882a593Smuzhiyun 	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3524*4882a593Smuzhiyun 	if (IS_ERR(ctrl->ctrl.admin_q)) {
3525*4882a593Smuzhiyun 		ret = PTR_ERR(ctrl->ctrl.admin_q);
3526*4882a593Smuzhiyun 		goto out_cleanup_fabrics_q;
3527*4882a593Smuzhiyun 	}
3528*4882a593Smuzhiyun 
3529*4882a593Smuzhiyun 	/*
3530*4882a593Smuzhiyun 	 * Would have been nice to init io queues tag set as well.
3531*4882a593Smuzhiyun 	 * However, we require interaction from the controller
3532*4882a593Smuzhiyun 	 * for max io queue count before we can do so.
3533*4882a593Smuzhiyun 	 * Defer this to the connect path.
3534*4882a593Smuzhiyun 	 */
3535*4882a593Smuzhiyun 
3536*4882a593Smuzhiyun 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3537*4882a593Smuzhiyun 	if (ret)
3538*4882a593Smuzhiyun 		goto out_cleanup_admin_q;
3539*4882a593Smuzhiyun 
3540*4882a593Smuzhiyun 	/* at this point, teardown path changes to ref counting on nvme ctrl */
3541*4882a593Smuzhiyun 
3542*4882a593Smuzhiyun 	spin_lock_irqsave(&rport->lock, flags);
3543*4882a593Smuzhiyun 	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3544*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rport->lock, flags);
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3547*4882a593Smuzhiyun 	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3548*4882a593Smuzhiyun 		dev_err(ctrl->ctrl.device,
3549*4882a593Smuzhiyun 			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3550*4882a593Smuzhiyun 		goto fail_ctrl;
3551*4882a593Smuzhiyun 	}
3552*4882a593Smuzhiyun 
3553*4882a593Smuzhiyun 	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3554*4882a593Smuzhiyun 		dev_err(ctrl->ctrl.device,
3555*4882a593Smuzhiyun 			"NVME-FC{%d}: failed to schedule initial connect\n",
3556*4882a593Smuzhiyun 			ctrl->cnum);
3557*4882a593Smuzhiyun 		goto fail_ctrl;
3558*4882a593Smuzhiyun 	}
3559*4882a593Smuzhiyun 
3560*4882a593Smuzhiyun 	flush_delayed_work(&ctrl->connect_work);
3561*4882a593Smuzhiyun 
3562*4882a593Smuzhiyun 	dev_info(ctrl->ctrl.device,
3563*4882a593Smuzhiyun 		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3564*4882a593Smuzhiyun 		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3565*4882a593Smuzhiyun 
3566*4882a593Smuzhiyun 	return &ctrl->ctrl;
3567*4882a593Smuzhiyun 
3568*4882a593Smuzhiyun fail_ctrl:
3569*4882a593Smuzhiyun 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3570*4882a593Smuzhiyun 	cancel_work_sync(&ctrl->ioerr_work);
3571*4882a593Smuzhiyun 	cancel_work_sync(&ctrl->ctrl.reset_work);
3572*4882a593Smuzhiyun 	cancel_delayed_work_sync(&ctrl->connect_work);
3573*4882a593Smuzhiyun 
3574*4882a593Smuzhiyun 	ctrl->ctrl.opts = NULL;
3575*4882a593Smuzhiyun 
3576*4882a593Smuzhiyun 	/* initiate nvme ctrl ref counting teardown */
3577*4882a593Smuzhiyun 	nvme_uninit_ctrl(&ctrl->ctrl);
3578*4882a593Smuzhiyun 
3579*4882a593Smuzhiyun 	/* Remove core ctrl ref. */
3580*4882a593Smuzhiyun 	nvme_put_ctrl(&ctrl->ctrl);
3581*4882a593Smuzhiyun 
3582*4882a593Smuzhiyun 	/* as we're past the point where we transition to the ref
3583*4882a593Smuzhiyun 	 * counting teardown path, if we return a bad pointer here,
3584*4882a593Smuzhiyun 	 * the calling routine, thinking it's prior to the
3585*4882a593Smuzhiyun 	 * transition, will do an rport put. Since the teardown
3586*4882a593Smuzhiyun 	 * path also does a rport put, we do an extra get here so
3587*4882a593Smuzhiyun 	 * that proper order/teardown happens.
3588*4882a593Smuzhiyun 	 */
3589*4882a593Smuzhiyun 	nvme_fc_rport_get(rport);
3590*4882a593Smuzhiyun 
3591*4882a593Smuzhiyun 	return ERR_PTR(-EIO);
3592*4882a593Smuzhiyun 
3593*4882a593Smuzhiyun out_cleanup_admin_q:
3594*4882a593Smuzhiyun 	blk_cleanup_queue(ctrl->ctrl.admin_q);
3595*4882a593Smuzhiyun out_cleanup_fabrics_q:
3596*4882a593Smuzhiyun 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
3597*4882a593Smuzhiyun out_free_admin_tag_set:
3598*4882a593Smuzhiyun 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
3599*4882a593Smuzhiyun out_free_queues:
3600*4882a593Smuzhiyun 	kfree(ctrl->queues);
3601*4882a593Smuzhiyun out_free_ida:
3602*4882a593Smuzhiyun 	put_device(ctrl->dev);
3603*4882a593Smuzhiyun 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3604*4882a593Smuzhiyun out_free_ctrl:
3605*4882a593Smuzhiyun 	kfree(ctrl);
3606*4882a593Smuzhiyun out_fail:
3607*4882a593Smuzhiyun 	/* exit via here doesn't follow ctrl ref points */
3608*4882a593Smuzhiyun 	return ERR_PTR(ret);
3609*4882a593Smuzhiyun }
3610*4882a593Smuzhiyun 
3611*4882a593Smuzhiyun 
3612*4882a593Smuzhiyun struct nvmet_fc_traddr {
3613*4882a593Smuzhiyun 	u64	nn;
3614*4882a593Smuzhiyun 	u64	pn;
3615*4882a593Smuzhiyun };
3616*4882a593Smuzhiyun 
3617*4882a593Smuzhiyun static int
3618*4882a593Smuzhiyun __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3619*4882a593Smuzhiyun {
3620*4882a593Smuzhiyun 	u64 token64;
3621*4882a593Smuzhiyun 
3622*4882a593Smuzhiyun 	if (match_u64(sstr, &token64))
3623*4882a593Smuzhiyun 		return -EINVAL;
3624*4882a593Smuzhiyun 	*val = token64;
3625*4882a593Smuzhiyun 
3626*4882a593Smuzhiyun 	return 0;
3627*4882a593Smuzhiyun }
3628*4882a593Smuzhiyun 
3629*4882a593Smuzhiyun /*
3630*4882a593Smuzhiyun  * This routine validates and extracts the WWN's from the TRADDR string.
3631*4882a593Smuzhiyun  * As kernel parsers need the 0x to determine number base, universally
3632*4882a593Smuzhiyun  * build string to parse with 0x prefix before parsing name strings.
3633*4882a593Smuzhiyun  */
3634*4882a593Smuzhiyun static int
3635*4882a593Smuzhiyun nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3636*4882a593Smuzhiyun {
3637*4882a593Smuzhiyun 	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3638*4882a593Smuzhiyun 	substring_t wwn = { name, &name[sizeof(name)-1] };
3639*4882a593Smuzhiyun 	int nnoffset, pnoffset;
3640*4882a593Smuzhiyun 
3641*4882a593Smuzhiyun 	/* validate if string is one of the 2 allowed formats */
3642*4882a593Smuzhiyun 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3643*4882a593Smuzhiyun 			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3644*4882a593Smuzhiyun 			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3645*4882a593Smuzhiyun 				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3646*4882a593Smuzhiyun 		nnoffset = NVME_FC_TRADDR_OXNNLEN;
3647*4882a593Smuzhiyun 		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3648*4882a593Smuzhiyun 						NVME_FC_TRADDR_OXNNLEN;
3649*4882a593Smuzhiyun 	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3650*4882a593Smuzhiyun 			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3651*4882a593Smuzhiyun 			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3652*4882a593Smuzhiyun 				"pn-", NVME_FC_TRADDR_NNLEN))) {
3653*4882a593Smuzhiyun 		nnoffset = NVME_FC_TRADDR_NNLEN;
3654*4882a593Smuzhiyun 		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3655*4882a593Smuzhiyun 	} else
3656*4882a593Smuzhiyun 		goto out_einval;
3657*4882a593Smuzhiyun 
3658*4882a593Smuzhiyun 	name[0] = '0';
3659*4882a593Smuzhiyun 	name[1] = 'x';
3660*4882a593Smuzhiyun 	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3661*4882a593Smuzhiyun 
3662*4882a593Smuzhiyun 	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3663*4882a593Smuzhiyun 	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3664*4882a593Smuzhiyun 		goto out_einval;
3665*4882a593Smuzhiyun 
3666*4882a593Smuzhiyun 	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3667*4882a593Smuzhiyun 	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3668*4882a593Smuzhiyun 		goto out_einval;
3669*4882a593Smuzhiyun 
3670*4882a593Smuzhiyun 	return 0;
3671*4882a593Smuzhiyun 
3672*4882a593Smuzhiyun out_einval:
3673*4882a593Smuzhiyun 	pr_warn("%s: bad traddr string\n", __func__);
3674*4882a593Smuzhiyun 	return -EINVAL;
3675*4882a593Smuzhiyun }
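/*
 * Editor's note, an illustrative example not present in the original source
 * (the WWN values are made up): the two accepted traddr spellings look like
 *
 *   nn-0x20000090fa942779:pn-0x10000090fa942779    (0x-prefixed, MAXLENGTH)
 *   nn-20000090fa942779:pn-10000090fa942779        (bare hex, MINLENGTH)
 *
 * i.e. a 16-hex-digit WWNN and a 16-hex-digit WWPN; the routine above
 * prepends "0x" itself before handing each name to __nvme_fc_parse_u64().
 */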
3676*4882a593Smuzhiyun 
3677*4882a593Smuzhiyun static struct nvme_ctrl *
3678*4882a593Smuzhiyun nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3679*4882a593Smuzhiyun {
3680*4882a593Smuzhiyun 	struct nvme_fc_lport *lport;
3681*4882a593Smuzhiyun 	struct nvme_fc_rport *rport;
3682*4882a593Smuzhiyun 	struct nvme_ctrl *ctrl;
3683*4882a593Smuzhiyun 	struct nvmet_fc_traddr laddr = { 0L, 0L };
3684*4882a593Smuzhiyun 	struct nvmet_fc_traddr raddr = { 0L, 0L };
3685*4882a593Smuzhiyun 	unsigned long flags;
3686*4882a593Smuzhiyun 	int ret;
3687*4882a593Smuzhiyun 
3688*4882a593Smuzhiyun 	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3689*4882a593Smuzhiyun 	if (ret || !raddr.nn || !raddr.pn)
3690*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
3691*4882a593Smuzhiyun 
3692*4882a593Smuzhiyun 	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3693*4882a593Smuzhiyun 	if (ret || !laddr.nn || !laddr.pn)
3694*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
3695*4882a593Smuzhiyun 
3696*4882a593Smuzhiyun 	/* find the host and remote ports to connect together */
3697*4882a593Smuzhiyun 	spin_lock_irqsave(&nvme_fc_lock, flags);
3698*4882a593Smuzhiyun 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3699*4882a593Smuzhiyun 		if (lport->localport.node_name != laddr.nn ||
3700*4882a593Smuzhiyun 		    lport->localport.port_name != laddr.pn ||
3701*4882a593Smuzhiyun 		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
3702*4882a593Smuzhiyun 			continue;
3703*4882a593Smuzhiyun 
3704*4882a593Smuzhiyun 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
3705*4882a593Smuzhiyun 			if (rport->remoteport.node_name != raddr.nn ||
3706*4882a593Smuzhiyun 			    rport->remoteport.port_name != raddr.pn ||
3707*4882a593Smuzhiyun 			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3708*4882a593Smuzhiyun 				continue;
3709*4882a593Smuzhiyun 
3710*4882a593Smuzhiyun 			/* if we fail to get a reference, fall through. Will error */
3711*4882a593Smuzhiyun 			if (!nvme_fc_rport_get(rport))
3712*4882a593Smuzhiyun 				break;
3713*4882a593Smuzhiyun 
3714*4882a593Smuzhiyun 			spin_unlock_irqrestore(&nvme_fc_lock, flags);
3715*4882a593Smuzhiyun 
3716*4882a593Smuzhiyun 			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3717*4882a593Smuzhiyun 			if (IS_ERR(ctrl))
3718*4882a593Smuzhiyun 				nvme_fc_rport_put(rport);
3719*4882a593Smuzhiyun 			return ctrl;
3720*4882a593Smuzhiyun 		}
3721*4882a593Smuzhiyun 	}
3722*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
3723*4882a593Smuzhiyun 
3724*4882a593Smuzhiyun 	pr_warn("%s: %s - %s combination not found\n",
3725*4882a593Smuzhiyun 		__func__, opts->traddr, opts->host_traddr);
3726*4882a593Smuzhiyun 	return ERR_PTR(-ENOENT);
3727*4882a593Smuzhiyun }
3728*4882a593Smuzhiyun 
3729*4882a593Smuzhiyun 
3730*4882a593Smuzhiyun static struct nvmf_transport_ops nvme_fc_transport = {
3731*4882a593Smuzhiyun 	.name		= "fc",
3732*4882a593Smuzhiyun 	.module		= THIS_MODULE,
3733*4882a593Smuzhiyun 	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3734*4882a593Smuzhiyun 	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3735*4882a593Smuzhiyun 	.create_ctrl	= nvme_fc_create_ctrl,
3736*4882a593Smuzhiyun };
3737*4882a593Smuzhiyun 
3738*4882a593Smuzhiyun /* Arbitrary successive failures max. With lots of subsystems could be high */
3739*4882a593Smuzhiyun #define DISCOVERY_MAX_FAIL	20
3740*4882a593Smuzhiyun 
3741*4882a593Smuzhiyun static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3742*4882a593Smuzhiyun 		struct device_attribute *attr, const char *buf, size_t count)
3743*4882a593Smuzhiyun {
3744*4882a593Smuzhiyun 	unsigned long flags;
3745*4882a593Smuzhiyun 	LIST_HEAD(local_disc_list);
3746*4882a593Smuzhiyun 	struct nvme_fc_lport *lport;
3747*4882a593Smuzhiyun 	struct nvme_fc_rport *rport;
3748*4882a593Smuzhiyun 	int failcnt = 0;
3749*4882a593Smuzhiyun 
3750*4882a593Smuzhiyun 	spin_lock_irqsave(&nvme_fc_lock, flags);
3751*4882a593Smuzhiyun restart:
3752*4882a593Smuzhiyun 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3753*4882a593Smuzhiyun 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
3754*4882a593Smuzhiyun 			if (!nvme_fc_lport_get(lport))
3755*4882a593Smuzhiyun 				continue;
3756*4882a593Smuzhiyun 			if (!nvme_fc_rport_get(rport)) {
3757*4882a593Smuzhiyun 				/*
3758*4882a593Smuzhiyun 				 * This is a temporary condition. Upon restart
3759*4882a593Smuzhiyun 				 * this rport will be gone from the list.
3760*4882a593Smuzhiyun 				 *
3761*4882a593Smuzhiyun 				 * Revert the lport get (with a put) and retry.  Anything
3762*4882a593Smuzhiyun 				 * added to the list already will be skipped (as
3763*4882a593Smuzhiyun 				 * they are no longer list_empty).  Loops should
3764*4882a593Smuzhiyun 				 * resume at rports that were not yet seen.
3765*4882a593Smuzhiyun 				 */
3766*4882a593Smuzhiyun 				nvme_fc_lport_put(lport);
3767*4882a593Smuzhiyun 
3768*4882a593Smuzhiyun 				if (failcnt++ < DISCOVERY_MAX_FAIL)
3769*4882a593Smuzhiyun 					goto restart;
3770*4882a593Smuzhiyun 
3771*4882a593Smuzhiyun 				pr_err("nvme_discovery: too many reference "
3772*4882a593Smuzhiyun 				       "failures\n");
3773*4882a593Smuzhiyun 				goto process_local_list;
3774*4882a593Smuzhiyun 			}
3775*4882a593Smuzhiyun 			if (list_empty(&rport->disc_list))
3776*4882a593Smuzhiyun 				list_add_tail(&rport->disc_list,
3777*4882a593Smuzhiyun 					      &local_disc_list);
3778*4882a593Smuzhiyun 		}
3779*4882a593Smuzhiyun 	}
3780*4882a593Smuzhiyun 
3781*4882a593Smuzhiyun process_local_list:
3782*4882a593Smuzhiyun 	while (!list_empty(&local_disc_list)) {
3783*4882a593Smuzhiyun 		rport = list_first_entry(&local_disc_list,
3784*4882a593Smuzhiyun 					 struct nvme_fc_rport, disc_list);
3785*4882a593Smuzhiyun 		list_del_init(&rport->disc_list);
3786*4882a593Smuzhiyun 		spin_unlock_irqrestore(&nvme_fc_lock, flags);
3787*4882a593Smuzhiyun 
3788*4882a593Smuzhiyun 		lport = rport->lport;
3789*4882a593Smuzhiyun 		/* signal discovery. Won't hurt if it repeats */
3790*4882a593Smuzhiyun 		nvme_fc_signal_discovery_scan(lport, rport);
3791*4882a593Smuzhiyun 		nvme_fc_rport_put(rport);
3792*4882a593Smuzhiyun 		nvme_fc_lport_put(lport);
3793*4882a593Smuzhiyun 
3794*4882a593Smuzhiyun 		spin_lock_irqsave(&nvme_fc_lock, flags);
3795*4882a593Smuzhiyun 	}
3796*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
3797*4882a593Smuzhiyun 
3798*4882a593Smuzhiyun 	return count;
3799*4882a593Smuzhiyun }
3800*4882a593Smuzhiyun static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
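/*
 * Editor's note, a usage sketch rather than original source text, assuming
 * the "fc" class and "fc_udev_device" names registered in
 * nvme_fc_init_module() below: the write-only attribute created here is
 * typically exercised from user space as
 *
 *   echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 *
 * which re-signals a discovery scan for every known lport/rport pair; the
 * written value itself is ignored, any write triggers the scan.
 */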
3801*4882a593Smuzhiyun 
3802*4882a593Smuzhiyun static struct attribute *nvme_fc_attrs[] = {
3803*4882a593Smuzhiyun 	&dev_attr_nvme_discovery.attr,
3804*4882a593Smuzhiyun 	NULL
3805*4882a593Smuzhiyun };
3806*4882a593Smuzhiyun 
3807*4882a593Smuzhiyun static struct attribute_group nvme_fc_attr_group = {
3808*4882a593Smuzhiyun 	.attrs = nvme_fc_attrs,
3809*4882a593Smuzhiyun };
3810*4882a593Smuzhiyun 
3811*4882a593Smuzhiyun static const struct attribute_group *nvme_fc_attr_groups[] = {
3812*4882a593Smuzhiyun 	&nvme_fc_attr_group,
3813*4882a593Smuzhiyun 	NULL
3814*4882a593Smuzhiyun };
3815*4882a593Smuzhiyun 
3816*4882a593Smuzhiyun static struct class fc_class = {
3817*4882a593Smuzhiyun 	.name = "fc",
3818*4882a593Smuzhiyun 	.dev_groups = nvme_fc_attr_groups,
3819*4882a593Smuzhiyun 	.owner = THIS_MODULE,
3820*4882a593Smuzhiyun };
3821*4882a593Smuzhiyun 
3822*4882a593Smuzhiyun static int __init nvme_fc_init_module(void)
3823*4882a593Smuzhiyun {
3824*4882a593Smuzhiyun 	int ret;
3825*4882a593Smuzhiyun 
3826*4882a593Smuzhiyun 	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
3827*4882a593Smuzhiyun 	if (!nvme_fc_wq)
3828*4882a593Smuzhiyun 		return -ENOMEM;
3829*4882a593Smuzhiyun 
3830*4882a593Smuzhiyun 	/*
3831*4882a593Smuzhiyun 	 * NOTE:
3832*4882a593Smuzhiyun 	 * It is expected that in the future the kernel will combine
3833*4882a593Smuzhiyun 	 * the FC-isms that are currently under scsi and now being
3834*4882a593Smuzhiyun 	 * added to by NVME into a new standalone FC class. The SCSI
3835*4882a593Smuzhiyun 	 * and NVME protocols and their devices would be under this
3836*4882a593Smuzhiyun 	 * new FC class.
3837*4882a593Smuzhiyun 	 *
3838*4882a593Smuzhiyun 	 * As we need something to post FC-specific udev events to,
3839*4882a593Smuzhiyun 	 * specifically for nvme probe events, start by creating the
3840*4882a593Smuzhiyun 	 * new device class.  When the new standalone FC class is
3841*4882a593Smuzhiyun 	 * put in place, this code will move to a more generic
3842*4882a593Smuzhiyun 	 * location for the class.
3843*4882a593Smuzhiyun 	 */
3844*4882a593Smuzhiyun 	ret = class_register(&fc_class);
3845*4882a593Smuzhiyun 	if (ret) {
3846*4882a593Smuzhiyun 		pr_err("couldn't register class fc\n");
3847*4882a593Smuzhiyun 		goto out_destroy_wq;
3848*4882a593Smuzhiyun 	}
3849*4882a593Smuzhiyun 
3850*4882a593Smuzhiyun 	/*
3851*4882a593Smuzhiyun 	 * Create a device for the FC-centric udev events
3852*4882a593Smuzhiyun 	 */
3853*4882a593Smuzhiyun 	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3854*4882a593Smuzhiyun 				"fc_udev_device");
3855*4882a593Smuzhiyun 	if (IS_ERR(fc_udev_device)) {
3856*4882a593Smuzhiyun 		pr_err("couldn't create fc_udev device!\n");
3857*4882a593Smuzhiyun 		ret = PTR_ERR(fc_udev_device);
3858*4882a593Smuzhiyun 		goto out_destroy_class;
3859*4882a593Smuzhiyun 	}
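	/*
	 * Editor's note, an illustrative udev rule that is not part of the
	 * original source; the event/environment names are assumed from the
	 * transport's discovery uevent, and nvme-cli option spellings may
	 * differ by distribution:
	 *
	 *   ACTION=="change", SUBSYSTEM=="fc", ENV{FC_EVENT}=="nvmediscovery", \
	 *     RUN+="/usr/sbin/nvme connect-all --transport=fc \
	 *       --host-traddr=$env{NVMEFC_HOST_TRADDR} --traddr=$env{NVMEFC_TRADDR}"
	 *
	 * i.e. the fc_udev_device created above exists so that FC-specific
	 * change events have a kobject to be emitted from.
	 */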
3860*4882a593Smuzhiyun 
3861*4882a593Smuzhiyun 	ret = nvmf_register_transport(&nvme_fc_transport);
3862*4882a593Smuzhiyun 	if (ret)
3863*4882a593Smuzhiyun 		goto out_destroy_device;
3864*4882a593Smuzhiyun 
3865*4882a593Smuzhiyun 	return 0;
3866*4882a593Smuzhiyun 
3867*4882a593Smuzhiyun out_destroy_device:
3868*4882a593Smuzhiyun 	device_destroy(&fc_class, MKDEV(0, 0));
3869*4882a593Smuzhiyun out_destroy_class:
3870*4882a593Smuzhiyun 	class_unregister(&fc_class);
3871*4882a593Smuzhiyun out_destroy_wq:
3872*4882a593Smuzhiyun 	destroy_workqueue(nvme_fc_wq);
3873*4882a593Smuzhiyun 
3874*4882a593Smuzhiyun 	return ret;
3875*4882a593Smuzhiyun }
3876*4882a593Smuzhiyun 
3877*4882a593Smuzhiyun static void
3878*4882a593Smuzhiyun nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3879*4882a593Smuzhiyun {
3880*4882a593Smuzhiyun 	struct nvme_fc_ctrl *ctrl;
3881*4882a593Smuzhiyun 
3882*4882a593Smuzhiyun 	spin_lock(&rport->lock);
3883*4882a593Smuzhiyun 	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3884*4882a593Smuzhiyun 		dev_warn(ctrl->ctrl.device,
3885*4882a593Smuzhiyun 			"NVME-FC{%d}: transport unloading: deleting ctrl\n",
3886*4882a593Smuzhiyun 			ctrl->cnum);
3887*4882a593Smuzhiyun 		nvme_delete_ctrl(&ctrl->ctrl);
3888*4882a593Smuzhiyun 	}
3889*4882a593Smuzhiyun 	spin_unlock(&rport->lock);
3890*4882a593Smuzhiyun }
3891*4882a593Smuzhiyun 
3892*4882a593Smuzhiyun static void
3893*4882a593Smuzhiyun nvme_fc_cleanup_for_unload(void)
3894*4882a593Smuzhiyun {
3895*4882a593Smuzhiyun 	struct nvme_fc_lport *lport;
3896*4882a593Smuzhiyun 	struct nvme_fc_rport *rport;
3897*4882a593Smuzhiyun 
3898*4882a593Smuzhiyun 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3899*4882a593Smuzhiyun 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
3900*4882a593Smuzhiyun 			nvme_fc_delete_controllers(rport);
3901*4882a593Smuzhiyun 		}
3902*4882a593Smuzhiyun 	}
3903*4882a593Smuzhiyun }
3904*4882a593Smuzhiyun 
3905*4882a593Smuzhiyun static void __exit nvme_fc_exit_module(void)
3906*4882a593Smuzhiyun {
3907*4882a593Smuzhiyun 	unsigned long flags;
3908*4882a593Smuzhiyun 	bool need_cleanup = false;
3909*4882a593Smuzhiyun 
3910*4882a593Smuzhiyun 	spin_lock_irqsave(&nvme_fc_lock, flags);
3911*4882a593Smuzhiyun 	nvme_fc_waiting_to_unload = true;
3912*4882a593Smuzhiyun 	if (!list_empty(&nvme_fc_lport_list)) {
3913*4882a593Smuzhiyun 		need_cleanup = true;
3914*4882a593Smuzhiyun 		nvme_fc_cleanup_for_unload();
3915*4882a593Smuzhiyun 	}
3916*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
3917*4882a593Smuzhiyun 	if (need_cleanup) {
3918*4882a593Smuzhiyun 		pr_info("%s: waiting for ctlr deletes\n", __func__);
3919*4882a593Smuzhiyun 		wait_for_completion(&nvme_fc_unload_proceed);
3920*4882a593Smuzhiyun 		pr_info("%s: ctrl deletes complete\n", __func__);
3921*4882a593Smuzhiyun 	}
3922*4882a593Smuzhiyun 
3923*4882a593Smuzhiyun 	nvmf_unregister_transport(&nvme_fc_transport);
3924*4882a593Smuzhiyun 
3925*4882a593Smuzhiyun 	ida_destroy(&nvme_fc_local_port_cnt);
3926*4882a593Smuzhiyun 	ida_destroy(&nvme_fc_ctrl_cnt);
3927*4882a593Smuzhiyun 
3928*4882a593Smuzhiyun 	device_destroy(&fc_class, MKDEV(0, 0));
3929*4882a593Smuzhiyun 	class_unregister(&fc_class);
3930*4882a593Smuzhiyun 	destroy_workqueue(nvme_fc_wq);
3931*4882a593Smuzhiyun }
3932*4882a593Smuzhiyun 
3933*4882a593Smuzhiyun module_init(nvme_fc_init_module);
3934*4882a593Smuzhiyun module_exit(nvme_fc_exit_module);
3935*4882a593Smuzhiyun 
3936*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
3937