xref: /OK3568_Linux_fs/kernel/drivers/nvme/target/fc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6*4882a593Smuzhiyun #include <linux/module.h>
7*4882a593Smuzhiyun #include <linux/slab.h>
8*4882a593Smuzhiyun #include <linux/blk-mq.h>
9*4882a593Smuzhiyun #include <linux/parser.h>
10*4882a593Smuzhiyun #include <linux/random.h>
11*4882a593Smuzhiyun #include <uapi/scsi/fc/fc_fs.h>
12*4882a593Smuzhiyun #include <uapi/scsi/fc/fc_els.h>
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include "nvmet.h"
15*4882a593Smuzhiyun #include <linux/nvme-fc-driver.h>
16*4882a593Smuzhiyun #include <linux/nvme-fc.h>
17*4882a593Smuzhiyun #include "../host/fc.h"
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /* *************************** Data Structures/Defines ****************** */
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #define NVMET_LS_CTX_COUNT		256
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun struct nvmet_fc_tgtport;
26*4882a593Smuzhiyun struct nvmet_fc_tgt_assoc;
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun struct nvmet_fc_ls_iod {		/* for an LS RQST RCV */
29*4882a593Smuzhiyun 	struct nvmefc_ls_rsp		*lsrsp;
30*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 	struct list_head		ls_rcv_list; /* tgtport->ls_rcv_list */
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 	struct nvmet_fc_tgtport		*tgtport;
35*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc	*assoc;
36*4882a593Smuzhiyun 	void				*hosthandle;
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	union nvmefc_ls_requests	*rqstbuf;
39*4882a593Smuzhiyun 	union nvmefc_ls_responses	*rspbuf;
40*4882a593Smuzhiyun 	u16				rqstdatalen;
41*4882a593Smuzhiyun 	dma_addr_t			rspdma;
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun 	struct scatterlist		sg[2];
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	struct work_struct		work;
46*4882a593Smuzhiyun } __aligned(sizeof(unsigned long long));
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
49*4882a593Smuzhiyun 	struct nvmefc_ls_req		ls_req;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	struct nvmet_fc_tgtport		*tgtport;
52*4882a593Smuzhiyun 	void				*hosthandle;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	int				ls_error;
55*4882a593Smuzhiyun 	struct list_head		lsreq_list; /* tgtport->ls_req_list */
56*4882a593Smuzhiyun 	bool				req_queued;
57*4882a593Smuzhiyun };
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun /* desired maximum for a single sequence - if sg list allows it */
61*4882a593Smuzhiyun #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun enum nvmet_fcp_datadir {
64*4882a593Smuzhiyun 	NVMET_FCP_NODATA,
65*4882a593Smuzhiyun 	NVMET_FCP_WRITE,
66*4882a593Smuzhiyun 	NVMET_FCP_READ,
67*4882a593Smuzhiyun 	NVMET_FCP_ABORTED,
68*4882a593Smuzhiyun };
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun struct nvmet_fc_fcp_iod {
71*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req	*fcpreq;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	struct nvme_fc_cmd_iu		cmdiubuf;
74*4882a593Smuzhiyun 	struct nvme_fc_ersp_iu		rspiubuf;
75*4882a593Smuzhiyun 	dma_addr_t			rspdma;
76*4882a593Smuzhiyun 	struct scatterlist		*next_sg;
77*4882a593Smuzhiyun 	struct scatterlist		*data_sg;
78*4882a593Smuzhiyun 	int				data_sg_cnt;
79*4882a593Smuzhiyun 	u32				offset;
80*4882a593Smuzhiyun 	enum nvmet_fcp_datadir		io_dir;
81*4882a593Smuzhiyun 	bool				active;
82*4882a593Smuzhiyun 	bool				abort;
83*4882a593Smuzhiyun 	bool				aborted;
84*4882a593Smuzhiyun 	bool				writedataactive;
85*4882a593Smuzhiyun 	spinlock_t			flock;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	struct nvmet_req		req;
88*4882a593Smuzhiyun 	struct work_struct		defer_work;
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	struct nvmet_fc_tgtport		*tgtport;
91*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue	*queue;
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	struct list_head		fcp_list;	/* tgtport->fcp_list */
94*4882a593Smuzhiyun };
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun struct nvmet_fc_tgtport {
97*4882a593Smuzhiyun 	struct nvmet_fc_target_port	fc_target_port;
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	struct list_head		tgt_list; /* nvmet_fc_target_list */
100*4882a593Smuzhiyun 	struct device			*dev;	/* dev for dma mapping */
101*4882a593Smuzhiyun 	struct nvmet_fc_target_template	*ops;
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod		*iod;
104*4882a593Smuzhiyun 	spinlock_t			lock;
105*4882a593Smuzhiyun 	struct list_head		ls_rcv_list;
106*4882a593Smuzhiyun 	struct list_head		ls_req_list;
107*4882a593Smuzhiyun 	struct list_head		ls_busylist;
108*4882a593Smuzhiyun 	struct list_head		assoc_list;
109*4882a593Smuzhiyun 	struct list_head		host_list;
110*4882a593Smuzhiyun 	struct ida			assoc_cnt;
111*4882a593Smuzhiyun 	struct nvmet_fc_port_entry	*pe;
112*4882a593Smuzhiyun 	struct kref			ref;
113*4882a593Smuzhiyun 	u32				max_sg_cnt;
114*4882a593Smuzhiyun };
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun struct nvmet_fc_port_entry {
117*4882a593Smuzhiyun 	struct nvmet_fc_tgtport		*tgtport;
118*4882a593Smuzhiyun 	struct nvmet_port		*port;
119*4882a593Smuzhiyun 	u64				node_name;
120*4882a593Smuzhiyun 	u64				port_name;
121*4882a593Smuzhiyun 	struct list_head		pe_list;
122*4882a593Smuzhiyun };
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun struct nvmet_fc_defer_fcp_req {
125*4882a593Smuzhiyun 	struct list_head		req_list;
126*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req	*fcp_req;
127*4882a593Smuzhiyun };
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun struct nvmet_fc_tgt_queue {
130*4882a593Smuzhiyun 	bool				ninetypercent;
131*4882a593Smuzhiyun 	u16				qid;
132*4882a593Smuzhiyun 	u16				sqsize;
133*4882a593Smuzhiyun 	u16				ersp_ratio;
134*4882a593Smuzhiyun 	__le16				sqhd;
135*4882a593Smuzhiyun 	atomic_t			connected;
136*4882a593Smuzhiyun 	atomic_t			sqtail;
137*4882a593Smuzhiyun 	atomic_t			zrspcnt;
138*4882a593Smuzhiyun 	atomic_t			rsn;
139*4882a593Smuzhiyun 	spinlock_t			qlock;
140*4882a593Smuzhiyun 	struct nvmet_cq			nvme_cq;
141*4882a593Smuzhiyun 	struct nvmet_sq			nvme_sq;
142*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc	*assoc;
143*4882a593Smuzhiyun 	struct list_head		fod_list;
144*4882a593Smuzhiyun 	struct list_head		pending_cmd_list;
145*4882a593Smuzhiyun 	struct list_head		avail_defer_list;
146*4882a593Smuzhiyun 	struct workqueue_struct		*work_q;
147*4882a593Smuzhiyun 	struct kref			ref;
148*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
149*4882a593Smuzhiyun } __aligned(sizeof(unsigned long long));
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun struct nvmet_fc_hostport {
152*4882a593Smuzhiyun 	struct nvmet_fc_tgtport		*tgtport;
153*4882a593Smuzhiyun 	void				*hosthandle;
154*4882a593Smuzhiyun 	struct list_head		host_list;
155*4882a593Smuzhiyun 	struct kref			ref;
156*4882a593Smuzhiyun 	u8				invalid;
157*4882a593Smuzhiyun };
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun struct nvmet_fc_tgt_assoc {
160*4882a593Smuzhiyun 	u64				association_id;
161*4882a593Smuzhiyun 	u32				a_id;
162*4882a593Smuzhiyun 	atomic_t			terminating;
163*4882a593Smuzhiyun 	struct nvmet_fc_tgtport		*tgtport;
164*4882a593Smuzhiyun 	struct nvmet_fc_hostport	*hostport;
165*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod		*rcv_disconn;
166*4882a593Smuzhiyun 	struct list_head		a_list;
167*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
168*4882a593Smuzhiyun 	struct kref			ref;
169*4882a593Smuzhiyun 	struct work_struct		del_work;
170*4882a593Smuzhiyun };
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun static inline int
174*4882a593Smuzhiyun nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun 	return (iodptr - iodptr->tgtport->iod);
177*4882a593Smuzhiyun }
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun static inline int
180*4882a593Smuzhiyun nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun 	return (fodptr - fodptr->queue->fod);
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun /*
187*4882a593Smuzhiyun  * Association and Connection IDs:
188*4882a593Smuzhiyun  *
189*4882a593Smuzhiyun  * Association ID will have random number in upper 6 bytes and zero
190*4882a593Smuzhiyun  *   in lower 2 bytes
191*4882a593Smuzhiyun  *
192*4882a593Smuzhiyun  * Connection IDs will be Association ID with QID or'd in lower 2 bytes
193*4882a593Smuzhiyun  *
194*4882a593Smuzhiyun  * note: Association ID = Connection ID for queue 0
195*4882a593Smuzhiyun  */
196*4882a593Smuzhiyun #define BYTES_FOR_QID			sizeof(u16)
197*4882a593Smuzhiyun #define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
198*4882a593Smuzhiyun #define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun static inline u64
201*4882a593Smuzhiyun nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun 	return (assoc->association_id | qid);
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun static inline u64
207*4882a593Smuzhiyun nvmet_fc_getassociationid(u64 connectionid)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun 	return connectionid & ~NVMET_FC_QUEUEID_MASK;
210*4882a593Smuzhiyun }
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun static inline u16
213*4882a593Smuzhiyun nvmet_fc_getqueueid(u64 connectionid)
214*4882a593Smuzhiyun {
215*4882a593Smuzhiyun 	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
216*4882a593Smuzhiyun }
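
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * a minimal self-check of the connection-id helpers above, assuming an
 * association id of 0x0123456789AB0000 (lower two bytes always zero).
 * The helper name nvmet_fc_connid_example is hypothetical and nothing
 * in this file calls it.
 */
static inline void
nvmet_fc_connid_example(struct nvmet_fc_tgt_assoc *assoc)
{
	/* assume assoc->association_id == 0x0123456789AB0000ULL */
	u64 connid = nvmet_fc_makeconnid(assoc, 5); /* 0x0123456789AB0005 */

	/* the lower 16 bits carry the qid, the rest is the association id */
	WARN_ON(nvmet_fc_getassociationid(connid) != assoc->association_id);
	WARN_ON(nvmet_fc_getqueueid(connid) != 5);
}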
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun static inline struct nvmet_fc_tgtport *
219*4882a593Smuzhiyun targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
220*4882a593Smuzhiyun {
221*4882a593Smuzhiyun 	return container_of(targetport, struct nvmet_fc_tgtport,
222*4882a593Smuzhiyun 				 fc_target_port);
223*4882a593Smuzhiyun }
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun static inline struct nvmet_fc_fcp_iod *
226*4882a593Smuzhiyun nvmet_req_to_fod(struct nvmet_req *nvme_req)
227*4882a593Smuzhiyun {
228*4882a593Smuzhiyun 	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun /* *************************** Globals **************************** */
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun static LIST_HEAD(nvmet_fc_target_list);
238*4882a593Smuzhiyun static DEFINE_IDA(nvmet_fc_tgtport_cnt);
239*4882a593Smuzhiyun static LIST_HEAD(nvmet_fc_portentry_list);
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
243*4882a593Smuzhiyun static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
244*4882a593Smuzhiyun static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
245*4882a593Smuzhiyun static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
246*4882a593Smuzhiyun static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
247*4882a593Smuzhiyun static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
248*4882a593Smuzhiyun static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
249*4882a593Smuzhiyun static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
250*4882a593Smuzhiyun static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
251*4882a593Smuzhiyun 					struct nvmet_fc_fcp_iod *fod);
252*4882a593Smuzhiyun static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
253*4882a593Smuzhiyun static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
254*4882a593Smuzhiyun 				struct nvmet_fc_ls_iod *iod);
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun /* *********************** FC-NVME DMA Handling **************************** */
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun /*
260*4882a593Smuzhiyun  * The fcloop device passes in a NULL device pointer. Real LLD's will
261*4882a593Smuzhiyun  * pass in a valid device pointer. If NULL is passed to the dma mapping
262*4882a593Smuzhiyun  * routines, depending on the platform, it may or may not succeed, and
263*4882a593Smuzhiyun  * may crash.
264*4882a593Smuzhiyun  *
265*4882a593Smuzhiyun  * As such:
266*4882a593Smuzhiyun  * Wrap all the dma routines and check the dev pointer.
267*4882a593Smuzhiyun  *
268*4882a593Smuzhiyun  * For simple mappings (those that return just a dma address), we'll
269*4882a593Smuzhiyun  * noop them, returning a dma address of 0.
270*4882a593Smuzhiyun  *
271*4882a593Smuzhiyun  * On more complex mappings (dma_map_sg), a pseudo routine fills
272*4882a593Smuzhiyun  * in the scatter list, setting all dma addresses to 0.
273*4882a593Smuzhiyun  */
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun static inline dma_addr_t
276*4882a593Smuzhiyun fc_dma_map_single(struct device *dev, void *ptr, size_t size,
277*4882a593Smuzhiyun 		enum dma_data_direction dir)
278*4882a593Smuzhiyun {
279*4882a593Smuzhiyun 	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun static inline int
283*4882a593Smuzhiyun fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
284*4882a593Smuzhiyun {
285*4882a593Smuzhiyun 	return dev ? dma_mapping_error(dev, dma_addr) : 0;
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun static inline void
289*4882a593Smuzhiyun fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
290*4882a593Smuzhiyun 	enum dma_data_direction dir)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun 	if (dev)
293*4882a593Smuzhiyun 		dma_unmap_single(dev, addr, size, dir);
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun static inline void
297*4882a593Smuzhiyun fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
298*4882a593Smuzhiyun 		enum dma_data_direction dir)
299*4882a593Smuzhiyun {
300*4882a593Smuzhiyun 	if (dev)
301*4882a593Smuzhiyun 		dma_sync_single_for_cpu(dev, addr, size, dir);
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun static inline void
305*4882a593Smuzhiyun fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
306*4882a593Smuzhiyun 		enum dma_data_direction dir)
307*4882a593Smuzhiyun {
308*4882a593Smuzhiyun 	if (dev)
309*4882a593Smuzhiyun 		dma_sync_single_for_device(dev, addr, size, dir);
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun /* pseudo dma_map_sg call */
313*4882a593Smuzhiyun static int
314*4882a593Smuzhiyun fc_map_sg(struct scatterlist *sg, int nents)
315*4882a593Smuzhiyun {
316*4882a593Smuzhiyun 	struct scatterlist *s;
317*4882a593Smuzhiyun 	int i;
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun 	WARN_ON(nents == 0 || sg[0].length == 0);
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	for_each_sg(sg, s, nents, i) {
322*4882a593Smuzhiyun 		s->dma_address = 0L;
323*4882a593Smuzhiyun #ifdef CONFIG_NEED_SG_DMA_LENGTH
324*4882a593Smuzhiyun 		s->dma_length = s->length;
325*4882a593Smuzhiyun #endif
326*4882a593Smuzhiyun 	}
327*4882a593Smuzhiyun 	return nents;
328*4882a593Smuzhiyun }
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun static inline int
331*4882a593Smuzhiyun fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
332*4882a593Smuzhiyun 		enum dma_data_direction dir)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun 	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun static inline void
338*4882a593Smuzhiyun fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
339*4882a593Smuzhiyun 		enum dma_data_direction dir)
340*4882a593Smuzhiyun {
341*4882a593Smuzhiyun 	if (dev)
342*4882a593Smuzhiyun 		dma_unmap_sg(dev, sg, nents, dir);
343*4882a593Smuzhiyun }
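
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * with a NULL device, as fcloop passes in, the wrappers above degrade to
 * no-ops -- single mappings report a dma address of 0 that never flags a
 * mapping error, and fc_dma_map_sg() falls back to fc_map_sg(), which only
 * mirrors the element lengths. The helper name fc_dma_wrapper_example is
 * hypothetical and nothing in this file calls it.
 */
static inline void
fc_dma_wrapper_example(void *buf, struct scatterlist *sg, int nents)
{
	dma_addr_t addr;

	addr = fc_dma_map_single(NULL, buf, 64, DMA_TO_DEVICE);	/* == 0 */
	WARN_ON(fc_dma_mapping_error(NULL, addr));	/* never an error */
	fc_dma_unmap_single(NULL, addr, 64, DMA_TO_DEVICE);	/* no-op */

	WARN_ON(fc_dma_map_sg(NULL, sg, nents, DMA_FROM_DEVICE) != nents);
	fc_dma_unmap_sg(NULL, sg, nents, DMA_FROM_DEVICE);	/* no-op */
}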
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun /* ********************** FC-NVME LS XMT Handling ************************* */
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 
349*4882a593Smuzhiyun static void
350*4882a593Smuzhiyun __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
351*4882a593Smuzhiyun {
352*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
353*4882a593Smuzhiyun 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
354*4882a593Smuzhiyun 	unsigned long flags;
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	if (!lsop->req_queued) {
359*4882a593Smuzhiyun 		spin_unlock_irqrestore(&tgtport->lock, flags);
360*4882a593Smuzhiyun 		return;
361*4882a593Smuzhiyun 	}
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	list_del(&lsop->lsreq_list);
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	lsop->req_queued = false;
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
370*4882a593Smuzhiyun 				  (lsreq->rqstlen + lsreq->rsplen),
371*4882a593Smuzhiyun 				  DMA_BIDIRECTIONAL);
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun 	nvmet_fc_tgtport_put(tgtport);
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun static int
377*4882a593Smuzhiyun __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
378*4882a593Smuzhiyun 		struct nvmet_fc_ls_req_op *lsop,
379*4882a593Smuzhiyun 		void (*done)(struct nvmefc_ls_req *req, int status))
380*4882a593Smuzhiyun {
381*4882a593Smuzhiyun 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
382*4882a593Smuzhiyun 	unsigned long flags;
383*4882a593Smuzhiyun 	int ret = 0;
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	if (!tgtport->ops->ls_req)
386*4882a593Smuzhiyun 		return -EOPNOTSUPP;
387*4882a593Smuzhiyun 
388*4882a593Smuzhiyun 	if (!nvmet_fc_tgtport_get(tgtport))
389*4882a593Smuzhiyun 		return -ESHUTDOWN;
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	lsreq->done = done;
392*4882a593Smuzhiyun 	lsop->req_queued = false;
393*4882a593Smuzhiyun 	INIT_LIST_HEAD(&lsop->lsreq_list);
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
396*4882a593Smuzhiyun 				  lsreq->rqstlen + lsreq->rsplen,
397*4882a593Smuzhiyun 				  DMA_BIDIRECTIONAL);
398*4882a593Smuzhiyun 	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
399*4882a593Smuzhiyun 		ret = -EFAULT;
400*4882a593Smuzhiyun 		goto out_puttgtport;
401*4882a593Smuzhiyun 	}
402*4882a593Smuzhiyun 	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	lsop->req_queued = true;
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
413*4882a593Smuzhiyun 				   lsreq);
414*4882a593Smuzhiyun 	if (ret)
415*4882a593Smuzhiyun 		goto out_unlink;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	return 0;
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun out_unlink:
420*4882a593Smuzhiyun 	lsop->ls_error = ret;
421*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
422*4882a593Smuzhiyun 	lsop->req_queued = false;
423*4882a593Smuzhiyun 	list_del(&lsop->lsreq_list);
424*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
425*4882a593Smuzhiyun 	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
426*4882a593Smuzhiyun 				  (lsreq->rqstlen + lsreq->rsplen),
427*4882a593Smuzhiyun 				  DMA_BIDIRECTIONAL);
428*4882a593Smuzhiyun out_puttgtport:
429*4882a593Smuzhiyun 	nvmet_fc_tgtport_put(tgtport);
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun 	return ret;
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun static int
435*4882a593Smuzhiyun nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
436*4882a593Smuzhiyun 		struct nvmet_fc_ls_req_op *lsop,
437*4882a593Smuzhiyun 		void (*done)(struct nvmefc_ls_req *req, int status))
438*4882a593Smuzhiyun {
439*4882a593Smuzhiyun 	/* don't wait for completion */
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun 	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun 
444*4882a593Smuzhiyun static void
445*4882a593Smuzhiyun nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
446*4882a593Smuzhiyun {
447*4882a593Smuzhiyun 	struct nvmet_fc_ls_req_op *lsop =
448*4882a593Smuzhiyun 		container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	__nvmet_fc_finish_ls_req(lsop);
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	/* fc-nvme target doesn't care about success or failure of cmd */
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun 	kfree(lsop);
455*4882a593Smuzhiyun }
456*4882a593Smuzhiyun 
457*4882a593Smuzhiyun /*
458*4882a593Smuzhiyun  * This routine sends a FC-NVME LS to disconnect (aka terminate)
459*4882a593Smuzhiyun  * the FC-NVME Association.  Terminating the association also
460*4882a593Smuzhiyun  * terminates the FC-NVME connections (per queue, both admin and io
461*4882a593Smuzhiyun  * queues) that are part of the association. I.e. things are torn
462*4882a593Smuzhiyun  * down, and the related FC-NVME Association ID and Connection IDs
463*4882a593Smuzhiyun  * become invalid.
464*4882a593Smuzhiyun  *
465*4882a593Smuzhiyun  * The behavior of the fc-nvme target is such that its
466*4882a593Smuzhiyun  * understanding of the association and connections will implicitly
467*4882a593Smuzhiyun  * be torn down. The action is implicit as it may be due to a loss of
468*4882a593Smuzhiyun  * connectivity with the fc-nvme host, so the target may never get a
469*4882a593Smuzhiyun  * response even if it tried.  As such, the action of this routine
470*4882a593Smuzhiyun  * is to asynchronously send the LS, ignore any results of the LS, and
471*4882a593Smuzhiyun  * continue on with terminating the association. If the fc-nvme host
472*4882a593Smuzhiyun  * is present and receives the LS, it too can tear down.
473*4882a593Smuzhiyun  */
474*4882a593Smuzhiyun static void
475*4882a593Smuzhiyun nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
476*4882a593Smuzhiyun {
477*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
478*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
479*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
480*4882a593Smuzhiyun 	struct nvmet_fc_ls_req_op *lsop;
481*4882a593Smuzhiyun 	struct nvmefc_ls_req *lsreq;
482*4882a593Smuzhiyun 	int ret;
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun 	/*
485*4882a593Smuzhiyun 	 * If ls_req is NULL or there is no hosthandle, it's an older lldd and
486*4882a593Smuzhiyun 	 * not sending a message is normal. Otherwise, send unless the hostport has
487*4882a593Smuzhiyun 	 * already been invalidated by the lldd.
488*4882a593Smuzhiyun 	 */
489*4882a593Smuzhiyun 	if (!tgtport->ops->ls_req || !assoc->hostport ||
490*4882a593Smuzhiyun 	    assoc->hostport->invalid)
491*4882a593Smuzhiyun 		return;
492*4882a593Smuzhiyun 
493*4882a593Smuzhiyun 	lsop = kzalloc((sizeof(*lsop) +
494*4882a593Smuzhiyun 			sizeof(*discon_rqst) + sizeof(*discon_acc) +
495*4882a593Smuzhiyun 			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
496*4882a593Smuzhiyun 	if (!lsop) {
497*4882a593Smuzhiyun 		dev_info(tgtport->dev,
498*4882a593Smuzhiyun 			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
499*4882a593Smuzhiyun 			tgtport->fc_target_port.port_num, assoc->a_id);
500*4882a593Smuzhiyun 		return;
501*4882a593Smuzhiyun 	}
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun 	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
504*4882a593Smuzhiyun 	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
505*4882a593Smuzhiyun 	lsreq = &lsop->ls_req;
506*4882a593Smuzhiyun 	if (tgtport->ops->lsrqst_priv_sz)
507*4882a593Smuzhiyun 		lsreq->private = (void *)&discon_acc[1];
508*4882a593Smuzhiyun 	else
509*4882a593Smuzhiyun 		lsreq->private = NULL;
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 	lsop->tgtport = tgtport;
512*4882a593Smuzhiyun 	lsop->hosthandle = assoc->hostport->hosthandle;
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
515*4882a593Smuzhiyun 				assoc->association_id);
516*4882a593Smuzhiyun 
517*4882a593Smuzhiyun 	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
518*4882a593Smuzhiyun 				nvmet_fc_disconnect_assoc_done);
519*4882a593Smuzhiyun 	if (ret) {
520*4882a593Smuzhiyun 		dev_info(tgtport->dev,
521*4882a593Smuzhiyun 			"{%d:%d} XMT Disconnect Association failed: %d\n",
522*4882a593Smuzhiyun 			tgtport->fc_target_port.port_num, assoc->a_id, ret);
523*4882a593Smuzhiyun 		kfree(lsop);
524*4882a593Smuzhiyun 	}
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun /* *********************** FC-NVME Port Management ************************ */
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun static int
532*4882a593Smuzhiyun nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
533*4882a593Smuzhiyun {
534*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod *iod;
535*4882a593Smuzhiyun 	int i;
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
538*4882a593Smuzhiyun 			GFP_KERNEL);
539*4882a593Smuzhiyun 	if (!iod)
540*4882a593Smuzhiyun 		return -ENOMEM;
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	tgtport->iod = iod;
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
545*4882a593Smuzhiyun 		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
546*4882a593Smuzhiyun 		iod->tgtport = tgtport;
547*4882a593Smuzhiyun 		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun 		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
550*4882a593Smuzhiyun 				       sizeof(union nvmefc_ls_responses),
551*4882a593Smuzhiyun 				       GFP_KERNEL);
552*4882a593Smuzhiyun 		if (!iod->rqstbuf)
553*4882a593Smuzhiyun 			goto out_fail;
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
558*4882a593Smuzhiyun 						sizeof(*iod->rspbuf),
559*4882a593Smuzhiyun 						DMA_TO_DEVICE);
560*4882a593Smuzhiyun 		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
561*4882a593Smuzhiyun 			goto out_fail;
562*4882a593Smuzhiyun 	}
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	return 0;
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun out_fail:
567*4882a593Smuzhiyun 	kfree(iod->rqstbuf);
568*4882a593Smuzhiyun 	list_del(&iod->ls_rcv_list);
569*4882a593Smuzhiyun 	for (iod--, i--; i >= 0; iod--, i--) {
570*4882a593Smuzhiyun 		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
571*4882a593Smuzhiyun 				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
572*4882a593Smuzhiyun 		kfree(iod->rqstbuf);
573*4882a593Smuzhiyun 		list_del(&iod->ls_rcv_list);
574*4882a593Smuzhiyun 	}
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	kfree(iod);
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	return -EFAULT;
579*4882a593Smuzhiyun }
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun static void
582*4882a593Smuzhiyun nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
583*4882a593Smuzhiyun {
584*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod *iod = tgtport->iod;
585*4882a593Smuzhiyun 	int i;
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
588*4882a593Smuzhiyun 		fc_dma_unmap_single(tgtport->dev,
589*4882a593Smuzhiyun 				iod->rspdma, sizeof(*iod->rspbuf),
590*4882a593Smuzhiyun 				DMA_TO_DEVICE);
591*4882a593Smuzhiyun 		kfree(iod->rqstbuf);
592*4882a593Smuzhiyun 		list_del(&iod->ls_rcv_list);
593*4882a593Smuzhiyun 	}
594*4882a593Smuzhiyun 	kfree(tgtport->iod);
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun static struct nvmet_fc_ls_iod *
598*4882a593Smuzhiyun nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
599*4882a593Smuzhiyun {
600*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod *iod;
601*4882a593Smuzhiyun 	unsigned long flags;
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
604*4882a593Smuzhiyun 	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
605*4882a593Smuzhiyun 					struct nvmet_fc_ls_iod, ls_rcv_list);
606*4882a593Smuzhiyun 	if (iod)
607*4882a593Smuzhiyun 		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
608*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
609*4882a593Smuzhiyun 	return iod;
610*4882a593Smuzhiyun }
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun static void
614*4882a593Smuzhiyun nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
615*4882a593Smuzhiyun 			struct nvmet_fc_ls_iod *iod)
616*4882a593Smuzhiyun {
617*4882a593Smuzhiyun 	unsigned long flags;
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
620*4882a593Smuzhiyun 	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
621*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
622*4882a593Smuzhiyun }
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun static void
625*4882a593Smuzhiyun nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
626*4882a593Smuzhiyun 				struct nvmet_fc_tgt_queue *queue)
627*4882a593Smuzhiyun {
628*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod = queue->fod;
629*4882a593Smuzhiyun 	int i;
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	for (i = 0; i < queue->sqsize; fod++, i++) {
632*4882a593Smuzhiyun 		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
633*4882a593Smuzhiyun 		fod->tgtport = tgtport;
634*4882a593Smuzhiyun 		fod->queue = queue;
635*4882a593Smuzhiyun 		fod->active = false;
636*4882a593Smuzhiyun 		fod->abort = false;
637*4882a593Smuzhiyun 		fod->aborted = false;
638*4882a593Smuzhiyun 		fod->fcpreq = NULL;
639*4882a593Smuzhiyun 		list_add_tail(&fod->fcp_list, &queue->fod_list);
640*4882a593Smuzhiyun 		spin_lock_init(&fod->flock);
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun 		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
643*4882a593Smuzhiyun 					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
644*4882a593Smuzhiyun 		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
645*4882a593Smuzhiyun 			list_del(&fod->fcp_list);
646*4882a593Smuzhiyun 			for (fod--, i--; i >= 0; fod--, i--) {
647*4882a593Smuzhiyun 				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
648*4882a593Smuzhiyun 						sizeof(fod->rspiubuf),
649*4882a593Smuzhiyun 						DMA_TO_DEVICE);
650*4882a593Smuzhiyun 				fod->rspdma = 0L;
651*4882a593Smuzhiyun 				list_del(&fod->fcp_list);
652*4882a593Smuzhiyun 			}
653*4882a593Smuzhiyun 
654*4882a593Smuzhiyun 			return;
655*4882a593Smuzhiyun 		}
656*4882a593Smuzhiyun 	}
657*4882a593Smuzhiyun }
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun static void
660*4882a593Smuzhiyun nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
661*4882a593Smuzhiyun 				struct nvmet_fc_tgt_queue *queue)
662*4882a593Smuzhiyun {
663*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod = queue->fod;
664*4882a593Smuzhiyun 	int i;
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	for (i = 0; i < queue->sqsize; fod++, i++) {
667*4882a593Smuzhiyun 		if (fod->rspdma)
668*4882a593Smuzhiyun 			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
669*4882a593Smuzhiyun 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
670*4882a593Smuzhiyun 	}
671*4882a593Smuzhiyun }
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun static struct nvmet_fc_fcp_iod *
674*4882a593Smuzhiyun nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
675*4882a593Smuzhiyun {
676*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod;
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun 	lockdep_assert_held(&queue->qlock);
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	fod = list_first_entry_or_null(&queue->fod_list,
681*4882a593Smuzhiyun 					struct nvmet_fc_fcp_iod, fcp_list);
682*4882a593Smuzhiyun 	if (fod) {
683*4882a593Smuzhiyun 		list_del(&fod->fcp_list);
684*4882a593Smuzhiyun 		fod->active = true;
685*4882a593Smuzhiyun 		/*
686*4882a593Smuzhiyun 		 * no queue reference is taken, as it was taken by the
687*4882a593Smuzhiyun 		 * queue lookup just prior to the allocation. The iod
688*4882a593Smuzhiyun 		 * will "inherit" that reference.
689*4882a593Smuzhiyun 		 */
690*4882a593Smuzhiyun 	}
691*4882a593Smuzhiyun 	return fod;
692*4882a593Smuzhiyun }
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun static void
696*4882a593Smuzhiyun nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
697*4882a593Smuzhiyun 		       struct nvmet_fc_tgt_queue *queue,
698*4882a593Smuzhiyun 		       struct nvmefc_tgt_fcp_req *fcpreq)
699*4882a593Smuzhiyun {
700*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun 	/*
703*4882a593Smuzhiyun 	 * put all admin cmds on hw queue id 0. All io commands go to
704*4882a593Smuzhiyun 	 * the respective hw queue based on a modulo basis
705*4882a593Smuzhiyun 	 */
706*4882a593Smuzhiyun 	fcpreq->hwqid = queue->qid ?
707*4882a593Smuzhiyun 			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	nvmet_fc_handle_fcp_rqst(tgtport, fod);
710*4882a593Smuzhiyun }
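
/*
 * Editor's note (illustrative example, not part of the original driver):
 * with tgtport->ops->max_hw_queues == 4, the modulo mapping in
 * nvmet_fc_queue_fcp_req() above places the admin queue (qid 0) on hwqid 0
 * and io queues qid 1..8 on hwqids 0,1,2,3,0,1,2,3 respectively.
 */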
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun static void
713*4882a593Smuzhiyun nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
714*4882a593Smuzhiyun {
715*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod =
716*4882a593Smuzhiyun 		container_of(work, struct nvmet_fc_fcp_iod, defer_work);
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 	/* Submit deferred IO for processing */
719*4882a593Smuzhiyun 	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun }
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun static void
724*4882a593Smuzhiyun nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
725*4882a593Smuzhiyun 			struct nvmet_fc_fcp_iod *fod)
726*4882a593Smuzhiyun {
727*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
728*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
729*4882a593Smuzhiyun 	struct nvmet_fc_defer_fcp_req *deferfcp;
730*4882a593Smuzhiyun 	unsigned long flags;
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun 	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
733*4882a593Smuzhiyun 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	fcpreq->nvmet_fc_private = NULL;
736*4882a593Smuzhiyun 
737*4882a593Smuzhiyun 	fod->active = false;
738*4882a593Smuzhiyun 	fod->abort = false;
739*4882a593Smuzhiyun 	fod->aborted = false;
740*4882a593Smuzhiyun 	fod->writedataactive = false;
741*4882a593Smuzhiyun 	fod->fcpreq = NULL;
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	/* release the queue lookup reference on the completed IO */
746*4882a593Smuzhiyun 	nvmet_fc_tgt_q_put(queue);
747*4882a593Smuzhiyun 
748*4882a593Smuzhiyun 	spin_lock_irqsave(&queue->qlock, flags);
749*4882a593Smuzhiyun 	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
750*4882a593Smuzhiyun 				struct nvmet_fc_defer_fcp_req, req_list);
751*4882a593Smuzhiyun 	if (!deferfcp) {
752*4882a593Smuzhiyun 		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
753*4882a593Smuzhiyun 		spin_unlock_irqrestore(&queue->qlock, flags);
754*4882a593Smuzhiyun 		return;
755*4882a593Smuzhiyun 	}
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	/* Re-use the fod for the next pending cmd that was deferred */
758*4882a593Smuzhiyun 	list_del(&deferfcp->req_list);
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	fcpreq = deferfcp->fcp_req;
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	/* deferfcp can be reused for another IO at a later date */
763*4882a593Smuzhiyun 	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	spin_unlock_irqrestore(&queue->qlock, flags);
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 	/* Save NVME CMD IO in fod */
768*4882a593Smuzhiyun 	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	/* Setup new fcpreq to be processed */
771*4882a593Smuzhiyun 	fcpreq->rspaddr = NULL;
772*4882a593Smuzhiyun 	fcpreq->rsplen  = 0;
773*4882a593Smuzhiyun 	fcpreq->nvmet_fc_private = fod;
774*4882a593Smuzhiyun 	fod->fcpreq = fcpreq;
775*4882a593Smuzhiyun 	fod->active = true;
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 	/* inform LLDD IO is now being processed */
778*4882a593Smuzhiyun 	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
779*4882a593Smuzhiyun 
780*4882a593Smuzhiyun 	/*
781*4882a593Smuzhiyun 	 * Leave the queue lookup get reference taken when
782*4882a593Smuzhiyun 	 * fod was originally allocated.
783*4882a593Smuzhiyun 	 */
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	queue_work(queue->work_q, &fod->defer_work);
786*4882a593Smuzhiyun }
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun static struct nvmet_fc_tgt_queue *
789*4882a593Smuzhiyun nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
790*4882a593Smuzhiyun 			u16 qid, u16 sqsize)
791*4882a593Smuzhiyun {
792*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue;
793*4882a593Smuzhiyun 	unsigned long flags;
794*4882a593Smuzhiyun 	int ret;
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	if (qid > NVMET_NR_QUEUES)
797*4882a593Smuzhiyun 		return NULL;
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
800*4882a593Smuzhiyun 	if (!queue)
801*4882a593Smuzhiyun 		return NULL;
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun 	if (!nvmet_fc_tgt_a_get(assoc))
804*4882a593Smuzhiyun 		goto out_free_queue;
805*4882a593Smuzhiyun 
806*4882a593Smuzhiyun 	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
807*4882a593Smuzhiyun 				assoc->tgtport->fc_target_port.port_num,
808*4882a593Smuzhiyun 				assoc->a_id, qid);
809*4882a593Smuzhiyun 	if (!queue->work_q)
810*4882a593Smuzhiyun 		goto out_a_put;
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	queue->qid = qid;
813*4882a593Smuzhiyun 	queue->sqsize = sqsize;
814*4882a593Smuzhiyun 	queue->assoc = assoc;
815*4882a593Smuzhiyun 	INIT_LIST_HEAD(&queue->fod_list);
816*4882a593Smuzhiyun 	INIT_LIST_HEAD(&queue->avail_defer_list);
817*4882a593Smuzhiyun 	INIT_LIST_HEAD(&queue->pending_cmd_list);
818*4882a593Smuzhiyun 	atomic_set(&queue->connected, 0);
819*4882a593Smuzhiyun 	atomic_set(&queue->sqtail, 0);
820*4882a593Smuzhiyun 	atomic_set(&queue->rsn, 1);
821*4882a593Smuzhiyun 	atomic_set(&queue->zrspcnt, 0);
822*4882a593Smuzhiyun 	spin_lock_init(&queue->qlock);
823*4882a593Smuzhiyun 	kref_init(&queue->ref);
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun 	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 	ret = nvmet_sq_init(&queue->nvme_sq);
828*4882a593Smuzhiyun 	if (ret)
829*4882a593Smuzhiyun 		goto out_fail_iodlist;
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun 	WARN_ON(assoc->queues[qid]);
832*4882a593Smuzhiyun 	spin_lock_irqsave(&assoc->tgtport->lock, flags);
833*4882a593Smuzhiyun 	assoc->queues[qid] = queue;
834*4882a593Smuzhiyun 	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 	return queue;
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun out_fail_iodlist:
839*4882a593Smuzhiyun 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
840*4882a593Smuzhiyun 	destroy_workqueue(queue->work_q);
841*4882a593Smuzhiyun out_a_put:
842*4882a593Smuzhiyun 	nvmet_fc_tgt_a_put(assoc);
843*4882a593Smuzhiyun out_free_queue:
844*4882a593Smuzhiyun 	kfree(queue);
845*4882a593Smuzhiyun 	return NULL;
846*4882a593Smuzhiyun }
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun static void
850*4882a593Smuzhiyun nvmet_fc_tgt_queue_free(struct kref *ref)
851*4882a593Smuzhiyun {
852*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue =
853*4882a593Smuzhiyun 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
854*4882a593Smuzhiyun 	unsigned long flags;
855*4882a593Smuzhiyun 
856*4882a593Smuzhiyun 	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
857*4882a593Smuzhiyun 	queue->assoc->queues[queue->qid] = NULL;
858*4882a593Smuzhiyun 	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun 	nvmet_fc_tgt_a_put(queue->assoc);
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 	destroy_workqueue(queue->work_q);
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun 	kfree(queue);
867*4882a593Smuzhiyun }
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun static void
870*4882a593Smuzhiyun nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
871*4882a593Smuzhiyun {
872*4882a593Smuzhiyun 	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
873*4882a593Smuzhiyun }
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun static int
876*4882a593Smuzhiyun nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
877*4882a593Smuzhiyun {
878*4882a593Smuzhiyun 	return kref_get_unless_zero(&queue->ref);
879*4882a593Smuzhiyun }
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun static void
883*4882a593Smuzhiyun nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
884*4882a593Smuzhiyun {
885*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
886*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod = queue->fod;
887*4882a593Smuzhiyun 	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
888*4882a593Smuzhiyun 	unsigned long flags;
889*4882a593Smuzhiyun 	int i;
890*4882a593Smuzhiyun 	bool disconnect;
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun 	disconnect = atomic_xchg(&queue->connected, 0);
893*4882a593Smuzhiyun 
894*4882a593Smuzhiyun 	/* if not connected, nothing to do */
895*4882a593Smuzhiyun 	if (!disconnect)
896*4882a593Smuzhiyun 		return;
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	spin_lock_irqsave(&queue->qlock, flags);
899*4882a593Smuzhiyun 	/* abort outstanding io's */
900*4882a593Smuzhiyun 	for (i = 0; i < queue->sqsize; fod++, i++) {
901*4882a593Smuzhiyun 		if (fod->active) {
902*4882a593Smuzhiyun 			spin_lock(&fod->flock);
903*4882a593Smuzhiyun 			fod->abort = true;
904*4882a593Smuzhiyun 			/*
905*4882a593Smuzhiyun 			 * only call lldd abort routine if waiting for
906*4882a593Smuzhiyun 			 * writedata. other outstanding ops should finish
907*4882a593Smuzhiyun 			 * on their own.
908*4882a593Smuzhiyun 			 */
909*4882a593Smuzhiyun 			if (fod->writedataactive) {
910*4882a593Smuzhiyun 				fod->aborted = true;
911*4882a593Smuzhiyun 				spin_unlock(&fod->flock);
912*4882a593Smuzhiyun 				tgtport->ops->fcp_abort(
913*4882a593Smuzhiyun 					&tgtport->fc_target_port, fod->fcpreq);
914*4882a593Smuzhiyun 			} else
915*4882a593Smuzhiyun 				spin_unlock(&fod->flock);
916*4882a593Smuzhiyun 		}
917*4882a593Smuzhiyun 	}
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun 	/* Cleanup defer'ed IOs in queue */
920*4882a593Smuzhiyun 	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
921*4882a593Smuzhiyun 				req_list) {
922*4882a593Smuzhiyun 		list_del(&deferfcp->req_list);
923*4882a593Smuzhiyun 		kfree(deferfcp);
924*4882a593Smuzhiyun 	}
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	for (;;) {
927*4882a593Smuzhiyun 		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
928*4882a593Smuzhiyun 				struct nvmet_fc_defer_fcp_req, req_list);
929*4882a593Smuzhiyun 		if (!deferfcp)
930*4882a593Smuzhiyun 			break;
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 		list_del(&deferfcp->req_list);
933*4882a593Smuzhiyun 		spin_unlock_irqrestore(&queue->qlock, flags);
934*4882a593Smuzhiyun 
935*4882a593Smuzhiyun 		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
936*4882a593Smuzhiyun 				deferfcp->fcp_req);
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun 		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
939*4882a593Smuzhiyun 				deferfcp->fcp_req);
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun 		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
942*4882a593Smuzhiyun 				deferfcp->fcp_req);
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 		/* release the queue lookup reference */
945*4882a593Smuzhiyun 		nvmet_fc_tgt_q_put(queue);
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 		kfree(deferfcp);
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun 		spin_lock_irqsave(&queue->qlock, flags);
950*4882a593Smuzhiyun 	}
951*4882a593Smuzhiyun 	spin_unlock_irqrestore(&queue->qlock, flags);
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	flush_workqueue(queue->work_q);
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 	nvmet_sq_destroy(&queue->nvme_sq);
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 	nvmet_fc_tgt_q_put(queue);
958*4882a593Smuzhiyun }
959*4882a593Smuzhiyun 
960*4882a593Smuzhiyun static struct nvmet_fc_tgt_queue *
961*4882a593Smuzhiyun nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
962*4882a593Smuzhiyun 				u64 connection_id)
963*4882a593Smuzhiyun {
964*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc;
965*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue;
966*4882a593Smuzhiyun 	u64 association_id = nvmet_fc_getassociationid(connection_id);
967*4882a593Smuzhiyun 	u16 qid = nvmet_fc_getqueueid(connection_id);
968*4882a593Smuzhiyun 	unsigned long flags;
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 	if (qid > NVMET_NR_QUEUES)
971*4882a593Smuzhiyun 		return NULL;
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
974*4882a593Smuzhiyun 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
975*4882a593Smuzhiyun 		if (association_id == assoc->association_id) {
976*4882a593Smuzhiyun 			queue = assoc->queues[qid];
977*4882a593Smuzhiyun 			if (queue &&
978*4882a593Smuzhiyun 			    (!atomic_read(&queue->connected) ||
979*4882a593Smuzhiyun 			     !nvmet_fc_tgt_q_get(queue)))
980*4882a593Smuzhiyun 				queue = NULL;
981*4882a593Smuzhiyun 			spin_unlock_irqrestore(&tgtport->lock, flags);
982*4882a593Smuzhiyun 			return queue;
983*4882a593Smuzhiyun 		}
984*4882a593Smuzhiyun 	}
985*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
986*4882a593Smuzhiyun 	return NULL;
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun static void
990*4882a593Smuzhiyun nvmet_fc_hostport_free(struct kref *ref)
991*4882a593Smuzhiyun {
992*4882a593Smuzhiyun 	struct nvmet_fc_hostport *hostport =
993*4882a593Smuzhiyun 		container_of(ref, struct nvmet_fc_hostport, ref);
994*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
995*4882a593Smuzhiyun 	unsigned long flags;
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
998*4882a593Smuzhiyun 	list_del(&hostport->host_list);
999*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
1000*4882a593Smuzhiyun 	if (tgtport->ops->host_release && hostport->invalid)
1001*4882a593Smuzhiyun 		tgtport->ops->host_release(hostport->hosthandle);
1002*4882a593Smuzhiyun 	kfree(hostport);
1003*4882a593Smuzhiyun 	nvmet_fc_tgtport_put(tgtport);
1004*4882a593Smuzhiyun }
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun static void
1007*4882a593Smuzhiyun nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
1008*4882a593Smuzhiyun {
1009*4882a593Smuzhiyun 	kref_put(&hostport->ref, nvmet_fc_hostport_free);
1010*4882a593Smuzhiyun }
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun static int
1013*4882a593Smuzhiyun nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
1014*4882a593Smuzhiyun {
1015*4882a593Smuzhiyun 	return kref_get_unless_zero(&hostport->ref);
1016*4882a593Smuzhiyun }
1017*4882a593Smuzhiyun 
1018*4882a593Smuzhiyun static void
1019*4882a593Smuzhiyun nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
1020*4882a593Smuzhiyun {
1021*4882a593Smuzhiyun 	/* if LLDD not implemented, leave as NULL */
1022*4882a593Smuzhiyun 	if (!hostport || !hostport->hosthandle)
1023*4882a593Smuzhiyun 		return;
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun 	nvmet_fc_hostport_put(hostport);
1026*4882a593Smuzhiyun }
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun static struct nvmet_fc_hostport *
1029*4882a593Smuzhiyun nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1030*4882a593Smuzhiyun {
1031*4882a593Smuzhiyun 	struct nvmet_fc_hostport *newhost, *host, *match = NULL;
1032*4882a593Smuzhiyun 	unsigned long flags;
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 	/* if LLDD not implemented, leave as NULL */
1035*4882a593Smuzhiyun 	if (!hosthandle)
1036*4882a593Smuzhiyun 		return NULL;
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	/* take reference for what will be the newly allocated hostport */
1039*4882a593Smuzhiyun 	if (!nvmet_fc_tgtport_get(tgtport))
1040*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
1043*4882a593Smuzhiyun 	if (!newhost) {
1044*4882a593Smuzhiyun 		spin_lock_irqsave(&tgtport->lock, flags);
1045*4882a593Smuzhiyun 		list_for_each_entry(host, &tgtport->host_list, host_list) {
1046*4882a593Smuzhiyun 			if (host->hosthandle == hosthandle && !host->invalid) {
1047*4882a593Smuzhiyun 				if (nvmet_fc_hostport_get(host)) {
1048*4882a593Smuzhiyun 					match = host;
1049*4882a593Smuzhiyun 					break;
1050*4882a593Smuzhiyun 				}
1051*4882a593Smuzhiyun 			}
1052*4882a593Smuzhiyun 		}
1053*4882a593Smuzhiyun 		spin_unlock_irqrestore(&tgtport->lock, flags);
1054*4882a593Smuzhiyun 		/* no allocation - release reference */
1055*4882a593Smuzhiyun 		nvmet_fc_tgtport_put(tgtport);
1056*4882a593Smuzhiyun 		return (match) ? match : ERR_PTR(-ENOMEM);
1057*4882a593Smuzhiyun 	}
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	newhost->tgtport = tgtport;
1060*4882a593Smuzhiyun 	newhost->hosthandle = hosthandle;
1061*4882a593Smuzhiyun 	INIT_LIST_HEAD(&newhost->host_list);
1062*4882a593Smuzhiyun 	kref_init(&newhost->ref);
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
1065*4882a593Smuzhiyun 	list_for_each_entry(host, &tgtport->host_list, host_list) {
1066*4882a593Smuzhiyun 		if (host->hosthandle == hosthandle && !host->invalid) {
1067*4882a593Smuzhiyun 			if (nvmet_fc_hostport_get(host)) {
1068*4882a593Smuzhiyun 				match = host;
1069*4882a593Smuzhiyun 				break;
1070*4882a593Smuzhiyun 			}
1071*4882a593Smuzhiyun 		}
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 	if (match) {
1074*4882a593Smuzhiyun 		kfree(newhost);
1075*4882a593Smuzhiyun 		newhost = NULL;
1076*4882a593Smuzhiyun 		/* releasing allocation - release reference */
1077*4882a593Smuzhiyun 		nvmet_fc_tgtport_put(tgtport);
1078*4882a593Smuzhiyun 	} else
1079*4882a593Smuzhiyun 		list_add_tail(&newhost->host_list, &tgtport->host_list);
1080*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	return (match) ? match : newhost;
1083*4882a593Smuzhiyun }
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun static void
1086*4882a593Smuzhiyun nvmet_fc_delete_assoc(struct work_struct *work)
1087*4882a593Smuzhiyun {
1088*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc =
1089*4882a593Smuzhiyun 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	nvmet_fc_delete_target_assoc(assoc);
1092*4882a593Smuzhiyun 	nvmet_fc_tgt_a_put(assoc);
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun static struct nvmet_fc_tgt_assoc *
1096*4882a593Smuzhiyun nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1097*4882a593Smuzhiyun {
1098*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
1099*4882a593Smuzhiyun 	unsigned long flags;
1100*4882a593Smuzhiyun 	u64 ran;
1101*4882a593Smuzhiyun 	int idx;
1102*4882a593Smuzhiyun 	bool needrandom = true;
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
1105*4882a593Smuzhiyun 	if (!assoc)
1106*4882a593Smuzhiyun 		return NULL;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
1109*4882a593Smuzhiyun 	if (idx < 0)
1110*4882a593Smuzhiyun 		goto out_free_assoc;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	if (!nvmet_fc_tgtport_get(tgtport))
1113*4882a593Smuzhiyun 		goto out_ida;
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
1116*4882a593Smuzhiyun 	if (IS_ERR(assoc->hostport))
1117*4882a593Smuzhiyun 		goto out_put;
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	assoc->tgtport = tgtport;
1120*4882a593Smuzhiyun 	assoc->a_id = idx;
1121*4882a593Smuzhiyun 	INIT_LIST_HEAD(&assoc->a_list);
1122*4882a593Smuzhiyun 	kref_init(&assoc->ref);
1123*4882a593Smuzhiyun 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
1124*4882a593Smuzhiyun 	atomic_set(&assoc->terminating, 0);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	while (needrandom) {
1127*4882a593Smuzhiyun 		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
1128*4882a593Smuzhiyun 		ran = ran << BYTES_FOR_QID_SHIFT;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 		spin_lock_irqsave(&tgtport->lock, flags);
1131*4882a593Smuzhiyun 		needrandom = false;
1132*4882a593Smuzhiyun 		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
1133*4882a593Smuzhiyun 			if (ran == tmpassoc->association_id) {
1134*4882a593Smuzhiyun 				needrandom = true;
1135*4882a593Smuzhiyun 				break;
1136*4882a593Smuzhiyun 			}
1137*4882a593Smuzhiyun 		}
1138*4882a593Smuzhiyun 		if (!needrandom) {
1139*4882a593Smuzhiyun 			assoc->association_id = ran;
1140*4882a593Smuzhiyun 			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
1141*4882a593Smuzhiyun 		}
1142*4882a593Smuzhiyun 		spin_unlock_irqrestore(&tgtport->lock, flags);
1143*4882a593Smuzhiyun 	}
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	return assoc;
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun out_put:
1148*4882a593Smuzhiyun 	nvmet_fc_tgtport_put(tgtport);
1149*4882a593Smuzhiyun out_ida:
1150*4882a593Smuzhiyun 	ida_simple_remove(&tgtport->assoc_cnt, idx);
1151*4882a593Smuzhiyun out_free_assoc:
1152*4882a593Smuzhiyun 	kfree(assoc);
1153*4882a593Smuzhiyun 	return NULL;
1154*4882a593Smuzhiyun }
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun static void
1157*4882a593Smuzhiyun nvmet_fc_target_assoc_free(struct kref *ref)
1158*4882a593Smuzhiyun {
1159*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc =
1160*4882a593Smuzhiyun 		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
1161*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1162*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod	*oldls;
1163*4882a593Smuzhiyun 	unsigned long flags;
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	/* Send Disconnect now that all i/o has completed */
1166*4882a593Smuzhiyun 	nvmet_fc_xmt_disconnect_assoc(assoc);
1167*4882a593Smuzhiyun 
1168*4882a593Smuzhiyun 	nvmet_fc_free_hostport(assoc->hostport);
1169*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
1170*4882a593Smuzhiyun 	list_del(&assoc->a_list);
1171*4882a593Smuzhiyun 	oldls = assoc->rcv_disconn;
1172*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
1173*4882a593Smuzhiyun 	/* if pending Rcv Disconnect Association LS, send rsp now */
1174*4882a593Smuzhiyun 	if (oldls)
1175*4882a593Smuzhiyun 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1176*4882a593Smuzhiyun 	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
1177*4882a593Smuzhiyun 	dev_info(tgtport->dev,
1178*4882a593Smuzhiyun 		"{%d:%d} Association freed\n",
1179*4882a593Smuzhiyun 		tgtport->fc_target_port.port_num, assoc->a_id);
1180*4882a593Smuzhiyun 	kfree(assoc);
1181*4882a593Smuzhiyun 	nvmet_fc_tgtport_put(tgtport);
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun static void
1185*4882a593Smuzhiyun nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun 	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun static int
1191*4882a593Smuzhiyun nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
1192*4882a593Smuzhiyun {
1193*4882a593Smuzhiyun 	return kref_get_unless_zero(&assoc->ref);
1194*4882a593Smuzhiyun }
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun static void
1197*4882a593Smuzhiyun nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
1198*4882a593Smuzhiyun {
1199*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1200*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue;
1201*4882a593Smuzhiyun 	unsigned long flags;
1202*4882a593Smuzhiyun 	int i, terminating;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	terminating = atomic_xchg(&assoc->terminating, 1);
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	/* if already terminating, do nothing */
1207*4882a593Smuzhiyun 	if (terminating)
1208*4882a593Smuzhiyun 		return;
1209*4882a593Smuzhiyun 
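	/*
	 * Tear down every queue on the association. tgtport->lock is
	 * dropped across nvmet_fc_delete_target_queue() and re-taken
	 * before examining the next queue slot, so queue teardown never
	 * runs under the spinlock.
	 */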
1210*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
1211*4882a593Smuzhiyun 	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
1212*4882a593Smuzhiyun 		queue = assoc->queues[i];
1213*4882a593Smuzhiyun 		if (queue) {
1214*4882a593Smuzhiyun 			if (!nvmet_fc_tgt_q_get(queue))
1215*4882a593Smuzhiyun 				continue;
1216*4882a593Smuzhiyun 			spin_unlock_irqrestore(&tgtport->lock, flags);
1217*4882a593Smuzhiyun 			nvmet_fc_delete_target_queue(queue);
1218*4882a593Smuzhiyun 			nvmet_fc_tgt_q_put(queue);
1219*4882a593Smuzhiyun 			spin_lock_irqsave(&tgtport->lock, flags);
1220*4882a593Smuzhiyun 		}
1221*4882a593Smuzhiyun 	}
1222*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	dev_info(tgtport->dev,
1225*4882a593Smuzhiyun 		"{%d:%d} Association deleted\n",
1226*4882a593Smuzhiyun 		tgtport->fc_target_port.port_num, assoc->a_id);
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	nvmet_fc_tgt_a_put(assoc);
1229*4882a593Smuzhiyun }
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun static struct nvmet_fc_tgt_assoc *
1232*4882a593Smuzhiyun nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1233*4882a593Smuzhiyun 				u64 association_id)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc;
1236*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *ret = NULL;
1237*4882a593Smuzhiyun 	unsigned long flags;
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
1240*4882a593Smuzhiyun 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1241*4882a593Smuzhiyun 		if (association_id == assoc->association_id) {
1242*4882a593Smuzhiyun 			ret = assoc;
1243*4882a593Smuzhiyun 			if (!nvmet_fc_tgt_a_get(assoc))
1244*4882a593Smuzhiyun 				ret = NULL;
1245*4882a593Smuzhiyun 			break;
1246*4882a593Smuzhiyun 		}
1247*4882a593Smuzhiyun 	}
1248*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	return ret;
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun static void
1254*4882a593Smuzhiyun nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
1255*4882a593Smuzhiyun 			struct nvmet_fc_port_entry *pe,
1256*4882a593Smuzhiyun 			struct nvmet_port *port)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun 	lockdep_assert_held(&nvmet_fc_tgtlock);
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	pe->tgtport = tgtport;
1261*4882a593Smuzhiyun 	tgtport->pe = pe;
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	pe->port = port;
1264*4882a593Smuzhiyun 	port->priv = pe;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	pe->node_name = tgtport->fc_target_port.node_name;
1267*4882a593Smuzhiyun 	pe->port_name = tgtport->fc_target_port.port_name;
1268*4882a593Smuzhiyun 	INIT_LIST_HEAD(&pe->pe_list);
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun static void
1274*4882a593Smuzhiyun nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
1275*4882a593Smuzhiyun {
1276*4882a593Smuzhiyun 	unsigned long flags;
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1279*4882a593Smuzhiyun 	if (pe->tgtport)
1280*4882a593Smuzhiyun 		pe->tgtport->pe = NULL;
1281*4882a593Smuzhiyun 	list_del(&pe->pe_list);
1282*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun /*
1286*4882a593Smuzhiyun  * called when a targetport deregisters. Breaks the relationship
1287*4882a593Smuzhiyun  * with the nvmet port, but leaves the port_entry in place so that
1288*4882a593Smuzhiyun  * re-registration can resume operation.
1289*4882a593Smuzhiyun  */
1290*4882a593Smuzhiyun static void
1291*4882a593Smuzhiyun nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1292*4882a593Smuzhiyun {
1293*4882a593Smuzhiyun 	struct nvmet_fc_port_entry *pe;
1294*4882a593Smuzhiyun 	unsigned long flags;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1297*4882a593Smuzhiyun 	pe = tgtport->pe;
1298*4882a593Smuzhiyun 	if (pe)
1299*4882a593Smuzhiyun 		pe->tgtport = NULL;
1300*4882a593Smuzhiyun 	tgtport->pe = NULL;
1301*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun /*
1305*4882a593Smuzhiyun  * called when a new targetport is registered. Looks in the
1306*4882a593Smuzhiyun  * existing nvmet port_entries to see if the nvmet layer is
1307*4882a593Smuzhiyun  * configured for the targetport's wwn's. (the targetport existed,
1308*4882a593Smuzhiyun  * nvmet configured, the lldd unregistered the tgtport, and is now
1309*4882a593Smuzhiyun  * reregistering the same targetport).  If so, set the nvmet port
1310*4882a593Smuzhiyun  * entry on the targetport.
1311*4882a593Smuzhiyun  */
1312*4882a593Smuzhiyun static void
1313*4882a593Smuzhiyun nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1314*4882a593Smuzhiyun {
1315*4882a593Smuzhiyun 	struct nvmet_fc_port_entry *pe;
1316*4882a593Smuzhiyun 	unsigned long flags;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1319*4882a593Smuzhiyun 	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
1320*4882a593Smuzhiyun 		if (tgtport->fc_target_port.node_name == pe->node_name &&
1321*4882a593Smuzhiyun 		    tgtport->fc_target_port.port_name == pe->port_name) {
1322*4882a593Smuzhiyun 			WARN_ON(pe->tgtport);
1323*4882a593Smuzhiyun 			tgtport->pe = pe;
1324*4882a593Smuzhiyun 			pe->tgtport = tgtport;
1325*4882a593Smuzhiyun 			break;
1326*4882a593Smuzhiyun 		}
1327*4882a593Smuzhiyun 	}
1328*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun /**
1332*4882a593Smuzhiyun  * nvmet_fc_register_targetport - transport entry point called by an
1333*4882a593Smuzhiyun  *                              LLDD to register the existence of a local
1334*4882a593Smuzhiyun  *                              NVME subsystem FC port.
1335*4882a593Smuzhiyun  * @pinfo:     pointer to information about the port to be registered
1336*4882a593Smuzhiyun  * @template:  LLDD entrypoints and operational parameters for the port
1337*4882a593Smuzhiyun  * @dev:       physical hardware device node port corresponds to. Will be
1338*4882a593Smuzhiyun  *             used for DMA mappings
1339*4882a593Smuzhiyun  * @portptr:   pointer to a target port pointer. Upon success, the routine
1340*4882a593Smuzhiyun  *             will allocate a nvmet_fc_target_port structure and place its
1341*4882a593Smuzhiyun  *             address in the target port pointer. Upon failure, the target
1342*4882a593Smuzhiyun  *             port pointer will be set to NULL.
1343*4882a593Smuzhiyun  *
1344*4882a593Smuzhiyun  * Returns:
1345*4882a593Smuzhiyun  * a completion status. Must be 0 upon success; a negative errno
1346*4882a593Smuzhiyun  * (ex: -ENXIO) upon failure.
1347*4882a593Smuzhiyun  */
1348*4882a593Smuzhiyun int
1349*4882a593Smuzhiyun nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1350*4882a593Smuzhiyun 			struct nvmet_fc_target_template *template,
1351*4882a593Smuzhiyun 			struct device *dev,
1352*4882a593Smuzhiyun 			struct nvmet_fc_target_port **portptr)
1353*4882a593Smuzhiyun {
1354*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *newrec;
1355*4882a593Smuzhiyun 	unsigned long flags;
1356*4882a593Smuzhiyun 	int ret, idx;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (!template->xmt_ls_rsp || !template->fcp_op ||
1359*4882a593Smuzhiyun 	    !template->fcp_abort ||
1360*4882a593Smuzhiyun 	    !template->fcp_req_release || !template->targetport_delete ||
1361*4882a593Smuzhiyun 	    !template->max_hw_queues || !template->max_sgl_segments ||
1362*4882a593Smuzhiyun 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
1363*4882a593Smuzhiyun 		ret = -EINVAL;
1364*4882a593Smuzhiyun 		goto out_regtgt_failed;
1365*4882a593Smuzhiyun 	}
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1368*4882a593Smuzhiyun 			 GFP_KERNEL);
1369*4882a593Smuzhiyun 	if (!newrec) {
1370*4882a593Smuzhiyun 		ret = -ENOMEM;
1371*4882a593Smuzhiyun 		goto out_regtgt_failed;
1372*4882a593Smuzhiyun 	}
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
1375*4882a593Smuzhiyun 	if (idx < 0) {
1376*4882a593Smuzhiyun 		ret = -ENOSPC;
1377*4882a593Smuzhiyun 		goto out_fail_kfree;
1378*4882a593Smuzhiyun 	}
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	if (!get_device(dev) && dev) {
1381*4882a593Smuzhiyun 		ret = -ENODEV;
1382*4882a593Smuzhiyun 		goto out_ida_put;
1383*4882a593Smuzhiyun 	}
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	newrec->fc_target_port.node_name = pinfo->node_name;
1386*4882a593Smuzhiyun 	newrec->fc_target_port.port_name = pinfo->port_name;
1387*4882a593Smuzhiyun 	if (template->target_priv_sz)
1388*4882a593Smuzhiyun 		newrec->fc_target_port.private = &newrec[1];
1389*4882a593Smuzhiyun 	else
1390*4882a593Smuzhiyun 		newrec->fc_target_port.private = NULL;
1391*4882a593Smuzhiyun 	newrec->fc_target_port.port_id = pinfo->port_id;
1392*4882a593Smuzhiyun 	newrec->fc_target_port.port_num = idx;
1393*4882a593Smuzhiyun 	INIT_LIST_HEAD(&newrec->tgt_list);
1394*4882a593Smuzhiyun 	newrec->dev = dev;
1395*4882a593Smuzhiyun 	newrec->ops = template;
1396*4882a593Smuzhiyun 	spin_lock_init(&newrec->lock);
1397*4882a593Smuzhiyun 	INIT_LIST_HEAD(&newrec->ls_rcv_list);
1398*4882a593Smuzhiyun 	INIT_LIST_HEAD(&newrec->ls_req_list);
1399*4882a593Smuzhiyun 	INIT_LIST_HEAD(&newrec->ls_busylist);
1400*4882a593Smuzhiyun 	INIT_LIST_HEAD(&newrec->assoc_list);
1401*4882a593Smuzhiyun 	INIT_LIST_HEAD(&newrec->host_list);
1402*4882a593Smuzhiyun 	kref_init(&newrec->ref);
1403*4882a593Smuzhiyun 	ida_init(&newrec->assoc_cnt);
1404*4882a593Smuzhiyun 	newrec->max_sg_cnt = template->max_sgl_segments;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
1407*4882a593Smuzhiyun 	if (ret) {
1408*4882a593Smuzhiyun 		ret = -ENOMEM;
1409*4882a593Smuzhiyun 		goto out_free_newrec;
1410*4882a593Smuzhiyun 	}
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	nvmet_fc_portentry_rebind_tgt(newrec);
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1415*4882a593Smuzhiyun 	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1416*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	*portptr = &newrec->fc_target_port;
1419*4882a593Smuzhiyun 	return 0;
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun out_free_newrec:
1422*4882a593Smuzhiyun 	put_device(dev);
1423*4882a593Smuzhiyun out_ida_put:
1424*4882a593Smuzhiyun 	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1425*4882a593Smuzhiyun out_fail_kfree:
1426*4882a593Smuzhiyun 	kfree(newrec);
1427*4882a593Smuzhiyun out_regtgt_failed:
1428*4882a593Smuzhiyun 	*portptr = NULL;
1429*4882a593Smuzhiyun 	return ret;
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
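/*
 * Illustrative registration sketch (not part of this driver): a typical
 * LLDD-side call, where example_port_info, example_tgt_template and
 * lldd_dev are hypothetical LLDD objects; only the call signature and the
 * mandatory template fields come from the code above.
 *
 *	struct nvmet_fc_target_port *targetport;
 *	int err;
 *
 *	// the template must supply xmt_ls_rsp, fcp_op, fcp_abort,
 *	// fcp_req_release, targetport_delete, max_hw_queues,
 *	// max_sgl_segments, max_dif_sgl_segments and dma_boundary,
 *	// or the call fails with -EINVAL.
 *	err = nvmet_fc_register_targetport(&example_port_info,
 *					   &example_tgt_template,
 *					   lldd_dev, &targetport);
 *	if (err)
 *		return err;	// targetport was set to NULL on failure
 */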
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun static void
1435*4882a593Smuzhiyun nvmet_fc_free_tgtport(struct kref *ref)
1436*4882a593Smuzhiyun {
1437*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport =
1438*4882a593Smuzhiyun 		container_of(ref, struct nvmet_fc_tgtport, ref);
1439*4882a593Smuzhiyun 	struct device *dev = tgtport->dev;
1440*4882a593Smuzhiyun 	unsigned long flags;
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1443*4882a593Smuzhiyun 	list_del(&tgtport->tgt_list);
1444*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	nvmet_fc_free_ls_iodlist(tgtport);
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	/* let the LLDD know we've finished tearing it down */
1449*4882a593Smuzhiyun 	tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	ida_simple_remove(&nvmet_fc_tgtport_cnt,
1452*4882a593Smuzhiyun 			tgtport->fc_target_port.port_num);
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	ida_destroy(&tgtport->assoc_cnt);
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	kfree(tgtport);
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	put_device(dev);
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun static void
1462*4882a593Smuzhiyun nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1463*4882a593Smuzhiyun {
1464*4882a593Smuzhiyun 	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1465*4882a593Smuzhiyun }
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun static int
1468*4882a593Smuzhiyun nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1469*4882a593Smuzhiyun {
1470*4882a593Smuzhiyun 	return kref_get_unless_zero(&tgtport->ref);
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun static void
1474*4882a593Smuzhiyun __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1475*4882a593Smuzhiyun {
1476*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc, *next;
1477*4882a593Smuzhiyun 	unsigned long flags;
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
1480*4882a593Smuzhiyun 	list_for_each_entry_safe(assoc, next,
1481*4882a593Smuzhiyun 				&tgtport->assoc_list, a_list) {
1482*4882a593Smuzhiyun 		if (!nvmet_fc_tgt_a_get(assoc))
1483*4882a593Smuzhiyun 			continue;
1484*4882a593Smuzhiyun 		if (!schedule_work(&assoc->del_work))
1485*4882a593Smuzhiyun 			/* already deleting - release local reference */
1486*4882a593Smuzhiyun 			nvmet_fc_tgt_a_put(assoc);
1487*4882a593Smuzhiyun 	}
1488*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun /**
1492*4882a593Smuzhiyun  * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1493*4882a593Smuzhiyun  *                       to remove references to a hosthandle for LS's.
1494*4882a593Smuzhiyun  *
1495*4882a593Smuzhiyun  * The nvmet-fc layer ensures that any references to the hosthandle
1496*4882a593Smuzhiyun  * on the targetport are forgotten (set to NULL).  The LLDD will
1497*4882a593Smuzhiyun  * typically call this when a login with a remote host port has been
1498*4882a593Smuzhiyun  * lost, thus LS's for the remote host port are no longer possible.
1499*4882a593Smuzhiyun  *
1500*4882a593Smuzhiyun  * If an LS request is outstanding to the targetport/hosthandle (or
1501*4882a593Smuzhiyun  * issued concurrently with the call to invalidate the host), the
1502*4882a593Smuzhiyun  * LLDD is responsible for terminating/aborting the LS and completing
1503*4882a593Smuzhiyun  * the LS request. It is recommended that these terminations/aborts
1504*4882a593Smuzhiyun  * occur after calling to invalidate the host handle to avoid additional
1505*4882a593Smuzhiyun  * retries by the nvmet-fc transport. The nvmet-fc transport may
1506*4882a593Smuzhiyun  * continue to reference host handle while it cleans up outstanding
1507*4882a593Smuzhiyun  * NVME associations. The nvmet-fc transport will call the
1508*4882a593Smuzhiyun  * ops->host_release() callback to notify the LLDD that all references
1509*4882a593Smuzhiyun  * are complete and the related host handle can be recovered.
1510*4882a593Smuzhiyun  * Note: if there are no references, the callback may be called before
1511*4882a593Smuzhiyun  * the invalidate host call returns.
1512*4882a593Smuzhiyun  *
1513*4882a593Smuzhiyun  * @target_port: pointer to the (registered) target port that a prior
1514*4882a593Smuzhiyun  *              LS was received on and which supplied the transport the
1515*4882a593Smuzhiyun  *              hosthandle.
1516*4882a593Smuzhiyun  * @hosthandle: the handle (pointer) that represents the host port
1517*4882a593Smuzhiyun  *              that no longer has connectivity and that LS's should
1518*4882a593Smuzhiyun  *              no longer be directed to.
1519*4882a593Smuzhiyun  */
1520*4882a593Smuzhiyun void
1521*4882a593Smuzhiyun nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
1522*4882a593Smuzhiyun 			void *hosthandle)
1523*4882a593Smuzhiyun {
1524*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1525*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc, *next;
1526*4882a593Smuzhiyun 	unsigned long flags;
1527*4882a593Smuzhiyun 	bool noassoc = true;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
1530*4882a593Smuzhiyun 	list_for_each_entry_safe(assoc, next,
1531*4882a593Smuzhiyun 				&tgtport->assoc_list, a_list) {
1532*4882a593Smuzhiyun 		if (!assoc->hostport ||
1533*4882a593Smuzhiyun 		    assoc->hostport->hosthandle != hosthandle)
1534*4882a593Smuzhiyun 			continue;
1535*4882a593Smuzhiyun 		if (!nvmet_fc_tgt_a_get(assoc))
1536*4882a593Smuzhiyun 			continue;
1537*4882a593Smuzhiyun 		assoc->hostport->invalid = 1;
1538*4882a593Smuzhiyun 		noassoc = false;
1539*4882a593Smuzhiyun 		if (!schedule_work(&assoc->del_work))
1540*4882a593Smuzhiyun 			/* already deleting - release local reference */
1541*4882a593Smuzhiyun 			nvmet_fc_tgt_a_put(assoc);
1542*4882a593Smuzhiyun 	}
1543*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 	/* if there's nothing to wait for - call the callback */
1546*4882a593Smuzhiyun 	if (noassoc && tgtport->ops->host_release)
1547*4882a593Smuzhiyun 		tgtport->ops->host_release(hosthandle);
1548*4882a593Smuzhiyun }
1549*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
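/*
 * Illustrative sketch (not part of this driver): an LLDD that has lost
 * its login with a remote host port would typically call, with
 * lldd_rport being a hypothetical LLDD-side object:
 *
 *	nvmet_fc_invalidate_host(targetport, lldd_rport->hosthandle);
 *
 * Once the transport drops its last reference to that hosthandle,
 * ops->host_release() (if provided) is called with the same handle and
 * the LLDD may then reclaim it.
 */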
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun /*
1552*4882a593Smuzhiyun  * nvmet layer has called to terminate an association
1553*4882a593Smuzhiyun  */
1554*4882a593Smuzhiyun static void
1555*4882a593Smuzhiyun nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1556*4882a593Smuzhiyun {
1557*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport, *next;
1558*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc;
1559*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue;
1560*4882a593Smuzhiyun 	unsigned long flags;
1561*4882a593Smuzhiyun 	bool found_ctrl = false;
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 	/* this is a bit ugly, but don't want to make locks layered */
1564*4882a593Smuzhiyun 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1565*4882a593Smuzhiyun 	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1566*4882a593Smuzhiyun 			tgt_list) {
1567*4882a593Smuzhiyun 		if (!nvmet_fc_tgtport_get(tgtport))
1568*4882a593Smuzhiyun 			continue;
1569*4882a593Smuzhiyun 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 		spin_lock_irqsave(&tgtport->lock, flags);
1572*4882a593Smuzhiyun 		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1573*4882a593Smuzhiyun 			queue = assoc->queues[0];
1574*4882a593Smuzhiyun 			if (queue && queue->nvme_sq.ctrl == ctrl) {
1575*4882a593Smuzhiyun 				if (nvmet_fc_tgt_a_get(assoc))
1576*4882a593Smuzhiyun 					found_ctrl = true;
1577*4882a593Smuzhiyun 				break;
1578*4882a593Smuzhiyun 			}
1579*4882a593Smuzhiyun 		}
1580*4882a593Smuzhiyun 		spin_unlock_irqrestore(&tgtport->lock, flags);
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 		nvmet_fc_tgtport_put(tgtport);
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 		if (found_ctrl) {
1585*4882a593Smuzhiyun 			if (!schedule_work(&assoc->del_work))
1586*4882a593Smuzhiyun 				/* already deleting - release local reference */
1587*4882a593Smuzhiyun 				nvmet_fc_tgt_a_put(assoc);
1588*4882a593Smuzhiyun 			return;
1589*4882a593Smuzhiyun 		}
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1592*4882a593Smuzhiyun 	}
1593*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1594*4882a593Smuzhiyun }
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun /**
1597*4882a593Smuzhiyun  * nvmet_fc_unregister_targetport - transport entry point called by an
1598*4882a593Smuzhiyun  *                              LLDD to deregister/remove a previously
1599*4882a593Smuzhiyun  *                              registered local NVME subsystem FC port.
1600*4882a593Smuzhiyun  * @target_port: pointer to the (registered) target port that is to be
1601*4882a593Smuzhiyun  *               deregistered.
1602*4882a593Smuzhiyun  *
1603*4882a593Smuzhiyun  * Returns:
1604*4882a593Smuzhiyun  * a completion status. Must be 0 upon success; a negative errno
1605*4882a593Smuzhiyun  * (ex: -ENXIO) upon failure.
1606*4882a593Smuzhiyun  */
1607*4882a593Smuzhiyun int
1608*4882a593Smuzhiyun nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1609*4882a593Smuzhiyun {
1610*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	nvmet_fc_portentry_unbind_tgt(tgtport);
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	/* terminate any outstanding associations */
1615*4882a593Smuzhiyun 	__nvmet_fc_free_assocs(tgtport);
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	/*
1618*4882a593Smuzhiyun 	 * should terminate LS's as well. However, LS's will be generated
1619*4882a593Smuzhiyun 	 * at the tail end of association termination, so they likely don't
1620*4882a593Smuzhiyun 	 * exist yet. And even if they did, it's worthwhile to just let
1621*4882a593Smuzhiyun 	 * them finish and targetport ref counting will clean things up.
1622*4882a593Smuzhiyun 	 */
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	nvmet_fc_tgtport_put(tgtport);
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	return 0;
1627*4882a593Smuzhiyun }
1628*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
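/*
 * Illustrative teardown ordering (an assumption, not mandated here): the
 * LLDD stops posting new LS/FCP work, calls
 * nvmet_fc_unregister_targetport(), and waits for its targetport_delete()
 * callback, which nvmet_fc_free_tgtport() invokes once the final
 * targetport reference is dropped.
 */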
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun /* ********************** FC-NVME LS RCV Handling ************************* */
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun static void
1635*4882a593Smuzhiyun nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1636*4882a593Smuzhiyun 			struct nvmet_fc_ls_iod *iod)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun 	struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
1639*4882a593Smuzhiyun 	struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
1640*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue;
1641*4882a593Smuzhiyun 	int ret = 0;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	memset(acc, 0, sizeof(*acc));
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	/*
1646*4882a593Smuzhiyun 	 * FC-NVME spec changes. Initiators send Create Association
1647*4882a593Smuzhiyun 	 * requests of differing lengths because the padding size of the
1648*4882a593Smuzhiyun 	 * Create Association Cmd descriptor was specified incorrectly.
1649*4882a593Smuzhiyun 	 * Accept anything of "minimum" length. Assume the format per the
1650*4882a593Smuzhiyun 	 * 1.15 spec (with HOSTID reduced to 16 bytes) and ignore how long
1651*4882a593Smuzhiyun 	 * the trailing pad is.
1652*4882a593Smuzhiyun 	 */
1653*4882a593Smuzhiyun 	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1654*4882a593Smuzhiyun 		ret = VERR_CR_ASSOC_LEN;
1655*4882a593Smuzhiyun 	else if (be32_to_cpu(rqst->desc_list_len) <
1656*4882a593Smuzhiyun 			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1657*4882a593Smuzhiyun 		ret = VERR_CR_ASSOC_RQST_LEN;
1658*4882a593Smuzhiyun 	else if (rqst->assoc_cmd.desc_tag !=
1659*4882a593Smuzhiyun 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1660*4882a593Smuzhiyun 		ret = VERR_CR_ASSOC_CMD;
1661*4882a593Smuzhiyun 	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1662*4882a593Smuzhiyun 			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1663*4882a593Smuzhiyun 		ret = VERR_CR_ASSOC_CMD_LEN;
1664*4882a593Smuzhiyun 	else if (!rqst->assoc_cmd.ersp_ratio ||
1665*4882a593Smuzhiyun 		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1666*4882a593Smuzhiyun 				be16_to_cpu(rqst->assoc_cmd.sqsize)))
1667*4882a593Smuzhiyun 		ret = VERR_ERSP_RATIO;
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 	else {
1670*4882a593Smuzhiyun 		/* new association w/ admin queue */
1671*4882a593Smuzhiyun 		iod->assoc = nvmet_fc_alloc_target_assoc(
1672*4882a593Smuzhiyun 						tgtport, iod->hosthandle);
1673*4882a593Smuzhiyun 		if (!iod->assoc)
1674*4882a593Smuzhiyun 			ret = VERR_ASSOC_ALLOC_FAIL;
1675*4882a593Smuzhiyun 		else {
1676*4882a593Smuzhiyun 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1677*4882a593Smuzhiyun 					be16_to_cpu(rqst->assoc_cmd.sqsize));
1678*4882a593Smuzhiyun 			if (!queue)
1679*4882a593Smuzhiyun 				ret = VERR_QUEUE_ALLOC_FAIL;
1680*4882a593Smuzhiyun 		}
1681*4882a593Smuzhiyun 	}
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 	if (ret) {
1684*4882a593Smuzhiyun 		dev_err(tgtport->dev,
1685*4882a593Smuzhiyun 			"Create Association LS failed: %s\n",
1686*4882a593Smuzhiyun 			validation_errors[ret]);
1687*4882a593Smuzhiyun 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1688*4882a593Smuzhiyun 				sizeof(*acc), rqst->w0.ls_cmd,
1689*4882a593Smuzhiyun 				FCNVME_RJT_RC_LOGIC,
1690*4882a593Smuzhiyun 				FCNVME_RJT_EXP_NONE, 0);
1691*4882a593Smuzhiyun 		return;
1692*4882a593Smuzhiyun 	}
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun 	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1695*4882a593Smuzhiyun 	atomic_set(&queue->connected, 1);
1696*4882a593Smuzhiyun 	queue->sqhd = 0;	/* best place to init value */
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	dev_info(tgtport->dev,
1699*4882a593Smuzhiyun 		"{%d:%d} Association created\n",
1700*4882a593Smuzhiyun 		tgtport->fc_target_port.port_num, iod->assoc->a_id);
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	/* format a response */
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 	iod->lsrsp->rsplen = sizeof(*acc);
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1707*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1708*4882a593Smuzhiyun 				sizeof(struct fcnvme_ls_cr_assoc_acc)),
1709*4882a593Smuzhiyun 			FCNVME_LS_CREATE_ASSOCIATION);
1710*4882a593Smuzhiyun 	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1711*4882a593Smuzhiyun 	acc->associd.desc_len =
1712*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1713*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_assoc_id));
1714*4882a593Smuzhiyun 	acc->associd.association_id =
1715*4882a593Smuzhiyun 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1716*4882a593Smuzhiyun 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1717*4882a593Smuzhiyun 	acc->connectid.desc_len =
1718*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1719*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_conn_id));
1720*4882a593Smuzhiyun 	acc->connectid.connection_id = acc->associd.association_id;
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun static void
1724*4882a593Smuzhiyun nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1725*4882a593Smuzhiyun 			struct nvmet_fc_ls_iod *iod)
1726*4882a593Smuzhiyun {
1727*4882a593Smuzhiyun 	struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
1728*4882a593Smuzhiyun 	struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
1729*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue;
1730*4882a593Smuzhiyun 	int ret = 0;
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	memset(acc, 0, sizeof(*acc));
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1735*4882a593Smuzhiyun 		ret = VERR_CR_CONN_LEN;
1736*4882a593Smuzhiyun 	else if (rqst->desc_list_len !=
1737*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1738*4882a593Smuzhiyun 				sizeof(struct fcnvme_ls_cr_conn_rqst)))
1739*4882a593Smuzhiyun 		ret = VERR_CR_CONN_RQST_LEN;
1740*4882a593Smuzhiyun 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1741*4882a593Smuzhiyun 		ret = VERR_ASSOC_ID;
1742*4882a593Smuzhiyun 	else if (rqst->associd.desc_len !=
1743*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1744*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1745*4882a593Smuzhiyun 		ret = VERR_ASSOC_ID_LEN;
1746*4882a593Smuzhiyun 	else if (rqst->connect_cmd.desc_tag !=
1747*4882a593Smuzhiyun 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1748*4882a593Smuzhiyun 		ret = VERR_CR_CONN_CMD;
1749*4882a593Smuzhiyun 	else if (rqst->connect_cmd.desc_len !=
1750*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1751*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1752*4882a593Smuzhiyun 		ret = VERR_CR_CONN_CMD_LEN;
1753*4882a593Smuzhiyun 	else if (!rqst->connect_cmd.ersp_ratio ||
1754*4882a593Smuzhiyun 		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1755*4882a593Smuzhiyun 				be16_to_cpu(rqst->connect_cmd.sqsize)))
1756*4882a593Smuzhiyun 		ret = VERR_ERSP_RATIO;
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun 	else {
1759*4882a593Smuzhiyun 		/* new io queue */
1760*4882a593Smuzhiyun 		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1761*4882a593Smuzhiyun 				be64_to_cpu(rqst->associd.association_id));
1762*4882a593Smuzhiyun 		if (!iod->assoc)
1763*4882a593Smuzhiyun 			ret = VERR_NO_ASSOC;
1764*4882a593Smuzhiyun 		else {
1765*4882a593Smuzhiyun 			queue = nvmet_fc_alloc_target_queue(iod->assoc,
1766*4882a593Smuzhiyun 					be16_to_cpu(rqst->connect_cmd.qid),
1767*4882a593Smuzhiyun 					be16_to_cpu(rqst->connect_cmd.sqsize));
1768*4882a593Smuzhiyun 			if (!queue)
1769*4882a593Smuzhiyun 				ret = VERR_QUEUE_ALLOC_FAIL;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 			/* release get taken in nvmet_fc_find_target_assoc */
1772*4882a593Smuzhiyun 			nvmet_fc_tgt_a_put(iod->assoc);
1773*4882a593Smuzhiyun 		}
1774*4882a593Smuzhiyun 	}
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	if (ret) {
1777*4882a593Smuzhiyun 		dev_err(tgtport->dev,
1778*4882a593Smuzhiyun 			"Create Connection LS failed: %s\n",
1779*4882a593Smuzhiyun 			validation_errors[ret]);
1780*4882a593Smuzhiyun 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1781*4882a593Smuzhiyun 				sizeof(*acc), rqst->w0.ls_cmd,
1782*4882a593Smuzhiyun 				(ret == VERR_NO_ASSOC) ?
1783*4882a593Smuzhiyun 					FCNVME_RJT_RC_INV_ASSOC :
1784*4882a593Smuzhiyun 					FCNVME_RJT_RC_LOGIC,
1785*4882a593Smuzhiyun 				FCNVME_RJT_EXP_NONE, 0);
1786*4882a593Smuzhiyun 		return;
1787*4882a593Smuzhiyun 	}
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1790*4882a593Smuzhiyun 	atomic_set(&queue->connected, 1);
1791*4882a593Smuzhiyun 	queue->sqhd = 0;	/* best place to init value */
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 	/* format a response */
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	iod->lsrsp->rsplen = sizeof(*acc);
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1798*4882a593Smuzhiyun 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1799*4882a593Smuzhiyun 			FCNVME_LS_CREATE_CONNECTION);
1800*4882a593Smuzhiyun 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1801*4882a593Smuzhiyun 	acc->connectid.desc_len =
1802*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1803*4882a593Smuzhiyun 				sizeof(struct fcnvme_lsdesc_conn_id));
1804*4882a593Smuzhiyun 	acc->connectid.connection_id =
1805*4882a593Smuzhiyun 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1806*4882a593Smuzhiyun 				be16_to_cpu(rqst->connect_cmd.qid)));
1807*4882a593Smuzhiyun }
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun /*
1810*4882a593Smuzhiyun  * Returns true if the LS response is to be transmitted
1811*4882a593Smuzhiyun  * Returns false if the LS response is to be delayed
1812*4882a593Smuzhiyun  */
1813*4882a593Smuzhiyun static int
1814*4882a593Smuzhiyun nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1815*4882a593Smuzhiyun 			struct nvmet_fc_ls_iod *iod)
1816*4882a593Smuzhiyun {
1817*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1818*4882a593Smuzhiyun 						&iod->rqstbuf->rq_dis_assoc;
1819*4882a593Smuzhiyun 	struct fcnvme_ls_disconnect_assoc_acc *acc =
1820*4882a593Smuzhiyun 						&iod->rspbuf->rsp_dis_assoc;
1821*4882a593Smuzhiyun 	struct nvmet_fc_tgt_assoc *assoc = NULL;
1822*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod *oldls = NULL;
1823*4882a593Smuzhiyun 	unsigned long flags;
1824*4882a593Smuzhiyun 	int ret = 0;
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 	memset(acc, 0, sizeof(*acc));
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
1829*4882a593Smuzhiyun 	if (!ret) {
1830*4882a593Smuzhiyun 		/* match an active association - takes an assoc ref if !NULL */
1831*4882a593Smuzhiyun 		assoc = nvmet_fc_find_target_assoc(tgtport,
1832*4882a593Smuzhiyun 				be64_to_cpu(rqst->associd.association_id));
1833*4882a593Smuzhiyun 		iod->assoc = assoc;
1834*4882a593Smuzhiyun 		if (!assoc)
1835*4882a593Smuzhiyun 			ret = VERR_NO_ASSOC;
1836*4882a593Smuzhiyun 	}
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 	if (ret || !assoc) {
1839*4882a593Smuzhiyun 		dev_err(tgtport->dev,
1840*4882a593Smuzhiyun 			"Disconnect LS failed: %s\n",
1841*4882a593Smuzhiyun 			validation_errors[ret]);
1842*4882a593Smuzhiyun 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1843*4882a593Smuzhiyun 				sizeof(*acc), rqst->w0.ls_cmd,
1844*4882a593Smuzhiyun 				(ret == VERR_NO_ASSOC) ?
1845*4882a593Smuzhiyun 					FCNVME_RJT_RC_INV_ASSOC :
1846*4882a593Smuzhiyun 					FCNVME_RJT_RC_LOGIC,
1847*4882a593Smuzhiyun 				FCNVME_RJT_EXP_NONE, 0);
1848*4882a593Smuzhiyun 		return true;
1849*4882a593Smuzhiyun 	}
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	/* format a response */
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	iod->lsrsp->rsplen = sizeof(*acc);
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1856*4882a593Smuzhiyun 			fcnvme_lsdesc_len(
1857*4882a593Smuzhiyun 				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
1858*4882a593Smuzhiyun 			FCNVME_LS_DISCONNECT_ASSOC);
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	/* release get taken in nvmet_fc_find_target_assoc */
1861*4882a593Smuzhiyun 	nvmet_fc_tgt_a_put(assoc);
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	/*
1864*4882a593Smuzhiyun 	 * The rules for LS response says the response cannot
1865*4882a593Smuzhiyun 	 * go back until ABTS's have been sent for all outstanding
1866*4882a593Smuzhiyun 	 * I/O and a Disconnect Association LS has been sent.
1867*4882a593Smuzhiyun 	 * So... save off the Disconnect LS to send the response
1868*4882a593Smuzhiyun 	 * later. If there was a prior LS already saved, replace
1869*4882a593Smuzhiyun 	 * it with the newer one and send a can't perform reject
1870*4882a593Smuzhiyun 	 * on the older one.
1871*4882a593Smuzhiyun 	 */
1872*4882a593Smuzhiyun 	spin_lock_irqsave(&tgtport->lock, flags);
1873*4882a593Smuzhiyun 	oldls = assoc->rcv_disconn;
1874*4882a593Smuzhiyun 	assoc->rcv_disconn = iod;
1875*4882a593Smuzhiyun 	spin_unlock_irqrestore(&tgtport->lock, flags);
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	nvmet_fc_delete_target_assoc(assoc);
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun 	if (oldls) {
1880*4882a593Smuzhiyun 		dev_info(tgtport->dev,
1881*4882a593Smuzhiyun 			"{%d:%d} Multiple Disconnect Association LS's "
1882*4882a593Smuzhiyun 			"received\n",
1883*4882a593Smuzhiyun 			tgtport->fc_target_port.port_num, assoc->a_id);
1884*4882a593Smuzhiyun 		/* overwrite good response with bogus failure */
1885*4882a593Smuzhiyun 		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
1886*4882a593Smuzhiyun 						sizeof(*iod->rspbuf),
1887*4882a593Smuzhiyun 						/* ok to use rqst, LS is same */
1888*4882a593Smuzhiyun 						rqst->w0.ls_cmd,
1889*4882a593Smuzhiyun 						FCNVME_RJT_RC_UNAB,
1890*4882a593Smuzhiyun 						FCNVME_RJT_EXP_NONE, 0);
1891*4882a593Smuzhiyun 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1892*4882a593Smuzhiyun 	}
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	return false;
1895*4882a593Smuzhiyun }
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun /* *********************** NVME Ctrl Routines **************************** */
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun static void
1906*4882a593Smuzhiyun nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1907*4882a593Smuzhiyun {
1908*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
1909*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1912*4882a593Smuzhiyun 				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1913*4882a593Smuzhiyun 	nvmet_fc_free_ls_iod(tgtport, iod);
1914*4882a593Smuzhiyun 	nvmet_fc_tgtport_put(tgtport);
1915*4882a593Smuzhiyun }
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun static void
1918*4882a593Smuzhiyun nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1919*4882a593Smuzhiyun 				struct nvmet_fc_ls_iod *iod)
1920*4882a593Smuzhiyun {
1921*4882a593Smuzhiyun 	int ret;
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1924*4882a593Smuzhiyun 				  sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1927*4882a593Smuzhiyun 	if (ret)
1928*4882a593Smuzhiyun 		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1929*4882a593Smuzhiyun }
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun /*
1932*4882a593Smuzhiyun  * Actual processing routine for received FC-NVME LS Requests from the LLD
1933*4882a593Smuzhiyun  */
1934*4882a593Smuzhiyun static void
1935*4882a593Smuzhiyun nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1936*4882a593Smuzhiyun 			struct nvmet_fc_ls_iod *iod)
1937*4882a593Smuzhiyun {
1938*4882a593Smuzhiyun 	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
1939*4882a593Smuzhiyun 	bool sendrsp = true;
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun 	iod->lsrsp->nvme_fc_private = iod;
1942*4882a593Smuzhiyun 	iod->lsrsp->rspbuf = iod->rspbuf;
1943*4882a593Smuzhiyun 	iod->lsrsp->rspdma = iod->rspdma;
1944*4882a593Smuzhiyun 	iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
1945*4882a593Smuzhiyun 	/* Be preventative: handlers will later set the valid length */
1946*4882a593Smuzhiyun 	iod->lsrsp->rsplen = 0;
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 	iod->assoc = NULL;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	/*
1951*4882a593Smuzhiyun 	 * handlers:
1952*4882a593Smuzhiyun 	 *   parse request input, execute the request, and format the
1953*4882a593Smuzhiyun 	 *   LS response
1954*4882a593Smuzhiyun 	 */
1955*4882a593Smuzhiyun 	switch (w0->ls_cmd) {
1956*4882a593Smuzhiyun 	case FCNVME_LS_CREATE_ASSOCIATION:
1957*4882a593Smuzhiyun 		/* Creates Association and initial Admin Queue/Connection */
1958*4882a593Smuzhiyun 		nvmet_fc_ls_create_association(tgtport, iod);
1959*4882a593Smuzhiyun 		break;
1960*4882a593Smuzhiyun 	case FCNVME_LS_CREATE_CONNECTION:
1961*4882a593Smuzhiyun 		/* Creates an IO Queue/Connection */
1962*4882a593Smuzhiyun 		nvmet_fc_ls_create_connection(tgtport, iod);
1963*4882a593Smuzhiyun 		break;
1964*4882a593Smuzhiyun 	case FCNVME_LS_DISCONNECT_ASSOC:
1965*4882a593Smuzhiyun 		/* Terminate a Queue/Connection or the Association */
1966*4882a593Smuzhiyun 		sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
1967*4882a593Smuzhiyun 		break;
1968*4882a593Smuzhiyun 	default:
1969*4882a593Smuzhiyun 		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
1970*4882a593Smuzhiyun 				sizeof(*iod->rspbuf), w0->ls_cmd,
1971*4882a593Smuzhiyun 				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1972*4882a593Smuzhiyun 	}
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun 	if (sendrsp)
1975*4882a593Smuzhiyun 		nvmet_fc_xmt_ls_rsp(tgtport, iod);
1976*4882a593Smuzhiyun }
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun /*
1979*4882a593Smuzhiyun  * Actual processing routine for received FC-NVME LS Requests from the LLD
1980*4882a593Smuzhiyun  */
1981*4882a593Smuzhiyun static void
1982*4882a593Smuzhiyun nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1983*4882a593Smuzhiyun {
1984*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod *iod =
1985*4882a593Smuzhiyun 		container_of(work, struct nvmet_fc_ls_iod, work);
1986*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	nvmet_fc_handle_ls_rqst(tgtport, iod);
1989*4882a593Smuzhiyun }
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun /**
1993*4882a593Smuzhiyun  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1994*4882a593Smuzhiyun  *                       upon the reception of an NVME LS request.
1995*4882a593Smuzhiyun  *
1996*4882a593Smuzhiyun  * The nvmet-fc layer will copy payload to an internal structure for
1997*4882a593Smuzhiyun  * processing.  As such, upon completion of the routine, the LLDD may
1998*4882a593Smuzhiyun  * immediately free/reuse the LS request buffer passed in the call.
1999*4882a593Smuzhiyun  *
2000*4882a593Smuzhiyun  * If this routine returns error, the LLDD should abort the exchange.
2001*4882a593Smuzhiyun  *
2002*4882a593Smuzhiyun  * @target_port: pointer to the (registered) target port the LS was
2003*4882a593Smuzhiyun  *              received on.
2004*4882a593Smuzhiyun  * @lsrsp:      pointer to a lsrsp structure to be used to reference
2005*4882a593Smuzhiyun  *              the exchange corresponding to the LS.
2006*4882a593Smuzhiyun  * @lsreqbuf:   pointer to the buffer containing the LS Request
2007*4882a593Smuzhiyun  * @lsreqbuf_len: length, in bytes, of the received LS request
2008*4882a593Smuzhiyun  */
2009*4882a593Smuzhiyun int
2010*4882a593Smuzhiyun nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
2011*4882a593Smuzhiyun 			void *hosthandle,
2012*4882a593Smuzhiyun 			struct nvmefc_ls_rsp *lsrsp,
2013*4882a593Smuzhiyun 			void *lsreqbuf, u32 lsreqbuf_len)
2014*4882a593Smuzhiyun {
2015*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2016*4882a593Smuzhiyun 	struct nvmet_fc_ls_iod *iod;
2017*4882a593Smuzhiyun 	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun 	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
2020*4882a593Smuzhiyun 		dev_info(tgtport->dev,
2021*4882a593Smuzhiyun 			"RCV %s LS failed: payload too large (%d)\n",
2022*4882a593Smuzhiyun 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2023*4882a593Smuzhiyun 				nvmefc_ls_names[w0->ls_cmd] : "",
2024*4882a593Smuzhiyun 			lsreqbuf_len);
2025*4882a593Smuzhiyun 		return -E2BIG;
2026*4882a593Smuzhiyun 	}
2027*4882a593Smuzhiyun 
2028*4882a593Smuzhiyun 	if (!nvmet_fc_tgtport_get(tgtport)) {
2029*4882a593Smuzhiyun 		dev_info(tgtport->dev,
2030*4882a593Smuzhiyun 			"RCV %s LS failed: target deleting\n",
2031*4882a593Smuzhiyun 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2032*4882a593Smuzhiyun 				nvmefc_ls_names[w0->ls_cmd] : "");
2033*4882a593Smuzhiyun 		return -ESHUTDOWN;
2034*4882a593Smuzhiyun 	}
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 	iod = nvmet_fc_alloc_ls_iod(tgtport);
2037*4882a593Smuzhiyun 	if (!iod) {
2038*4882a593Smuzhiyun 		dev_info(tgtport->dev,
2039*4882a593Smuzhiyun 			"RCV %s LS failed: context allocation failed\n",
2040*4882a593Smuzhiyun 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2041*4882a593Smuzhiyun 				nvmefc_ls_names[w0->ls_cmd] : "");
2042*4882a593Smuzhiyun 		nvmet_fc_tgtport_put(tgtport);
2043*4882a593Smuzhiyun 		return -ENOENT;
2044*4882a593Smuzhiyun 	}
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	iod->lsrsp = lsrsp;
2047*4882a593Smuzhiyun 	iod->fcpreq = NULL;
2048*4882a593Smuzhiyun 	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
2049*4882a593Smuzhiyun 	iod->rqstdatalen = lsreqbuf_len;
2050*4882a593Smuzhiyun 	iod->hosthandle = hosthandle;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	schedule_work(&iod->work);
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	return 0;
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
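/*
 * Illustrative sketch (not part of this driver): an LLDD forwarding a
 * received LS frame, where lldd_rport and ls_frame are hypothetical
 * LLDD-side objects. The request buffer may be reused as soon as the
 * call returns, since the payload is copied into iod->rqstbuf above.
 *
 *	ret = nvmet_fc_rcv_ls_req(targetport, lldd_rport->hosthandle,
 *				  &ls_frame->lsrsp, ls_frame->payload,
 *				  ls_frame->payload_len);
 *	if (ret)
 *		lldd_abort_exchange(ls_frame);	// hypothetical helper
 */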
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun /*
2060*4882a593Smuzhiyun  * **********************
2061*4882a593Smuzhiyun  * Start of FCP handling
2062*4882a593Smuzhiyun  * **********************
2063*4882a593Smuzhiyun  */
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun static int
2066*4882a593Smuzhiyun nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2067*4882a593Smuzhiyun {
2068*4882a593Smuzhiyun 	struct scatterlist *sg;
2069*4882a593Smuzhiyun 	unsigned int nent;
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun 	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2072*4882a593Smuzhiyun 	if (!sg)
2073*4882a593Smuzhiyun 		goto out;
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun 	fod->data_sg = sg;
2076*4882a593Smuzhiyun 	fod->data_sg_cnt = nent;
2077*4882a593Smuzhiyun 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2078*4882a593Smuzhiyun 				((fod->io_dir == NVMET_FCP_WRITE) ?
2079*4882a593Smuzhiyun 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
2080*4882a593Smuzhiyun 				/* note: write from initiator perspective */
2081*4882a593Smuzhiyun 	fod->next_sg = fod->data_sg;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	return 0;
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun out:
2086*4882a593Smuzhiyun 	return NVME_SC_INTERNAL;
2087*4882a593Smuzhiyun }
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun static void
2090*4882a593Smuzhiyun nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2091*4882a593Smuzhiyun {
2092*4882a593Smuzhiyun 	if (!fod->data_sg || !fod->data_sg_cnt)
2093*4882a593Smuzhiyun 		return;
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2096*4882a593Smuzhiyun 				((fod->io_dir == NVMET_FCP_WRITE) ?
2097*4882a593Smuzhiyun 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
2098*4882a593Smuzhiyun 	sgl_free(fod->data_sg);
2099*4882a593Smuzhiyun 	fod->data_sg = NULL;
2100*4882a593Smuzhiyun 	fod->data_sg_cnt = 0;
2101*4882a593Smuzhiyun }
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun static bool
2105*4882a593Smuzhiyun queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
2106*4882a593Smuzhiyun {
2107*4882a593Smuzhiyun 	u32 sqtail, used;
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	/* egad, this is ugly. And sqtail is just a best guess */
2110*4882a593Smuzhiyun 	sqtail = atomic_read(&q->sqtail) % q->sqsize;
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
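	/*
	 * e.g. sqsize = 16, sqhd = 1, sqtail = 15: used = 14 and
	 * 14 * 10 >= (16 - 1) * 9, so the queue is treated as 90% full.
	 */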
2113*4882a593Smuzhiyun 	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
2114*4882a593Smuzhiyun }
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun /*
2117*4882a593Smuzhiyun  * Prep RSP payload.
2118*4882a593Smuzhiyun  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
2119*4882a593Smuzhiyun  */
2120*4882a593Smuzhiyun static void
2121*4882a593Smuzhiyun nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2122*4882a593Smuzhiyun 				struct nvmet_fc_fcp_iod *fod)
2123*4882a593Smuzhiyun {
2124*4882a593Smuzhiyun 	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
2125*4882a593Smuzhiyun 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2126*4882a593Smuzhiyun 	struct nvme_completion *cqe = &ersp->cqe;
2127*4882a593Smuzhiyun 	u32 *cqewd = (u32 *)cqe;
2128*4882a593Smuzhiyun 	bool send_ersp = false;
2129*4882a593Smuzhiyun 	u32 rsn, rspcnt, xfr_length;
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
2132*4882a593Smuzhiyun 		xfr_length = fod->req.transfer_len;
2133*4882a593Smuzhiyun 	else
2134*4882a593Smuzhiyun 		xfr_length = fod->offset;
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun 	/*
2137*4882a593Smuzhiyun 	 * check to see if we can send a 0's rsp.
2138*4882a593Smuzhiyun 	 *   Note: to send a 0's response, the NVME-FC host transport will
2139*4882a593Smuzhiyun 	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
2140*4882a593Smuzhiyun 	 *   seen in an ersp), and command_id. Thus it will create a
2141*4882a593Smuzhiyun 	 *   zero-filled CQE with those known fields filled in. Transport
2142*4882a593Smuzhiyun 	 *   must send an ersp for any condition where the cqe won't match
2143*4882a593Smuzhiyun 	 *   this.
2144*4882a593Smuzhiyun 	 *
2145*4882a593Smuzhiyun 	 * Here are the FC-NVME mandated cases where we must send an ersp:
2146*4882a593Smuzhiyun 	 *  every N responses, where N=ersp_ratio
2147*4882a593Smuzhiyun 	 *  force fabric commands to send ersp's (not in FC-NVME but good
2148*4882a593Smuzhiyun 	 *    practice)
2149*4882a593Smuzhiyun 	 *  normal cmds: any time status is non-zero, or status is zero
2150*4882a593Smuzhiyun 	 *     but words 0 or 1 are non-zero.
2151*4882a593Smuzhiyun 	 *  the SQ is 90% or more full
2152*4882a593Smuzhiyun 	 *  the cmd is a fused command
2153*4882a593Smuzhiyun 	 *  transferred data length not equal to cmd iu length
2154*4882a593Smuzhiyun 	 */
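	/*
	 * e.g. with an ersp_ratio of 8, at least every 8th completion on
	 * the queue goes out as a full ersp even if the CQE would
	 * otherwise be all zeroes; any of the conditions below can force
	 * one sooner.
	 */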
2155*4882a593Smuzhiyun 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2156*4882a593Smuzhiyun 	if (!(rspcnt % fod->queue->ersp_ratio) ||
2157*4882a593Smuzhiyun 	    nvme_is_fabrics((struct nvme_command *) sqe) ||
2158*4882a593Smuzhiyun 	    xfr_length != fod->req.transfer_len ||
2159*4882a593Smuzhiyun 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
2160*4882a593Smuzhiyun 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
2161*4882a593Smuzhiyun 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2162*4882a593Smuzhiyun 		send_ersp = true;
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 	/* re-set the fields */
2165*4882a593Smuzhiyun 	fod->fcpreq->rspaddr = ersp;
2166*4882a593Smuzhiyun 	fod->fcpreq->rspdma = fod->rspdma;
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 	if (!send_ersp) {
2169*4882a593Smuzhiyun 		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
2170*4882a593Smuzhiyun 		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
2171*4882a593Smuzhiyun 	} else {
2172*4882a593Smuzhiyun 		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
2173*4882a593Smuzhiyun 		rsn = atomic_inc_return(&fod->queue->rsn);
2174*4882a593Smuzhiyun 		ersp->rsn = cpu_to_be32(rsn);
2175*4882a593Smuzhiyun 		ersp->xfrd_len = cpu_to_be32(xfr_length);
2176*4882a593Smuzhiyun 		fod->fcpreq->rsplen = sizeof(*ersp);
2177*4882a593Smuzhiyun 	}
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2180*4882a593Smuzhiyun 				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
2181*4882a593Smuzhiyun }
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun static void
2186*4882a593Smuzhiyun nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2187*4882a593Smuzhiyun 				struct nvmet_fc_fcp_iod *fod)
2188*4882a593Smuzhiyun {
2189*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	/* data no longer needed */
2192*4882a593Smuzhiyun 	nvmet_fc_free_tgt_pgs(fod);
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	/*
2195*4882a593Smuzhiyun 	 * If an ABTS was received or we issued the fcp_abort early,
2196*4882a593Smuzhiyun 	 * don't call the abort routine again.
2197*4882a593Smuzhiyun 	 */
2198*4882a593Smuzhiyun 	/* no need to take lock - lock was taken earlier to get here */
2199*4882a593Smuzhiyun 	if (!fod->aborted)
2200*4882a593Smuzhiyun 		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun 	nvmet_fc_free_fcp_iod(fod->queue, fod);
2203*4882a593Smuzhiyun }
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun static void
2206*4882a593Smuzhiyun nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2207*4882a593Smuzhiyun 				struct nvmet_fc_fcp_iod *fod)
2208*4882a593Smuzhiyun {
2209*4882a593Smuzhiyun 	int ret;
2210*4882a593Smuzhiyun 
2211*4882a593Smuzhiyun 	fod->fcpreq->op = NVMET_FCOP_RSP;
2212*4882a593Smuzhiyun 	fod->fcpreq->timeout = 0;
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	nvmet_fc_prep_fcp_rsp(tgtport, fod);
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2217*4882a593Smuzhiyun 	if (ret)
2218*4882a593Smuzhiyun 		nvmet_fc_abort_op(tgtport, fod);
2219*4882a593Smuzhiyun }
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun static void
2222*4882a593Smuzhiyun nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
2223*4882a593Smuzhiyun 				struct nvmet_fc_fcp_iod *fod, u8 op)
2224*4882a593Smuzhiyun {
2225*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2226*4882a593Smuzhiyun 	struct scatterlist *sg = fod->next_sg;
2227*4882a593Smuzhiyun 	unsigned long flags;
2228*4882a593Smuzhiyun 	u32 remaininglen = fod->req.transfer_len - fod->offset;
2229*4882a593Smuzhiyun 	u32 tlen = 0;
2230*4882a593Smuzhiyun 	int ret;
2231*4882a593Smuzhiyun 
2232*4882a593Smuzhiyun 	fcpreq->op = op;
2233*4882a593Smuzhiyun 	fcpreq->offset = fod->offset;
2234*4882a593Smuzhiyun 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	/*
2237*4882a593Smuzhiyun 	 * for next sequence:
2238*4882a593Smuzhiyun 	 *  break at a sg element boundary
2239*4882a593Smuzhiyun 	 *  attempt to keep sequence length capped at
2240*4882a593Smuzhiyun 	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
2241*4882a593Smuzhiyun 	 *    be longer if a single sg element is larger
2242*4882a593Smuzhiyun 	 *    than that amount. This is done to avoid creating
2243*4882a593Smuzhiyun 	 *    a new sg list to use for the tgtport api.
2244*4882a593Smuzhiyun 	 */
2245*4882a593Smuzhiyun 	fcpreq->sg = sg;
2246*4882a593Smuzhiyun 	fcpreq->sg_cnt = 0;
2247*4882a593Smuzhiyun 	while (tlen < remaininglen &&
2248*4882a593Smuzhiyun 	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
2249*4882a593Smuzhiyun 	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
2250*4882a593Smuzhiyun 		fcpreq->sg_cnt++;
2251*4882a593Smuzhiyun 		tlen += sg_dma_len(sg);
2252*4882a593Smuzhiyun 		sg = sg_next(sg);
2253*4882a593Smuzhiyun 	}
2254*4882a593Smuzhiyun 	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
2255*4882a593Smuzhiyun 		fcpreq->sg_cnt++;
2256*4882a593Smuzhiyun 		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
2257*4882a593Smuzhiyun 		sg = sg_next(sg);
2258*4882a593Smuzhiyun 	}
2259*4882a593Smuzhiyun 	if (tlen < remaininglen)
2260*4882a593Smuzhiyun 		fod->next_sg = sg;
2261*4882a593Smuzhiyun 	else
2262*4882a593Smuzhiyun 		fod->next_sg = NULL;
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun 	fcpreq->transfer_length = tlen;
2265*4882a593Smuzhiyun 	fcpreq->transferred_length = 0;
2266*4882a593Smuzhiyun 	fcpreq->fcp_error = 0;
2267*4882a593Smuzhiyun 	fcpreq->rsplen = 0;
2268*4882a593Smuzhiyun 
2269*4882a593Smuzhiyun 	/*
2270*4882a593Smuzhiyun 	 * If the last READDATA request: check if LLDD supports
2271*4882a593Smuzhiyun 	 * combined xfr with response.
2272*4882a593Smuzhiyun 	 */
2273*4882a593Smuzhiyun 	if ((op == NVMET_FCOP_READDATA) &&
2274*4882a593Smuzhiyun 	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2275*4882a593Smuzhiyun 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2276*4882a593Smuzhiyun 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
2277*4882a593Smuzhiyun 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
2278*4882a593Smuzhiyun 	}
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2281*4882a593Smuzhiyun 	if (ret) {
2282*4882a593Smuzhiyun 		/*
2283*4882a593Smuzhiyun 		 * should be ok to set w/o lock as it's in the thread of
2284*4882a593Smuzhiyun 		 * execution (not an async timer routine) and doesn't
2285*4882a593Smuzhiyun 		 * contend with any clearing action
2286*4882a593Smuzhiyun 		 */
2287*4882a593Smuzhiyun 		fod->abort = true;
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun 		if (op == NVMET_FCOP_WRITEDATA) {
2290*4882a593Smuzhiyun 			spin_lock_irqsave(&fod->flock, flags);
2291*4882a593Smuzhiyun 			fod->writedataactive = false;
2292*4882a593Smuzhiyun 			spin_unlock_irqrestore(&fod->flock, flags);
2293*4882a593Smuzhiyun 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2294*4882a593Smuzhiyun 		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
2295*4882a593Smuzhiyun 			fcpreq->fcp_error = ret;
2296*4882a593Smuzhiyun 			fcpreq->transferred_length = 0;
2297*4882a593Smuzhiyun 			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
2298*4882a593Smuzhiyun 		}
2299*4882a593Smuzhiyun 	}
2300*4882a593Smuzhiyun }
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun static inline bool
2303*4882a593Smuzhiyun __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2304*4882a593Smuzhiyun {
2305*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2306*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun 	/* if in the middle of an io and we need to tear down */
2309*4882a593Smuzhiyun 	if (abort) {
2310*4882a593Smuzhiyun 		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
2311*4882a593Smuzhiyun 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2312*4882a593Smuzhiyun 			return true;
2313*4882a593Smuzhiyun 		}
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 		nvmet_fc_abort_op(tgtport, fod);
2316*4882a593Smuzhiyun 		return true;
2317*4882a593Smuzhiyun 	}
2318*4882a593Smuzhiyun 
2319*4882a593Smuzhiyun 	return false;
2320*4882a593Smuzhiyun }
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun /*
2323*4882a593Smuzhiyun  * actual done handler for FCP operations when completed by the lldd
2324*4882a593Smuzhiyun  */
2325*4882a593Smuzhiyun static void
2326*4882a593Smuzhiyun nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
2327*4882a593Smuzhiyun {
2328*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2329*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2330*4882a593Smuzhiyun 	unsigned long flags;
2331*4882a593Smuzhiyun 	bool abort;
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	spin_lock_irqsave(&fod->flock, flags);
2334*4882a593Smuzhiyun 	abort = fod->abort;
2335*4882a593Smuzhiyun 	fod->writedataactive = false;
2336*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fod->flock, flags);
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 	switch (fcpreq->op) {
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 	case NVMET_FCOP_WRITEDATA:
2341*4882a593Smuzhiyun 		if (__nvmet_fc_fod_op_abort(fod, abort))
2342*4882a593Smuzhiyun 			return;
2343*4882a593Smuzhiyun 		if (fcpreq->fcp_error ||
2344*4882a593Smuzhiyun 		    fcpreq->transferred_length != fcpreq->transfer_length) {
2345*4882a593Smuzhiyun 			spin_lock_irqsave(&fod->flock, flags);
2346*4882a593Smuzhiyun 			fod->abort = true;
2347*4882a593Smuzhiyun 			spin_unlock_irqrestore(&fod->flock, flags);
2348*4882a593Smuzhiyun 
2349*4882a593Smuzhiyun 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2350*4882a593Smuzhiyun 			return;
2351*4882a593Smuzhiyun 		}
2352*4882a593Smuzhiyun 
2353*4882a593Smuzhiyun 		fod->offset += fcpreq->transferred_length;
2354*4882a593Smuzhiyun 		if (fod->offset != fod->req.transfer_len) {
2355*4882a593Smuzhiyun 			spin_lock_irqsave(&fod->flock, flags);
2356*4882a593Smuzhiyun 			fod->writedataactive = true;
2357*4882a593Smuzhiyun 			spin_unlock_irqrestore(&fod->flock, flags);
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 			/* transfer the next chunk */
2360*4882a593Smuzhiyun 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2361*4882a593Smuzhiyun 						NVMET_FCOP_WRITEDATA);
2362*4882a593Smuzhiyun 			return;
2363*4882a593Smuzhiyun 		}
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 		/* data transfer complete, resume with nvmet layer */
2366*4882a593Smuzhiyun 		fod->req.execute(&fod->req);
2367*4882a593Smuzhiyun 		break;
2368*4882a593Smuzhiyun 
2369*4882a593Smuzhiyun 	case NVMET_FCOP_READDATA:
2370*4882a593Smuzhiyun 	case NVMET_FCOP_READDATA_RSP:
2371*4882a593Smuzhiyun 		if (__nvmet_fc_fod_op_abort(fod, abort))
2372*4882a593Smuzhiyun 			return;
2373*4882a593Smuzhiyun 		if (fcpreq->fcp_error ||
2374*4882a593Smuzhiyun 		    fcpreq->transferred_length != fcpreq->transfer_length) {
2375*4882a593Smuzhiyun 			nvmet_fc_abort_op(tgtport, fod);
2376*4882a593Smuzhiyun 			return;
2377*4882a593Smuzhiyun 		}
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 		/* success */
2380*4882a593Smuzhiyun 
2381*4882a593Smuzhiyun 		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2382*4882a593Smuzhiyun 			/* data no longer needed */
2383*4882a593Smuzhiyun 			nvmet_fc_free_tgt_pgs(fod);
2384*4882a593Smuzhiyun 			nvmet_fc_free_fcp_iod(fod->queue, fod);
2385*4882a593Smuzhiyun 			return;
2386*4882a593Smuzhiyun 		}
2387*4882a593Smuzhiyun 
2388*4882a593Smuzhiyun 		fod->offset += fcpreq->transferred_length;
2389*4882a593Smuzhiyun 		if (fod->offset != fod->req.transfer_len) {
2390*4882a593Smuzhiyun 			/* transfer the next chunk */
2391*4882a593Smuzhiyun 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2392*4882a593Smuzhiyun 						NVMET_FCOP_READDATA);
2393*4882a593Smuzhiyun 			return;
2394*4882a593Smuzhiyun 		}
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun 		/* data transfer complete, send response */
2397*4882a593Smuzhiyun 
2398*4882a593Smuzhiyun 		/* data no longer needed */
2399*4882a593Smuzhiyun 		nvmet_fc_free_tgt_pgs(fod);
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 		break;
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun 	case NVMET_FCOP_RSP:
2406*4882a593Smuzhiyun 		if (__nvmet_fc_fod_op_abort(fod, abort))
2407*4882a593Smuzhiyun 			return;
2408*4882a593Smuzhiyun 		nvmet_fc_free_fcp_iod(fod->queue, fod);
2409*4882a593Smuzhiyun 		break;
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	default:
2412*4882a593Smuzhiyun 		break;
2413*4882a593Smuzhiyun 	}
2414*4882a593Smuzhiyun }
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun static void
2417*4882a593Smuzhiyun nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2418*4882a593Smuzhiyun {
2419*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 	nvmet_fc_fod_op_done(fod);
2422*4882a593Smuzhiyun }
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun /*
2425*4882a593Smuzhiyun  * actual completion handler after execution by the nvmet layer
2426*4882a593Smuzhiyun  */
2427*4882a593Smuzhiyun static void
2428*4882a593Smuzhiyun __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2429*4882a593Smuzhiyun 			struct nvmet_fc_fcp_iod *fod, int status)
2430*4882a593Smuzhiyun {
2431*4882a593Smuzhiyun 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2432*4882a593Smuzhiyun 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2433*4882a593Smuzhiyun 	unsigned long flags;
2434*4882a593Smuzhiyun 	bool abort;
2435*4882a593Smuzhiyun 
2436*4882a593Smuzhiyun 	spin_lock_irqsave(&fod->flock, flags);
2437*4882a593Smuzhiyun 	abort = fod->abort;
2438*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fod->flock, flags);
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 	/* if we have a CQE, snoop the last sq_head value */
2441*4882a593Smuzhiyun 	if (!status)
2442*4882a593Smuzhiyun 		fod->queue->sqhd = cqe->sq_head;
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	if (abort) {
2445*4882a593Smuzhiyun 		nvmet_fc_abort_op(tgtport, fod);
2446*4882a593Smuzhiyun 		return;
2447*4882a593Smuzhiyun 	}
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun 	/* if an error handling the cmd post initial parsing */
2450*4882a593Smuzhiyun 	if (status) {
2451*4882a593Smuzhiyun 		/* fudge up a failed CQE status for our transport error */
2452*4882a593Smuzhiyun 		memset(cqe, 0, sizeof(*cqe));
2453*4882a593Smuzhiyun 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
2454*4882a593Smuzhiyun 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
2455*4882a593Smuzhiyun 		cqe->command_id = sqe->command_id;
2456*4882a593Smuzhiyun 		cqe->status = cpu_to_le16(status);
2457*4882a593Smuzhiyun 	} else {
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 		/*
2460*4882a593Smuzhiyun 		 * try to push the data even if the SQE status is non-zero.
2461*4882a593Smuzhiyun 		 * There may be a status where data still was intended to
2462*4882a593Smuzhiyun 		 * be moved
2463*4882a593Smuzhiyun 		 */
2464*4882a593Smuzhiyun 		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2465*4882a593Smuzhiyun 			/* push the data over before sending rsp */
2466*4882a593Smuzhiyun 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2467*4882a593Smuzhiyun 						NVMET_FCOP_READDATA);
2468*4882a593Smuzhiyun 			return;
2469*4882a593Smuzhiyun 		}
2470*4882a593Smuzhiyun 
2471*4882a593Smuzhiyun 		/* writes & no data - fall thru */
2472*4882a593Smuzhiyun 	}
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun 	/* data no longer needed */
2475*4882a593Smuzhiyun 	nvmet_fc_free_tgt_pgs(fod);
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2478*4882a593Smuzhiyun }
2479*4882a593Smuzhiyun 
2480*4882a593Smuzhiyun 
2481*4882a593Smuzhiyun static void
2482*4882a593Smuzhiyun nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2483*4882a593Smuzhiyun {
2484*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2485*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2486*4882a593Smuzhiyun 
2487*4882a593Smuzhiyun 	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2488*4882a593Smuzhiyun }
2489*4882a593Smuzhiyun 
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun /*
2492*4882a593Smuzhiyun  * Actual processing routine for received FC-NVME I/O Requests from the LLDD
2493*4882a593Smuzhiyun  */
2494*4882a593Smuzhiyun static void
2495*4882a593Smuzhiyun nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2496*4882a593Smuzhiyun 			struct nvmet_fc_fcp_iod *fod)
2497*4882a593Smuzhiyun {
2498*4882a593Smuzhiyun 	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2499*4882a593Smuzhiyun 	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2500*4882a593Smuzhiyun 	int ret;
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun 	/*
2503*4882a593Smuzhiyun 	 * Fused commands are currently not supported in the Linux
2504*4882a593Smuzhiyun 	 * implementation.
2505*4882a593Smuzhiyun 	 *
2506*4882a593Smuzhiyun 	 * As such, the FC transport implementation does not inspect
2507*4882a593Smuzhiyun 	 * fused commands or hold and order their delivery to the upper
2508*4882a593Smuzhiyun 	 * layer (based on CSN) until both halves have been received.
2509*4882a593Smuzhiyun 	 */
2510*4882a593Smuzhiyun 
2511*4882a593Smuzhiyun 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2514*4882a593Smuzhiyun 		fod->io_dir = NVMET_FCP_WRITE;
2515*4882a593Smuzhiyun 		if (!nvme_is_write(&cmdiu->sqe))
2516*4882a593Smuzhiyun 			goto transport_error;
2517*4882a593Smuzhiyun 	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2518*4882a593Smuzhiyun 		fod->io_dir = NVMET_FCP_READ;
2519*4882a593Smuzhiyun 		if (nvme_is_write(&cmdiu->sqe))
2520*4882a593Smuzhiyun 			goto transport_error;
2521*4882a593Smuzhiyun 	} else {
2522*4882a593Smuzhiyun 		fod->io_dir = NVMET_FCP_NODATA;
2523*4882a593Smuzhiyun 		if (xfrlen)
2524*4882a593Smuzhiyun 			goto transport_error;
2525*4882a593Smuzhiyun 	}
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	fod->req.cmd = &fod->cmdiubuf.sqe;
2528*4882a593Smuzhiyun 	fod->req.cqe = &fod->rspiubuf.cqe;
2529*4882a593Smuzhiyun 	if (tgtport->pe)
2530*4882a593Smuzhiyun 		fod->req.port = tgtport->pe->port;
2531*4882a593Smuzhiyun 
2532*4882a593Smuzhiyun 	/* clear any response payload */
2533*4882a593Smuzhiyun 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2534*4882a593Smuzhiyun 
2535*4882a593Smuzhiyun 	fod->data_sg = NULL;
2536*4882a593Smuzhiyun 	fod->data_sg_cnt = 0;
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	ret = nvmet_req_init(&fod->req,
2539*4882a593Smuzhiyun 				&fod->queue->nvme_cq,
2540*4882a593Smuzhiyun 				&fod->queue->nvme_sq,
2541*4882a593Smuzhiyun 				&nvmet_fc_tgt_fcp_ops);
2542*4882a593Smuzhiyun 	if (!ret) {
2543*4882a593Smuzhiyun 		/* bad SQE content or invalid ctrl state */
2544*4882a593Smuzhiyun 		/* nvmet layer has already called op done to send rsp. */
2545*4882a593Smuzhiyun 		return;
2546*4882a593Smuzhiyun 	}
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun 	fod->req.transfer_len = xfrlen;
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 	/* keep a running counter of tail position */
2551*4882a593Smuzhiyun 	atomic_inc(&fod->queue->sqtail);
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun 	if (fod->req.transfer_len) {
2554*4882a593Smuzhiyun 		ret = nvmet_fc_alloc_tgt_pgs(fod);
2555*4882a593Smuzhiyun 		if (ret) {
2556*4882a593Smuzhiyun 			nvmet_req_complete(&fod->req, ret);
2557*4882a593Smuzhiyun 			return;
2558*4882a593Smuzhiyun 		}
2559*4882a593Smuzhiyun 	}
2560*4882a593Smuzhiyun 	fod->req.sg = fod->data_sg;
2561*4882a593Smuzhiyun 	fod->req.sg_cnt = fod->data_sg_cnt;
2562*4882a593Smuzhiyun 	fod->offset = 0;
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	if (fod->io_dir == NVMET_FCP_WRITE) {
2565*4882a593Smuzhiyun 		/* pull the data over before invoking nvmet layer */
2566*4882a593Smuzhiyun 		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2567*4882a593Smuzhiyun 		return;
2568*4882a593Smuzhiyun 	}
2569*4882a593Smuzhiyun 
2570*4882a593Smuzhiyun 	/*
2571*4882a593Smuzhiyun 	 * Reads or no data:
2572*4882a593Smuzhiyun 	 *
2573*4882a593Smuzhiyun 	 * can invoke the nvmet layer now. For read data, the cmd completion
2574*4882a593Smuzhiyun 	 * will push the data.
2575*4882a593Smuzhiyun 	 */
2576*4882a593Smuzhiyun 	fod->req.execute(&fod->req);
2577*4882a593Smuzhiyun 	return;
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun transport_error:
2580*4882a593Smuzhiyun 	nvmet_fc_abort_op(tgtport, fod);
2581*4882a593Smuzhiyun }
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun /**
2584*4882a593Smuzhiyun  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2585*4882a593Smuzhiyun  *                       upon the reception of a NVME FCP CMD IU.
2586*4882a593Smuzhiyun  *
2587*4882a593Smuzhiyun  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2588*4882a593Smuzhiyun  * layer for processing.
2589*4882a593Smuzhiyun  *
2590*4882a593Smuzhiyun  * The nvmet_fc layer allocates a local job structure (struct
2591*4882a593Smuzhiyun  * nvmet_fc_fcp_iod) from the queue for the io and copies the
2592*4882a593Smuzhiyun  * CMD IU buffer to the job structure. As such, on a successful
2593*4882a593Smuzhiyun  * completion (returns 0), the LLDD may immediately free/reuse
2594*4882a593Smuzhiyun  * the CMD IU buffer passed in the call.
2595*4882a593Smuzhiyun  *
2596*4882a593Smuzhiyun  * However, due to the packetized nature of FC and the FC LLDD api, the
2597*4882a593Smuzhiyun  * LLDD may issue a hw command to send a response yet not receive the hw
2598*4882a593Smuzhiyun  * completion for that command (and upcall the nvmet_fc layer) before a
2599*4882a593Smuzhiyun  * new command is asynchronously received. It is therefore possible for
2600*4882a593Smuzhiyun  * a command to be received before the LLDD and nvmet_fc have recycled
2601*4882a593Smuzhiyun  * the job structure, giving the appearance of more commands received
2602*4882a593Smuzhiyun  * than fit in the sq.
2603*4882a593Smuzhiyun  * To alleviate this scenario, a temporary queue is maintained in the
2604*4882a593Smuzhiyun  * transport for pending LLDD requests waiting for a queue job structure.
2605*4882a593Smuzhiyun  * In these "overrun" cases, a temporary queue element is allocated, the
2606*4882a593Smuzhiyun  * LLDD request and CMD IU buffer information are remembered, and the
2607*4882a593Smuzhiyun  * routine returns -EOVERFLOW. Subsequently, when a queue job structure
2608*4882a593Smuzhiyun  * is freed, it is immediately reallocated for anything on the pending
2609*4882a593Smuzhiyun  * request list. The LLDD's defer_rcv() callback is called, informing
2610*4882a593Smuzhiyun  * the LLDD that it may reuse the CMD IU buffer, and the io is then
2611*4882a593Smuzhiyun  * started normally with the transport.
2612*4882a593Smuzhiyun  *
2613*4882a593Smuzhiyun  * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2614*4882a593Smuzhiyun  * the completion as successful but must not reuse the CMD IU buffer
2615*4882a593Smuzhiyun  * until the LLDD's defer_rcv() callback has been called for the
2616*4882a593Smuzhiyun  * corresponding struct nvmefc_tgt_fcp_req pointer.
2617*4882a593Smuzhiyun  *
2618*4882a593Smuzhiyun  * If there is any other condition in which an error occurs, the
2619*4882a593Smuzhiyun  * transport will return a non-zero status indicating the error.
2620*4882a593Smuzhiyun  * In all cases other than -EOVERFLOW, the transport has not accepted the
2621*4882a593Smuzhiyun  * request and the LLDD should abort the exchange.
2622*4882a593Smuzhiyun  *
2623*4882a593Smuzhiyun  * @target_port: pointer to the (registered) target port the FCP CMD IU
2624*4882a593Smuzhiyun  *              was received on.
2625*4882a593Smuzhiyun  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2626*4882a593Smuzhiyun  *              the exchange corresponding to the FCP Exchange.
2627*4882a593Smuzhiyun  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2628*4882a593Smuzhiyun  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2629*4882a593Smuzhiyun  */
2630*4882a593Smuzhiyun int
2631*4882a593Smuzhiyun nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2632*4882a593Smuzhiyun 			struct nvmefc_tgt_fcp_req *fcpreq,
2633*4882a593Smuzhiyun 			void *cmdiubuf, u32 cmdiubuf_len)
2634*4882a593Smuzhiyun {
2635*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2636*4882a593Smuzhiyun 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2637*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue;
2638*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod;
2639*4882a593Smuzhiyun 	struct nvmet_fc_defer_fcp_req *deferfcp;
2640*4882a593Smuzhiyun 	unsigned long flags;
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 	/* validate iu, so the connection id can be used to find the queue */
2643*4882a593Smuzhiyun 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2644*4882a593Smuzhiyun 			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
2645*4882a593Smuzhiyun 			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
2646*4882a593Smuzhiyun 			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2647*4882a593Smuzhiyun 		return -EIO;
2648*4882a593Smuzhiyun 
2649*4882a593Smuzhiyun 	queue = nvmet_fc_find_target_queue(tgtport,
2650*4882a593Smuzhiyun 				be64_to_cpu(cmdiu->connection_id));
2651*4882a593Smuzhiyun 	if (!queue)
2652*4882a593Smuzhiyun 		return -ENOTCONN;
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun 	/*
2655*4882a593Smuzhiyun 	 * note: reference taken by find_target_queue
2656*4882a593Smuzhiyun 	 * After successful fod allocation, the fod will inherit the
2657*4882a593Smuzhiyun 	 * ownership of that reference and will remove the reference
2658*4882a593Smuzhiyun 	 * when the fod is freed.
2659*4882a593Smuzhiyun 	 */
2660*4882a593Smuzhiyun 
2661*4882a593Smuzhiyun 	spin_lock_irqsave(&queue->qlock, flags);
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	fod = nvmet_fc_alloc_fcp_iod(queue);
2664*4882a593Smuzhiyun 	if (fod) {
2665*4882a593Smuzhiyun 		spin_unlock_irqrestore(&queue->qlock, flags);
2666*4882a593Smuzhiyun 
2667*4882a593Smuzhiyun 		fcpreq->nvmet_fc_private = fod;
2668*4882a593Smuzhiyun 		fod->fcpreq = fcpreq;
2669*4882a593Smuzhiyun 
2670*4882a593Smuzhiyun 		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2671*4882a593Smuzhiyun 
2672*4882a593Smuzhiyun 		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 		return 0;
2675*4882a593Smuzhiyun 	}
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun 	if (!tgtport->ops->defer_rcv) {
2678*4882a593Smuzhiyun 		spin_unlock_irqrestore(&queue->qlock, flags);
2679*4882a593Smuzhiyun 		/* release the queue lookup reference */
2680*4882a593Smuzhiyun 		nvmet_fc_tgt_q_put(queue);
2681*4882a593Smuzhiyun 		return -ENOENT;
2682*4882a593Smuzhiyun 	}
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun 	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2685*4882a593Smuzhiyun 			struct nvmet_fc_defer_fcp_req, req_list);
2686*4882a593Smuzhiyun 	if (deferfcp) {
2687*4882a593Smuzhiyun 		/* Just re-use one that was previously allocated */
2688*4882a593Smuzhiyun 		list_del(&deferfcp->req_list);
2689*4882a593Smuzhiyun 	} else {
2690*4882a593Smuzhiyun 		spin_unlock_irqrestore(&queue->qlock, flags);
2691*4882a593Smuzhiyun 
2692*4882a593Smuzhiyun 		/* Now we need to dynamically allocate one */
2693*4882a593Smuzhiyun 		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2694*4882a593Smuzhiyun 		if (!deferfcp) {
2695*4882a593Smuzhiyun 			/* release the queue lookup reference */
2696*4882a593Smuzhiyun 			nvmet_fc_tgt_q_put(queue);
2697*4882a593Smuzhiyun 			return -ENOMEM;
2698*4882a593Smuzhiyun 		}
2699*4882a593Smuzhiyun 		spin_lock_irqsave(&queue->qlock, flags);
2700*4882a593Smuzhiyun 	}
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun 	/* For now, use rspaddr / rsplen to save payload information */
2703*4882a593Smuzhiyun 	fcpreq->rspaddr = cmdiubuf;
2704*4882a593Smuzhiyun 	fcpreq->rsplen  = cmdiubuf_len;
2705*4882a593Smuzhiyun 	deferfcp->fcp_req = fcpreq;
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun 	/* defer processing till a fod becomes available */
2708*4882a593Smuzhiyun 	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun 	/* NOTE: the queue lookup reference is still valid */
2711*4882a593Smuzhiyun 
2712*4882a593Smuzhiyun 	spin_unlock_irqrestore(&queue->qlock, flags);
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun 	return -EOVERFLOW;
2715*4882a593Smuzhiyun }
2716*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
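
/*
 * Editor's illustrative sketch (not part of the original file): how an
 * LLDD receive path might hand a FCP CMD IU to nvmet_fc_rcv_fcp_req()
 * and honor the return codes documented above. struct lldd_lport and
 * the helpers lldd_reuse_cmd_buffer() / lldd_abort_exchange() are
 * hypothetical placeholders for driver-specific code, not real APIs.
 */
struct lldd_lport {
	struct nvmet_fc_target_port	*targetport;
};

static void lldd_reuse_cmd_buffer(struct lldd_lport *lport, void *cmdiubuf);
static void lldd_abort_exchange(struct lldd_lport *lport,
				struct nvmefc_tgt_fcp_req *fcpreq);

static void
lldd_example_handle_fcp_cmd(struct lldd_lport *lport,
			    struct nvmefc_tgt_fcp_req *fcpreq,
			    void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(lport->targetport, fcpreq,
				   cmdiubuf, cmdiubuf_len);
	switch (ret) {
	case 0:
		/* accepted: the CMD IU buffer may be reused right away */
		lldd_reuse_cmd_buffer(lport, cmdiubuf);
		break;
	case -EOVERFLOW:
		/*
		 * accepted but deferred: keep the CMD IU buffer intact
		 * until defer_rcv() is called for this fcpreq.
		 */
		break;
	default:
		/* not accepted: the LLDD should abort the exchange */
		lldd_abort_exchange(lport, fcpreq);
		break;
	}
}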
2717*4882a593Smuzhiyun 
2718*4882a593Smuzhiyun /**
2719*4882a593Smuzhiyun  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2720*4882a593Smuzhiyun  *                       upon the reception of an ABTS for a FCP command
2721*4882a593Smuzhiyun  *
2722*4882a593Smuzhiyun  * Notify the transport that an ABTS has been received for a FCP command
2723*4882a593Smuzhiyun  * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2724*4882a593Smuzhiyun  * LLDD believes the command is still being worked on
2725*4882a593Smuzhiyun  * (template_ops->fcp_req_release() has not been called).
2726*4882a593Smuzhiyun  *
2727*4882a593Smuzhiyun  * The transport will wait for any outstanding work (an op to the LLDD,
2728*4882a593Smuzhiyun  * which the lldd should complete with error due to the ABTS; or the
2729*4882a593Smuzhiyun  * completion from the nvmet layer of the nvme command), then will
2730*4882a593Smuzhiyun  * stop processing and call the nvmet_fc_rcv_fcp_req() callback to
2731*4882a593Smuzhiyun  * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
2732*4882a593Smuzhiyun  * to the ABTS either after return from this function (assuming any
2733*4882a593Smuzhiyun  * outstanding op work has been terminated) or upon the callback being
2734*4882a593Smuzhiyun  * called.
2735*4882a593Smuzhiyun  *
2736*4882a593Smuzhiyun  * @target_port: pointer to the (registered) target port the FCP CMD IU
2737*4882a593Smuzhiyun  *              was received on.
2738*4882a593Smuzhiyun  * @fcpreq:     pointer to the fcpreq request structure that corresponds
2739*4882a593Smuzhiyun  *              to the exchange that received the ABTS.
2740*4882a593Smuzhiyun  */
2741*4882a593Smuzhiyun void
2742*4882a593Smuzhiyun nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2743*4882a593Smuzhiyun 			struct nvmefc_tgt_fcp_req *fcpreq)
2744*4882a593Smuzhiyun {
2745*4882a593Smuzhiyun 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2746*4882a593Smuzhiyun 	struct nvmet_fc_tgt_queue *queue;
2747*4882a593Smuzhiyun 	unsigned long flags;
2748*4882a593Smuzhiyun 
2749*4882a593Smuzhiyun 	if (!fod || fod->fcpreq != fcpreq)
2750*4882a593Smuzhiyun 		/* job appears to have already completed, ignore abort */
2751*4882a593Smuzhiyun 		return;
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun 	queue = fod->queue;
2754*4882a593Smuzhiyun 
2755*4882a593Smuzhiyun 	spin_lock_irqsave(&queue->qlock, flags);
2756*4882a593Smuzhiyun 	if (fod->active) {
2757*4882a593Smuzhiyun 		/*
2758*4882a593Smuzhiyun 		 * mark as abort. The abort handler, invoked upon completion
2759*4882a593Smuzhiyun 		 * of any work, will detect the aborted status and do the
2760*4882a593Smuzhiyun 		 * callback.
2761*4882a593Smuzhiyun 		 */
2762*4882a593Smuzhiyun 		spin_lock(&fod->flock);
2763*4882a593Smuzhiyun 		fod->abort = true;
2764*4882a593Smuzhiyun 		fod->aborted = true;
2765*4882a593Smuzhiyun 		spin_unlock(&fod->flock);
2766*4882a593Smuzhiyun 	}
2767*4882a593Smuzhiyun 	spin_unlock_irqrestore(&queue->qlock, flags);
2768*4882a593Smuzhiyun }
2769*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
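
/*
 * Editor's illustrative sketch (not part of the original file): a
 * possible LLDD path for an ABTS that arrives while the FCP command is
 * still outstanding with the transport. lldd_exchange_to_fcpreq() and
 * lldd_send_ba_acc() are hypothetical driver helpers. Per the comment
 * above, the BA_ACC may be sent after this call returns or once the
 * i/o context is returned to the LLDD via fcp_req_release().
 */
static struct nvmefc_tgt_fcp_req *lldd_exchange_to_fcpreq(void *exchange);
static void lldd_send_ba_acc(void *exchange);

static void
lldd_example_handle_abts(struct lldd_lport *lport, void *exchange)
{
	struct nvmefc_tgt_fcp_req *fcpreq = lldd_exchange_to_fcpreq(exchange);

	if (fcpreq)
		/* mark the i/o aborted; the transport completes it later */
		nvmet_fc_rcv_fcp_abort(lport->targetport, fcpreq);

	lldd_send_ba_acc(exchange);
}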
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 
2772*4882a593Smuzhiyun struct nvmet_fc_traddr {
2773*4882a593Smuzhiyun 	u64	nn;
2774*4882a593Smuzhiyun 	u64	pn;
2775*4882a593Smuzhiyun };
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun static int
2778*4882a593Smuzhiyun __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2779*4882a593Smuzhiyun {
2780*4882a593Smuzhiyun 	u64 token64;
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun 	if (match_u64(sstr, &token64))
2783*4882a593Smuzhiyun 		return -EINVAL;
2784*4882a593Smuzhiyun 	*val = token64;
2785*4882a593Smuzhiyun 
2786*4882a593Smuzhiyun 	return 0;
2787*4882a593Smuzhiyun }
2788*4882a593Smuzhiyun 
2789*4882a593Smuzhiyun /*
2790*4882a593Smuzhiyun  * This routine validates and extracts the WWN's from the TRADDR string.
2791*4882a593Smuzhiyun  * As kernel parsers need the 0x to determine number base, universally
2792*4882a593Smuzhiyun  * build string to parse with 0x prefix before parsing name strings.
2793*4882a593Smuzhiyun  */
2794*4882a593Smuzhiyun static int
2795*4882a593Smuzhiyun nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2796*4882a593Smuzhiyun {
2797*4882a593Smuzhiyun 	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2798*4882a593Smuzhiyun 	substring_t wwn = { name, &name[sizeof(name)-1] };
2799*4882a593Smuzhiyun 	int nnoffset, pnoffset;
2800*4882a593Smuzhiyun 
2801*4882a593Smuzhiyun 	/* validate that the string is one of the two allowed formats */
2802*4882a593Smuzhiyun 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2803*4882a593Smuzhiyun 			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2804*4882a593Smuzhiyun 			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2805*4882a593Smuzhiyun 				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2806*4882a593Smuzhiyun 		nnoffset = NVME_FC_TRADDR_OXNNLEN;
2807*4882a593Smuzhiyun 		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2808*4882a593Smuzhiyun 						NVME_FC_TRADDR_OXNNLEN;
2809*4882a593Smuzhiyun 	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2810*4882a593Smuzhiyun 			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2811*4882a593Smuzhiyun 			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2812*4882a593Smuzhiyun 				"pn-", NVME_FC_TRADDR_NNLEN))) {
2813*4882a593Smuzhiyun 		nnoffset = NVME_FC_TRADDR_NNLEN;
2814*4882a593Smuzhiyun 		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2815*4882a593Smuzhiyun 	} else
2816*4882a593Smuzhiyun 		goto out_einval;
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 	name[0] = '0';
2819*4882a593Smuzhiyun 	name[1] = 'x';
2820*4882a593Smuzhiyun 	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2821*4882a593Smuzhiyun 
2822*4882a593Smuzhiyun 	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2823*4882a593Smuzhiyun 	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2824*4882a593Smuzhiyun 		goto out_einval;
2825*4882a593Smuzhiyun 
2826*4882a593Smuzhiyun 	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2827*4882a593Smuzhiyun 	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2828*4882a593Smuzhiyun 		goto out_einval;
2829*4882a593Smuzhiyun 
2830*4882a593Smuzhiyun 	return 0;
2831*4882a593Smuzhiyun 
2832*4882a593Smuzhiyun out_einval:
2833*4882a593Smuzhiyun 	pr_warn("%s: bad traddr string\n", __func__);
2834*4882a593Smuzhiyun 	return -EINVAL;
2835*4882a593Smuzhiyun }
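
/*
 * Editor's illustrative sketch (not part of the original file): the two
 * traddr string forms accepted by nvme_fc_parse_traddr() above, using
 * made-up WWN values. The long form carries "0x" prefixes on the 16
 * hex-digit names, the short form omits them; both are commonly written
 * with a ':' between the nn- and pn- parts.
 */
static int __maybe_unused nvme_fc_example_parse_traddr(void)
{
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	char long_form[]  = "nn-0x20000090fac7e6a7:pn-0x10000090fac7e6a7";
	char short_form[] = "nn-20000090fac7e6a7:pn-10000090fac7e6a7";
	int ret;

	ret = nvme_fc_parse_traddr(&traddr, long_form, sizeof(long_form));
	if (ret)
		return ret;

	/* traddr.nn / traddr.pn now hold the parsed node and port names */
	return nvme_fc_parse_traddr(&traddr, short_form, sizeof(short_form));
}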
2836*4882a593Smuzhiyun 
2837*4882a593Smuzhiyun static int
2838*4882a593Smuzhiyun nvmet_fc_add_port(struct nvmet_port *port)
2839*4882a593Smuzhiyun {
2840*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport;
2841*4882a593Smuzhiyun 	struct nvmet_fc_port_entry *pe;
2842*4882a593Smuzhiyun 	struct nvmet_fc_traddr traddr = { 0L, 0L };
2843*4882a593Smuzhiyun 	unsigned long flags;
2844*4882a593Smuzhiyun 	int ret;
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun 	/* validate the address info */
2847*4882a593Smuzhiyun 	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2848*4882a593Smuzhiyun 	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2849*4882a593Smuzhiyun 		return -EINVAL;
2850*4882a593Smuzhiyun 
2851*4882a593Smuzhiyun 	/* map the traddr address info to a target port */
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun 	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2854*4882a593Smuzhiyun 			sizeof(port->disc_addr.traddr));
2855*4882a593Smuzhiyun 	if (ret)
2856*4882a593Smuzhiyun 		return ret;
2857*4882a593Smuzhiyun 
2858*4882a593Smuzhiyun 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2859*4882a593Smuzhiyun 	if (!pe)
2860*4882a593Smuzhiyun 		return -ENOMEM;
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	ret = -ENXIO;
2863*4882a593Smuzhiyun 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2864*4882a593Smuzhiyun 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2865*4882a593Smuzhiyun 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2866*4882a593Smuzhiyun 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
2867*4882a593Smuzhiyun 			/* an FC port can be bound to only one nvmet port id */
2868*4882a593Smuzhiyun 			if (!tgtport->pe) {
2869*4882a593Smuzhiyun 				nvmet_fc_portentry_bind(tgtport, pe, port);
2870*4882a593Smuzhiyun 				ret = 0;
2871*4882a593Smuzhiyun 			} else
2872*4882a593Smuzhiyun 				ret = -EALREADY;
2873*4882a593Smuzhiyun 			break;
2874*4882a593Smuzhiyun 		}
2875*4882a593Smuzhiyun 	}
2876*4882a593Smuzhiyun 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2877*4882a593Smuzhiyun 
2878*4882a593Smuzhiyun 	if (ret)
2879*4882a593Smuzhiyun 		kfree(pe);
2880*4882a593Smuzhiyun 
2881*4882a593Smuzhiyun 	return ret;
2882*4882a593Smuzhiyun }
2883*4882a593Smuzhiyun 
2884*4882a593Smuzhiyun static void
2885*4882a593Smuzhiyun nvmet_fc_remove_port(struct nvmet_port *port)
2886*4882a593Smuzhiyun {
2887*4882a593Smuzhiyun 	struct nvmet_fc_port_entry *pe = port->priv;
2888*4882a593Smuzhiyun 
2889*4882a593Smuzhiyun 	nvmet_fc_portentry_unbind(pe);
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 	kfree(pe);
2892*4882a593Smuzhiyun }
2893*4882a593Smuzhiyun 
2894*4882a593Smuzhiyun static void
2895*4882a593Smuzhiyun nvmet_fc_discovery_chg(struct nvmet_port *port)
2896*4882a593Smuzhiyun {
2897*4882a593Smuzhiyun 	struct nvmet_fc_port_entry *pe = port->priv;
2898*4882a593Smuzhiyun 	struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2899*4882a593Smuzhiyun 
2900*4882a593Smuzhiyun 	if (tgtport && tgtport->ops->discovery_event)
2901*4882a593Smuzhiyun 		tgtport->ops->discovery_event(&tgtport->fc_target_port);
2902*4882a593Smuzhiyun }
2903*4882a593Smuzhiyun 
2904*4882a593Smuzhiyun static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2905*4882a593Smuzhiyun 	.owner			= THIS_MODULE,
2906*4882a593Smuzhiyun 	.type			= NVMF_TRTYPE_FC,
2907*4882a593Smuzhiyun 	.msdbd			= 1,
2908*4882a593Smuzhiyun 	.add_port		= nvmet_fc_add_port,
2909*4882a593Smuzhiyun 	.remove_port		= nvmet_fc_remove_port,
2910*4882a593Smuzhiyun 	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
2911*4882a593Smuzhiyun 	.delete_ctrl		= nvmet_fc_delete_ctrl,
2912*4882a593Smuzhiyun 	.discovery_chg		= nvmet_fc_discovery_chg,
2913*4882a593Smuzhiyun };
2914*4882a593Smuzhiyun 
2915*4882a593Smuzhiyun static int __init nvmet_fc_init_module(void)
2916*4882a593Smuzhiyun {
2917*4882a593Smuzhiyun 	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2918*4882a593Smuzhiyun }
2919*4882a593Smuzhiyun 
2920*4882a593Smuzhiyun static void __exit nvmet_fc_exit_module(void)
2921*4882a593Smuzhiyun {
2922*4882a593Smuzhiyun 	/* sanity check - all targetports should be removed */
2923*4882a593Smuzhiyun 	if (!list_empty(&nvmet_fc_target_list))
2924*4882a593Smuzhiyun 		pr_warn("%s: targetport list not empty\n", __func__);
2925*4882a593Smuzhiyun 
2926*4882a593Smuzhiyun 	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun 	ida_destroy(&nvmet_fc_tgtport_cnt);
2929*4882a593Smuzhiyun }
2930*4882a593Smuzhiyun 
2931*4882a593Smuzhiyun module_init(nvmet_fc_init_module);
2932*4882a593Smuzhiyun module_exit(nvmet_fc_exit_module);
2933*4882a593Smuzhiyun 
2934*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
2935