/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
static void csio_rnode_exit(struct csio_rnode *);

/* State machine forward declarations */
static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);

/* RNF event mapping */
static enum csio_rn_ev fwevt_to_rnevt[] = {
	CSIO_RNFE_NONE,		/* None */
	CSIO_RNFE_LOGGED_IN,	/* PLOGI_ACC_RCVD  */
	CSIO_RNFE_NONE,		/* PLOGI_RJT_RCVD  */
	CSIO_RNFE_PLOGI_RECV,	/* PLOGI_RCVD	   */
	CSIO_RNFE_LOGO_RECV,	/* PLOGO_RCVD	   */
	CSIO_RNFE_PRLI_DONE,	/* PRLI_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* PRLI_RJT_RCVD   */
	CSIO_RNFE_PRLI_RECV,	/* PRLI_RCVD	   */
	CSIO_RNFE_PRLO_RECV,	/* PRLO_RCVD	   */
	CSIO_RNFE_NONE,		/* NPORT_ID_CHGD   */
	CSIO_RNFE_LOGO_RECV,	/* FLOGO_RCVD	   */
	CSIO_RNFE_NONE,		/* CLR_VIRT_LNK_RCVD */
	CSIO_RNFE_LOGGED_IN,	/* FLOGI_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* FLOGI_RJT_RCVD   */
	CSIO_RNFE_LOGGED_IN,	/* FDISC_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* FDISC_RJT_RCVD   */
	CSIO_RNFE_NONE,		/* FLOGI_TMO_MAX_RETRY */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_ACC */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_RJT */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_CNFLT */
	CSIO_RNFE_NONE,		/* PRLI_TMO		*/
	CSIO_RNFE_NONE,		/* ADISC_TMO		*/
	CSIO_RNFE_NAME_MISSING,	/* RSCN_DEV_LOST  */
	CSIO_RNFE_NONE,		/* SCR_ACC_RCVD	*/
	CSIO_RNFE_NONE,		/* ADISC_RJT_RCVD */
	CSIO_RNFE_NONE,		/* LOGO_SNT */
	CSIO_RNFE_LOGO_RECV,	/* PROTO_ERR_IMPL_LOGO */
};

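/*
 * Map a firmware rdev event to the corresponding rnode SM event using the
 * table above. Events beyond PROTO_ERR_IMPL_LOGO have no SM mapping and
 * resolve to CSIO_RNFE_NONE.
 */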
#define CSIO_FWE_TO_RNFE(_evt)	((_evt > PROTO_ERR_IMPL_LOGO) ?		\
						CSIO_RNFE_NONE :	\
						fwevt_to_rnevt[_evt])
int
csio_is_rnode_ready(struct csio_rnode *rn)
{
	return csio_match_state(rn, csio_rns_ready);
}

static int
csio_is_rnode_uninit(struct csio_rnode *rn)
{
	return csio_match_state(rn, csio_rns_uninit);
}

static int
csio_is_rnode_wka(uint8_t rport_type)
{
	if ((rport_type == FLOGI_VFPORT) ||
	    (rport_type == FDISC_VFPORT) ||
	    (rport_type == NS_VNPORT) ||
	    (rport_type == FDMI_VNPORT))
		return 1;

	return 0;
}

/*
 * csio_rn_lookup - Finds the rnode with the given flowid
 * @ln - lnode
 * @flowid - flowid.
 *
 * Does the rnode lookup on the given lnode and flowid. If no matching
 * entry is found, NULL is returned.
 */
static struct csio_rnode *
csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (rn->flowid == flowid)
			return rn;
	}

	return NULL;
}

/*
 * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
 * @ln: lnode
 * @wwpn: wwpn
 *
 * Does the rnode lookup on the given lnode and wwpn. If no matching
 * entry is found, NULL is returned.
 */
static struct csio_rnode *
csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
			return rn;
	}

	return NULL;
}

/**
 * csio_rnode_lookup_portid - Finds the rnode with the given portid
 * @ln:		lnode
 * @portid:	port id
 *
 * Looks up the rnode list for the given portid. If no matching
 * entry is found, NULL is returned.
 */
struct csio_rnode *
csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (rn->nport_id == portid)
			return rn;
	}

	return NULL;
}

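/*
 * csio_rn_dup_flowid - Checks whether the given rdev flowid is already in
 * use by a ready rnode on any other lnode of this hardware.
 * @ln: lnode on which the rdev event arrived
 * @rdev_flowid: remote device flowid to check
 * @vnp_flowid: set to the owning lnode's flowid when a duplicate is found
 *
 * Returns 1 if a duplicate is found, 0 otherwise.
 */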
static int
csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
		    uint32_t *vnp_flowid)
{
	struct csio_rnode *rnhead;
	struct list_head *tmp, *tmp1;
	struct csio_rnode *rn;
	struct csio_lnode *ln_tmp;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	list_for_each(tmp1, &hw->sln_head) {
		ln_tmp = (struct csio_lnode *) tmp1;
		if (ln_tmp == ln)
			continue;

		rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
		list_for_each(tmp, &rnhead->sm.sm_list) {

			rn = (struct csio_rnode *) tmp;
			if (csio_is_rnode_ready(rn)) {
				if (rn->flowid == rdev_flowid) {
					*vnp_flowid = csio_ln_flowid(ln_tmp);
					return 1;
				}
			}
		}
	}

	return 0;
}

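/*
 * csio_alloc_rnode - Allocates and initializes an rnode from the HW rnode
 * mempool and links it onto the lnode's rnode list.
 * @ln: lnode
 *
 * Returns the new rnode, or NULL on allocation/init failure.
 */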
static struct csio_rnode *
csio_alloc_rnode(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
	if (!rn)
		goto err;

	memset(rn, 0, sizeof(struct csio_rnode));
	if (csio_rnode_init(rn, ln))
		goto err_free;

	CSIO_INC_STATS(ln, n_rnode_alloc);

	return rn;

err_free:
	mempool_free(rn, hw->rnode_mempool);
err:
	CSIO_INC_STATS(ln, n_rnode_nomem);
	return NULL;
}

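/*
 * csio_free_rnode - Unlinks the rnode from its lnode and returns it to the
 * HW rnode mempool.
 * @rn: rnode to free
 */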
static void
csio_free_rnode(struct csio_rnode *rn)
{
	struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));

	csio_rnode_exit(rn);
	CSIO_INC_STATS(rn->lnp, n_rnode_free);
	mempool_free(rn, hw->rnode_mempool);
}

/*
 * csio_get_rnode - Gets rnode with the given flowid
 * @ln - lnode
 * @flowid - flow id.
 *
 * Does the rnode lookup on the given lnode and flowid. If no matching
 * rnode is found, a new rnode with the given flowid is allocated and
 * returned.
 */
static struct csio_rnode *
csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
{
	struct csio_rnode *rn;

	rn = csio_rn_lookup(ln, flowid);
	if (!rn) {
		rn = csio_alloc_rnode(ln);
		if (!rn)
			return NULL;

		rn->flowid = flowid;
	}

	return rn;
}

/*
 * csio_put_rnode - Frees the given rnode
 * @ln - lnode
 * @rn - rnode.
 *
 * Returns the rnode to the HW mempool. The rnode must already be in the
 * uninit state.
 */
void
csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
{
	CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
	csio_free_rnode(rn);
}

/*
 * csio_confirm_rnode - confirms rnode based on wwpn.
 * @ln: lnode
 * @rdev_flowid: remote device flowid
 * @rdevp: remote device params
 *
 * This routine searches the rnode list for an existing rnode with the same
 * wwpn as the new rnode. If there is a match, the matched rnode is returned;
 * otherwise a new rnode is returned.
 */
struct csio_rnode *
csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
		   struct fcoe_rdev_entry *rdevp)
{
	uint8_t rport_type;
	struct csio_rnode *rn, *match_rn;
	uint32_t vnp_flowid = 0;
	__be32 *port_id;

	port_id = (__be32 *)&rdevp->r_id[0];
	rport_type =
		FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);

	/* Drop rdev event for cntrl port */
	if (rport_type == FAB_CTLR_VNPORT) {
		csio_ln_dbg(ln,
			    "Unhandled rport_type:%d recv in rdev evt "
			    "ssni:x%x\n", rport_type, rdev_flowid);
		return NULL;
	}

	/* Lookup on flowid */
	rn = csio_rn_lookup(ln, rdev_flowid);
	if (!rn) {

		/* Drop events with duplicate flowid */
		if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
			csio_ln_warn(ln,
				     "ssni:%x already active on vnpi:%x",
				     rdev_flowid, vnp_flowid);
			return NULL;
		}

		/* Lookup on wwpn for NPORTs */
		rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
		if (!rn)
			goto alloc_rnode;

	} else {
		/* Lookup well-known ports with nport id */
		if (csio_is_rnode_wka(rport_type)) {
			match_rn = csio_rnode_lookup_portid(ln,
				      ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
			if (match_rn == NULL) {
				csio_rn_flowid(rn) = CSIO_INVALID_IDX;
				goto alloc_rnode;
			}

			/*
			 * Now compare the wwpn to confirm that the same
			 * port re-logged in. If so, update the matched rn;
			 * else go ahead and allocate a new rnode.
			 */
			if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
				if (rn == match_rn)
					goto found_rnode;
				csio_ln_dbg(ln,
					    "nport_id:x%x and wwpn:%llx"
					    " match for ssni:x%x\n",
					    rn->nport_id,
					    wwn_to_u64(rdevp->wwpn),
					    rdev_flowid);
				if (csio_is_rnode_ready(rn)) {
					csio_ln_warn(ln,
						     "rnode is already "
						     "active ssni:x%x\n",
						     rdev_flowid);
					CSIO_ASSERT(0);
				}
				csio_rn_flowid(rn) = CSIO_INVALID_IDX;
				rn = match_rn;

				/* Update rn */
				goto found_rnode;
			}
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			goto alloc_rnode;
		}

		/* wwpn match */
		if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
			goto found_rnode;

		/* Search for an rnode that has the same wwpn */
		match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
		if (match_rn != NULL) {
			csio_ln_dbg(ln,
				"ssni:x%x changed for rport name(wwpn):%llx "
				"did:x%x\n", rdev_flowid,
				wwn_to_u64(rdevp->wwpn),
				match_rn->nport_id);
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			rn = match_rn;
		} else {
			csio_ln_dbg(ln,
				"rnode wwpn mismatch found ssni:x%x "
				"name(wwpn):%llx\n",
				rdev_flowid,
				wwn_to_u64(csio_rn_wwpn(rn)));
			if (csio_is_rnode_ready(rn)) {
				csio_ln_warn(ln,
					     "rnode is already active "
					     "wwpn:%llx ssni:x%x\n",
					     wwn_to_u64(csio_rn_wwpn(rn)),
					     rdev_flowid);
				CSIO_ASSERT(0);
			}
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			goto alloc_rnode;
		}
	}

found_rnode:
	csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
		rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

	/* Update flowid */
	csio_rn_flowid(rn) = rdev_flowid;

	/* update rdev entry */
	rn->rdev_entry = rdevp;
	CSIO_INC_STATS(ln, n_rnode_match);
	return rn;

alloc_rnode:
	rn = csio_get_rnode(ln, rdev_flowid);
	if (!rn)
		return NULL;

	csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
		rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

	/* update rdev entry */
	rn->rdev_entry = rdevp;
	return rn;
}

/*
 * csio_rn_verify_rparams - verify rparams.
 * @ln: lnode
 * @rn: rnode
 * @rdevp: remote device params
 *
 * Returns 0 if the rparams are verified successfully, -EINVAL otherwise.
 */
static int
csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
			struct fcoe_rdev_entry *rdevp)
{
	uint8_t null[8];
	uint8_t rport_type;
	uint8_t fc_class;
	__be32 *did;

	did = (__be32 *) &rdevp->r_id[0];
	rport_type =
		FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
	switch (rport_type) {
	case FLOGI_VFPORT:
		rn->role = CSIO_RNFR_FABRIC;
		if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
			csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
				csio_rn_flowid(rn));
			return -EINVAL;
		}
		/* NPIV support */
		if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
			ln->flags |= CSIO_LNF_NPIVSUPP;

		break;

	case NS_VNPORT:
		rn->role = CSIO_RNFR_NS;
		if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
			csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
				csio_rn_flowid(rn));
			return -EINVAL;
		}
		break;

	case REG_FC4_VNPORT:
	case REG_VNPORT:
		rn->role = CSIO_RNFR_NPORT;
		if (rdevp->event_cause == PRLI_ACC_RCVD ||
			rdevp->event_cause == PRLI_RCVD) {
			if (FW_RDEV_WR_TASK_RETRY_ID_GET(
							rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;

			if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_RETRY;

			if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_CONF_COMPL;

			if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
				rn->role |= CSIO_RNFR_TARGET;

			if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
				rn->role |= CSIO_RNFR_INITIATOR;
		}

		break;

	case FDMI_VNPORT:
	case FAB_CTLR_VNPORT:
		rn->role = 0;
		break;

	default:
		csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
			csio_rn_flowid(rn), rport_type);
		return -EINVAL;
	}

	/* validate wwpn/wwnn for Name server/remote port */
	if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
		memset(null, 0, 8);
		if (!memcmp(rdevp->wwnn, null, 8)) {
			csio_ln_err(ln,
				    "ssni:x%x invalid wwnn received from"
				    " rport did:x%x\n",
				    csio_rn_flowid(rn),
				    (ntohl(*did) & CSIO_DID_MASK));
			return -EINVAL;
		}

		if (!memcmp(rdevp->wwpn, null, 8)) {
			csio_ln_err(ln,
				    "ssni:x%x invalid wwpn received from"
				    " rport did:x%x\n",
				    csio_rn_flowid(rn),
				    (ntohl(*did) & CSIO_DID_MASK));
			return -EINVAL;
		}

	}

	/* Copy wwnn, wwpn and nport id */
	rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
	memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
	memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
	rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
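	/*
	 * fc_class is treated as a 1-based class-of-service value
	 * (class 1-4), hence the clsp[fc_class - 1] index below.
	 */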
	fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
	rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);

	return 0;
}

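/*
 * __csio_reg_rnode - Registers the rnode with the SCSI transport.
 * @rn: rnode
 *
 * Drops the HW lock across csio_reg_rnode(), which registers the remote
 * port with the FC transport, bumps the lnode's SCSI target count for
 * target-capable rnodes and starts FDMI on the management server rnode.
 */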
static void
__csio_reg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_unlock_irq(&hw->lock);
	csio_reg_rnode(rn);
	spin_lock_irq(&hw->lock);

	if (rn->role & CSIO_RNFR_TARGET)
		ln->n_scsi_tgts++;

	if (rn->nport_id == FC_FID_MGMT_SERV)
		csio_ln_fdmi_start(ln, (void *) rn);
}

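/*
 * __csio_unreg_rnode - Unregisters the rnode from the SCSI transport.
 * @rn: rnode
 *
 * Moves any I/Os parked on the rnode's host completion queue onto a local
 * list, adjusts the lnode's SCSI target counts, drops the HW lock around
 * csio_unreg_rnode() and finally cleans up the parked I/Os.
 */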
static void
__csio_unreg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	LIST_HEAD(tmp_q);
	int cmpl = 0;

	if (!list_empty(&rn->host_cmpl_q)) {
		csio_dbg(hw, "Returning completion queue I/Os\n");
		list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
		cmpl = 1;
	}

	if (rn->role & CSIO_RNFR_TARGET) {
		ln->n_scsi_tgts--;
		ln->last_scan_ntgts--;
	}

	spin_unlock_irq(&hw->lock);
	csio_unreg_rnode(rn);
	spin_lock_irq(&hw->lock);

	/* Cleanup I/Os that were waiting for rnode to unregister */
	if (cmpl)
		csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);

}

/*****************************************************************************/
/* START: Rnode SM                                                           */
/*****************************************************************************/
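
/*
 * The rnode SM has four states: UNINIT (not registered), READY (logged in
 * and registered with the SCSI transport), OFFLINE (logged out or link
 * down) and DISAPPEARED (name missing, e.g. dropped from an RSCN report).
 * Login-type firmware events drive an rnode towards READY; LOGO, DOWN and
 * NAME_MISSING events drive it to OFFLINE or DISAPPEARED, and CLOSE
 * returns it to UNINIT so it can be freed.
 */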

/*
 * csio_rns_uninit - Rnode SM entry point for the UNINIT state.
 * @rn - rnode
 * @evt - SM event.
 *
 */
static void
csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
		}
		break;
	case CSIO_RNFE_LOGO_RECV:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv "
			    "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;
	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv "
			    "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}


/*
 * csio_rns_ready - Rnode SM entry point for the READY state.
 * @rn - rnode
 * @evt - SM event.
 *
 */
static void
csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		csio_ln_dbg(ln,
			"ssni:x%x Ignoring event %d recv from did:x%x "
			"in rn state[ready]\n", csio_rn_flowid(rn), evt,
			rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;

	case CSIO_RNFE_PRLI_DONE:
	case CSIO_RNFE_PRLI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret)
			__csio_reg_rnode(rn);
		else
			CSIO_INC_STATS(rn, n_err_inval);

		break;
	case CSIO_RNFE_DOWN:
		csio_set_state(&rn->sm, csio_rns_offline);
		__csio_unreg_rnode(rn);

		/* FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */
		break;

	case CSIO_RNFE_LOGO_RECV:
		csio_set_state(&rn->sm, csio_rns_offline);

		__csio_unreg_rnode(rn);

		/* FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */
		break;

	case CSIO_RNFE_CLOSE:
		/*
		 * Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding I/Os on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		__csio_unreg_rnode(rn);
		break;

	case CSIO_RNFE_NAME_MISSING:
		csio_set_state(&rn->sm, csio_rns_disappeared);
		__csio_unreg_rnode(rn);

		/*
		 * FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */

		break;

	default:
		csio_ln_dbg(ln,
			"ssni:x%x unexp event %d recv from did:x%x "
			"in rn state[ready]\n", csio_rn_flowid(rn), evt,
			rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}


/*
 * csio_rns_offline - Rnode SM entry point for the OFFLINE state.
 * @rn - rnode
 * @evt - SM event.
 *
 */
static void
csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
			csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
		}
		break;

	case CSIO_RNFE_DOWN:
		csio_ln_dbg(ln,
			"ssni:x%x Ignoring event %d recv from did:x%x "
			"in rn state[offline]\n", csio_rn_flowid(rn), evt,
			rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;

	case CSIO_RNFE_CLOSE:
		/* Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding I/Os on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		break;

	case CSIO_RNFE_NAME_MISSING:
		csio_set_state(&rn->sm, csio_rns_disappeared);
		break;

	default:
		csio_ln_dbg(ln,
			"ssni:x%x unexp event %d recv from did:x%x "
			"in rn state[offline]\n", csio_rn_flowid(rn), evt,
			rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}


/*
 * csio_rns_disappeared - Rnode SM entry point for the DISAPPEARED state.
 * @rn - rnode
 * @evt - SM event.
 *
 */
static void
csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
			csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
		}
		break;

	case CSIO_RNFE_CLOSE:
		/* Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding I/Os on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		break;

	case CSIO_RNFE_DOWN:
	case CSIO_RNFE_NAME_MISSING:
		csio_ln_dbg(ln,
			"ssni:x%x Ignoring event %d recv from did:x%x "
			"in rn state[disappeared]\n", csio_rn_flowid(rn),
			evt, rn->nport_id);
		break;

	default:
		csio_ln_dbg(ln,
			"ssni:x%x unexp event %d recv from did:x%x "
			"in rn state[disappeared]\n", csio_rn_flowid(rn),
			evt, rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}


/*****************************************************************************/
/* END: Rnode SM                                                             */
/*****************************************************************************/

/*
 * csio_rnode_devloss_handler - Device loss event handler
 * @rn: rnode
 *
 * Post event to close rnode SM and free rnode.
 */
void
csio_rnode_devloss_handler(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);

	/* ignore if the same rnode came back online */
	if (csio_is_rnode_ready(rn))
		return;

	csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);

	/* Free rn if in uninit state */
	if (csio_is_rnode_uninit(rn))
		csio_put_rnode(ln, rn);
}


/**
 * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
 * @rn:		rnode
 * @fwevt:	firmware event to handle
 */
void
csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	enum csio_rn_ev evt;

	evt = CSIO_FWE_TO_RNFE(fwevt);
	if (!evt) {
		csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
			    csio_rn_flowid(rn), fwevt);
		CSIO_INC_STATS(rn, n_evt_unexp);
		return;
	}
	CSIO_INC_STATS(rn, n_evt_fw[fwevt]);

	/* Track previous & current events for debugging */
	rn->prev_evt = rn->cur_evt;
	rn->cur_evt = fwevt;

	/* Post event to rnode SM */
	csio_post_event(&rn->sm, evt);

	/* Free rn if in uninit state */
	if (csio_is_rnode_uninit(rn))
		csio_put_rnode(ln, rn);
}


/*
 * csio_rnode_init - Initialize rnode.
 * @rn: RNode
 * @ln: Associated lnode
 *
 * The caller is responsible for holding the lock; it must be held while
 * inserting the rnode into the ln->rnhead list.
 */
static int
csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
{
	csio_rnode_to_lnode(rn) = ln;
	csio_init_state(&rn->sm, csio_rns_uninit);
	INIT_LIST_HEAD(&rn->host_cmpl_q);
	csio_rn_flowid(rn) = CSIO_INVALID_IDX;

	/* Add rnode to the lnode's rnhead list */
	list_add_tail(&rn->sm.sm_list, &ln->rnhead);

	return 0;
}

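/*
 * csio_rnode_exit - Tear down an rnode before it is freed.
 * @rn: rnode
 *
 * Removes the rnode from its lnode's list and asserts that no I/Os are
 * still parked on its host completion queue.
 */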
static void
csio_rnode_exit(struct csio_rnode *rn)
{
	list_del_init(&rn->sm.sm_list);
	CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
}