/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ms.h>

#include "csio_hw.h"
#include "csio_mb.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_fcoe_rnodes = 1024;
int csio_fdmi_enable = 1;

#define PORT_ID_PTR(_x)         ((uint8_t *)(&_x) + 1)
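/*
 * Note: PORT_ID_PTR() points one byte past the start of a 32-bit value,
 * i.e. at the three significant bytes of a 24-bit FC port ID (this assumes
 * the value is stored in network byte order).
 */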

/* Lnode SM declarations */
static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);

static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
                void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
                enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);

/* LN event mapping */
static enum csio_ln_ev fwevt_to_lnevt[] = {
        CSIO_LNE_NONE,          /* None */
        CSIO_LNE_NONE,          /* PLOGI_ACC_RCVD  */
        CSIO_LNE_NONE,          /* PLOGI_RJT_RCVD  */
        CSIO_LNE_NONE,          /* PLOGI_RCVD      */
        CSIO_LNE_NONE,          /* PLOGO_RCVD      */
        CSIO_LNE_NONE,          /* PRLI_ACC_RCVD   */
        CSIO_LNE_NONE,          /* PRLI_RJT_RCVD   */
        CSIO_LNE_NONE,          /* PRLI_RCVD       */
        CSIO_LNE_NONE,          /* PRLO_RCVD       */
        CSIO_LNE_NONE,          /* NPORT_ID_CHGD   */
        CSIO_LNE_LOGO,          /* FLOGO_RCVD      */
        CSIO_LNE_LOGO,          /* CLR_VIRT_LNK_RCVD */
        CSIO_LNE_FAB_INIT_DONE, /* FLOGI_ACC_RCVD   */
        CSIO_LNE_NONE,          /* FLOGI_RJT_RCVD   */
        CSIO_LNE_FAB_INIT_DONE, /* FDISC_ACC_RCVD   */
        CSIO_LNE_NONE,          /* FDISC_RJT_RCVD   */
        CSIO_LNE_NONE,          /* FLOGI_TMO_MAX_RETRY */
        CSIO_LNE_NONE,          /* IMPL_LOGO_ADISC_ACC */
        CSIO_LNE_NONE,          /* IMPL_LOGO_ADISC_RJT */
        CSIO_LNE_NONE,          /* IMPL_LOGO_ADISC_CNFLT */
        CSIO_LNE_NONE,          /* PRLI_TMO */
        CSIO_LNE_NONE,          /* ADISC_TMO */
        CSIO_LNE_NONE,          /* RSCN_DEV_LOST */
        CSIO_LNE_NONE,          /* SCR_ACC_RCVD */
        CSIO_LNE_NONE,          /* ADISC_RJT_RCVD */
        CSIO_LNE_NONE,          /* LOGO_SNT */
        CSIO_LNE_NONE,          /* PROTO_ERR_IMPL_LOGO */
};

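/*
 * CSIO_FWE_TO_LNE() below translates a firmware FCoE event code into the
 * corresponding lnode SM event; codes beyond the table fall back to
 * CSIO_LNE_NONE so unknown firmware events are ignored.
 */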
#define CSIO_FWE_TO_LNE(_evt)   ((_evt > PROTO_ERR_IMPL_LOGO) ?         \
                                                CSIO_LNE_NONE :         \
                                                fwevt_to_lnevt[_evt])

#define csio_ct_rsp(cp)         (((struct fc_ct_hdr *)cp)->ct_cmd)
#define csio_ct_reason(cp)      (((struct fc_ct_hdr *)cp)->ct_reason)
#define csio_ct_expl(cp)        (((struct fc_ct_hdr *)cp)->ct_explan)
#define csio_ct_get_pld(cp)     ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))
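/*
 * These accessors treat the start of a CT payload buffer as a struct
 * fc_ct_hdr; csio_ct_get_pld() skips the FC_CT_HDR_LEN-byte basic CT
 * header to reach the command-specific payload.
 */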

/*
 * csio_ln_lookup_by_portid - Lookup lnode using given portid.
 * @hw: HW module
 * @portid: port-id.
 *
 * If found, returns lnode matching given portid otherwise returns NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
{
        struct csio_lnode *ln;
        struct list_head *tmp;

        /* Match sibling lnodes with portid */
        list_for_each(tmp, &hw->sln_head) {
                ln = (struct csio_lnode *) tmp;
                if (ln->portid == portid)
                        return ln;
        }

        return NULL;
}

/*
 * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
 * @hw - HW module
 * @vnpi - vnp index.
 * Returns - If found, returns lnode matching given vnp id
 * otherwise returns NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
{
        struct list_head *tmp1, *tmp2;
        struct csio_lnode *sln = NULL, *cln = NULL;

        if (list_empty(&hw->sln_head)) {
                CSIO_INC_STATS(hw, n_lnlkup_miss);
                return NULL;
        }
        /* Traverse sibling lnodes */
        list_for_each(tmp1, &hw->sln_head) {
                sln = (struct csio_lnode *) tmp1;

                /* Match sibling lnode */
                if (sln->vnp_flowid == vnp_id)
                        return sln;

                if (list_empty(&sln->cln_head))
                        continue;

                /* Traverse children lnodes */
                list_for_each(tmp2, &sln->cln_head) {
                        cln = (struct csio_lnode *) tmp2;

                        if (cln->vnp_flowid == vnp_id)
                                return cln;
                }
        }
        CSIO_INC_STATS(hw, n_lnlkup_miss);
        return NULL;
}

/**
 * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
 * @hw: HW module.
 * @wwpn: WWPN.
 *
 * If found, returns lnode matching given wwpn, returns NULL otherwise.
 */
struct csio_lnode *
csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
{
        struct list_head *tmp1, *tmp2;
        struct csio_lnode *sln = NULL, *cln = NULL;

        if (list_empty(&hw->sln_head)) {
                CSIO_INC_STATS(hw, n_lnlkup_miss);
                return NULL;
        }
        /* Traverse sibling lnodes */
        list_for_each(tmp1, &hw->sln_head) {
                sln = (struct csio_lnode *) tmp1;

                /* Match sibling lnode */
                if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
                        return sln;

                if (list_empty(&sln->cln_head))
                        continue;

                /* Traverse children lnodes */
                list_for_each(tmp2, &sln->cln_head) {
                        cln = (struct csio_lnode *) tmp2;

                        if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
                                return cln;
                }
        }
        return NULL;
}

/* FDMI */
static void
csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
{
        struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
        cmd->ct_rev = FC_CT_REV;
        cmd->ct_fs_type = type;
        cmd->ct_fs_subtype = sub_type;
        cmd->ct_cmd = htons(op);
}
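/*
 * csio_fill_ct_iu() only fills the basic CT preamble (revision, GS type,
 * GS subtype and command code); the remaining header fields are expected
 * to have been zeroed by the caller's memset of the CT header.
 */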

static int
csio_hostname(uint8_t *buf, size_t buf_len)
{
        if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
                return 0;
        return -1;
}

static int
csio_osname(uint8_t *buf, size_t buf_len)
{
        if (snprintf(buf, buf_len, "%s %s %s",
                     init_utsname()->sysname,
                     init_utsname()->release,
                     init_utsname()->version) > 0)
                return 0;

        return -1;
}

static inline void
csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
{
        uint16_t len;
        struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;

        if (WARN_ON(val_len > U16_MAX))
                return;

        len = val_len;

        ae->type = htons(type);
        len += 4;               /* includes attribute type and length */
        len = (len + 3) & ~3;   /* should be multiple of 4 bytes */
        ae->len = htons(len);
        memcpy(ae->value, val, val_len);
        if (len > val_len)
                memset(ae->value + val_len, 0, len - val_len);
        *ptr += len;
}
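/*
 * Example (sketch): appending a 4-byte value produces an 8-byte entry on
 * the wire - a 2-byte attribute type, a 2-byte length of 8, then the value
 * padded up to a 4-byte multiple - and *ptr is advanced past the entry.
 */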

/*
 * csio_ln_fdmi_done - FDMI registration completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
        void *cmd;
        struct csio_lnode *ln = fdmi_req->lnode;

        if (fdmi_req->wr_status != FW_SUCCESS) {
                csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
                            fdmi_req->wr_status);
                CSIO_INC_STATS(ln, n_fdmi_err);
        }

        cmd = fdmi_req->dma_buf.vaddr;
        if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
                csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
                            csio_ct_reason(cmd), csio_ct_expl(cmd));
        }
}

/*
 * csio_ln_fdmi_rhba_cbfn - RHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
        void *cmd;
        uint8_t *pld;
        uint32_t len = 0;
        __be32 val;
        __be16 mfs;
        uint32_t numattrs = 0;
        struct csio_lnode *ln = fdmi_req->lnode;
        struct fs_fdmi_attrs *attrib_blk;
        struct fc_fdmi_port_name *port_name;
        uint8_t buf[64];
        uint8_t *fc4_type;
        unsigned long flags;

        if (fdmi_req->wr_status != FW_SUCCESS) {
                csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
                            fdmi_req->wr_status);
                CSIO_INC_STATS(ln, n_fdmi_err);
        }

        cmd = fdmi_req->dma_buf.vaddr;
        if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
                csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
                            csio_ct_reason(cmd), csio_ct_expl(cmd));
        }

        if (!csio_is_rnode_ready(fdmi_req->rnode)) {
                CSIO_INC_STATS(ln, n_fdmi_err);
                return;
        }

        /* Prepare CT hdr for RPA cmd */
        memset(cmd, 0, FC_CT_HDR_LEN);
        csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);

        /* Prepare RPA payload */
        pld = (uint8_t *)csio_ct_get_pld(cmd);
        port_name = (struct fc_fdmi_port_name *)pld;
        memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
        pld += sizeof(*port_name);

        /* Start appending Port attributes */
        attrib_blk = (struct fs_fdmi_attrs *)pld;
        attrib_blk->numattrs = 0;
        len += sizeof(attrib_blk->numattrs);
        pld += sizeof(attrib_blk->numattrs);

        fc4_type = &buf[0];
        memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
        fc4_type[2] = 1;
        fc4_type[7] = 1;
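        /*
         * The two bits set above advertise FCP (FC-4 type 0x08) and CT
         * (type 0x20) in the FC-4 TYPEs bitmap; byte/bit positions assumed
         * per the FC-GS FC-4 TYPEs encoding.
         */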
        csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
                           fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
        numattrs++;
        val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
        csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
                           &val,
                           FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
        numattrs++;

        if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
                val = htonl(FC_PORTSPEED_1GBIT);
        else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
                val = htonl(FC_PORTSPEED_10GBIT);
        else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G)
                val = htonl(FC_PORTSPEED_25GBIT);
        else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G)
                val = htonl(FC_PORTSPEED_40GBIT);
        else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G)
                val = htonl(FC_PORTSPEED_50GBIT);
        else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G)
                val = htonl(FC_PORTSPEED_100GBIT);
        else
                val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
        csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
                           &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
        numattrs++;

        mfs = ln->ln_sparm.csp.sp_bb_data;
        csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
                           &mfs, sizeof(mfs));
        numattrs++;

        strcpy(buf, "csiostor");
        csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
                           strlen(buf));
        numattrs++;

        if (!csio_hostname(buf, sizeof(buf))) {
                csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
                                   buf, strlen(buf));
                numattrs++;
        }
        attrib_blk->numattrs = htonl(numattrs);
        len = (uint32_t)(pld - (uint8_t *)cmd);

        /* Submit FDMI RPA request */
        spin_lock_irqsave(&hw->lock, flags);
        if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
                                    FCOE_CT, &fdmi_req->dma_buf, len)) {
                CSIO_INC_STATS(ln, n_fdmi_err);
                csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
        }
        spin_unlock_irqrestore(&hw->lock, flags);
}

/*
 * csio_ln_fdmi_dprt_cbfn - DPRT completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
        void *cmd;
        uint8_t *pld;
        uint32_t len = 0;
        uint32_t numattrs = 0;
        __be32 maxpayload = htonl(65536);
        struct fc_fdmi_hba_identifier *hbaid;
        struct csio_lnode *ln = fdmi_req->lnode;
        struct fc_fdmi_rpl *reg_pl;
        struct fs_fdmi_attrs *attrib_blk;
        uint8_t buf[64];
        unsigned long flags;

        if (fdmi_req->wr_status != FW_SUCCESS) {
                csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
                            fdmi_req->wr_status);
                CSIO_INC_STATS(ln, n_fdmi_err);
        }

        if (!csio_is_rnode_ready(fdmi_req->rnode)) {
                CSIO_INC_STATS(ln, n_fdmi_err);
                return;
        }
        cmd = fdmi_req->dma_buf.vaddr;
        if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
                csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
                            csio_ct_reason(cmd), csio_ct_expl(cmd));
        }

        /* Prepare CT hdr for RHBA cmd */
        memset(cmd, 0, FC_CT_HDR_LEN);
        csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
        len = FC_CT_HDR_LEN;

        /* Prepare RHBA payload */
        pld = (uint8_t *)csio_ct_get_pld(cmd);
        hbaid = (struct fc_fdmi_hba_identifier *)pld;
        memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
        pld += sizeof(*hbaid);

        /* Register one port per hba */
        reg_pl = (struct fc_fdmi_rpl *)pld;
        reg_pl->numport = htonl(1);
        memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
        pld += sizeof(*reg_pl);

        /* Start appending HBA attributes */
        attrib_blk = (struct fs_fdmi_attrs *)pld;
        attrib_blk->numattrs = 0;
        len += sizeof(attrib_blk->numattrs);
        pld += sizeof(attrib_blk->numattrs);

        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
                           FC_FDMI_HBA_ATTR_NODENAME_LEN);
        numattrs++;

        memset(buf, 0, sizeof(buf));

        strcpy(buf, "Chelsio Communications");
        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
                           strlen(buf));
        numattrs++;
        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
                           hw->vpd.sn, sizeof(hw->vpd.sn));
        numattrs++;
        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
                           sizeof(hw->vpd.id));
        numattrs++;
        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
                           hw->model_desc, strlen(hw->model_desc));
        numattrs++;
        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
                           hw->hw_ver, sizeof(hw->hw_ver));
        numattrs++;
        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
                           hw->fwrev_str, strlen(hw->fwrev_str));
        numattrs++;

        if (!csio_osname(buf, sizeof(buf))) {
                csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
                                   buf, strlen(buf));
                numattrs++;
        }

        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
                           &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
        len = (uint32_t)(pld - (uint8_t *)cmd);
        numattrs++;
        attrib_blk->numattrs = htonl(numattrs);

        /* Submit FDMI RHBA request */
        spin_lock_irqsave(&hw->lock, flags);
        if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
                                    FCOE_CT, &fdmi_req->dma_buf, len)) {
                CSIO_INC_STATS(ln, n_fdmi_err);
                csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
        }
        spin_unlock_irqrestore(&hw->lock, flags);
}

/*
 * csio_ln_fdmi_dhba_cbfn - DHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
        struct csio_lnode *ln = fdmi_req->lnode;
        void *cmd;
        struct fc_fdmi_port_name *port_name;
        uint32_t len;
        unsigned long flags;

        if (fdmi_req->wr_status != FW_SUCCESS) {
                csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
                            fdmi_req->wr_status);
                CSIO_INC_STATS(ln, n_fdmi_err);
        }

        if (!csio_is_rnode_ready(fdmi_req->rnode)) {
                CSIO_INC_STATS(ln, n_fdmi_err);
                return;
        }
        cmd = fdmi_req->dma_buf.vaddr;
        if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
                csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
                            csio_ct_reason(cmd), csio_ct_expl(cmd));
        }

        /* Send FDMI cmd to de-register any Port attributes if registered
         * before
         */

        /* Prepare FDMI DPRT cmd */
        memset(cmd, 0, FC_CT_HDR_LEN);
        csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
        len = FC_CT_HDR_LEN;
        port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
        memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
        len += sizeof(*port_name);

        /* Submit FDMI request */
        spin_lock_irqsave(&hw->lock, flags);
        if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
                                    FCOE_CT, &fdmi_req->dma_buf, len)) {
                CSIO_INC_STATS(ln, n_fdmi_err);
                csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
        }
        spin_unlock_irqrestore(&hw->lock, flags);
}

/**
 * csio_ln_fdmi_start - Start an FDMI request.
 * @ln: lnode
 * @context: session context
 *
 * Issued with lock held.
 */
int
csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
{
        struct csio_ioreq *fdmi_req;
        struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
        void *cmd;
        struct fc_fdmi_hba_identifier *hbaid;
        uint32_t len;

        if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
                return -EPROTONOSUPPORT;

        if (!csio_is_rnode_ready(fdmi_rn))
                CSIO_INC_STATS(ln, n_fdmi_err);

        /* Send FDMI cmd to de-register any HBA attributes if registered
         * before
         */

        fdmi_req = ln->mgmt_req;
        fdmi_req->lnode = ln;
        fdmi_req->rnode = fdmi_rn;

        /* Prepare FDMI DHBA cmd */
        cmd = fdmi_req->dma_buf.vaddr;
        memset(cmd, 0, FC_CT_HDR_LEN);
        csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
        len = FC_CT_HDR_LEN;

        hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
        memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
        len += sizeof(*hbaid);

        /* Submit FDMI request */
        if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
                                    FCOE_CT, &fdmi_req->dma_buf, len)) {
                CSIO_INC_STATS(ln, n_fdmi_err);
                csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
        }

        return 0;
}
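/*
 * Taken together, the callbacks above implement FDMI registration as a
 * chain of CT requests: DHBA -> DPRT -> RHBA -> RPA, with each step issued
 * from the completion handler of the previous one.
 */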

/*
 * csio_ln_vnp_read_cbfn - vnp read completion handler.
 * @hw: HW module
 * @mbp: Mailbox command/response.
 *
 * Reads vnp response and updates ln parameters.
 */
static void
csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
        struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
        struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
        struct fc_els_csp *csp;
        struct fc_els_cssp *clsp;
        enum fw_retval retval;
        __be32 nport_id = 0;

        retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
        if (retval != FW_SUCCESS) {
                csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
                mempool_free(mbp, hw->mb_mempool);
                return;
        }

        spin_lock_irq(&hw->lock);

        memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
        memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
        ln->nport_id = ntohl(nport_id);
        ln->nport_id = ln->nport_id >> 8;
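        /*
         * The VN-Port MAC is assumed to be an FPMA address whose last
         * three bytes carry the 24-bit N_Port ID; the copy above lands
         * them in the upper bytes of nport_id, hence the ntohl() and the
         * shift by 8 to get the ID in host order.
         */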

        /* Update WWNs */
        /*
         * This may look like a duplication of what csio_fcoe_enable_link()
         * does, but is absolutely necessary if the vnpi changes between
         * a FCOE LINK UP and FCOE LINK DOWN.
         */
        memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
        memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

        /* Copy common sparam */
        csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
        ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
        ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
        ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
        ln->ln_sparm.csp.sp_features = csp->sp_features;
        ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
        ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
        ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;

        /* Copy word 0 & word 1 of class sparam */
        clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
        ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
        ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
        ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
        ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;

        spin_unlock_irq(&hw->lock);

        mempool_free(mbp, hw->mb_mempool);

        /* Send an event to update local attribs */
        csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
}

/*
 * csio_ln_vnp_read - Read vnp params.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_vnp_read(struct csio_lnode *ln,
                 void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
        struct csio_hw *hw = ln->hwp;
        struct csio_mb *mbp;

        /* Allocate Mbox request */
        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        /* Prepare VNP Command */
        csio_fcoe_vnp_read_init_mb(ln, mbp,
                                   CSIO_MB_DEFAULT_TMO,
                                   ln->fcf_flowid,
                                   ln->vnp_flowid,
                                   cbfn);

        /* Issue MBOX cmd */
        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        return 0;
}

/*
 * csio_fcoe_enable_link - Enable fcoe link.
 * @ln: lnode
 * @enable: enable/disable
 * Issued with lock held.
 * Issues mbox cmd to bring up FCOE link on port associated with given ln.
 */
static int
csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
{
        struct csio_hw *hw = ln->hwp;
        struct csio_mb *mbp;
        enum fw_retval retval;
        uint8_t portid;
        uint8_t sub_op;
        struct fw_fcoe_link_cmd *lcmd;
        int i;

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        portid = ln->portid;
        sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;

        csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
                 sub_op ? "UP" : "DOWN", portid);

        csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
                                          portid, sub_op, 0, 0, 0, NULL);

        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
                         portid);
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        retval = csio_mb_fw_retval(mbp);
        if (retval != FW_SUCCESS) {
                csio_err(hw,
                         "FCOE LINK %s cmd on port[%d] failed with "
                         "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        if (!enable)
                goto out;

        lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;

        memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
        memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);

        for (i = 0; i < CSIO_MAX_PPORTS; i++)
                if (hw->pport[i].portid == portid)
                        memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);

out:
        mempool_free(mbp, hw->mb_mempool);
        return 0;
}

/*
 * csio_ln_read_fcf_cbfn - Read fcf parameters
 * @hw: HW module
 * @mbp: Mailbox command/response.
 *
 * Reads fcf response and updates ln fcf information.
 */
static void
csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
        struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
        struct csio_fcf_info *fcf_info;
        struct fw_fcoe_fcf_cmd *rsp =
                        (struct fw_fcoe_fcf_cmd *)(mbp->mb);
        enum fw_retval retval;

        retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
        if (retval != FW_SUCCESS) {
                csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
                            retval);
                mempool_free(mbp, hw->mb_mempool);
                return;
        }

        spin_lock_irq(&hw->lock);
        fcf_info = ln->fcfinfo;
        fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
                                        ntohs(rsp->priority_pkd));
        fcf_info->vf_id = ntohs(rsp->vf_id);
        fcf_info->vlan_id = rsp->vlan_id;
        fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
        fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
        fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
        fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
        fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
        fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
        fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
        memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
        memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
        memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
        memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
        memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));

        spin_unlock_irq(&hw->lock);

        mempool_free(mbp, hw->mb_mempool);
}

/*
 * csio_ln_read_fcf_entry - Read fcf entry.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_read_fcf_entry(struct csio_lnode *ln,
                       void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
        struct csio_hw *hw = ln->hwp;
        struct csio_mb *mbp;

        mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
        if (!mbp) {
                CSIO_INC_STATS(hw, n_err_nomem);
                return -ENOMEM;
        }

        /* Get FCoE FCF information */
        csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
                                   ln->portid, ln->fcf_flowid, cbfn);

        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "failed to issue FCOE FCF cmd\n");
                mempool_free(mbp, hw->mb_mempool);
                return -EINVAL;
        }

        return 0;
}

/*
 * csio_handle_link_up - Logical Linkup event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none.
 *
 * This event is received from FW, when virtual link is established between
 * Physical port[ENode] and FCF. If it is a new vnpi, then a local node
 * object is created on this FCF and set to [ONLINE] state.
 * Lnode waits for FW_RDEV_CMD event to be received indicating that
 * Fabric login is completed and lnode moves to [READY] state.
 *
 * This called with hw lock held
 */
static void
csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
                    uint32_t vnpi)
{
        struct csio_lnode *ln = NULL;

        /* Lookup lnode based on vnpi */
        ln = csio_ln_lookup_by_vnpi(hw, vnpi);
        if (!ln) {
                /* Pick lnode based on portid */
                ln = csio_ln_lookup_by_portid(hw, portid);
                if (!ln) {
                        csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
                                 portid);
                        CSIO_DB_ASSERT(0);
                        return;
                }

                /* Check if lnode has valid vnp flowid */
                if (ln->vnp_flowid != CSIO_INVALID_IDX) {
                        /* New VN-Port */
                        spin_unlock_irq(&hw->lock);
                        ln = csio_lnode_alloc(hw);
                        spin_lock_irq(&hw->lock);
                        if (!ln) {
                                csio_err(hw,
                                         "failed to allocate fcoe lnode"
                                         "for port:%d vnpi:x%x\n",
                                         portid, vnpi);
                                CSIO_DB_ASSERT(0);
                                return;
                        }
                        ln->portid = portid;
                }
                ln->vnp_flowid = vnpi;
                ln->dev_num &= ~0xFFFF;
                ln->dev_num |= vnpi;
        }

        /* Initialize fcfi */
        ln->fcf_flowid = fcfi;

        csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);

        CSIO_INC_STATS(ln, n_link_up);

        /* Send LINKUP event to SM */
        csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
}

/*
 * csio_post_event_rns
 * @ln - FCOE lnode
 * @evt - Given rnode event
 * Returns - none
 *
 * Posts given rnode event to all FCOE rnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This called with hw lock held
 */
static void
csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
{
        struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
        struct list_head *tmp, *next;
        struct csio_rnode *rn;

        list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
                rn = (struct csio_rnode *) tmp;
                csio_post_event(&rn->sm, evt);
        }
}

/*
 * csio_cleanup_rns
 * @ln - FCOE lnode
 * Returns - none
 *
 * Frees all FCOE rnodes connected with given Lnode.
 *
 * This called with hw lock held
 */
static void
csio_cleanup_rns(struct csio_lnode *ln)
{
        struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
        struct list_head *tmp, *next_rn;
        struct csio_rnode *rn;

        list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
                rn = (struct csio_rnode *) tmp;
                csio_put_rnode(ln, rn);
        }

}

/*
 * csio_post_event_lns
 * @ln - FCOE lnode
 * @evt - Given lnode event
 * Returns - none
 *
 * Posts given lnode event to all FCOE lnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This called with hw lock held
 */
static void
csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
{
        struct list_head *tmp;
        struct csio_lnode *cln, *sln;

        /* If NPIV lnode, send evt only to that and return */
        if (csio_is_npiv_ln(ln)) {
                csio_post_event(&ln->sm, evt);
                return;
        }

        sln = ln;
        /* Traverse children lnodes list and send evt */
        list_for_each(tmp, &sln->cln_head) {
                cln = (struct csio_lnode *) tmp;
                csio_post_event(&cln->sm, evt);
        }

        /* Send evt to parent lnode */
        csio_post_event(&ln->sm, evt);
}

/*
 * csio_ln_down - Local nport is down
 * @ln - FCOE Lnode
 * Returns - none
 *
 * Sends LINK_DOWN events to Lnode and its associated NPIVs lnodes.
 *
 * This called with hw lock held
 */
static void
csio_ln_down(struct csio_lnode *ln)
{
        csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
}

/*
 * csio_handle_link_down - Logical Linkdown event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none
 *
 * This event is received from FW, when virtual link goes down between
 * Physical port[ENode] and FCF. Lnode and its associated NPIVs lnode hosted on
 * this vnpi[VN-Port] will be de-instantiated.
 *
 * This called with hw lock held
 */
static void
csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
                      uint32_t vnpi)
{
        struct csio_fcf_info *fp;
        struct csio_lnode *ln;

        /* Lookup lnode based on vnpi */
        ln = csio_ln_lookup_by_vnpi(hw, vnpi);
        if (ln) {
                fp = ln->fcfinfo;
                CSIO_INC_STATS(ln, n_link_down);

                /* Warn if linkdown received when lnode is not in ready state */
                if (!csio_is_lnode_ready(ln)) {
                        csio_ln_warn(ln,
                                     "warn: FCOE link is already in offline "
                                     "Ignoring Fcoe linkdown event on portid %d\n",
                                     portid);
                        CSIO_INC_STATS(ln, n_evt_drop);
                        return;
                }

                /* Verify portid */
                if (fp->portid != portid) {
                        csio_ln_warn(ln,
                                     "warn: FCOE linkdown recv with "
                                     "invalid port %d\n", portid);
                        CSIO_INC_STATS(ln, n_evt_drop);
                        return;
                }

                /* verify fcfi */
                if (ln->fcf_flowid != fcfi) {
                        csio_ln_warn(ln,
                                     "warn: FCOE linkdown recv with "
                                     "invalid fcfi x%x\n", fcfi);
                        CSIO_INC_STATS(ln, n_evt_drop);
                        return;
                }

                csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);

                /* Send LINK_DOWN event to lnode s/m */
                csio_ln_down(ln);

                return;
        } else {
                csio_warn(hw,
                          "warn: FCOE linkdown recv with invalid vnpi x%x\n",
                          vnpi);
                CSIO_INC_STATS(hw, n_evt_drop);
        }
}

/*
 * csio_is_lnode_ready - Checks FCOE lnode is in ready state.
 * @ln: Lnode module
 *
 * Returns True if FCOE lnode is in ready state.
 */
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
        return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
}

/*****************************************************************************/
/* START: Lnode SM                                                           */
/*****************************************************************************/
/*
 * csio_lns_uninit - The request in uninit state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "uninit" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
{
        struct csio_hw *hw = csio_lnode_to_hw(ln);
        struct csio_lnode *rln = hw->rln;
        int rv;

        CSIO_INC_STATS(ln, n_evt_sm[evt]);
        switch (evt) {
        case CSIO_LNE_LINKUP:
                csio_set_state(&ln->sm, csio_lns_online);
                /* Read FCF only for physical lnode */
                if (csio_is_phys_ln(ln)) {
                        rv = csio_ln_read_fcf_entry(ln,
                                        csio_ln_read_fcf_cbfn);
                        if (rv != 0) {
                                /* TODO: Send HW RESET event */
                                CSIO_INC_STATS(ln, n_err);
                                break;
                        }

                        /* Add FCF record */
                        list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
                }

                rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
                if (rv != 0) {
                        /* TODO: Send HW RESET event */
                        CSIO_INC_STATS(ln, n_err);
                }
                break;

        case CSIO_LNE_DOWN_LINK:
                break;

        default:
                csio_ln_dbg(ln,
                            "unexp ln event %d recv from did:x%x in "
                            "ln state[uninit].\n", evt, ln->nport_id);
                CSIO_INC_STATS(ln, n_evt_unexp);
                break;
        } /* switch event */
}

1157*4882a593Smuzhiyun /*
1158*4882a593Smuzhiyun * csio_lns_online - The request in online state.
1159*4882a593Smuzhiyun * @ln - FCOE lnode.
1160*4882a593Smuzhiyun * @evt - Event to be processed.
1161*4882a593Smuzhiyun *
1162*4882a593Smuzhiyun * Process the given lnode event which is currently in "online" state.
1163*4882a593Smuzhiyun * Invoked with HW lock held.
1164*4882a593Smuzhiyun * Return - none.
1165*4882a593Smuzhiyun */
1166*4882a593Smuzhiyun static void
csio_lns_online(struct csio_lnode * ln,enum csio_ln_ev evt)1167*4882a593Smuzhiyun csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
1168*4882a593Smuzhiyun {
1169*4882a593Smuzhiyun struct csio_hw *hw = csio_lnode_to_hw(ln);
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun CSIO_INC_STATS(ln, n_evt_sm[evt]);
1172*4882a593Smuzhiyun switch (evt) {
1173*4882a593Smuzhiyun case CSIO_LNE_LINKUP:
1174*4882a593Smuzhiyun csio_ln_warn(ln,
1175*4882a593Smuzhiyun "warn: FCOE link is up already "
1176*4882a593Smuzhiyun "Ignoring linkup on port:%d\n", ln->portid);
1177*4882a593Smuzhiyun CSIO_INC_STATS(ln, n_evt_drop);
1178*4882a593Smuzhiyun break;
1179*4882a593Smuzhiyun
1180*4882a593Smuzhiyun case CSIO_LNE_FAB_INIT_DONE:
1181*4882a593Smuzhiyun csio_set_state(&ln->sm, csio_lns_ready);
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun spin_unlock_irq(&hw->lock);
1184*4882a593Smuzhiyun csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
1185*4882a593Smuzhiyun spin_lock_irq(&hw->lock);
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun break;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun case CSIO_LNE_LINK_DOWN:
1190*4882a593Smuzhiyun case CSIO_LNE_DOWN_LINK:
1191*4882a593Smuzhiyun csio_set_state(&ln->sm, csio_lns_uninit);
1192*4882a593Smuzhiyun if (csio_is_phys_ln(ln)) {
1193*4882a593Smuzhiyun /* Remove FCF entry */
1194*4882a593Smuzhiyun list_del_init(&ln->fcfinfo->list);
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun break;
1197*4882a593Smuzhiyun
1198*4882a593Smuzhiyun default:
1199*4882a593Smuzhiyun csio_ln_dbg(ln,
1200*4882a593Smuzhiyun "unexp ln event %d recv from did:x%x in "
1201*4882a593Smuzhiyun "ln state[uninit].\n", evt, ln->nport_id);
1202*4882a593Smuzhiyun CSIO_INC_STATS(ln, n_evt_unexp);
1203*4882a593Smuzhiyun
1204*4882a593Smuzhiyun break;
1205*4882a593Smuzhiyun } /* switch event */
1206*4882a593Smuzhiyun }

/*
 * csio_lns_ready - The lnode in ready state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "ready" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_FAB_INIT_DONE:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_LINK_DOWN:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		/* The host needs to issue aborts in case the FW has not
		 * returned the WRs with status "ABORTED".
		 */
		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	case CSIO_LNE_LOGO:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}

/*
 * csio_lns_offline - The lnode in offline state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "offline" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_LINK_DOWN:
	case CSIO_LNE_DOWN_LINK:
	case CSIO_LNE_LOGO:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[offline].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[offline]\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}

/*****************************************************************************/
/* END: Lnode SM */
/*****************************************************************************/
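
/*
 * Summary of the lnode SM transitions implemented above (derived from the
 * handlers; listed here only as an editorial aid):
 *
 *	uninit  --LINKUP-------------------> online
 *	online  --FAB_INIT_DONE------------> ready
 *	online  --LINK_DOWN/DOWN_LINK------> uninit
 *	ready   --LINK_DOWN/DOWN_LINK/LOGO-> offline
 *	ready   --CLOSE--------------------> uninit
 *	offline --LINKUP-------------------> online
 *	offline --CLOSE--------------------> uninit
 */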

static void
csio_free_fcfinfo(struct kref *kref)
{
	struct csio_fcf_info *fcfinfo = container_of(kref,
					struct csio_fcf_info, kref);
	kfree(fcfinfo);
}
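
/*
 * Illustrative sketch only: csio_free_fcfinfo() is the kref release
 * callback for the FCF record. As csio_ln_init()/csio_ln_exit() further
 * below show, the record is shared between a physical lnode and its NPIV
 * children with plain get/put pairs:
 *
 *	kref_init(&ln->fcfinfo->kref);			 // owner
 *	kref_get(&pln->fcfinfo->kref);			 // NPIV child
 *	kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo); // last put frees
 */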

/* Helper routines for attributes */
/*
 * csio_lnode_state_to_str - Get current state of FCOE lnode.
 * @ln - lnode
 * @str - state of lnode.
 *
 */
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
		strcpy(str, "UNINIT");
		return;
	}
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
		strcpy(str, "READY");
		return;
	}
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
		strcpy(str, "OFFLINE");
		return;
	}
	strcpy(str, "UNKNOWN");
} /* csio_lnode_state_to_str */
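
/*
 * Illustrative usage only: the caller supplies the buffer, which must be
 * large enough for the longest state string ("OFFLINE"/"UNKNOWN" plus the
 * terminating NUL). The buffer size below is an editorial assumption.
 *
 *	int8_t state[16];
 *
 *	csio_lnode_state_to_str(ln, state);
 *	csio_ln_dbg(ln, "lnode state: %s\n", state);
 */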


int
csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
			struct fw_fcoe_port_stats *port_stats)
{
	struct csio_mb *mbp;
	struct fw_fcoe_port_cmd_params portparams;
	enum fw_retval retval;
	int idx;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "FCoE port params command out of memory!\n");
		return -EINVAL;
	}
	portparams.portid = portid;

	for (idx = 1; idx <= 3; idx++) {
		portparams.idx = (idx - 1) * 6 + 1;
		portparams.nstats = 6;
		if (idx == 3)
			portparams.nstats = 4;
		csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
						  &portparams, NULL);
		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FCoE port params failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}
		csio_mb_process_portparams_rsp(hw, mbp, &retval,
					       &portparams, port_stats);
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
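
/*
 * Illustrative usage only: the function fills the caller-provided
 * fw_fcoe_port_stats structure for one physical port. The port id used
 * below is an arbitrary example value.
 *
 *	struct fw_fcoe_port_stats stats;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	if (csio_get_phy_port_stats(hw, 0, &stats) != 0)
 *		csio_err(hw, "failed to read port 0 statistics\n");
 */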

/*
 * csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
 * @wr - WR.
 * @len - WR len.
 * This handler is invoked when an outstanding mgmt WR is completed.
 * It is invoked in the context of the FW event worker thread for every
 * mgmt event received.
 * Return - none.
 */

static void
csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
{
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_ioreq *io_req = NULL;
	struct fw_fcoe_els_ct_wr *wr_cmd;


	wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;

	if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
		csio_err(mgmtm->hw,
			 "Invalid ELS CT WR length recvd, len:%x\n", len);
		mgmtm->stats.n_err++;
		return;
	}

	io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
	io_req->wr_status = csio_wr_status(wr_cmd);

	/* lookup ioreq exists in our active Q */
	spin_lock_irq(&hw->lock);
	if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
		csio_err(mgmtm->hw,
			 "Error- Invalid IO handle recv in WR. handle: %p\n",
			 io_req);
		mgmtm->stats.n_err++;
		spin_unlock_irq(&hw->lock);
		return;
	}

	mgmtm = csio_hw_to_mgmtm(hw);

	/* Dequeue from active queue */
	list_del_init(&io_req->sm.sm_list);
	mgmtm->stats.n_active--;
	spin_unlock_irq(&hw->lock);

	/* io_req will be freed by completion handler */
	if (io_req->io_cbfn)
		io_req->io_cbfn(hw, io_req);
}
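
/*
 * Editorial note with a small illustration: the lookup above works because
 * the submit path stores the request pointer in the WR cookie, i.e. in
 * csio_ln_mgmt_submit_req()/csio_ln_prep_ecwr() further below:
 *
 *	io_req->fw_handle = (uintptr_t)io_req;	// on submit
 *	wr->cookie = io_req->fw_handle;
 *
 * and this handler simply casts the cookie back to the csio_ioreq pointer
 * before validating it against the active queue.
 */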

/**
 * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
 * @hw: HW module
 * @cpl_op: CPL opcode
 * @cmd: FW cmd/WR.
 *
 * Process received FCoE cmd/WR event from FW.
 */
void
csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
{
	struct csio_lnode *ln;
	struct csio_rnode *rn;
	uint8_t portid, opcode = *(uint8_t *)cmd;
	struct fw_fcoe_link_cmd *lcmd;
	struct fw_wr_hdr *wr;
	struct fw_rdev_wr *rdev_wr;
	enum fw_fcoe_link_status lstatus;
	uint32_t fcfi, rdev_flowid, vnpi;
	enum csio_ln_ev evt;

	if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {

		lcmd = (struct fw_fcoe_link_cmd *)cmd;
		lstatus = lcmd->lstatus;
		portid = FW_FCOE_LINK_CMD_PORTID_GET(
					ntohl(lcmd->op_to_portid));
		fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
		vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));

		if (lstatus == FCOE_LINKUP) {

			/* HW lock here */
			spin_lock_irq(&hw->lock);
			csio_handle_link_up(hw, portid, fcfi, vnpi);
			spin_unlock_irq(&hw->lock);
			/* HW unlock here */

		} else if (lstatus == FCOE_LINKDOWN) {

			/* HW lock here */
			spin_lock_irq(&hw->lock);
			csio_handle_link_down(hw, portid, fcfi, vnpi);
			spin_unlock_irq(&hw->lock);
			/* HW unlock here */
		} else {
			csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
				  lcmd->lstatus);
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else if (cpl_op == CPL_FW6_PLD) {
		wr = (struct fw_wr_hdr *) (cmd + 4);
		if (FW_WR_OP_G(be32_to_cpu(wr->hi))
			== FW_RDEV_WR) {

			rdev_wr = (struct fw_rdev_wr *) (cmd + 4);

			rdev_flowid = FW_RDEV_WR_FLOWID_GET(
					ntohl(rdev_wr->alloc_to_len16));
			vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
				    ntohl(rdev_wr->flags_to_assoc_flowid));

			csio_dbg(hw,
				 "FW_RDEV_WR: flowid:x%x ev_cause:x%x "
				 "vnpi:0x%x\n", rdev_flowid,
				 rdev_wr->event_cause, vnpi);

			if (rdev_wr->protocol != PROT_FCOE) {
				csio_err(hw,
					 "FW_RDEV_WR: invalid proto:x%x "
					 "received with flowid:x%x\n",
					 rdev_wr->protocol,
					 rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				return;
			}

			/* HW lock here */
			spin_lock_irq(&hw->lock);
			ln = csio_ln_lookup_by_vnpi(hw, vnpi);
			if (!ln) {
				csio_err(hw,
					 "FW_RDEV_WR: invalid vnpi:x%x received "
					 "with flowid:x%x\n", vnpi, rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				goto out_pld;
			}

			rn = csio_confirm_rnode(ln, rdev_flowid,
					&rdev_wr->u.fcoe_rdev);
			if (!rn) {
				csio_ln_dbg(ln,
					    "Failed to confirm rnode "
					    "for flowid:x%x\n", rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				goto out_pld;
			}

			/* save previous event for debugging */
			ln->prev_evt = ln->cur_evt;
			ln->cur_evt = rdev_wr->event_cause;
			CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);

			/* Translate all the fabric events to lnode SM events */
			evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
			if (evt) {
				csio_ln_dbg(ln,
					    "Posting event to lnode event:%d "
					    "cause:%d flowid:x%x\n", evt,
					    rdev_wr->event_cause, rdev_flowid);
				csio_post_event(&ln->sm, evt);
			}

			/* Handover event to rn SM here. */
			csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
out_pld:
			spin_unlock_irq(&hw->lock);
			return;
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else if (cpl_op == CPL_FW6_MSG) {
		wr = (struct fw_wr_hdr *) (cmd);
		if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
			csio_ln_mgmt_wr_handler(hw, wr,
					sizeof(struct fw_fcoe_els_ct_wr));
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else {
		csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
		CSIO_INC_STATS(hw, n_cpl_unexp);
	}
}

/**
 * csio_lnode_start - Kickstart lnode discovery.
 * @ln: lnode
 *
 * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
 */
int
csio_lnode_start(struct csio_lnode *ln)
{
	int rv = 0;

	if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
		rv = csio_fcoe_enable_link(ln, 1);
		ln->flags |= CSIO_LNF_LINK_ENABLE;
	}

	return rv;
}

/**
 * csio_lnode_stop - Stop the lnode.
 * @ln: lnode
 *
 * This routine is invoked by HW module to stop lnode and its associated NPIV
 * lnodes.
 */
void
csio_lnode_stop(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
	if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
		csio_fcoe_enable_link(ln, 0);
		ln->flags &= ~CSIO_LNF_LINK_ENABLE;
	}
	csio_ln_dbg(ln, "stopping ln :%p\n", ln);
}

/**
 * csio_lnode_close - Close an lnode.
 * @ln: lnode
 *
 * This routine is invoked by HW module to close an lnode and its
 * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
 * set to uninitialized state.
 */
void
csio_lnode_close(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_CLOSE);
	if (csio_is_phys_ln(ln))
		ln->vnp_flowid = CSIO_INVALID_IDX;

	csio_ln_dbg(ln, "closed ln :%p\n", ln);
}

/*
 * csio_ln_prep_ecwr - Prepare ELS/CT WR.
 * @io_req - IO request.
 * @wr_len - WR len
 * @immd_len - WR immediate data
 * @sub_op - Sub opcode
 * @sid - source portid.
 * @did - destination portid
 * @flow_id - flowid
 * @fw_wr - ELS/CT WR to be prepared.
 * Returns: 0 - on success
 */
static int
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
		  uint32_t immd_len, uint8_t sub_op, uint32_t sid,
		  uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
{
	struct fw_fcoe_els_ct_wr *wr;
	__be32 port_id;

	wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) |
				     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));

	wr_len = DIV_ROUND_UP(wr_len, 16);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) |
				       FW_WR_LEN16_V(wr_len));
	wr->els_ct_type = sub_op;
	wr->ctl_pri = 0;
	wr->cp_en_class = 0;
	wr->cookie = io_req->fw_handle;
	wr->iqid = cpu_to_be16(csio_q_physiqid(
					io_req->lnode->hwp, io_req->iq_idx));
	wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
	wr->tmo_val = (uint8_t) io_req->tmo;
	port_id = htonl(sid);
	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
	port_id = htonl(did);
	memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
	wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
	return 0;
}

/*
 * csio_ln_mgmt_submit_wr - Post elsct work request.
 * @mgmtm - mgmtm
 * @io_req - io request.
 * @sub_op - ELS or CT request type
 * @pld - Dma Payload buffer
 * @pld_len - Payload len
 * Prepares ELSCT Work request and sends it to FW.
 * Returns: 0 - on success
 */
static int
csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
		       uint8_t sub_op, struct csio_dma_buf *pld,
		       uint32_t pld_len)
{
	struct csio_wr_pair wrp;
	struct csio_lnode *ln = io_req->lnode;
	struct csio_rnode *rn = io_req->rnode;
	struct csio_hw *hw = mgmtm->hw;
	uint8_t fw_wr[64];
	struct ulptx_sgl dsgl;
	uint32_t wr_size = 0;
	uint8_t im_len = 0;
	uint32_t wr_off = 0;

	int ret = 0;

	/* Calculate WR Size for this ELS REQ */
	wr_size = sizeof(struct fw_fcoe_els_ct_wr);

	/* Send as immediate data if pld < 256 */
	if (pld_len < 256) {
		wr_size += ALIGN(pld_len, 8);
		im_len = (uint8_t)pld_len;
	} else
		wr_size += sizeof(struct ulptx_sgl);

	/* Roundup WR size in units of 16 bytes */
	wr_size = ALIGN(wr_size, 16);

	/* Get WR to send ELS REQ */
	ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
	if (ret != 0) {
		csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
			 io_req, ret);
		return ret;
	}

	/* Prepare Generic WR used by all ELS/CT cmd */
	csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
			  ln->nport_id, rn->nport_id,
			  csio_rn_flowid(rn),
			  &fw_wr[0]);

	/* Copy ELS/CT WR CMD */
	csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
			    sizeof(struct fw_fcoe_els_ct_wr));
	wr_off += sizeof(struct fw_fcoe_els_ct_wr);

	/* Copy payload to Immediate section of WR */
	if (im_len)
		csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
	else {
		/* Program DSGL to dma payload */
		dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				      ULPTX_MORE_F | ULPTX_NSGE_V(1));
		dsgl.len0 = cpu_to_be32(pld_len);
		dsgl.addr0 = cpu_to_be64(pld->paddr);
		csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
				    sizeof(struct ulptx_sgl));
	}

	/* Issue work request to xmit ELS/CT req to FW */
	csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
	return ret;
}
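
/*
 * Editorial note, worked through as a small example: for a payload of,
 * say, 120 bytes the WR above carries the data inline, so
 *
 *	wr_size = sizeof(struct fw_fcoe_els_ct_wr) + ALIGN(120, 8)
 *
 * rounded up to a multiple of 16; for payloads of 256 bytes or more the
 * WR instead carries a single ulptx_sgl entry (len0/addr0) that points at
 * the caller's DMA buffer. The 120-byte figure is only an illustration.
 */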

/*
 * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
 * @io_req - IO Request
 * @io_cbfn - Completion handler.
 * @req_type - ELS or CT request type
 * @pld - Dma Payload buffer
 * @pld_len - Payload len
 *
 *
 * This API is used to submit a management ELS/CT request.
 * It is called with the HW lock held.
 * Returns: 0 - on success
 *	    -ENOMEM - on error.
 */
static int
csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
		enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
		uint32_t pld_len)
{
	struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;

	BUG_ON(pld_len > pld->len);

	io_req->io_cbfn = io_cbfn;	/* Upper layer callback handler */
	io_req->fw_handle = (uintptr_t) (io_req);
	io_req->eq_idx = mgmtm->eq_idx;
	io_req->iq_idx = mgmtm->iq_idx;

	rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
	if (rv == 0) {
		list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
		mgmtm->stats.n_active++;
	}
	return rv;
}
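
/*
 * Illustrative usage only: a caller that already holds the HW lock would
 * submit a management request roughly as below. The completion callback
 * name and the req_type/pld_len values are placeholders; the FDMI code in
 * this file (csio_ln_fdmi_init() and friends) sets up ln->mgmt_req and its
 * DMA buffer in exactly this shape.
 *
 *	io_req->lnode = ln;
 *	io_req->rnode = rn;
 *	rv = csio_ln_mgmt_submit_req(io_req, example_mgmt_done, req_type,
 *				     &io_req->dma_buf, pld_len);
 *	if (rv != 0)
 *		csio_ln_err(ln, "mgmt request submission failed\n");
 */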

/*
 * csio_ln_fdmi_init - FDMI Init entry point.
 * @ln: lnode
 */
static int
csio_ln_fdmi_init(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_dma_buf *dma_buf;

	/* Allocate MGMT request required for FDMI */
	ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
	if (!ln->mgmt_req) {
		csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Allocate Dma buffers for FDMI response Payload */
	dma_buf = &ln->mgmt_req->dma_buf;
	dma_buf->len = 2048;
	dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
					    &dma_buf->paddr, GFP_KERNEL);
	if (!dma_buf->vaddr) {
		csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
		kfree(ln->mgmt_req);
		ln->mgmt_req = NULL;
		return -ENOMEM;
	}

	ln->flags |= CSIO_LNF_FDMI_ENABLE;
	return 0;
}

/*
 * csio_ln_fdmi_exit - FDMI exit entry point.
 * @ln: lnode
 */
static int
csio_ln_fdmi_exit(struct csio_lnode *ln)
{
	struct csio_dma_buf *dma_buf;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (!ln->mgmt_req)
		return 0;

	dma_buf = &ln->mgmt_req->dma_buf;
	if (dma_buf->vaddr)
		dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
				  dma_buf->paddr);

	kfree(ln->mgmt_req);
	return 0;
}

int
csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
	       unsigned long time, unsigned long max_scan_ticks,
	       unsigned long delta_scan_ticks)
{
	int rv = 0;

	if (time >= max_scan_ticks)
		return 1;

	if (!ln->tgt_scan_tick)
		ln->tgt_scan_tick = ticks;

	if ((ticks - ln->tgt_scan_tick) >= delta_scan_ticks) {
		if (!ln->last_scan_ntgts)
			ln->last_scan_ntgts = ln->n_scsi_tgts;
		else {
			if (ln->last_scan_ntgts == ln->n_scsi_tgts)
				return 1;

			ln->last_scan_ntgts = ln->n_scsi_tgts;
		}
		ln->tgt_scan_tick = ticks;
	}
	return rv;
}
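
/*
 * Illustrative usage only: a scan loop would keep calling csio_scan_done()
 * until it reports completion, i.e. until the overall scan window expires
 * or the discovered-target count stops changing between delta intervals.
 * The tick values below are placeholders for whatever the transport glue
 * actually passes in.
 *
 *	if (csio_scan_done(ln, jiffies, elapsed_ticks,
 *			   max_scan_ticks, delta_scan_ticks))
 *		... finish the SCSI target scan for this lnode ...
 *	else
 *		... re-arm the scan timer and check again later ...
 */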

/*
 * csio_notify_lnodes:
 * @hw: HW module
 * @note: Notification
 *
 * Called from the HW SM to fan out notifications to the
 * Lnode SM. Since the HW SM is entered with lock held,
 * there is no need to hold locks here.
 *
 */
void
csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
{
	struct list_head *tmp;
	struct csio_lnode *ln;

	csio_dbg(hw, "Notifying all nodes of event %d\n", note);

	/* Traverse sibling lnodes list and send evt */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;

		switch (note) {
		case CSIO_LN_NOTIFY_HWREADY:
			csio_lnode_start(ln);
			break;

		case CSIO_LN_NOTIFY_HWRESET:
		case CSIO_LN_NOTIFY_HWREMOVE:
			csio_lnode_close(ln);
			break;

		case CSIO_LN_NOTIFY_HWSTOP:
			csio_lnode_stop(ln);
			break;

		default:
			break;

		}
	}
}
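
/*
 * Illustrative sketch only: the HW state machine (outside this file) is
 * expected to fan events out to every top-level lnode through this helper,
 * e.g. once the adapter is fully initialised:
 *
 *	csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
 *
 * which, per the switch above, kickstarts discovery on each sibling lnode
 * via csio_lnode_start(); HWSTOP maps to csio_lnode_stop() and
 * HWRESET/HWREMOVE map to csio_lnode_close().
 */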

/*
 * csio_disable_lnodes:
 * @hw: HW module
 * @portid: port id
 * @disable: disable/enable flag.
 * If disable=1, disables all lnodes hosted on the given physical port,
 * otherwise enables all the lnodes on the given physical port.
 * This routine needs to be called with the hw lock held.
 */
void
csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
{
	struct list_head *tmp;
	struct csio_lnode *ln;

	csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);

	/* Traverse sibling lnodes list and send evt */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;
		if (ln->portid != portid)
			continue;

		if (disable)
			csio_lnode_stop(ln);
		else
			csio_lnode_start(ln);
	}
}

/*
 * csio_ln_init - Initialize an lnode.
 * @ln: lnode
 *
 */
static int
csio_ln_init(struct csio_lnode *ln)
{
	int rv = -EINVAL;
	struct csio_lnode *pln;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	csio_init_state(&ln->sm, csio_lns_uninit);
	ln->vnp_flowid = CSIO_INVALID_IDX;
	ln->fcf_flowid = CSIO_INVALID_IDX;

	if (csio_is_root_ln(ln)) {

		/* This is the lnode used during initialization */

		ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
		if (!ln->fcfinfo) {
			csio_ln_err(ln, "Failed to alloc FCF record\n");
			CSIO_INC_STATS(hw, n_err_nomem);
			goto err;
		}

		INIT_LIST_HEAD(&ln->fcf_lsthead);
		kref_init(&ln->fcfinfo->kref);

		if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
			goto err;

	} else { /* Either a non-root physical or a virtual lnode */

		/*
		 * The rest is common for non-root physical and NPIV lnodes.
		 * Just get references to all other modules.
		 */

		if (csio_is_npiv_ln(ln)) {
			/* NPIV */
			pln = csio_parent_lnode(ln);
			kref_get(&pln->fcfinfo->kref);
			ln->fcfinfo = pln->fcfinfo;
		} else {
			/* Another non-root physical lnode (FCF) */
			ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
					      GFP_KERNEL);
			if (!ln->fcfinfo) {
				csio_ln_err(ln, "Failed to alloc FCF info\n");
				CSIO_INC_STATS(hw, n_err_nomem);
				goto err;
			}

			kref_init(&ln->fcfinfo->kref);

			if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
				goto err;
		}

	} /* if (!csio_is_root_ln(ln)) */

	return 0;
err:
	return rv;
}

static void
csio_ln_exit(struct csio_lnode *ln)
{
	struct csio_lnode *pln;

	csio_cleanup_rns(ln);
	if (csio_is_npiv_ln(ln)) {
		pln = csio_parent_lnode(ln);
		kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
	} else {
		kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
		if (csio_fdmi_enable)
			csio_ln_fdmi_exit(ln);
	}
	ln->fcfinfo = NULL;
}

/*
 * csio_lnode_init - Initialize the members of an lnode.
 * @ln: lnode
 */
int
csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
		struct csio_lnode *pln)
{
	int rv = -EINVAL;

	/* Link this lnode to hw */
	csio_lnode_to_hw(ln) = hw;

	/* Link child to parent if child lnode */
	if (pln)
		ln->pln = pln;
	else
		ln->pln = NULL;

	/* Initialize scsi_tgt and timers to zero */
	ln->n_scsi_tgts = 0;
	ln->last_scan_ntgts = 0;
	ln->tgt_scan_tick = 0;

	/* Initialize rnode list */
	INIT_LIST_HEAD(&ln->rnhead);
	INIT_LIST_HEAD(&ln->cln_head);

	/* Initialize log level for debug */
	ln->params.log_level = hw->params.log_level;

	if (csio_ln_init(ln))
		goto err;

	/* Add lnode to list of sibling or children lnodes */
	spin_lock_irq(&hw->lock);
	list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
	if (pln)
		pln->num_vports++;
	spin_unlock_irq(&hw->lock);

	hw->num_lns++;

	return 0;
err:
	csio_lnode_to_hw(ln) = NULL;
	return rv;
}
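
/*
 * Illustrative sketch only: a caller creating an NPIV vport (for example
 * from the FC transport glue elsewhere in the driver) would pair these
 * entry points roughly like this; the allocation helper shown is
 * hypothetical.
 *
 *	ln = example_alloc_lnode(hw);			// hypothetical
 *	if (!ln || csio_lnode_init(ln, hw, phys_ln))
 *		... bail out ...
 *	...
 *	csio_lnode_exit(ln);	// undoes csio_lnode_init() on teardown
 */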

/**
 * csio_lnode_exit - De-instantiate an lnode.
 * @ln: lnode
 *
 */
void
csio_lnode_exit(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	csio_ln_exit(ln);

	/* Remove this lnode from hw->sln_head */
	spin_lock_irq(&hw->lock);

	list_del_init(&ln->sm.sm_list);

	/* If it is a child lnode, decrement the
	 * counter in its parent lnode
	 */
	if (ln->pln)
		ln->pln->num_vports--;

	/* Update root lnode pointer */
	if (list_empty(&hw->sln_head))
		hw->rln = NULL;
	else
		hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);

	spin_unlock_irq(&hw->lock);

	csio_lnode_to_hw(ln) = NULL;
	hw->num_lns--;
}