/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);

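/**
 * lpfc_terminate_rport_io - Abort outstanding I/O for a remote port
 * @rport: Pointer to the FC transport remote port object.
 *
 * Called by the FC transport to terminate I/O to a remote port. If the
 * rport still maps to an active node with a valid SCSI ID, all outstanding
 * FCP IOCBs queued to that target are aborted.
 **/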
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
				    &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}

/*
 * This function is called when the dev_loss_tmo timer fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;
	unsigned long iflags;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosscb: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6790 rport name %llx dev_loss_evt pending",
				 rport->port_name);
		return;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return;
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still in use of the FCF; otherwise
 * it returns 0 when no remote node remains in use of the FCF at the time
 * the devloss timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct Scsi_Host *shost;
	uint8_t *name;
	int put_node;
	int warn_on = 0;
	int fcf_inuse = 0;
	unsigned long iflags;

	rport = ndlp->rport;
	vport = ndlp->vport;
	shost = lpfc_shost_from_vport(vport);

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	if (!rport)
		return fcf_inuse;

	name = (uint8_t *) &ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/*
	 * If lpfc_nlp_remove is reached with a dangling rport, it drops the
	 * reference. To make sure that does not happen, clear the rport
	 * pointer in the ndlp before lpfc_nlp_put.
	 */
	rdata = rport->dd_data;

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					    &phba->sli.sli3_ring[LPFC_FCP_RING],
					    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		put_device(&rport->dev);

		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	put_node = rdata->pnode != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	put_device(&rport->dev);

	if (ndlp->nlp_type & NLP_FABRIC)
		return fcf_inuse;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp for which
 * the devloss timeout was handled on an SLI4 host. For the devloss timeout
 * of the last remote node which had been in use of the FCF, when this
 * routine is invoked it is guaranteed that no remote node remains in use
 * of the FCF. In that case, if the FIP engine is neither in the FCF table
 * scan process nor the roundrobin failover process, the in-use FCF shall
 * be unregistered. If the FIP engine is in FCF discovery, the devloss
 * timeout state shall be set for either the FCF table scan process or the
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from functions which need to post events
 * from interrupt context. It allocates the data structure required
 * for posting an event. It also keeps track of the number of pending
 * events and prevents an event storm when there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lots of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
		      GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		   struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event to the fc
 * transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		       struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
				     work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
		fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
					       read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			   (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
					       check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
					       queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
					  fc_get_event_number(),
					  evt_data_size,
					  evt_data,
					  LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

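/**
 * lpfc_work_list_done - Process deferred work queued to the worker thread
 * @phba: Pointer to hba context object.
 *
 * Drains phba->work_list and dispatches each event to its handler. Events
 * embedded in other objects (for example the ndlp dev_loss event) are not
 * freed here; standalone events are freed after processing.
 **/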
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_RECOVER_PORT:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}

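/**
 * lpfc_work_done - Main worker thread processing routine
 * @phba: Pointer to hba context object.
 *
 * Handles host attention events (error, mailbox and link attention),
 * SLI4 asynchronous events, per-vport timer events, slow-path ELS ring
 * events and, finally, the deferred work list.
 **/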
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT) {
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

		if (phba->fw_dump_cmpl) {
			complete(phba->fw_dump_cmpl);
			phba->fw_dump_cmpl = NULL;
		}
	}

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Preserve legacy behavior. */
			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down. Allow for this transition.
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

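/**
 * lpfc_do_work - Worker thread entry point
 * @p: Pointer to hba context object, as passed at thread creation.
 *
 * Sleeps until the LPFC_DATA_READY flag is set or the thread is asked to
 * stop, then calls lpfc_work_done() to service pending work. Returns 0
 * when the thread terminates.
 **/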
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					      (test_and_clear_bit(LPFC_DATA_READY,
								  &phba->data_flags)
					       || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events
	 * will be queued to the worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}

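/**
 * lpfc_cleanup_rpis - Unregister RPIs and recover or remove vport nodes
 * @vport: Pointer to virtual port object.
 * @remove: Nonzero to remove nodes, zero to place them in recovery.
 *
 * Walks the vport node list, unregistering RPIs where required and posting
 * NLP_EVT_DEVICE_RM or NLP_EVT_DEVICE_RECOVERY to the discovery state
 * machine. On vport teardown the VPI is also unregistered.
 **/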
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;

		/* Notify transport of connectivity loss to trigger cleanup. */
		if (phba->nvmet_support &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_nvmet_invalidate_host(phba, ndlp);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

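/**
 * lpfc_port_link_failure - Clean up a vport after link failure
 * @vport: Pointer to virtual port object.
 *
 * Flushes receive buffers, RSCN activity and outstanding ELS commands,
 * places remote nodes into recovery and stops the discovery timer.
 **/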
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

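/**
 * lpfc_linkdown_port - Handle link down for a single vport
 * @vport: Pointer to virtual port object.
 *
 * Posts an FCH_EVT_LINKDOWN event to the FC transport (unless the port is
 * NVME-only), performs link-failure cleanup and cancels delayed NPort
 * discovery.
 **/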
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Down: state:x%x rtry:x%x flg:x%x",
			      vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}

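/**
 * lpfc_linkdown - Handle a link down event for the HBA
 * @phba: Pointer to hba context object.
 *
 * Blocks SCSI stack I/O, marks the link down, issues a link down event to
 * every vport, cleans up SLI3 firmware default RPIs and, in pt2pt mode,
 * issues CONFIG_LINK to set up myDID for the next link up.
 **/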
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	phba->defer_flogi_acc_flag = false;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		if (phba->sli4_hba.conf_trunk) {
			phba->trunk_link.link0.state = 0;
			phba->trunk_link.link1.state = 0;
			phba->trunk_link.link2.state = 0;
			phba->trunk_link.link3.state = 0;
			phba->sli4_hba.link_state.logical_speed =
				LPFC_LINK_SPEED_UNKNOWN;
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clean up any SLI3 firmware default rpi's */
	if (phba->sli_rev > LPFC_SLI_REV3)
		goto skip_unreg_did;

	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

skip_unreg_did:
	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		phba->pport->rcv_flogi_cnt = 0;
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}

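/**
 * lpfc_linkup_cleanup_nodes - Prepare vport nodes for rediscovery on link up
 * @vport: Pointer to virtual port object.
 *
 * Clears the FC4 types on each node, moves fabric nodes to NPR state and
 * unregisters RPIs so that outstanding I/O fails before a node is PLOGIed
 * again.
 **/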
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On link up it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

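/**
 * lpfc_linkup_port - Handle link up for a single vport
 * @vport: Pointer to virtual port object.
 *
 * Posts an FCH_EVT_LINKUP event to the FC transport (unless the port is
 * NVME-only), resets the discovery flags and, when the loop bit is set,
 * cleans up nodes before rediscovery.
 **/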
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Up: top:x%x speed:x%x flg:x%x",
			      phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}

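/**
 * lpfc_linkup - Handle a link up event for the HBA
 * @phba: Pointer to hba context object.
 *
 * Unblocks fabric IOCBs, runs link up handling for every vport and resets
 * the FLOGI bookkeeping carried over from the previous link down.
 **/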
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clear the pport flogi counter in case the link down was
	 * absorbed without an ACQE. No lock here - in worker thread
	 * and discovery is synchronized.
	 */
	spin_lock_irq(shost->host_lock);
	phba->pport->rcv_flogi_cnt = 0;
	spin_unlock_irq(shost->host_lock);

	/* reinitialize initial FLOGI flag */
	phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
	phba->defer_flogi_acc_flag = false;

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}

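/*
 * Note on the register access pattern above (descriptive, added for
 * clarity): the writel() to the Host Control register is a posted write;
 * the readl() marked "flush" forces it out to the adapter before hbalock
 * is released, so the Link Attention interrupt enable takes effect before
 * any code that depends on it can run.
 */
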
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	LPFC_MBOXQ_t *sparam_mb;
	struct lpfc_dmabuf *sparam_mp;
	u16 status = pmb->u.mb.mbxStatus;
	int rc;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (status)
		goto out;

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	if (vport->port_state != LPFC_FLOGI) {
		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
		 * bb-credit recovery is in place.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				sparam_mp = (struct lpfc_dmabuf *)
						sparam_mb->ctx_buf;
				lpfc_mbuf_free(phba, sparam_mp->virt,
					       sparam_mp->phys);
				kfree(sparam_mp);
				sparam_mb->ctx_buf = NULL;
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}

			phba->hba_flag |= HBA_DEFER_FLOGI;
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (vport->fc_flag & FC_PT2PT)
			lpfc_disc_start(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
			 status, vport->port_state);

sparam_out:
	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

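/*
 * Illustrative sketch (not part of the original driver; it only reuses
 * calls visible above): the MBX_NOWAIT mailbox pattern is allocate,
 * build, attach vport and completion handler, then issue.  On
 * MBX_NOT_FINISHED the mailbox was never queued, so the caller still
 * owns it and any attached DMA buffer and must free both:
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mbox && !lpfc_read_sparam(phba, mbox, 0)) {
 *		mbox->vport = vport;
 *		mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
 *		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
 *		    MBX_NOT_FINISHED)
 *			// free mbox->ctx_buf and mbox, as done above
 *	}
 */
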
/**
 * lpfc_sli4_clear_fcf_rr_bmask - Reset the round-robin FCF bitmask and list
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the round-robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;

	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
				 vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

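/*
 * Note (added for clarity): this and the two match helpers that follow
 * compare one byte at a time because each byte of the name lives in its
 * own bit-field of struct fcf_record and is only reachable through the
 * bf_get() accessors; a single memcmp() would assume a contiguous,
 * unswapped layout that the hardware record format need not provide.
 */
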
/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. The hbalock is asserted held in the code path calling this
 * routine.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the hbalock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	lockdep_assert_held(&phba->hbalock);

	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
				     bf_get(lpfc_fcf_record_fcf_index,
					    new_fcf_record),
				     new_fcf_record);
}

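/*
 * Note (added for clarity): the lockdep_assert_held() above documents the
 * locking contract promised in the kernel-doc and, on CONFIG_PROVE_LOCKING
 * kernels, warns if a caller reaches this update without owning
 * phba->hbalock.
 */
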
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with the HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI &&
		    phba->pport->fc_flag & FC_FABRIC) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}

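/*
 * Note on the flag handling above (descriptive, added for clarity):
 * FCF_TS_INPROG marks an FCF table scan in flight and FCF_RR_INPROG a
 * roundrobin FLOGI failover in flight.  Every early-exit path clears both
 * under hbalock so a failed or skipped registration cannot leave
 * discovery stuck; FCF_RR_INPROG is set only once a FLOGI will actually
 * be issued.
 */
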
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by the boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else it returns
 * zero. If this FCF record can be used for SAN discovery, boot_flag will
 * indicate whether this FCF is used by the boot bios and addr_mode will
 * indicate the addressing mode to be used for this FCF when the function
 * returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver connects
	 * to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);

		/*
		 * When there are no FCF connect entries, use the driver's
		 * default addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If the FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If the connection record does not support any addressing
		 * mode, skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches the filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If the user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by the user is not
		 * supported by the FCF, allow the fabric to pick the
		 * addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use that
		 * mode only if the FCF supports it.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
			*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
			*addr_mode = LPFC_FCF_FPMA;

		/* If the matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in the connect list, use the
		 * vlan id in the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}

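/*
 * Worked example for the lowest-VLAN scan at the top of
 * lpfc_match_fcf_conn_list() (added for clarity): vlan_bitmap[] is 512
 * bytes covering vlan ids 0-4095, eight ids per byte.  If the first
 * non-zero byte is vlan_bitmap[25] == 0x10 (bit 4 set), the scan yields
 * fcf_vlan_id = 25 * 8 + 4 = 204.
 */
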
/**
 * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if the FCF table needs to be rescanned.
 *
 * This function checks if any fcoe event is pending while the driver
 * scans FCF entries. If there is a pending event, it will restart the
 * FCF scanning and return 1, else it returns 0.
 */
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on the FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from prandom_u32() are taken as the generated random number.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is for keeping the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = 0xFFFF & prandom_u32();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}

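/*
 * Note (added for clarity): the test above is reservoir sampling with a
 * reservoir of one.  Accepting the n-th eligible record with probability
 * ~1/n leaves each of N records chosen with probability ~1/N.  Worked
 * example: for fcf_cnt == 3, (3 * rand_num) < 0xFFFF accepts rand_num
 * 0..21844, i.e. 21845 of 65536 outcomes, probability ~1/3; the bias from
 * comparing against 0xFFFF instead of 0x10000 is negligible.
 */
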
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 */
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}

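/*
 * Note (an assumption about the helper's semantics, added for clarity):
 * the lpfc_sli_pcimem_bcopy() calls above pass the same pointer as source
 * and destination, so they appear to perform an in-place per-word byte
 * swap of the DMA'd header and FCF record; only word137/word138, which
 * sit past the offsetof(vlan_bitmap) cutoff, are then converted
 * explicitly with le32_to_cpu().
 */
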
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index : x%x\n"
			"\tFCF_Avail : x%x\n"
			"\tFCF_Valid : x%x\n"
			"\tFCF_SOL : x%x\n"
			"\tFIP_Priority : x%x\n"
			"\tMAC_Provider : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}

/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs a matching test of a new FCF record against an
 * existing FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID,
 * the vlan id will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 */
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}

/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 * Return: 0 to continue retrying flogi on the currently registered fcf;
 *         1 to stop flogi on the currently registered fcf.
 */
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;

		if (!phba->fcf.fcf_redisc_attempted) {
			lpfc_unregister_fcf(phba);

			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (!rc) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"3195 Rediscover FCF table\n");
				phba->fcf.fcf_redisc_attempted = 1;
				lpfc_sli4_clear_fcf_rr_bmask(phba);
			} else {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"3196 Rediscover FCF table "
						"failed. Status:x%x\n", rc);
			}
		} else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					"3197 Already rediscover FCF table "
					"attempted. No more retry\n");
		}
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}

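/*
 * Usage note (an assumption about callers, added for clarity): this
 * routine is meant for the FLOGI failure/retry path -- the caller passes
 * the next index taken from the roundrobin bitmask, or
 * LPFC_FCOE_FCF_NEXT_NONE once the bitmask is exhausted, and gives up on
 * the currently registered FCF whenever 1 is returned.
 */
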
/**
 * lpfc_sli4_fcf_pri_list_del - Remove an FCF index from the priority list
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list, and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}

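/*
 * Note on the bookkeeping above (descriptive, added for clarity):
 * eligible_fcf_cnt tracks how many priority-list entries share the
 * priority of the FCF currently in use, so it is decremented only when
 * the deleted entry's priority matches phba->fcf.current_rec.priority;
 * list membership is tracked with LPFC_FCF_ON_PRI_LIST rather than by
 * probing the list itself.
 */
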
/**
 * lpfc_sli4_set_fcf_flogi_fail - Mark an FCF index as failed for FLOGI
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the round-robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
2155*4882a593Smuzhiyun void
2156*4882a593Smuzhiyun lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2157*4882a593Smuzhiyun {
2158*4882a593Smuzhiyun struct lpfc_fcf_pri *new_fcf_pri;
2159*4882a593Smuzhiyun new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2160*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2161*4882a593Smuzhiyun new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
2162*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2163*4882a593Smuzhiyun }
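
/*
 * Illustrative lifecycle sketch (editor's addition, not original driver
 * text): the LPFC_FCF_FLOGI_FAILED bit set above persists until the
 * record is re-read and re-added through lpfc_sli4_fcf_pri_list_add(),
 * whose exit path rewrites fcf_rec.flag with a plain assignment
 * (flag = LPFC_FCF_ON_PRI_LIST), implicitly clearing the failed bit.
 */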
2164*4882a593Smuzhiyun
2165*4882a593Smuzhiyun /**
2166*4882a593Smuzhiyun * lpfc_sli4_fcf_pri_list_add - Add an fcf record to the priority list
2167*4882a593Smuzhiyun * @phba: pointer to lpfc hba data structure.
2168*4882a593Smuzhiyun * @fcf_index: the index of the fcf record to add
2169*4882a593Smuzhiyun * @new_fcf_record: pointer to a new FCF record.
2170*4882a593Smuzhiyun * This routine checks the priority of the fcf_index to be added.
2171*4882a593Smuzhiyun * If it is of lower priority than the current head of the fcf_pri list,
2172*4882a593Smuzhiyun * it is added to the list in the right order.
2173*4882a593Smuzhiyun * If it is of the same priority as the current head of the list, it
2174*4882a593Smuzhiyun * is added to the head of the list and its bit in the rr_bmask is set.
2175*4882a593Smuzhiyun * If the fcf_index to be added is of a higher priority than the current
2176*4882a593Smuzhiyun * head of the list, the rr_bmask is cleared, its bit is set in the
2177*4882a593Smuzhiyun * rr_bmask, and it is added to the head of the list.
2178*4882a593Smuzhiyun * returns:
2179*4882a593Smuzhiyun * 0=success 1=failure
2180*4882a593Smuzhiyun **/
2181*4882a593Smuzhiyun static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
2182*4882a593Smuzhiyun uint16_t fcf_index,
2183*4882a593Smuzhiyun struct fcf_record *new_fcf_record)
2184*4882a593Smuzhiyun {
2185*4882a593Smuzhiyun uint16_t current_fcf_pri;
2186*4882a593Smuzhiyun uint16_t last_index;
2187*4882a593Smuzhiyun struct lpfc_fcf_pri *fcf_pri;
2188*4882a593Smuzhiyun struct lpfc_fcf_pri *next_fcf_pri;
2189*4882a593Smuzhiyun struct lpfc_fcf_pri *new_fcf_pri;
2190*4882a593Smuzhiyun int ret;
2191*4882a593Smuzhiyun
2192*4882a593Smuzhiyun new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2193*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2194*4882a593Smuzhiyun "3059 adding idx x%x pri x%x flg x%x\n",
2195*4882a593Smuzhiyun fcf_index, new_fcf_record->fip_priority,
2196*4882a593Smuzhiyun new_fcf_pri->fcf_rec.flag);
2197*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2198*4882a593Smuzhiyun if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2199*4882a593Smuzhiyun list_del_init(&new_fcf_pri->list);
2200*4882a593Smuzhiyun new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2201*4882a593Smuzhiyun new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2202*4882a593Smuzhiyun if (list_empty(&phba->fcf.fcf_pri_list)) {
2203*4882a593Smuzhiyun list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2204*4882a593Smuzhiyun ret = lpfc_sli4_fcf_rr_index_set(phba,
2205*4882a593Smuzhiyun new_fcf_pri->fcf_rec.fcf_index);
2206*4882a593Smuzhiyun goto out;
2207*4882a593Smuzhiyun }
2208*4882a593Smuzhiyun
2209*4882a593Smuzhiyun last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2210*4882a593Smuzhiyun LPFC_SLI4_FCF_TBL_INDX_MAX);
2211*4882a593Smuzhiyun if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2212*4882a593Smuzhiyun ret = 0; /* Empty rr list */
2213*4882a593Smuzhiyun goto out;
2214*4882a593Smuzhiyun }
2215*4882a593Smuzhiyun current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2216*4882a593Smuzhiyun if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2217*4882a593Smuzhiyun list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2218*4882a593Smuzhiyun if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2219*4882a593Smuzhiyun memset(phba->fcf.fcf_rr_bmask, 0,
2220*4882a593Smuzhiyun sizeof(*phba->fcf.fcf_rr_bmask));
2221*4882a593Smuzhiyun /* fcfs_at_this_priority_level = 1; */
2222*4882a593Smuzhiyun phba->fcf.eligible_fcf_cnt = 1;
2223*4882a593Smuzhiyun } else
2224*4882a593Smuzhiyun /* fcfs_at_this_priority_level++; */
2225*4882a593Smuzhiyun phba->fcf.eligible_fcf_cnt++;
2226*4882a593Smuzhiyun ret = lpfc_sli4_fcf_rr_index_set(phba,
2227*4882a593Smuzhiyun new_fcf_pri->fcf_rec.fcf_index);
2228*4882a593Smuzhiyun goto out;
2229*4882a593Smuzhiyun }
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyun list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2232*4882a593Smuzhiyun &phba->fcf.fcf_pri_list, list) {
2233*4882a593Smuzhiyun if (new_fcf_pri->fcf_rec.priority <=
2234*4882a593Smuzhiyun fcf_pri->fcf_rec.priority) {
2235*4882a593Smuzhiyun if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2236*4882a593Smuzhiyun list_add(&new_fcf_pri->list,
2237*4882a593Smuzhiyun &phba->fcf.fcf_pri_list);
2238*4882a593Smuzhiyun else
2239*4882a593Smuzhiyun list_add(&new_fcf_pri->list,
2240*4882a593Smuzhiyun &((struct lpfc_fcf_pri *)
2241*4882a593Smuzhiyun fcf_pri->list.prev)->list);
2242*4882a593Smuzhiyun ret = 0;
2243*4882a593Smuzhiyun goto out;
2244*4882a593Smuzhiyun } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2245*4882a593Smuzhiyun || new_fcf_pri->fcf_rec.priority <
2246*4882a593Smuzhiyun next_fcf_pri->fcf_rec.priority) {
2247*4882a593Smuzhiyun list_add(&new_fcf_pri->list, &fcf_pri->list);
2248*4882a593Smuzhiyun ret = 0;
2249*4882a593Smuzhiyun goto out;
2250*4882a593Smuzhiyun }
2251*4882a593Smuzhiyun if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2252*4882a593Smuzhiyun continue;
2253*4882a593Smuzhiyun
2254*4882a593Smuzhiyun }
2255*4882a593Smuzhiyun ret = 1;
2256*4882a593Smuzhiyun out:
2257*4882a593Smuzhiyun /* we use = instead of |= to clear the FLOGI_FAILED flag. */
2258*4882a593Smuzhiyun new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2259*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2260*4882a593Smuzhiyun return ret;
2261*4882a593Smuzhiyun }
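
/*
 * Worked example (editor's addition, not original driver text) with
 * hypothetical FIP priority values, where a numerically lower value is
 * preferred. Starting with the current roundrobin priority level at 0x40
 * (the priority of the first index set in rr_bmask) and a list of:
 * head -> 0x40 -> 0x40 -> 0x80:
 *
 *   - adding a 0x40 record: equal to the current level, so it goes to
 *     the head, its rr_bmask bit is set, and eligible_fcf_cnt++;
 *   - adding a 0x20 record: better than the current level, so rr_bmask
 *     is cleared, only the new index's bit is set, eligible_fcf_cnt is
 *     reset to 1, and the record goes to the head;
 *   - adding a 0x60 record: worse than the current level, so it is
 *     walked into place between the 0x40 entries and the 0x80 entry,
 *     and rr_bmask is left untouched.
 */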
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun /**
2264*4882a593Smuzhiyun * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
2265*4882a593Smuzhiyun * @phba: pointer to lpfc hba data structure.
2266*4882a593Smuzhiyun * @mboxq: pointer to mailbox object.
2267*4882a593Smuzhiyun *
2268*4882a593Smuzhiyun * This function iterates through all the fcf records available in
2269*4882a593Smuzhiyun * the HBA and chooses the optimal FCF record for discovery. After
2270*4882a593Smuzhiyun * finding the FCF for discovery, it registers the FCF record and
2271*4882a593Smuzhiyun * kick-starts discovery.
2272*4882a593Smuzhiyun * If the FCF_IN_USE flag is set on the currently used FCF, the routine
2273*4882a593Smuzhiyun * tries to use an FCF record which matches the fabric name and mac
2274*4882a593Smuzhiyun * address of the currently used FCF record.
2275*4882a593Smuzhiyun * If the driver supports only one FCF, it will try to use the FCF record
2276*4882a593Smuzhiyun * used by BOOT_BIOS.
2277*4882a593Smuzhiyun */
2278*4882a593Smuzhiyun void
2279*4882a593Smuzhiyun lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2280*4882a593Smuzhiyun {
2281*4882a593Smuzhiyun struct fcf_record *new_fcf_record;
2282*4882a593Smuzhiyun uint32_t boot_flag, addr_mode;
2283*4882a593Smuzhiyun uint16_t fcf_index, next_fcf_index;
2284*4882a593Smuzhiyun struct lpfc_fcf_rec *fcf_rec = NULL;
2285*4882a593Smuzhiyun uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2286*4882a593Smuzhiyun bool select_new_fcf;
2287*4882a593Smuzhiyun int rc;
2288*4882a593Smuzhiyun
2289*4882a593Smuzhiyun /* If there is pending FCoE event restart FCF table scan */
2290*4882a593Smuzhiyun if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2291*4882a593Smuzhiyun lpfc_sli4_mbox_cmd_free(phba, mboxq);
2292*4882a593Smuzhiyun return;
2293*4882a593Smuzhiyun }
2294*4882a593Smuzhiyun
2295*4882a593Smuzhiyun /* Parse the FCF record from the non-embedded mailbox command */
2296*4882a593Smuzhiyun new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2297*4882a593Smuzhiyun &next_fcf_index);
2298*4882a593Smuzhiyun if (!new_fcf_record) {
2299*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2300*4882a593Smuzhiyun "2765 Mailbox command READ_FCF_RECORD "
2301*4882a593Smuzhiyun "failed to retrieve a FCF record.\n");
2302*4882a593Smuzhiyun /* Let next new FCF event trigger fast failover */
2303*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2304*4882a593Smuzhiyun phba->hba_flag &= ~FCF_TS_INPROG;
2305*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2306*4882a593Smuzhiyun lpfc_sli4_mbox_cmd_free(phba, mboxq);
2307*4882a593Smuzhiyun return;
2308*4882a593Smuzhiyun }
2309*4882a593Smuzhiyun
2310*4882a593Smuzhiyun /* Check the FCF record against the connection list */
2311*4882a593Smuzhiyun rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2312*4882a593Smuzhiyun &addr_mode, &vlan_id);
2313*4882a593Smuzhiyun
2314*4882a593Smuzhiyun /* Log the FCF record information if turned on */
2315*4882a593Smuzhiyun lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2316*4882a593Smuzhiyun next_fcf_index);
2317*4882a593Smuzhiyun
2318*4882a593Smuzhiyun /*
2319*4882a593Smuzhiyun * If the fcf record does not match with connect list entries
2320*4882a593Smuzhiyun * read the next entry; otherwise, this is an eligible FCF
2321*4882a593Smuzhiyun * record for roundrobin FCF failover.
2322*4882a593Smuzhiyun */
2323*4882a593Smuzhiyun if (!rc) {
2324*4882a593Smuzhiyun lpfc_sli4_fcf_pri_list_del(phba,
2325*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2326*4882a593Smuzhiyun new_fcf_record));
2327*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2328*4882a593Smuzhiyun "2781 FCF (x%x) failed connection "
2329*4882a593Smuzhiyun "list check: (x%x/x%x/%x)\n",
2330*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2331*4882a593Smuzhiyun new_fcf_record),
2332*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_avail,
2333*4882a593Smuzhiyun new_fcf_record),
2334*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_valid,
2335*4882a593Smuzhiyun new_fcf_record),
2336*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_sol,
2337*4882a593Smuzhiyun new_fcf_record));
2338*4882a593Smuzhiyun if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2339*4882a593Smuzhiyun lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2340*4882a593Smuzhiyun new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2341*4882a593Smuzhiyun if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2342*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx) {
2343*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_ERR,
2344*4882a593Smuzhiyun LOG_TRACE_EVENT,
2345*4882a593Smuzhiyun "2862 FCF (x%x) matches property "
2346*4882a593Smuzhiyun "of in-use FCF (x%x)\n",
2347*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2348*4882a593Smuzhiyun new_fcf_record),
2349*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx);
2350*4882a593Smuzhiyun goto read_next_fcf;
2351*4882a593Smuzhiyun }
2352*4882a593Smuzhiyun /*
2353*4882a593Smuzhiyun * In case the current in-use FCF record becomes
2354*4882a593Smuzhiyun * invalid/unavailable during FCF discovery that
2355*4882a593Smuzhiyun * was not triggered by fast FCF failover process,
2356*4882a593Smuzhiyun * treat it as fast FCF failover.
2357*4882a593Smuzhiyun */
2358*4882a593Smuzhiyun if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2359*4882a593Smuzhiyun !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2360*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2361*4882a593Smuzhiyun "2835 Invalid in-use FCF "
2362*4882a593Smuzhiyun "(x%x), enter FCF failover "
2363*4882a593Smuzhiyun "table scan.\n",
2364*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx);
2365*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2366*4882a593Smuzhiyun phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2367*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2368*4882a593Smuzhiyun lpfc_sli4_mbox_cmd_free(phba, mboxq);
2369*4882a593Smuzhiyun lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2370*4882a593Smuzhiyun LPFC_FCOE_FCF_GET_FIRST);
2371*4882a593Smuzhiyun return;
2372*4882a593Smuzhiyun }
2373*4882a593Smuzhiyun }
2374*4882a593Smuzhiyun goto read_next_fcf;
2375*4882a593Smuzhiyun } else {
2376*4882a593Smuzhiyun fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2377*4882a593Smuzhiyun rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2378*4882a593Smuzhiyun new_fcf_record);
2379*4882a593Smuzhiyun if (rc)
2380*4882a593Smuzhiyun goto read_next_fcf;
2381*4882a593Smuzhiyun }
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun /*
2384*4882a593Smuzhiyun 	 * If this is not the first FCF discovery of the HBA, use the last
2385*4882a593Smuzhiyun 	 * FCF record for the discovery. A rescan matches the in-use FCF
2386*4882a593Smuzhiyun 	 * record when the fabric name, switch name, mac address, and
2387*4882a593Smuzhiyun 	 * vlan_id all match.
2388*4882a593Smuzhiyun */
2389*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2390*4882a593Smuzhiyun if (phba->fcf.fcf_flag & FCF_IN_USE) {
2391*4882a593Smuzhiyun if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2392*4882a593Smuzhiyun lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2393*4882a593Smuzhiyun new_fcf_record, vlan_id)) {
2394*4882a593Smuzhiyun if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2395*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx) {
2396*4882a593Smuzhiyun phba->fcf.fcf_flag |= FCF_AVAILABLE;
2397*4882a593Smuzhiyun if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2398*4882a593Smuzhiyun /* Stop FCF redisc wait timer */
2399*4882a593Smuzhiyun __lpfc_sli4_stop_fcf_redisc_wait_timer(
2400*4882a593Smuzhiyun phba);
2401*4882a593Smuzhiyun else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2402*4882a593Smuzhiyun /* Fast failover, mark completed */
2403*4882a593Smuzhiyun phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2404*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2405*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2406*4882a593Smuzhiyun "2836 New FCF matches in-use "
2407*4882a593Smuzhiyun "FCF (x%x), port_state:x%x, "
2408*4882a593Smuzhiyun "fc_flag:x%x\n",
2409*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx,
2410*4882a593Smuzhiyun phba->pport->port_state,
2411*4882a593Smuzhiyun phba->pport->fc_flag);
2412*4882a593Smuzhiyun goto out;
2413*4882a593Smuzhiyun } else
2414*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2415*4882a593Smuzhiyun "2863 New FCF (x%x) matches "
2416*4882a593Smuzhiyun "property of in-use FCF (x%x)\n",
2417*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2418*4882a593Smuzhiyun new_fcf_record),
2419*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx);
2420*4882a593Smuzhiyun }
2421*4882a593Smuzhiyun /*
2422*4882a593Smuzhiyun 	 * Read the next FCF record from the HBA, searching for a match
2423*4882a593Smuzhiyun 	 * with the in-use record, only if we are not in the fast failover
2424*4882a593Smuzhiyun 	 * period. During the fast failover period, instead try to
2425*4882a593Smuzhiyun 	 * determine whether the FCF record just read should be the
2426*4882a593Smuzhiyun 	 * next candidate.
2427*4882a593Smuzhiyun */
2428*4882a593Smuzhiyun if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2429*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2430*4882a593Smuzhiyun goto read_next_fcf;
2431*4882a593Smuzhiyun }
2432*4882a593Smuzhiyun }
2433*4882a593Smuzhiyun /*
2434*4882a593Smuzhiyun 	 * Update the failover FCF record only if we are in the FCF
2435*4882a593Smuzhiyun 	 * fast-failover period; otherwise, update the current FCF record.
2436*4882a593Smuzhiyun */
2437*4882a593Smuzhiyun if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2438*4882a593Smuzhiyun fcf_rec = &phba->fcf.failover_rec;
2439*4882a593Smuzhiyun else
2440*4882a593Smuzhiyun fcf_rec = &phba->fcf.current_rec;
2441*4882a593Smuzhiyun
2442*4882a593Smuzhiyun if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2443*4882a593Smuzhiyun /*
2444*4882a593Smuzhiyun * If the driver FCF record does not have boot flag
2445*4882a593Smuzhiyun * set and new hba fcf record has boot flag set, use
2446*4882a593Smuzhiyun * the new hba fcf record.
2447*4882a593Smuzhiyun */
2448*4882a593Smuzhiyun if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2449*4882a593Smuzhiyun /* Choose this FCF record */
2450*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2451*4882a593Smuzhiyun "2837 Update current FCF record "
2452*4882a593Smuzhiyun "(x%x) with new FCF record (x%x)\n",
2453*4882a593Smuzhiyun fcf_rec->fcf_indx,
2454*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2455*4882a593Smuzhiyun new_fcf_record));
2456*4882a593Smuzhiyun __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2457*4882a593Smuzhiyun addr_mode, vlan_id, BOOT_ENABLE);
2458*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2459*4882a593Smuzhiyun goto read_next_fcf;
2460*4882a593Smuzhiyun }
2461*4882a593Smuzhiyun /*
2462*4882a593Smuzhiyun * If the driver FCF record has boot flag set and the
2463*4882a593Smuzhiyun * new hba FCF record does not have boot flag, read
2464*4882a593Smuzhiyun * the next FCF record.
2465*4882a593Smuzhiyun */
2466*4882a593Smuzhiyun if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2467*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2468*4882a593Smuzhiyun goto read_next_fcf;
2469*4882a593Smuzhiyun }
2470*4882a593Smuzhiyun /*
2471*4882a593Smuzhiyun * If the new hba FCF record has lower priority value
2472*4882a593Smuzhiyun * than the driver FCF record, use the new record.
2473*4882a593Smuzhiyun */
2474*4882a593Smuzhiyun if (new_fcf_record->fip_priority < fcf_rec->priority) {
2475*4882a593Smuzhiyun /* Choose the new FCF record with lower priority */
2476*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2477*4882a593Smuzhiyun "2838 Update current FCF record "
2478*4882a593Smuzhiyun "(x%x) with new FCF record (x%x)\n",
2479*4882a593Smuzhiyun fcf_rec->fcf_indx,
2480*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2481*4882a593Smuzhiyun new_fcf_record));
2482*4882a593Smuzhiyun __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2483*4882a593Smuzhiyun addr_mode, vlan_id, 0);
2484*4882a593Smuzhiyun /* Reset running random FCF selection count */
2485*4882a593Smuzhiyun phba->fcf.eligible_fcf_cnt = 1;
2486*4882a593Smuzhiyun } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2487*4882a593Smuzhiyun /* Update running random FCF selection count */
2488*4882a593Smuzhiyun phba->fcf.eligible_fcf_cnt++;
2489*4882a593Smuzhiyun select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2490*4882a593Smuzhiyun phba->fcf.eligible_fcf_cnt);
2491*4882a593Smuzhiyun if (select_new_fcf) {
2492*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2493*4882a593Smuzhiyun "2839 Update current FCF record "
2494*4882a593Smuzhiyun "(x%x) with new FCF record (x%x)\n",
2495*4882a593Smuzhiyun fcf_rec->fcf_indx,
2496*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2497*4882a593Smuzhiyun new_fcf_record));
2498*4882a593Smuzhiyun /* Choose the new FCF by random selection */
2499*4882a593Smuzhiyun __lpfc_update_fcf_record(phba, fcf_rec,
2500*4882a593Smuzhiyun new_fcf_record,
2501*4882a593Smuzhiyun addr_mode, vlan_id, 0);
2502*4882a593Smuzhiyun }
2503*4882a593Smuzhiyun }
2504*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2505*4882a593Smuzhiyun goto read_next_fcf;
2506*4882a593Smuzhiyun }
2507*4882a593Smuzhiyun /*
2508*4882a593Smuzhiyun * This is the first suitable FCF record, choose this record for
2509*4882a593Smuzhiyun * initial best-fit FCF.
2510*4882a593Smuzhiyun */
2511*4882a593Smuzhiyun if (fcf_rec) {
2512*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2513*4882a593Smuzhiyun "2840 Update initial FCF candidate "
2514*4882a593Smuzhiyun "with FCF (x%x)\n",
2515*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2516*4882a593Smuzhiyun new_fcf_record));
2517*4882a593Smuzhiyun __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2518*4882a593Smuzhiyun addr_mode, vlan_id, (boot_flag ?
2519*4882a593Smuzhiyun BOOT_ENABLE : 0));
2520*4882a593Smuzhiyun phba->fcf.fcf_flag |= FCF_AVAILABLE;
2521*4882a593Smuzhiyun /* Setup initial running random FCF selection count */
2522*4882a593Smuzhiyun phba->fcf.eligible_fcf_cnt = 1;
2523*4882a593Smuzhiyun }
2524*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2525*4882a593Smuzhiyun goto read_next_fcf;
2526*4882a593Smuzhiyun
2527*4882a593Smuzhiyun read_next_fcf:
2528*4882a593Smuzhiyun lpfc_sli4_mbox_cmd_free(phba, mboxq);
2529*4882a593Smuzhiyun if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2530*4882a593Smuzhiyun if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2531*4882a593Smuzhiyun /*
2532*4882a593Smuzhiyun * Case of FCF fast failover scan
2533*4882a593Smuzhiyun */
2534*4882a593Smuzhiyun
2535*4882a593Smuzhiyun /*
2536*4882a593Smuzhiyun 			 * No suitable FCF record has been found; cancel the
2537*4882a593Smuzhiyun 			 * in-progress FCF scan and do nothing.
2538*4882a593Smuzhiyun */
2539*4882a593Smuzhiyun if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2540*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2541*4882a593Smuzhiyun "2782 No suitable FCF found: "
2542*4882a593Smuzhiyun "(x%x/x%x)\n",
2543*4882a593Smuzhiyun phba->fcoe_eventtag_at_fcf_scan,
2544*4882a593Smuzhiyun bf_get(lpfc_fcf_record_fcf_index,
2545*4882a593Smuzhiyun new_fcf_record));
2546*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2547*4882a593Smuzhiyun if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2548*4882a593Smuzhiyun phba->hba_flag &= ~FCF_TS_INPROG;
2549*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2550*4882a593Smuzhiyun /* Unregister in-use FCF and rescan */
2551*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO,
2552*4882a593Smuzhiyun LOG_FIP,
2553*4882a593Smuzhiyun "2864 On devloss tmo "
2554*4882a593Smuzhiyun "unreg in-use FCF and "
2555*4882a593Smuzhiyun "rescan FCF table\n");
2556*4882a593Smuzhiyun lpfc_unregister_fcf_rescan(phba);
2557*4882a593Smuzhiyun return;
2558*4882a593Smuzhiyun }
2559*4882a593Smuzhiyun /*
2560*4882a593Smuzhiyun * Let next new FCF event trigger fast failover
2561*4882a593Smuzhiyun */
2562*4882a593Smuzhiyun phba->hba_flag &= ~FCF_TS_INPROG;
2563*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2564*4882a593Smuzhiyun return;
2565*4882a593Smuzhiyun }
2566*4882a593Smuzhiyun /*
2567*4882a593Smuzhiyun 			 * A suitable FCF record that is not the same as the
2568*4882a593Smuzhiyun 			 * in-use FCF record has been found: unregister the
2569*4882a593Smuzhiyun 			 * in-use FCF record, replace the in-use FCF record
2570*4882a593Smuzhiyun 			 * with the new FCF record, mark FCF fast failover
2571*4882a593Smuzhiyun 			 * completed, and then start registering the new FCF
2572*4882a593Smuzhiyun 			 * record.
2573*4882a593Smuzhiyun */
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun /* Unregister the current in-use FCF record */
2576*4882a593Smuzhiyun lpfc_unregister_fcf(phba);
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun /* Replace in-use record with the new record */
2579*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2580*4882a593Smuzhiyun "2842 Replace in-use FCF (x%x) "
2581*4882a593Smuzhiyun "with failover FCF (x%x)\n",
2582*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx,
2583*4882a593Smuzhiyun phba->fcf.failover_rec.fcf_indx);
2584*4882a593Smuzhiyun memcpy(&phba->fcf.current_rec,
2585*4882a593Smuzhiyun &phba->fcf.failover_rec,
2586*4882a593Smuzhiyun sizeof(struct lpfc_fcf_rec));
2587*4882a593Smuzhiyun /*
2588*4882a593Smuzhiyun * Mark the fast FCF failover rediscovery completed
2589*4882a593Smuzhiyun * and the start of the first round of the roundrobin
2590*4882a593Smuzhiyun * FCF failover.
2591*4882a593Smuzhiyun */
2592*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2593*4882a593Smuzhiyun phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2594*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2595*4882a593Smuzhiyun /* Register to the new FCF record */
2596*4882a593Smuzhiyun lpfc_register_fcf(phba);
2597*4882a593Smuzhiyun } else {
2598*4882a593Smuzhiyun /*
2599*4882a593Smuzhiyun 			 * If we are in the transition period to fast FCF failover,
2600*4882a593Smuzhiyun 			 * do nothing when the search reaches the end of the FCF table.
2601*4882a593Smuzhiyun */
2602*4882a593Smuzhiyun if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2603*4882a593Smuzhiyun (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2604*4882a593Smuzhiyun return;
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2607*4882a593Smuzhiyun phba->fcf.fcf_flag & FCF_IN_USE) {
2608*4882a593Smuzhiyun /*
2609*4882a593Smuzhiyun * In case the current in-use FCF record no
2610*4882a593Smuzhiyun * longer existed during FCF discovery that
2611*4882a593Smuzhiyun * was not triggered by fast FCF failover
2612*4882a593Smuzhiyun * process, treat it as fast FCF failover.
2613*4882a593Smuzhiyun */
2614*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2615*4882a593Smuzhiyun "2841 In-use FCF record (x%x) "
2616*4882a593Smuzhiyun "not reported, entering fast "
2617*4882a593Smuzhiyun "FCF failover mode scanning.\n",
2618*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx);
2619*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2620*4882a593Smuzhiyun phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2621*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2622*4882a593Smuzhiyun lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2623*4882a593Smuzhiyun LPFC_FCOE_FCF_GET_FIRST);
2624*4882a593Smuzhiyun return;
2625*4882a593Smuzhiyun }
2626*4882a593Smuzhiyun /* Register to the new FCF record */
2627*4882a593Smuzhiyun lpfc_register_fcf(phba);
2628*4882a593Smuzhiyun }
2629*4882a593Smuzhiyun } else
2630*4882a593Smuzhiyun lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2631*4882a593Smuzhiyun return;
2632*4882a593Smuzhiyun
2633*4882a593Smuzhiyun out:
2634*4882a593Smuzhiyun lpfc_sli4_mbox_cmd_free(phba, mboxq);
2635*4882a593Smuzhiyun lpfc_register_fcf(phba);
2636*4882a593Smuzhiyun
2637*4882a593Smuzhiyun return;
2638*4882a593Smuzhiyun }
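
/*
 * Editorial summary (editor's addition, not original driver text) of the
 * scan handler above: each record read is either (a) rejected by the
 * connection list and dropped from the priority list, (b) matched against
 * the in-use FCF, or (c) scored against the running candidate: boot flag
 * first, then FIP priority, then a random tie-break across the
 * eligible_fcf_cnt equal-priority records. Only when the table wraps
 * (next_fcf_index is LPFC_FCOE_FCF_NEXT_NONE or 0) does the driver
 * commit, either registering the candidate or entering failover rescan.
 */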
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun /**
2641*4882a593Smuzhiyun * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
2642*4882a593Smuzhiyun * @phba: pointer to lpfc hba data structure.
2643*4882a593Smuzhiyun * @mboxq: pointer to mailbox object.
2644*4882a593Smuzhiyun *
2645*4882a593Smuzhiyun * This is the completion handler for the read FCF record mailbox command
2646*4882a593Smuzhiyun * issued for FLOGI failure roundrobin FCF failover, with the FCF index
2647*4882a593Smuzhiyun * taken from the eligible FCF record bmask. If the FCF read back is not
2648*4882a593Smuzhiyun * valid/available, it falls through to retrying FLOGI to the currently
2649*4882a593Smuzhiyun * registered FCF. Otherwise, if the FCF read back is valid and available,
2650*4882a593Smuzhiyun * it will set the newly read FCF record as the failover FCF record,
2651*4882a593Smuzhiyun * unregister the currently registered FCF record, copy the failover FCF
2652*4882a593Smuzhiyun * record to the current FCF record, and then register the current FCF
2653*4882a593Smuzhiyun * record before proceeding to try FLOGI on the new failover FCF.
2654*4882a593Smuzhiyun */
2655*4882a593Smuzhiyun void
2656*4882a593Smuzhiyun lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2657*4882a593Smuzhiyun {
2658*4882a593Smuzhiyun struct fcf_record *new_fcf_record;
2659*4882a593Smuzhiyun uint32_t boot_flag, addr_mode;
2660*4882a593Smuzhiyun uint16_t next_fcf_index, fcf_index;
2661*4882a593Smuzhiyun uint16_t current_fcf_index;
2662*4882a593Smuzhiyun uint16_t vlan_id;
2663*4882a593Smuzhiyun int rc;
2664*4882a593Smuzhiyun
2665*4882a593Smuzhiyun /* If link state is not up, stop the roundrobin failover process */
2666*4882a593Smuzhiyun if (phba->link_state < LPFC_LINK_UP) {
2667*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2668*4882a593Smuzhiyun phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2669*4882a593Smuzhiyun phba->hba_flag &= ~FCF_RR_INPROG;
2670*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2671*4882a593Smuzhiyun goto out;
2672*4882a593Smuzhiyun }
2673*4882a593Smuzhiyun
2674*4882a593Smuzhiyun /* Parse the FCF record from the non-embedded mailbox command */
2675*4882a593Smuzhiyun new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2676*4882a593Smuzhiyun &next_fcf_index);
2677*4882a593Smuzhiyun if (!new_fcf_record) {
2678*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2679*4882a593Smuzhiyun "2766 Mailbox command READ_FCF_RECORD "
2680*4882a593Smuzhiyun "failed to retrieve a FCF record. "
2681*4882a593Smuzhiyun "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2682*4882a593Smuzhiyun phba->fcf.fcf_flag);
2683*4882a593Smuzhiyun lpfc_unregister_fcf_rescan(phba);
2684*4882a593Smuzhiyun goto out;
2685*4882a593Smuzhiyun }
2686*4882a593Smuzhiyun
2687*4882a593Smuzhiyun /* Get the needed parameters from FCF record */
2688*4882a593Smuzhiyun rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2689*4882a593Smuzhiyun &addr_mode, &vlan_id);
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun /* Log the FCF record information if turned on */
2692*4882a593Smuzhiyun lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2693*4882a593Smuzhiyun next_fcf_index);
2694*4882a593Smuzhiyun
2695*4882a593Smuzhiyun fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2696*4882a593Smuzhiyun if (!rc) {
2697*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2698*4882a593Smuzhiyun 				"2848 Remove ineligible FCF (x%x) from "
2699*4882a593Smuzhiyun 				"roundrobin bmask\n", fcf_index);
2700*4882a593Smuzhiyun /* Clear roundrobin bmask bit for ineligible FCF */
2701*4882a593Smuzhiyun lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2702*4882a593Smuzhiyun /* Perform next round of roundrobin FCF failover */
2703*4882a593Smuzhiyun fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2704*4882a593Smuzhiyun rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2705*4882a593Smuzhiyun if (rc)
2706*4882a593Smuzhiyun goto out;
2707*4882a593Smuzhiyun goto error_out;
2708*4882a593Smuzhiyun }
2709*4882a593Smuzhiyun
2710*4882a593Smuzhiyun if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2711*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2712*4882a593Smuzhiyun "2760 Perform FLOGI roundrobin FCF failover: "
2713*4882a593Smuzhiyun "FCF (x%x) back to FCF (x%x)\n",
2714*4882a593Smuzhiyun phba->fcf.current_rec.fcf_indx, fcf_index);
2715*4882a593Smuzhiyun /* Wait 500 ms before retrying FLOGI to current FCF */
2716*4882a593Smuzhiyun msleep(500);
2717*4882a593Smuzhiyun lpfc_issue_init_vfi(phba->pport);
2718*4882a593Smuzhiyun goto out;
2719*4882a593Smuzhiyun }
2720*4882a593Smuzhiyun
2721*4882a593Smuzhiyun /* Upload new FCF record to the failover FCF record */
2722*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2723*4882a593Smuzhiyun "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2724*4882a593Smuzhiyun phba->fcf.failover_rec.fcf_indx, fcf_index);
2725*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
2726*4882a593Smuzhiyun __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2727*4882a593Smuzhiyun new_fcf_record, addr_mode, vlan_id,
2728*4882a593Smuzhiyun (boot_flag ? BOOT_ENABLE : 0));
2729*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
2730*4882a593Smuzhiyun
2731*4882a593Smuzhiyun current_fcf_index = phba->fcf.current_rec.fcf_indx;
2732*4882a593Smuzhiyun
2733*4882a593Smuzhiyun /* Unregister the current in-use FCF record */
2734*4882a593Smuzhiyun lpfc_unregister_fcf(phba);
2735*4882a593Smuzhiyun
2736*4882a593Smuzhiyun /* Replace in-use record with the new record */
2737*4882a593Smuzhiyun memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2738*4882a593Smuzhiyun sizeof(struct lpfc_fcf_rec));
2739*4882a593Smuzhiyun
2740*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2741*4882a593Smuzhiyun "2783 Perform FLOGI roundrobin FCF failover: FCF "
2742*4882a593Smuzhiyun "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2743*4882a593Smuzhiyun
2744*4882a593Smuzhiyun error_out:
2745*4882a593Smuzhiyun lpfc_register_fcf(phba);
2746*4882a593Smuzhiyun out:
2747*4882a593Smuzhiyun lpfc_sli4_mbox_cmd_free(phba, mboxq);
2748*4882a593Smuzhiyun }
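
/*
 * Editorial flow sketch (editor's addition, not original driver text) of
 * one roundrobin failover pass through the handler above, using
 * hypothetical FCF indices: FLOGI to registered FCF 0x1 fails, FCF 0x5 is
 * read back and passes the connection-list check, 0x5 is copied into
 * failover_rec, 0x1 is unregistered, failover_rec is copied over
 * current_rec, and lpfc_register_fcf() leads to a FLOGI on 0x5. Had 0x5
 * failed the check instead, its rr_bmask bit would be cleared and the
 * next eligible index tried.
 */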
2749*4882a593Smuzhiyun
2750*4882a593Smuzhiyun /**
2751*4882a593Smuzhiyun * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2752*4882a593Smuzhiyun * @phba: pointer to lpfc hba data structure.
2753*4882a593Smuzhiyun * @mboxq: pointer to mailbox object.
2754*4882a593Smuzhiyun *
2755*4882a593Smuzhiyun * This is the callback function of read FCF record mailbox command for
2756*4882a593Smuzhiyun * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
2757*4882a593Smuzhiyun * failover when a new FCF event happened. If the FCF read back is
2758*4882a593Smuzhiyun * valid/available and it passes the connection list check, it updates
2759*4882a593Smuzhiyun * the bmask for the eligible FCF record for roundrobin failover.
2760*4882a593Smuzhiyun */
2761*4882a593Smuzhiyun void
2762*4882a593Smuzhiyun lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2763*4882a593Smuzhiyun {
2764*4882a593Smuzhiyun struct fcf_record *new_fcf_record;
2765*4882a593Smuzhiyun uint32_t boot_flag, addr_mode;
2766*4882a593Smuzhiyun uint16_t fcf_index, next_fcf_index;
2767*4882a593Smuzhiyun uint16_t vlan_id;
2768*4882a593Smuzhiyun int rc;
2769*4882a593Smuzhiyun
2770*4882a593Smuzhiyun /* If link state is not up, no need to proceed */
2771*4882a593Smuzhiyun if (phba->link_state < LPFC_LINK_UP)
2772*4882a593Smuzhiyun goto out;
2773*4882a593Smuzhiyun
2774*4882a593Smuzhiyun /* If FCF discovery period is over, no need to proceed */
2775*4882a593Smuzhiyun if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2776*4882a593Smuzhiyun goto out;
2777*4882a593Smuzhiyun
2778*4882a593Smuzhiyun /* Parse the FCF record from the non-embedded mailbox command */
2779*4882a593Smuzhiyun new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2780*4882a593Smuzhiyun &next_fcf_index);
2781*4882a593Smuzhiyun if (!new_fcf_record) {
2782*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2783*4882a593Smuzhiyun "2767 Mailbox command READ_FCF_RECORD "
2784*4882a593Smuzhiyun "failed to retrieve a FCF record.\n");
2785*4882a593Smuzhiyun goto out;
2786*4882a593Smuzhiyun }
2787*4882a593Smuzhiyun
2788*4882a593Smuzhiyun /* Check the connection list for eligibility */
2789*4882a593Smuzhiyun rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2790*4882a593Smuzhiyun &addr_mode, &vlan_id);
2791*4882a593Smuzhiyun
2792*4882a593Smuzhiyun /* Log the FCF record information if turned on */
2793*4882a593Smuzhiyun lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2794*4882a593Smuzhiyun next_fcf_index);
2795*4882a593Smuzhiyun
2796*4882a593Smuzhiyun if (!rc)
2797*4882a593Smuzhiyun goto out;
2798*4882a593Smuzhiyun
2799*4882a593Smuzhiyun /* Update the eligible FCF record index bmask */
2800*4882a593Smuzhiyun fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2803*4882a593Smuzhiyun
2804*4882a593Smuzhiyun out:
2805*4882a593Smuzhiyun lpfc_sli4_mbox_cmd_free(phba, mboxq);
2806*4882a593Smuzhiyun }
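
/*
 * Editorial note (editor's addition, not original driver text): unlike
 * the scan and roundrobin completion handlers above, this handler never
 * registers an FCF; it only refreshes the priority list and eligible
 * bmask so that a later FLOGI failure can rove to the record just read.
 */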
2807*4882a593Smuzhiyun
2808*4882a593Smuzhiyun /**
2809*4882a593Smuzhiyun * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
2810*4882a593Smuzhiyun * @phba: pointer to lpfc hba data structure.
2811*4882a593Smuzhiyun * @mboxq: pointer to mailbox data structure.
2812*4882a593Smuzhiyun *
2813*4882a593Smuzhiyun * This function handles completion of init vfi mailbox command.
2814*4882a593Smuzhiyun */
2815*4882a593Smuzhiyun static void
2816*4882a593Smuzhiyun lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2817*4882a593Smuzhiyun {
2818*4882a593Smuzhiyun struct lpfc_vport *vport = mboxq->vport;
2819*4882a593Smuzhiyun
2820*4882a593Smuzhiyun /*
2821*4882a593Smuzhiyun 	 * VFI is not supported on interface type 0, so just do the flogi.
2822*4882a593Smuzhiyun 	 * Also continue if the VFI is in use - just use the same one.
2823*4882a593Smuzhiyun */
2824*4882a593Smuzhiyun if (mboxq->u.mb.mbxStatus &&
2825*4882a593Smuzhiyun (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2826*4882a593Smuzhiyun LPFC_SLI_INTF_IF_TYPE_0) &&
2827*4882a593Smuzhiyun mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2828*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2829*4882a593Smuzhiyun "2891 Init VFI mailbox failed 0x%x\n",
2830*4882a593Smuzhiyun mboxq->u.mb.mbxStatus);
2831*4882a593Smuzhiyun mempool_free(mboxq, phba->mbox_mem_pool);
2832*4882a593Smuzhiyun lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2833*4882a593Smuzhiyun return;
2834*4882a593Smuzhiyun }
2835*4882a593Smuzhiyun
2836*4882a593Smuzhiyun lpfc_initial_flogi(vport);
2837*4882a593Smuzhiyun mempool_free(mboxq, phba->mbox_mem_pool);
2838*4882a593Smuzhiyun return;
2839*4882a593Smuzhiyun }
2840*4882a593Smuzhiyun
2841*4882a593Smuzhiyun /**
2842*4882a593Smuzhiyun * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
2843*4882a593Smuzhiyun * @vport: pointer to lpfc_vport data structure.
2844*4882a593Smuzhiyun *
2845*4882a593Smuzhiyun * This function issues an init_vfi mailbox command to initialize the VFI
2846*4882a593Smuzhiyun * and VPI for the physical port.
2847*4882a593Smuzhiyun */
2848*4882a593Smuzhiyun void
2849*4882a593Smuzhiyun lpfc_issue_init_vfi(struct lpfc_vport *vport)
2850*4882a593Smuzhiyun {
2851*4882a593Smuzhiyun LPFC_MBOXQ_t *mboxq;
2852*4882a593Smuzhiyun int rc;
2853*4882a593Smuzhiyun struct lpfc_hba *phba = vport->phba;
2854*4882a593Smuzhiyun
2855*4882a593Smuzhiyun mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2856*4882a593Smuzhiyun if (!mboxq) {
2857*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR,
2858*4882a593Smuzhiyun LOG_TRACE_EVENT, "2892 Failed to allocate "
2859*4882a593Smuzhiyun "init_vfi mailbox\n");
2860*4882a593Smuzhiyun return;
2861*4882a593Smuzhiyun }
2862*4882a593Smuzhiyun lpfc_init_vfi(mboxq, vport);
2863*4882a593Smuzhiyun mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2864*4882a593Smuzhiyun rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2865*4882a593Smuzhiyun if (rc == MBX_NOT_FINISHED) {
2866*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2867*4882a593Smuzhiyun "2893 Failed to issue init_vfi mailbox\n");
2868*4882a593Smuzhiyun mempool_free(mboxq, vport->phba->mbox_mem_pool);
2869*4882a593Smuzhiyun }
2870*4882a593Smuzhiyun }
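
/*
 * Editorial note (editor's addition, not original driver text): the
 * INIT_VFI issue above is fire-and-forget; lpfc_init_vfi_cmpl() then
 * proceeds to lpfc_initial_flogi() on success (or on the tolerated
 * interface-type-0 and VFI-in-use statuses) and otherwise marks the
 * vport FC_VPORT_FAILED.
 */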
2871*4882a593Smuzhiyun
2872*4882a593Smuzhiyun /**
2873*4882a593Smuzhiyun * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
2874*4882a593Smuzhiyun * @phba: pointer to lpfc hba data structure.
2875*4882a593Smuzhiyun * @mboxq: pointer to mailbox data structure.
2876*4882a593Smuzhiyun *
2877*4882a593Smuzhiyun * This function handles completion of init vpi mailbox command.
2878*4882a593Smuzhiyun */
2879*4882a593Smuzhiyun void
2880*4882a593Smuzhiyun lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2881*4882a593Smuzhiyun {
2882*4882a593Smuzhiyun struct lpfc_vport *vport = mboxq->vport;
2883*4882a593Smuzhiyun struct lpfc_nodelist *ndlp;
2884*4882a593Smuzhiyun struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2885*4882a593Smuzhiyun
2886*4882a593Smuzhiyun if (mboxq->u.mb.mbxStatus) {
2887*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2888*4882a593Smuzhiyun "2609 Init VPI mailbox failed 0x%x\n",
2889*4882a593Smuzhiyun mboxq->u.mb.mbxStatus);
2890*4882a593Smuzhiyun mempool_free(mboxq, phba->mbox_mem_pool);
2891*4882a593Smuzhiyun lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2892*4882a593Smuzhiyun return;
2893*4882a593Smuzhiyun }
2894*4882a593Smuzhiyun spin_lock_irq(shost->host_lock);
2895*4882a593Smuzhiyun vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2896*4882a593Smuzhiyun spin_unlock_irq(shost->host_lock);
2897*4882a593Smuzhiyun
2898*4882a593Smuzhiyun 	/* If this port is the physical port or FDISC is done, do reg_vpi */
2899*4882a593Smuzhiyun if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2900*4882a593Smuzhiyun ndlp = lpfc_findnode_did(vport, Fabric_DID);
2901*4882a593Smuzhiyun if (!ndlp)
2902*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR,
2903*4882a593Smuzhiyun LOG_TRACE_EVENT,
2904*4882a593Smuzhiyun "2731 Cannot find fabric "
2905*4882a593Smuzhiyun "controller node\n");
2906*4882a593Smuzhiyun else
2907*4882a593Smuzhiyun lpfc_register_new_vport(phba, vport, ndlp);
2908*4882a593Smuzhiyun mempool_free(mboxq, phba->mbox_mem_pool);
2909*4882a593Smuzhiyun return;
2910*4882a593Smuzhiyun }
2911*4882a593Smuzhiyun
2912*4882a593Smuzhiyun if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2913*4882a593Smuzhiyun lpfc_initial_fdisc(vport);
2914*4882a593Smuzhiyun else {
2915*4882a593Smuzhiyun lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2916*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2917*4882a593Smuzhiyun "2606 No NPIV Fabric support\n");
2918*4882a593Smuzhiyun }
2919*4882a593Smuzhiyun mempool_free(mboxq, phba->mbox_mem_pool);
2920*4882a593Smuzhiyun return;
2921*4882a593Smuzhiyun }
2922*4882a593Smuzhiyun
2923*4882a593Smuzhiyun /**
2924*4882a593Smuzhiyun * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2925*4882a593Smuzhiyun * @vport: pointer to lpfc_vport data structure.
2926*4882a593Smuzhiyun *
2927*4882a593Smuzhiyun * This function issues an init_vpi mailbox command to initialize
2928*4882a593Smuzhiyun * the VPI for the vport.
2929*4882a593Smuzhiyun */
2930*4882a593Smuzhiyun void
2931*4882a593Smuzhiyun lpfc_issue_init_vpi(struct lpfc_vport *vport)
2932*4882a593Smuzhiyun {
2933*4882a593Smuzhiyun LPFC_MBOXQ_t *mboxq;
2934*4882a593Smuzhiyun int rc, vpi;
2935*4882a593Smuzhiyun
2936*4882a593Smuzhiyun if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2937*4882a593Smuzhiyun vpi = lpfc_alloc_vpi(vport->phba);
2938*4882a593Smuzhiyun if (!vpi) {
2939*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2940*4882a593Smuzhiyun "3303 Failed to obtain vport vpi\n");
2941*4882a593Smuzhiyun lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2942*4882a593Smuzhiyun return;
2943*4882a593Smuzhiyun }
2944*4882a593Smuzhiyun vport->vpi = vpi;
2945*4882a593Smuzhiyun }
2946*4882a593Smuzhiyun
2947*4882a593Smuzhiyun mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2948*4882a593Smuzhiyun if (!mboxq) {
2949*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR,
2950*4882a593Smuzhiyun LOG_TRACE_EVENT, "2607 Failed to allocate "
2951*4882a593Smuzhiyun "init_vpi mailbox\n");
2952*4882a593Smuzhiyun return;
2953*4882a593Smuzhiyun }
2954*4882a593Smuzhiyun lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2955*4882a593Smuzhiyun mboxq->vport = vport;
2956*4882a593Smuzhiyun mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2957*4882a593Smuzhiyun rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2958*4882a593Smuzhiyun if (rc == MBX_NOT_FINISHED) {
2959*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2960*4882a593Smuzhiyun "2608 Failed to issue init_vpi mailbox\n");
2961*4882a593Smuzhiyun mempool_free(mboxq, vport->phba->mbox_mem_pool);
2962*4882a593Smuzhiyun }
2963*4882a593Smuzhiyun }
2964*4882a593Smuzhiyun
2965*4882a593Smuzhiyun /**
2966*4882a593Smuzhiyun * lpfc_start_fdiscs - send fdiscs for each vports on this port.
2967*4882a593Smuzhiyun * @phba: pointer to lpfc hba data structure.
2968*4882a593Smuzhiyun *
2969*4882a593Smuzhiyun * This function loops through the list of vports on the @phba and issues an
2970*4882a593Smuzhiyun * FDISC if possible.
2971*4882a593Smuzhiyun */
2972*4882a593Smuzhiyun void
2973*4882a593Smuzhiyun lpfc_start_fdiscs(struct lpfc_hba *phba)
2974*4882a593Smuzhiyun {
2975*4882a593Smuzhiyun struct lpfc_vport **vports;
2976*4882a593Smuzhiyun int i;
2977*4882a593Smuzhiyun
2978*4882a593Smuzhiyun vports = lpfc_create_vport_work_array(phba);
2979*4882a593Smuzhiyun if (vports != NULL) {
2980*4882a593Smuzhiyun for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2981*4882a593Smuzhiyun if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2982*4882a593Smuzhiyun continue;
2983*4882a593Smuzhiyun 			/* There is no vpi for this vport */
2984*4882a593Smuzhiyun if (vports[i]->vpi > phba->max_vpi) {
2985*4882a593Smuzhiyun lpfc_vport_set_state(vports[i],
2986*4882a593Smuzhiyun FC_VPORT_FAILED);
2987*4882a593Smuzhiyun continue;
2988*4882a593Smuzhiyun }
2989*4882a593Smuzhiyun if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2990*4882a593Smuzhiyun lpfc_vport_set_state(vports[i],
2991*4882a593Smuzhiyun FC_VPORT_LINKDOWN);
2992*4882a593Smuzhiyun continue;
2993*4882a593Smuzhiyun }
2994*4882a593Smuzhiyun if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2995*4882a593Smuzhiyun lpfc_issue_init_vpi(vports[i]);
2996*4882a593Smuzhiyun continue;
2997*4882a593Smuzhiyun }
2998*4882a593Smuzhiyun if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2999*4882a593Smuzhiyun lpfc_initial_fdisc(vports[i]);
3000*4882a593Smuzhiyun else {
3001*4882a593Smuzhiyun lpfc_vport_set_state(vports[i],
3002*4882a593Smuzhiyun FC_VPORT_NO_FABRIC_SUPP);
3003*4882a593Smuzhiyun lpfc_printf_vlog(vports[i], KERN_ERR,
3004*4882a593Smuzhiyun LOG_TRACE_EVENT,
3005*4882a593Smuzhiyun "0259 No NPIV "
3006*4882a593Smuzhiyun "Fabric support\n");
3007*4882a593Smuzhiyun }
3008*4882a593Smuzhiyun }
3009*4882a593Smuzhiyun }
3010*4882a593Smuzhiyun lpfc_destroy_vport_work_array(phba, vports);
3011*4882a593Smuzhiyun }
3012*4882a593Smuzhiyun
3013*4882a593Smuzhiyun void
3014*4882a593Smuzhiyun lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3015*4882a593Smuzhiyun {
3016*4882a593Smuzhiyun struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
3017*4882a593Smuzhiyun struct lpfc_vport *vport = mboxq->vport;
3018*4882a593Smuzhiyun struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3019*4882a593Smuzhiyun
3020*4882a593Smuzhiyun /*
3021*4882a593Smuzhiyun * VFI not supported for interface type 0, so ignore any mailbox
3022*4882a593Smuzhiyun * error (except VFI in use) and continue with the discovery.
3023*4882a593Smuzhiyun */
3024*4882a593Smuzhiyun if (mboxq->u.mb.mbxStatus &&
3025*4882a593Smuzhiyun (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3026*4882a593Smuzhiyun LPFC_SLI_INTF_IF_TYPE_0) &&
3027*4882a593Smuzhiyun mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
3028*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3029*4882a593Smuzhiyun "2018 REG_VFI mbxStatus error x%x "
3030*4882a593Smuzhiyun "HBA state x%x\n",
3031*4882a593Smuzhiyun mboxq->u.mb.mbxStatus, vport->port_state);
3032*4882a593Smuzhiyun if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3033*4882a593Smuzhiyun /* FLOGI failed, use loop map to make discovery list */
3034*4882a593Smuzhiyun lpfc_disc_list_loopmap(vport);
3035*4882a593Smuzhiyun /* Start discovery */
3036*4882a593Smuzhiyun lpfc_disc_start(vport);
3037*4882a593Smuzhiyun goto out_free_mem;
3038*4882a593Smuzhiyun }
3039*4882a593Smuzhiyun lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3040*4882a593Smuzhiyun goto out_free_mem;
3041*4882a593Smuzhiyun }
3042*4882a593Smuzhiyun
3043*4882a593Smuzhiyun 	/* If the VFI is already registered, there is nothing else to do,
3044*4882a593Smuzhiyun 	 * unless this was a VFI update and we are in PT2PT mode; then
3045*4882a593Smuzhiyun 	 * we should drop through to set the port state to ready.
3046*4882a593Smuzhiyun */
3047*4882a593Smuzhiyun if (vport->fc_flag & FC_VFI_REGISTERED)
3048*4882a593Smuzhiyun if (!(phba->sli_rev == LPFC_SLI_REV4 &&
3049*4882a593Smuzhiyun vport->fc_flag & FC_PT2PT))
3050*4882a593Smuzhiyun goto out_free_mem;
3051*4882a593Smuzhiyun
3052*4882a593Smuzhiyun /* The VPI is implicitly registered when the VFI is registered */
3053*4882a593Smuzhiyun spin_lock_irq(shost->host_lock);
3054*4882a593Smuzhiyun vport->vpi_state |= LPFC_VPI_REGISTERED;
3055*4882a593Smuzhiyun vport->fc_flag |= FC_VFI_REGISTERED;
3056*4882a593Smuzhiyun vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3057*4882a593Smuzhiyun vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
3058*4882a593Smuzhiyun spin_unlock_irq(shost->host_lock);
3059*4882a593Smuzhiyun
3060*4882a593Smuzhiyun 	/* In the case of an SLI4 FC loopback test, we are ready */
3061*4882a593Smuzhiyun if ((phba->sli_rev == LPFC_SLI_REV4) &&
3062*4882a593Smuzhiyun (phba->link_flag & LS_LOOPBACK_MODE)) {
3063*4882a593Smuzhiyun phba->link_state = LPFC_HBA_READY;
3064*4882a593Smuzhiyun goto out_free_mem;
3065*4882a593Smuzhiyun }
3066*4882a593Smuzhiyun
3067*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3068*4882a593Smuzhiyun "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
3069*4882a593Smuzhiyun "alpacnt:%d LinkState:%x topology:%x\n",
3070*4882a593Smuzhiyun vport->port_state, vport->fc_flag, vport->fc_myDID,
3071*4882a593Smuzhiyun vport->phba->alpa_map[0],
3072*4882a593Smuzhiyun phba->link_state, phba->fc_topology);
3073*4882a593Smuzhiyun
3074*4882a593Smuzhiyun if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3075*4882a593Smuzhiyun /*
3076*4882a593Smuzhiyun * For private loop or for NPort pt2pt,
3077*4882a593Smuzhiyun * just start discovery and we are done.
3078*4882a593Smuzhiyun */
3079*4882a593Smuzhiyun if ((vport->fc_flag & FC_PT2PT) ||
3080*4882a593Smuzhiyun ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
3081*4882a593Smuzhiyun !(vport->fc_flag & FC_PUBLIC_LOOP))) {
3082*4882a593Smuzhiyun
3083*4882a593Smuzhiyun /* Use loop map to make discovery list */
3084*4882a593Smuzhiyun lpfc_disc_list_loopmap(vport);
3085*4882a593Smuzhiyun /* Start discovery */
3086*4882a593Smuzhiyun if (vport->fc_flag & FC_PT2PT)
3087*4882a593Smuzhiyun vport->port_state = LPFC_VPORT_READY;
3088*4882a593Smuzhiyun else
3089*4882a593Smuzhiyun lpfc_disc_start(vport);
3090*4882a593Smuzhiyun } else {
3091*4882a593Smuzhiyun lpfc_start_fdiscs(phba);
3092*4882a593Smuzhiyun lpfc_do_scr_ns_plogi(phba, vport);
3093*4882a593Smuzhiyun }
3094*4882a593Smuzhiyun }
3095*4882a593Smuzhiyun
3096*4882a593Smuzhiyun out_free_mem:
3097*4882a593Smuzhiyun mempool_free(mboxq, phba->mbox_mem_pool);
3098*4882a593Smuzhiyun if (dmabuf) {
3099*4882a593Smuzhiyun lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
3100*4882a593Smuzhiyun kfree(dmabuf);
3101*4882a593Smuzhiyun }
3102*4882a593Smuzhiyun return;
3103*4882a593Smuzhiyun }
3104*4882a593Smuzhiyun
3105*4882a593Smuzhiyun static void
3106*4882a593Smuzhiyun lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3107*4882a593Smuzhiyun {
3108*4882a593Smuzhiyun MAILBOX_t *mb = &pmb->u.mb;
3109*4882a593Smuzhiyun struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3110*4882a593Smuzhiyun struct lpfc_vport *vport = pmb->vport;
3111*4882a593Smuzhiyun struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3112*4882a593Smuzhiyun struct serv_parm *sp = &vport->fc_sparam;
3113*4882a593Smuzhiyun uint32_t ed_tov;
3114*4882a593Smuzhiyun
3115*4882a593Smuzhiyun /* Check for error */
3116*4882a593Smuzhiyun if (mb->mbxStatus) {
3117*4882a593Smuzhiyun /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
3118*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3119*4882a593Smuzhiyun "0319 READ_SPARAM mbxStatus error x%x "
3120*4882a593Smuzhiyun "hba state x%x>\n",
3121*4882a593Smuzhiyun mb->mbxStatus, vport->port_state);
3122*4882a593Smuzhiyun lpfc_linkdown(phba);
3123*4882a593Smuzhiyun goto out;
3124*4882a593Smuzhiyun }
3125*4882a593Smuzhiyun
3126*4882a593Smuzhiyun memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
3127*4882a593Smuzhiyun sizeof (struct serv_parm));
3128*4882a593Smuzhiyun
3129*4882a593Smuzhiyun ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
3130*4882a593Smuzhiyun if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
3131*4882a593Smuzhiyun ed_tov = (ed_tov + 999999) / 1000000;
3132*4882a593Smuzhiyun
3133*4882a593Smuzhiyun phba->fc_edtov = ed_tov;
3134*4882a593Smuzhiyun phba->fc_ratov = (2 * ed_tov) / 1000;
3135*4882a593Smuzhiyun if (phba->fc_ratov < FF_DEF_RATOV) {
3136*4882a593Smuzhiyun 		/* RA_TOV should be at least 10 sec for the initial FLOGI */
3137*4882a593Smuzhiyun phba->fc_ratov = FF_DEF_RATOV;
3138*4882a593Smuzhiyun }
3139*4882a593Smuzhiyun
3140*4882a593Smuzhiyun lpfc_update_vport_wwn(vport);
3141*4882a593Smuzhiyun fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3142*4882a593Smuzhiyun if (vport->port_type == LPFC_PHYSICAL_PORT) {
3143*4882a593Smuzhiyun memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
3144*4882a593Smuzhiyun memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
3145*4882a593Smuzhiyun }
3146*4882a593Smuzhiyun
3147*4882a593Smuzhiyun lpfc_mbuf_free(phba, mp->virt, mp->phys);
3148*4882a593Smuzhiyun kfree(mp);
3149*4882a593Smuzhiyun mempool_free(pmb, phba->mbox_mem_pool);
3150*4882a593Smuzhiyun
3151*4882a593Smuzhiyun /* Check if sending the FLOGI is being deferred to after we get
3152*4882a593Smuzhiyun * up to date CSPs from MBX_READ_SPARAM.
3153*4882a593Smuzhiyun */
3154*4882a593Smuzhiyun if (phba->hba_flag & HBA_DEFER_FLOGI) {
3155*4882a593Smuzhiyun lpfc_initial_flogi(vport);
3156*4882a593Smuzhiyun phba->hba_flag &= ~HBA_DEFER_FLOGI;
3157*4882a593Smuzhiyun }
3158*4882a593Smuzhiyun return;
3159*4882a593Smuzhiyun
3160*4882a593Smuzhiyun out:
3161*4882a593Smuzhiyun pmb->ctx_buf = NULL;
3162*4882a593Smuzhiyun lpfc_mbuf_free(phba, mp->virt, mp->phys);
3163*4882a593Smuzhiyun kfree(mp);
3164*4882a593Smuzhiyun lpfc_issue_clear_la(phba, vport);
3165*4882a593Smuzhiyun mempool_free(pmb, phba->mbox_mem_pool);
3166*4882a593Smuzhiyun return;
3167*4882a593Smuzhiyun }
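
/*
 * Worked timer arithmetic (editor's addition, not original driver text)
 * for the conversion above, using hypothetical service parameters: with
 * edtovResolution set and e_d_tov = 2000000 ns, ed_tov becomes
 * (2000000 + 999999) / 1000000 = 2 ms, so fc_ratov = (2 * 2) / 1000 = 0 s
 * and is raised to FF_DEF_RATOV; with edtovResolution clear and
 * e_d_tov = 2000 ms, fc_ratov = 4000 / 1000 = 4 s, still below the
 * 10 s floor noted in the comment above.
 */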
3168*4882a593Smuzhiyun
3169*4882a593Smuzhiyun static void
3170*4882a593Smuzhiyun lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3171*4882a593Smuzhiyun {
3172*4882a593Smuzhiyun struct lpfc_vport *vport = phba->pport;
3173*4882a593Smuzhiyun LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3174*4882a593Smuzhiyun struct Scsi_Host *shost;
3175*4882a593Smuzhiyun int i;
3176*4882a593Smuzhiyun struct lpfc_dmabuf *mp;
3177*4882a593Smuzhiyun int rc;
3178*4882a593Smuzhiyun struct fcf_record *fcf_record;
3179*4882a593Smuzhiyun uint32_t fc_flags = 0;
3180*4882a593Smuzhiyun unsigned long iflags;
3181*4882a593Smuzhiyun
3182*4882a593Smuzhiyun spin_lock_irqsave(&phba->hbalock, iflags);
3183*4882a593Smuzhiyun phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3184*4882a593Smuzhiyun
3185*4882a593Smuzhiyun if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3186*4882a593Smuzhiyun switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3187*4882a593Smuzhiyun case LPFC_LINK_SPEED_1GHZ:
3188*4882a593Smuzhiyun case LPFC_LINK_SPEED_2GHZ:
3189*4882a593Smuzhiyun case LPFC_LINK_SPEED_4GHZ:
3190*4882a593Smuzhiyun case LPFC_LINK_SPEED_8GHZ:
3191*4882a593Smuzhiyun case LPFC_LINK_SPEED_10GHZ:
3192*4882a593Smuzhiyun case LPFC_LINK_SPEED_16GHZ:
3193*4882a593Smuzhiyun case LPFC_LINK_SPEED_32GHZ:
3194*4882a593Smuzhiyun case LPFC_LINK_SPEED_64GHZ:
3195*4882a593Smuzhiyun case LPFC_LINK_SPEED_128GHZ:
3196*4882a593Smuzhiyun break;
3197*4882a593Smuzhiyun default:
3198*4882a593Smuzhiyun phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3199*4882a593Smuzhiyun break;
3200*4882a593Smuzhiyun }
3201*4882a593Smuzhiyun }
3202*4882a593Smuzhiyun
3203*4882a593Smuzhiyun if (phba->fc_topology &&
3204*4882a593Smuzhiyun phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3205*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3206*4882a593Smuzhiyun 				"3314 Topology changed was 0x%x is 0x%x\n",
3207*4882a593Smuzhiyun phba->fc_topology,
3208*4882a593Smuzhiyun bf_get(lpfc_mbx_read_top_topology, la));
3209*4882a593Smuzhiyun phba->fc_topology_changed = 1;
3210*4882a593Smuzhiyun }
3211*4882a593Smuzhiyun
3212*4882a593Smuzhiyun phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3213*4882a593Smuzhiyun phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3214*4882a593Smuzhiyun
3215*4882a593Smuzhiyun shost = lpfc_shost_from_vport(vport);
3216*4882a593Smuzhiyun if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3217*4882a593Smuzhiyun phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3218*4882a593Smuzhiyun
3219*4882a593Smuzhiyun 		/* If npiv is enabled and this adapter supports npiv, log
3220*4882a593Smuzhiyun 		 * a message that npiv is not supported in this topology.
3221*4882a593Smuzhiyun */
3222*4882a593Smuzhiyun if (phba->cfg_enable_npiv && phba->max_vpi)
3223*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3224*4882a593Smuzhiyun "1309 Link Up Event npiv not supported in loop "
3225*4882a593Smuzhiyun "topology\n");
3226*4882a593Smuzhiyun /* Get Loop Map information */
3227*4882a593Smuzhiyun if (bf_get(lpfc_mbx_read_top_il, la))
3228*4882a593Smuzhiyun fc_flags |= FC_LBIT;
3229*4882a593Smuzhiyun
3230*4882a593Smuzhiyun vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3231*4882a593Smuzhiyun i = la->lilpBde64.tus.f.bdeSize;
3232*4882a593Smuzhiyun
3233*4882a593Smuzhiyun if (i == 0) {
3234*4882a593Smuzhiyun phba->alpa_map[0] = 0;
3235*4882a593Smuzhiyun } else {
3236*4882a593Smuzhiyun if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3237*4882a593Smuzhiyun int numalpa, j, k;
3238*4882a593Smuzhiyun union {
3239*4882a593Smuzhiyun uint8_t pamap[16];
3240*4882a593Smuzhiyun struct {
3241*4882a593Smuzhiyun uint32_t wd1;
3242*4882a593Smuzhiyun uint32_t wd2;
3243*4882a593Smuzhiyun uint32_t wd3;
3244*4882a593Smuzhiyun uint32_t wd4;
3245*4882a593Smuzhiyun } pa;
3246*4882a593Smuzhiyun } un;
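				/* The loop below walks the ALPA map in
				 * 16-entry chunks and emits one "1304" log
				 * line per chunk. For example (hypothetical
				 * values), numalpa = 20 would produce two
				 * log lines: one for entries 1-16 and one
				 * for entries 17-20.
				 */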
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		fc_flags |= FC_LBIT;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	if (fc_flags) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag |= fc_flags;
		spin_unlock_irqrestore(shost->host_lock, iflags);
	}

	lpfc_linkup(phba);

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now.
		 * This is a phase 1 implementation that supports FCF index 0
		 * and driver defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					     GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2554 Could not allocate memory for "
						"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2013 Could not manually add FCF "
						"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irqsave(&phba->hbalock, iflags);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	/* Prepare for LINK up registrations */
	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
		  init_utsname()->nodename);
	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}

static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}
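/*
 * Note on the readl() after writel() above: this is the usual PCI
 * posted-write flush idiom. Reading back any register on the device
 * forces the preceding MMIO write to reach the adapter before the lock
 * is dropped. A minimal sketch of the same pattern (hypothetical
 * register, not part of this driver):
 *
 *	writel(val, regaddr);
 *	(void)readl(regaddr);	// flush the posted write
 */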

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}

/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI4 only.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_mbx_read_top *la;
	struct lpfc_sli_ring *pring;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	uint8_t attn_type;
	unsigned long iflags;

	/* Unblock ELS traffic */
	pring = lpfc_phba_elsring(phba);
	if (pring)
		pring->flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);

	memcpy(&phba->alpa_map[0], mp->virt, 128);
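	/* alpa_map[0] holds the number of loop map entries; the entries
	 * themselves follow in alpa_map[1..127] (see the chunked "1304"
	 * logging in lpfc_mbx_process_link_up() above).
	 */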

	spin_lock_irqsave(shost->host_lock, iflags);
	if (bf_get(lpfc_mbx_read_top_pb, la))
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	if (phba->fc_eventTag <= la->eventTag) {
		phba->fc_stat.LinkMultiEvent++;
		if (attn_type == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	if (phba->sli_rev < LPFC_SLI_REV4) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (bf_get(lpfc_mbx_read_top_mm, la))
			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
		else
			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	phba->link_events++;
	if ((attn_type == LPFC_ATT_LINK_UP) &&
	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la),
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
		   attn_type == LPFC_ATT_UNEXP_WWPN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1308 Link Down Event in loop back mode "
					"x%x received "
					"Data: x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag);
		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1313 Link Down Unexpected FA WWPN Event x%x "
					"received Data: x%x x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag,
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la));
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1305 Link Down Event x%x received "
					"Data: x%x x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag,
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}
	if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
	    attn_type == LPFC_ATT_LINK_UP) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1312 Link Down Event x%x received "
					"Data: x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The command that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la)) {
		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));
	}

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
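/*
 * Hedged sketch of how this completion is wired up, mirroring the
 * sparam/cfglink mailbox patterns in lpfc_mbx_process_link_up() above
 * (the lpfc_read_topology() setup helper name is an assumption, drawn
 * from the driver's mailbox-setup naming convention):
 *
 *	pmb->vport = vport;
 *	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		... free the mbuf and mailbox as in the sparam path ...
 */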

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We received an RSCN after issuing this
		 * mbox reg login. We may have cycled
		 * back through the state and be
		 * back at reg login state, so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);

		/*
		 * We cannot leave the RPI registered because,
		 * if we go through discovery again for this ndlp,
		 * a subsequent REG_RPI will fail.
		 */
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* Decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}

static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
				 vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
					      LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}

int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
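/*
 * Usage sketch (hypothetical caller, not part of this file): a nonzero
 * return means the UNREG_VPI mailbox could not be allocated or issued,
 * so the completion above will never run:
 *
 *	if (lpfc_mbx_unreg_vpi(vport))
 *		... handle the failure; do not wait for unreg_vpi_cmpl ...
 */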

static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
			if (phba->nvmet_support)
				lpfc_nvmet_update_targetport(phba);
			else
				lpfc_nvme_update_localport(vport);
		}
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* Go through the NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *)vport_info;
	do {
		/* Free the dma buffer from the previous round */
		if (pmb->ctx_buf) {
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
						       LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0544 lpfc_create_static_vport failed to"
					" issue dump mailbox command ret 0x%x "
					"status 0x%x\n",
					mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
			    sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
					      vport_buff + offset,
					      byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		 offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
	    ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
	     != VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0545 lpfc_create_static_vport bad"
				" information header 0x%x 0x%x\n",
				le32_to_cpu(vport_info->signature),
				le32_to_cpu(vport_info->rev) &
				VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0546 lpfc_create_static_vport failed to"
					" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT) {
		if (pmb->ctx_buf) {
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	pmb->ctx_ndlp = NULL;
	pmb->ctx_buf = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count on the ndlp once
			 * all references to it are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count on the ndlp once all
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* When the physical port receives a LOGO, do not start
		 * vport discovery.
		 */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else {
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end, after
	 * all the current references to the ndlp are done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 */
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
			/* Cannot issue NameServer FCP Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0604 %s FC TYPE %x %s\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_FCP,
					 "Finishing discovery.");
			return 0;
		}
		vport->gidft_inp++;
	}

	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
			/* Cannot issue NameServer NVME Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0605 %s FC_TYPE %x %s %d\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_NVME,
					 "Finishing discovery: gidftinp ",
					 vport->gidft_inp);
			if (vport->gidft_inp == 0)
				return 0;
		} else
			vport->gidft_inp++;
	}
	return vport->gidft_inp;
}
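/*
 * Usage sketch, mirroring the call in lpfc_mbx_cmpl_ns_reg_login()
 * below: a return of 0 means no GID_FT is outstanding, so the caller
 * should finish discovery rather than wait for CT completions:
 *
 *	if (lpfc_issue_gidft(vport) == 0)
 *		... no NameServer queries in flight, wrap up discovery ...
 */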

/**
 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
 * @vport: The virtual port for which this call is being executed.
 *
 * This routine will issue a GID_PT to get a list of all N_Ports.
 *
 * Return value :
 *   0 - Failure to issue a GID_PT
 *   1 - GID_PT issued
 **/
int
lpfc_issue_gidpt(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
		/* Cannot issue NameServer Port Type Query, so finish up
		 * discovery
		 */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0606 %s Port TYPE %x %s\n",
				 "Failed to issue GID_PT to ",
				 GID_PT_N_PORT,
				 "Finishing discovery.");
		return 0;
	}
	vport->gidft_inp++;
	return 1;
}
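/*
 * Hedged usage note: unlike lpfc_issue_gidft(), only a single GID_PT is
 * issued, so a caller (hypothetical) can treat 0 directly as failure:
 *
 *	if (!lpfc_issue_gidpt(vport))
 *		... GID_PT not issued; fall back or finish discovery ...
 */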

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct lpfc_vport *vport = pmb->vport;

	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;
	vport->gidft_inp = 0;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);

out:
		/* Decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
			 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
				    FC_TYPE_NVME);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, 0);

		lpfc_issue_els_rdf(vport, 0);
	}

	vport->fc_ns_retry = 0;
	if (lpfc_issue_gidft(vport) == 0)
		goto out;

	/*
	 * At this point in time we may need to wait for multiple
	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
	 *
	 * Decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;

	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister an
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	rport = ndlp->rport;
	if (rport) {
		rdata = rport->dd_data;
		/* break the link before dropping the ref */
		ndlp->rport = NULL;
		if (rdata) {
			if (rdata->pnode == ndlp)
				lpfc_nlp_put(ndlp);
			rdata->pnode = NULL;
		}
		/* drop reference for earlier registration */
		put_device(&rport->dev);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport add: did:x%x flg:x%x type x%x",
			      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 rport register x%06x, rport x%px role x%x\n",
			 ndlp->nlp_DID, rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}

static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_vport *vport = ndlp->vport;

	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport delete: did:x%x flg:x%x type x%x",
			      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3184 rport unregister x%06x, rport x%px\n",
			 ndlp->nlp_DID, rport);

	fc_remote_port_delete(rport);

	return;
}

static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		if (vport->fc_npr_cnt == 0 && count == -1)
			vport->fc_npr_cnt = 0;
		else
			vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irqrestore(shost->host_lock, iflags);
}
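/*
 * Descriptive note (hedged): callers presumably pass count = +1 when a
 * node enters a state and count = -1 when it leaves one, so each
 * per-state counter tracks the current node population for the vport;
 * the NPR case above guards against underflowing fc_npr_cnt.
 */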
4308*4882a593Smuzhiyun
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* FCP and NVME Transport interface */
	if (old_state == NLP_STE_MAPPED_NODE ||
	    old_state == NLP_STE_UNMAPPED_NODE) {
		if (ndlp->rport) {
			vport->phba->nport_event_cnt++;
			lpfc_unregister_remote_port(ndlp);
		}

		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
			vport->phba->nport_event_cnt++;
			if (vport->phba->nvmet_support == 0) {
				/* Start devloss if target. */
				if (ndlp->nlp_type & NLP_NVME_TARGET)
					lpfc_nvme_unregister_port(vport, ndlp);
			} else {
				/* NVMET has no upcall. */
				lpfc_nlp_put(ndlp);
			}
		}
	}

	/* FCP and NVME Transport interfaces */

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		if (ndlp->nlp_fc4_type ||
		    ndlp->nlp_DID == Fabric_DID ||
		    ndlp->nlp_DID == NameServer_DID ||
		    ndlp->nlp_DID == FDMI_DID) {
			vport->phba->nport_event_cnt++;
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices.
			 */
			lpfc_register_remote_port(vport, ndlp);
		}
		/* Notify the NVME transport of this new rport. */
		if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
		    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
			if (vport->phba->nvmet_support == 0) {
				/* Register this rport with the transport.
				 * Only NVME Target Rports are registered with
				 * the transport.
				 */
				if (ndlp->nlp_type & NLP_NVME_TARGET) {
					vport->phba->nport_event_cnt++;
					lpfc_nvme_register_port(vport, ndlp);
				}
			} else {
				/* Just take an NDLP ref count since the
				 * target does not register rports.
				 */
				lpfc_nlp_get(ndlp);
			}
		}
	}

	if (new_state == NLP_STE_MAPPED_NODE &&
	    vport->stat_data_enabled) {
		/*
		 * A new target is discovered; if there is no buffer for
		 * statistical data collection, allocate one.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0286 lpfc_nlp_state_cleanup failed to "
					 "allocate statistical data buffer DID "
					 "0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * If the node just added to the Mapped list was an FCP target,
	 * but the remote port registration failed or assigned a target
	 * id outside the presentable range - move the node to the
	 * Unmapped List.
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}

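/**
 * lpfc_nlp_state_name - Render a node list state as a printable string
 * @buffer: Caller-supplied buffer to fill.
 * @size: Size of @buffer in bytes.
 * @state: NLP_STE_* state value to decode.
 *
 * Copies the symbolic name of @state into @buffer, or "unknown (<state>)"
 * for values without a name. Returns @buffer so it can be used directly
 * inside printf-style argument lists.
 **/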
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_LOGO_ISSUE] = "LOGO",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}

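/**
 * lpfc_nlp_set_state - Move a node to a new discovery state
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @state: New NLP_STE_* state for the node.
 *
 * Central state-transition routine for the node list. It logs and traces
 * the transition, cancels the retry delay timer when leaving NPR, adjusts
 * the per-state counters (adding the node to the vport list on its first
 * transition), and lets lpfc_nlp_state_cleanup() handle the transport
 * registration side effects.
 **/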
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}

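/**
 * lpfc_enqueue_node - Add a node to the vport's node list if not present
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 *
 * Appends @ndlp to @vport's fc_nodes list under the host lock. A node
 * already linked onto a list is left untouched.
 **/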
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}

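/**
 * lpfc_dequeue_node - Remove a node from the vport's node list
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 *
 * Cancels any pending retry delay timer, decrements the counter for the
 * node's current state, unlinks the node from the vport list, and runs
 * the state cleanup as if the node had transitioned to UNUSED.
 **/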
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
			       NLP_STE_UNUSED_NODE);
}

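/**
 * lpfc_disable_node - Take a node out of service without unlinking it
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 *
 * Like lpfc_dequeue_node() but leaves @ndlp on the vport list: the retry
 * delay timer is cancelled, the state counter is decremented, and the
 * UNUSED-state cleanup is applied.
 **/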
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
			       NLP_STE_UNUSED_NODE);
}

/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is called whenever a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp because the
 * life-span of @ndlp may extend beyond the existence of @vport: the final
 * release of the ndlp is determined by its reference count, and operations
 * on @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
	INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);

	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	ndlp->nlp_fc4_type = NLP_FC4_NONE;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}

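/**
 * lpfc_enable_node - Reactivate an inactive node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object to bring back into use.
 * @state: NLP_STE_* state to place the node in once enabled.
 *
 * Re-arms a node that was previously put out of service: on SLI4 an RPI
 * is (re)allocated, the structure is zeroed apart from its list linkage
 * and a few preserved fields (DID, NLP_UNREG_INP flag, deferred DID, RRQ
 * XRI bitmap), reinitialized, and moved to @state. Returns the enabled
 * node, or NULL if the node is being freed, is already active, or RPI
 * allocation failed.
 **/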
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did, flag;
	unsigned long flags;
	unsigned long *active_rrqs_xri_bitmap = NULL;
	int rpi = LPFC_RPI_ALLOC_ERROR;
	uint32_t defer_did = 0;

	if (!ndlp)
		return NULL;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
			rpi = lpfc_sli4_alloc_rpi(vport->phba);
		else
			rpi = ndlp->nlp_rpi;

		if (rpi == LPFC_RPI_ALLOC_ERROR) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
					 "0359 %s: ndlp:x%px "
					 "usgmap:x%x refcnt:%d FAILED RPI "
					 "ALLOC\n",
					 __func__,
					 (void *)ndlp, ndlp->nlp_usg_map,
					 kref_read(&ndlp->kref));
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0277 %s: ndlp:x%px "
				 "usgmap:x%x refcnt:%d\n",
				 __func__, (void *)ndlp, ndlp->nlp_usg_map,
				 kref_read(&ndlp->kref));
		goto free_rpi;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0278 %s: ndlp:x%px "
				 "usgmap:x%x refcnt:%d\n",
				 __func__, (void *)ndlp, ndlp->nlp_usg_map,
				 kref_read(&ndlp->kref));
		goto free_rpi;
	}

	/* First preserve the original DID, xri_bitmap and some flags */
	did = ndlp->nlp_DID;
	flag = (ndlp->nlp_flag & NLP_UNREG_INP);
	if (flag & NLP_UNREG_INP)
		defer_did = ndlp->nlp_defer_did;
	if (phba->sli_rev == LPFC_SLI_REV4)
		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;

	/* Zero the ndlp except for the linked list pointer */
	memset((((char *)ndlp) + sizeof(struct list_head)), 0,
	       sizeof(struct lpfc_nodelist) - sizeof(struct list_head));

	/* Next reinitialize and restore saved objects */
	lpfc_initialize_node(vport, ndlp, did);
	ndlp->nlp_flag |= flag;
	if (flag & NLP_UNREG_INP)
		ndlp->nlp_defer_did = defer_did;
	if (phba->sli_rev == LPFC_SLI_REV4)
		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = rpi;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
	}

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);
	else
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0013 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x x%px STATE=UNUSED\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
			      "node enable: did:x%x",
			      ndlp->nlp_DID, 0, 0);
	return ndlp;

free_rpi:
	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_free_rpi(vport->phba, rpi);
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
	}
	return NULL;
}

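/**
 * lpfc_drop_node - Release the driver's discovery reference to a node
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 *
 * Moves @ndlp to the UNUSED state (unless it is already there), cleans up
 * SLI4 RRQ and RPI resources, and drops the reference that keeps the node
 * on the vport; the final kref release frees the node once all other
 * outstanding users have completed.
 **/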
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and the UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we
	 * proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_cleanup_vports_rrqs(vport, ndlp);
		lpfc_unreg_rpi(vport, ndlp);
	}

	lpfc_nlp_put(ndlp);
}

/*
 * Start / restart the rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, the timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be greater than the
		 * ELS/CT timeout; the FC spec states we need 3 * ratov
		 * for CT requests.
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer: tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);
}

/*
 * Cancel the rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off the discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
	return 0;
}

/*
 * Check the specified ring for an outstanding IOCB on the SLI queue.
 * Return true if the iocb matches the specified nport.
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
			fallthrough;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
			fallthrough;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *)ndlp)
				return 1;
		}
	} else if (pring->ringno == LPFC_FCP_RING) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	}
	return 0;
}

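/**
 * __lpfc_dequeue_nport_iocbs - Collect a ring's pending iocbs for one nport
 * @phba: Pointer to HBA context object.
 * @ndlp: Pointer to FC node object whose iocbs should be collected.
 * @pring: SLI ring whose transmit queue is scanned.
 * @dequeue_list: List to which matching iocbs are moved.
 *
 * Walks @pring's txq and moves every iocb that lpfc_check_sli_ndlp()
 * attributes to @ndlp onto @dequeue_list. The caller must hold the
 * appropriate ring lock.
 **/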
static void
__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
		struct list_head *dequeue_list)
{
	struct lpfc_iocbq *iocb, *next_iocb;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			/* match, dequeue */
			list_move_tail(&iocb->list, dequeue_list);
	}
}

static void
lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t i;

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++)
		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
					   dequeue_list);
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock(&pring->ring_lock);
		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
		spin_unlock(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		if (phba->sli_rev != LPFC_SLI_REV4)
			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
		else
			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}

/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the UNREG_RPI.
 **/
static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Check to see if there are any deferred events to process */
	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "1434 UNREG cmpl deferred logo x%x "
				 "on NPort x%x Data: x%x x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_defer_did, ndlp);

		ndlp->nlp_flag &= ~NLP_UNREG_INP;
		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	} else {
		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		}
		ndlp->nlp_flag &= ~NLP_UNREG_INP;
	}
}

/*
 * Sets the mailbox completion handler to be used for the
 * unreg_rpi command. The handler varies based on the state of
 * the port and what will be happening to the rpi next.
 */
static void
lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
		mbox->ctx_ndlp = ndlp;
		mbox->mbox_cmpl = lpfc_nlp_logo_unreg;

	} else if (phba->sli_rev == LPFC_SLI_REV4 &&
		   (!(vport->load_flag & FC_UNLOADING)) &&
		   (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
		    LPFC_SLI_INTF_IF_TYPE_2) &&
		   (kref_read(&ndlp->kref) > 0)) {
		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
		mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
	} else {
		if (vport->load_flag & FC_UNLOADING) {
			if (phba->sli_rev == LPFC_SLI_REV4) {
				spin_lock_irqsave(&vport->phba->ndlp_lock,
						  iflags);
				ndlp->nlp_flag |= NLP_RELEASE_RPI;
				spin_unlock_irqrestore(&vport->phba->ndlp_lock,
						       iflags);
			}
			lpfc_nlp_get(ndlp);
		}
		mbox->ctx_ndlp = ndlp;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}
}

/*
 * Free the rpi associated with the LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive an
 * unsolicited ELS cmd and send back a rsp: once the rsp completes,
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc, acc_plogi = 1;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "3366 RPI x%x needs to be "
					 "unregistered nlp_flag x%x "
					 "did x%x\n",
					 ndlp->nlp_rpi, ndlp->nlp_flag,
					 ndlp->nlp_DID);

		/* If there is already an UNREG in progress for this ndlp,
		 * no need to queue up another one.
		 */
		if (ndlp->nlp_flag & NLP_UNREG_INP) {
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "1436 unreg_rpi SKIP UNREG x%x on "
					 "NPort x%x deferred x%x flg x%x "
					 "Data: x%px\n",
					 ndlp->nlp_rpi, ndlp->nlp_DID,
					 ndlp->nlp_defer_did,
					 ndlp->nlp_flag, ndlp);
			goto out;
		}

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
			if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
				/*
				 * accept PLOGIs after unreg_rpi_cmpl
				 */
				acc_plogi = 0;
			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
			    Fabric_DID_MASK) &&
			    (!(vport->fc_flag & FC_OFFLINE_MODE)))
				ndlp->nlp_flag |= NLP_UNREG_INP;

			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "1433 unreg_rpi UNREG x%x on "
					 "NPort x%x deferred flg x%x "
					 "Data:x%px\n",
					 ndlp->nlp_rpi, ndlp->nlp_DID,
					 ndlp->nlp_flag, ndlp);

			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				acc_plogi = 1;
			}
		} else {
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "1444 Failed to allocate mempool "
					 "unreg_rpi UNREG x%x, "
					 "DID x%x, flag x%x, "
					 "ndlp x%px\n",
					 ndlp->nlp_rpi, ndlp->nlp_DID,
					 ndlp->nlp_flag, ndlp);

			/* Because mempool_alloc failed, we
			 * will issue a LOGO here and keep the rpi alive if
			 * not unloading.
			 */
			if (!(vport->load_flag & FC_UNLOADING)) {
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				lpfc_issue_els_logo(vport, ndlp, 0);
				ndlp->nlp_prev_state = ndlp->nlp_state;
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_NPR_NODE);
			}

			return 1;
		}
		lpfc_no_rpi(phba, ndlp);
out:
		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		if (acc_plogi)
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		return 1;
	}
	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
	return 0;
}

/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2884 Vport array allocation failed\n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

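/**
 * lpfc_unreg_all_rpis - Unregister every RPI owned by a vport
 * @vport: Pointer to Virtual Port object.
 *
 * On SLI4 this defers to lpfc_sli4_unreg_all_rpis(); on earlier SLI
 * revisions it issues a single UNREG_LOGIN mailbox command with the
 * LPFC_UNREG_ALL_RPIS_VPORT wildcard and waits for it to complete.
 **/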
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->ctx_ndlp = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "1836 Could not issue "
					 "unreg_login(all_rpis) status %d\n",
					 rc);
	}
}

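/**
 * lpfc_unreg_default_rpis - Unregister the default RPIs for a vport
 * @vport: Pointer to Virtual Port object.
 *
 * Issues an UNREG_DID mailbox command with the LPFC_UNREG_ALL_DFLT_RPIS
 * wildcard to release the firmware-allocated default RPIs. Unreg DID is
 * an SLI3 operation, so this is a no-op on later SLI revisions.
 **/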
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	/* Unreg DID is an SLI3 operation. */
	if (phba->sli_rev > LPFC_SLI_REV3)
		return;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->ctx_ndlp = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}

/*
 * Free resources associated with the LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0280 %s: ndlp:x%px "
				 "usgmap:x%x refcnt:%d\n",
				 __func__, (void *)ndlp, ndlp->nlp_usg_map,
				 kref_read(&ndlp->kref));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0281 %s: ndlp:x%px "
				 "usgmap:x%x refcnt:%d\n",
				 __func__, (void *)ndlp, ndlp->nlp_usg_map,
				 kref_read(&ndlp->kref));
		lpfc_disable_node(vport, ndlp);
	}

	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
			mb->ctx_ndlp = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
		    (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
		    (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
			continue;

		mb->ctx_ndlp = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	list_del_init(&ndlp->recovery_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	if (phba->sli_rev == LPFC_SLI_REV4)
		ndlp->nlp_flag |= NLP_RELEASE_RPI;
	if (!lpfc_unreg_rpi(vport, ndlp)) {
		/* Clean up unregistered and non freed rpis */
		if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
		    !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
			lpfc_sli4_free_rpi(vport->phba,
					   ndlp->nlp_rpi);
			spin_lock_irqsave(&vport->phba->ndlp_lock,
					  iflags);
			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
			spin_unlock_irqrestore(&vport->phba->ndlp_lock,
					       iflags);
		}
	}
	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
	    phba->sli_rev != LPFC_SLI_REV4) {
		/* For this case we need to clean up the default rpi
		 * allocated by the firmware.
		 */
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NODE | LOG_DISCOVERY,
				 "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
				 "ref %d map:x%x ndlp x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
					  (uint8_t *)&vport->fc_sparam,
					  mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->ctx_ndlp = ndlp;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED)
					mempool_free(mbox,
						     phba->mbox_mem_pool);
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * ndlp->rport must be set to NULL before it reaches here,
	 * i.e. break the rport/node link before doing lpfc_nlp_put for
	 * a registered rport, and then drop the rport's reference.
	 */
	if (ndlp->rport) {
		/*
		 * The extra lpfc_nlp_put dropped the reference of the ndlp
		 * for the registered rport, so the rport still needs to be
		 * cleaned up here.
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0940 removed node x%px DID x%x "
				 "rpi %d rport not null x%px\n",
				 ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
				 ndlp->rport);
		rport = ndlp->rport;
		rdata = rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		put_device(&rport->dev);
	}
}

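/**
 * lpfc_matchdid - Test whether a node matches a given FC_ID
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object being tested.
 * @did: FC_ID to match against.
 *
 * Returns 1 on a direct DID match, or when one side is an ALPA-only
 * address (domain and area both zero) that corresponds to a private
 * loop device behind an FL_Port; returns 0 otherwise, including for
 * the broadcast DID.
 **/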
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for a direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equal to 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			/* This code is supposed to match the ID
			 * for a private loop device that is
			 * connected to an fl_port. But we need to
			 * check that the port did not just go
			 * from pt2pt to fabric or we could end
			 * up matching ndlp->nlp_DID 000001 to
			 * fabric DID 0x20101
			 */
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id &&
				    vport->phba->fc_topology ==
				    LPFC_TOPOLOGY_LOOP)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}

/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t)ndlp->nlp_state << 24) |
				 ((uint32_t)ndlp->nlp_xri << 16) |
				 ((uint32_t)ndlp->nlp_type << 8) |
				 ((uint32_t)ndlp->nlp_usg_map & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%px x%x x%x x%x x%x x%px\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
					 ndlp->active_rrqs_xri_bitmap);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}

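/**
 * lpfc_findnode_did - Locked lookup of a node by FC_ID
 * @vport: Pointer to Virtual Port object.
 * @did: FC_ID to search for.
 *
 * Wrapper around __lpfc_findnode_did() that takes the host lock for the
 * duration of the list walk. Returns the matching node or NULL.
 **/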
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}

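/**
 * lpfc_findnode_mapped - Find the first mapped or unmapped node on a vport
 * @vport: Pointer to Virtual Port object.
 *
 * Walks the vport's node list under the host lock and returns the first
 * node in the MAPPED or UNMAPPED state, or NULL if none exists.
 **/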
struct lpfc_nodelist *
lpfc_findnode_mapped(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	uint32_t data1;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
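			/* Pack state, xri, type and rpi into one word
			 * for the FIND trace message below.
			 */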
			data1 = (((uint32_t)ndlp->nlp_state << 24) |
				 ((uint32_t)ndlp->nlp_xri << 16) |
				 ((uint32_t)ndlp->nlp_type << 8) |
				 ((uint32_t)ndlp->nlp_rpi & 0xff));
			spin_unlock_irqrestore(shost->host_lock, iflags);
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "2025 FIND node DID "
					 "Data: x%px x%x x%x x%x x%px\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1,
					 ndlp->active_rrqs_xri_bitmap);
			return ndlp;
		}
	}
	spin_unlock_irqrestore(shost->host_lock, iflags);

	/* FIND mapped did NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "2026 FIND mapped did NOT FOUND.\n");
	return NULL;
}

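/* Set up a node for discovery. Allocates a new ndlp for the DID (or
 * re-enables an inactive one) when needed, moves the node to NPR state
 * and marks it NLP_NPR_2B_DISC. Returns the node, or NULL when the DID
 * does not need to be discovered.
 */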
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if (vport->phba->nvmet_support)
			return NULL;
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = lpfc_nlp_init(vport, did);
		if (!ndlp)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6453 Setup New Node 2B_DISC x%x "
				 "Data:x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, vport->fc_flag);

		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		if (vport->phba->nvmet_support)
			return NULL;
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
					 "0014 Could not enable ndlp\n");
			return NULL;
		}
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6454 Setup Enabled Node 2B_DISC x%x "
				 "Data:x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, vport->fc_flag);

		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	/* The NVME Target does not want to actively manage an rport.
	 * The goal is to allow the target to reset its state and clear
	 * pending IO in preparation for the initiator to recover.
	 */
	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "6455 Setup RSCN Node 2B_DISC x%x "
					 "Data:x%x x%x x%x\n",
					 ndlp->nlp_DID, ndlp->nlp_flag,
					 ndlp->nlp_state, vport->fc_flag);

			/* NVME Target mode waits until rport is known to be
			 * impacted by the RSCN before it transitions. No
			 * active management - just go to NPR provided the
			 * node had a valid login.
			 */
			if (vport->phba->nvmet_support)
				return ndlp;

			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
			    !(ndlp->nlp_type &
			      (NLP_FCP_TARGET | NLP_NVME_TARGET)))
				return NULL;

			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "6456 Skip Setup RSCN Node x%x "
					 "Data:x%x x%x x%x\n",
					 ndlp->nlp_DID, ndlp->nlp_flag,
					 ndlp->nlp_state, vport->fc_flag);
			ndlp = NULL;
		}
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6457 Setup Active Node 2B_DISC x%x "
				 "Data:x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, vport->fc_flag);

		/* If the initiator received a PLOGI from this NPort or if the
		 * initiator is already in the process of discovery on it,
		 * there's no need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    (!vport->phba->nvmet_support &&
		     ndlp->nlp_flag & NLP_RCV_PLOGI))
			return NULL;

		if (vport->phba->nvmet_support)
			return ndlp;

		/* Moving to NPR state clears unsolicited flags and
		 * allows for rediscovery
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all ALPAs */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}

/* SLI3 only */
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
	struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
	int rc;

	/*
	 * If it's not a physical port, or if we have already sent
	 * clear_la, then don't send it again. CLEAR_LA is also never
	 * issued on SLI4.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}

/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
				 "3315 Link is not up %x\n",
				 phba->link_state);
		return;
	}

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery port state x%x "
			 "flg x%x Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt, vport->fc_npr_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_clear_la(phba, vport);
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}

/*
 * Ignore completions for all IOCBs on the tx and txcmpl queues of the
 * ELS ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return;

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp)
			continue;
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp)
			continue;
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(struct timer_list *t)
{
	struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

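/* Worker-thread handler for the WORKER_DISC_TMO event posted by
 * lpfc_disc_timeout. Recovers discovery based on the vport port_state
 * and the hba link_state.
 */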
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "disc timeout: state:x%x rtry:x%x flg:x%x",
			      vport->port_state, vport->fc_ns_retry,
			      vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
		/*
		 * port_state is identically LPFC_LOCAL_CFG_LINK while
		 * waiting for FAN timeout
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
		/* port_state is identically LPFC_FLOGI while waiting for
		 * FLOGI cmpl
		 */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
		/* hba_state is identically LPFC_FABRIC_CFG_LINK while
		 * waiting for NameServer login
		 */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			vport->gidft_inp = 0;
			rc = lpfc_issue_gidft(vport);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		fallthrough;
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		if (phba->sli_rev != LPFC_SLI_REV4) {
			psli->sli3_ring[LPFC_EXTRA_RING].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->sli3_ring[LPFC_FCP_RING].flag &=
				~LPFC_STOP_IOCB_EVENT;
		}
		vport->port_state = LPFC_VPORT_READY;
	}
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct lpfc_vport *vport = pmb->vport;

	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
			 "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);
	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port).
	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
	 * DPRT -> RPRT (vports)
	 */
	if (vport->port_type == LPFC_PHYSICAL_PORT)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
	else
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

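/* Node-list filter for __lpfc_find_node: match an active node by RPI */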
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

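/* Node-list filter for __lpfc_find_node: match a node by WWPN */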
static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

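/* Walk the vport node list and return the first entry accepted by the
 * filter callback, or NULL if no node matches.
 */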
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "3185 FIND node filter %ps DID "
					 "ndlp x%px did x%x flg x%x st x%x "
					 "xri x%x type x%x rpi x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, ndlp->nlp_state,
					 ndlp->nlp_xri, ndlp->nlp_type,
					 ndlp->nlp_rpi);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %ps NOT FOUND.\n", filter);
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL. Unlike __lpfc_findnode_rpi, it takes the host lock around the
 * search.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ndlp;
}

/**
 * lpfc_find_vport_by_vpid - Find a vport on an HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on an HBA (referred to by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translation is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi. The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->port_list_lock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->port_list_lock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
	return NULL;
}

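/* Allocate and initialize a nodelist entry for the given DID. On SLI4
 * an RPI must be allocated up front; node creation fails if no RPI is
 * available.
 */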
struct lpfc_nodelist *
lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	int rpi = LPFC_RPI_ALLOC_ERROR;

	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		rpi = lpfc_sli4_alloc_rpi(vport->phba);
		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;
	}

	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
	if (!ndlp) {
		if (vport->phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(vport->phba, rpi);
		return NULL;
	}

	memset(ndlp, 0, sizeof(struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = rpi;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
				 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
				 "flg:x%x refcnt:%d map:x%x\n",
				 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag, kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map);

		ndlp->active_rrqs_xri_bitmap =
			mempool_alloc(vport->phba->active_rrq_pool,
				      GFP_KERNEL);
		if (ndlp->active_rrqs_xri_bitmap)
			memset(ndlp->active_rrqs_xri_bitmap, 0,
			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
			      "node init: did:x%x",
			      ndlp->nlp_DID, 0, 0);

	return ndlp;
}

/* This routine releases all resources associated with a specific NPort's
 * ndlp and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			      "node release: did:x%x flg:x%x type:x%x",
			      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "0279 %s: ndlp:x%px did %x "
			 "usgmap:x%x refcnt:%d rpi:%x\n",
			 __func__,
			 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
			 kref_read(&ndlp->kref), ndlp->nlp_rpi);

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		if (phba->sli_rev == LPFC_SLI_REV4)
			mempool_free(ndlp->active_rrqs_xri_bitmap,
				     ndlp->phba->active_rrq_pool);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}

/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
				      "node get: did:x%x flg:x%x refcnt:x%x",
				      ndlp->nlp_DID, ndlp->nlp_flag,
				      kref_read(&ndlp->kref));
		/* Check the ndlp usage flags to avoid incrementing the
		 * reference count of an ndlp that is in the process of
		 * being released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
					 "0276 %s: ndlp:x%px "
					 "usgmap:x%x refcnt:%d\n",
					 __func__, (void *)ndlp,
					 ndlp->nlp_usg_map,
					 kref_read(&ndlp->kref));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}

/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			      "node put: did:x%x flg:x%x refcnt:x%x",
			      ndlp->nlp_DID, ndlp->nlp_flag,
			      kref_read(&ndlp->kref));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after the previous one has done the ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				 "0274 %s: ndlp:x%px "
				 "usgmap:x%x refcnt:%d\n",
				 __func__, (void *)ndlp, ndlp->nlp_usg_map,
				 kref_read(&ndlp->kref));
		return 1;
	}
	/* Check the ndlp inactivate request flag to avoid the possible
	 * race condition that kref_put got invoked again while the ndlp
	 * is already in the inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				 "0275 %s: ndlp:x%px "
				 "usgmap:x%x refcnt:%d\n",
				 __func__, (void *)ndlp, ndlp->nlp_usg_map,
				 kref_read(&ndlp->kref));
		return 1;
	}
	/* For the last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (kref_read(&ndlp->kref) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note: kref_put returns 1 when it decrements a reference count
	 * that was 1; it invokes the release callback function but
	 * leaves the reference count at 1 (it does not actually perform
	 * the final decrement). Otherwise, it decrements the reference
	 * count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}

/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			      "node not used: did:x%x flg:x%x refcnt:x%x",
			      ndlp->nlp_DID, ndlp->nlp_flag,
			      kref_read(&ndlp->kref));
	if (kref_read(&ndlp->kref) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}

6555*4882a593Smuzhiyun /**
6556*4882a593Smuzhiyun * lpfc_fcf_inuse - Check if FCF can be unregistered.
6557*4882a593Smuzhiyun * @phba: Pointer to hba context object.
6558*4882a593Smuzhiyun *
6559*4882a593Smuzhiyun * This function iterate through all FC nodes associated
6560*4882a593Smuzhiyun * will all vports to check if there is any node with
6561*4882a593Smuzhiyun * fc_rports associated with it. If there is an fc_rport
6562*4882a593Smuzhiyun * associated with the node, then the node is either in
6563*4882a593Smuzhiyun * discovered state or its devloss_timer is pending.
6564*4882a593Smuzhiyun */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * If the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO,
						LOG_NODE | LOG_DISCOVERY,
						"2624 RPI %x DID %x flag %x "
						"still logged in\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This completion handler logs any mailbox error, clears the
 * FC_VFI_REGISTERED flag on the physical port, and frees the memory
 * associated with the mailbox command.
 */
void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently
 * registered FCF. It unregisters, in order, RPIs, VPIs, and the VFI.
 *
 * Return: the result of issuing the UNREG_VFI for the physical port
 * (0 on success).
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i = 0, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
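	/* If NPIV is disabled, the loop above did not run (i == 0), so
	 * perform the same VPI teardown directly on the physical port.
	 */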
	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
		if (ndlp)
			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
		lpfc_cleanup_pending_mbox(phba->pport);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(phba->pport);
		lpfc_mbx_unreg_vpi(phba->pport);
		shost = lpfc_shost_from_vport(phba->pport);
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues an UNREG_FCFI mailbox command to the HBA to
 * unregister the currently registered FCF record. The command is issued
 * non-blocking (MBX_NOWAIT) and completes through
 * lpfc_unregister_fcfi_cmpl(). The driver does not reset the driver FCF
 * usage state flags.
 *
 * Return: 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
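	/* On a successful issue the mailbox memory is owned by the
	 * completion handler, lpfc_unregister_fcfi_cmpl(), which frees it.
	 */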
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		/* The command was never queued, so the completion handler
		 * will not run; free the mailbox here to avoid leaking it.
		 */
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EINVAL;
	}
	return 0;
}

/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also
 * tries to find another FCF for discovery by rescanning the HBA FCF
 * table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does
 * not try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks whether any remote ports are still connected
 * through the FCF and, if all of them are disconnected, unregisters the
 * FCFI. It also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *	region.
 *
 * This function creates the driver data structures for the FCF
 * connection record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
		       uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
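	/* conn_hdr->length counts 32-bit words; convert it to bytes and
	 * divide by the size of one connection record to get the number
	 * of records following the header.
	 */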
	record_count = conn_hdr->length * sizeof(uint32_t) /
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
				     GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2566 Failed to allocate connection"
					" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
		       sizeof(struct lpfc_fcf_conn_rec));
		list_add_tail(&conn_entry->list,
			      &phba->fcf_conn_rec_list);
	}

	if (!list_empty(&phba->fcf_conn_rec_list)) {
		i = 0;
		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
				    list) {
			conn_rec = &conn_entry->conn_rec;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3345 FCF connection list rec[%02d]: "
					"flags:x%04x, vtag:x%04x, "
					"fabric_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x, "
					"switch_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x\n", i++,
					conn_rec->flags, conn_rec->vlan_tag,
					conn_rec->fabric_name[0],
					conn_rec->fabric_name[1],
					conn_rec->fabric_name[2],
					conn_rec->fabric_name[3],
					conn_rec->fabric_name[4],
					conn_rec->fabric_name[5],
					conn_rec->fabric_name[6],
					conn_rec->fabric_name[7],
					conn_rec->switch_name[0],
					conn_rec->switch_name[1],
					conn_rec->switch_name[2],
					conn_rec->switch_name[3],
					conn_rec->switch_name[4],
					conn_rec->switch_name[5],
					conn_rec->switch_name[6],
					conn_rec->switch_name[7]);
		}
	}
}

/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with the config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
		     uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}
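	/* fc_map is the 24-bit FC-MAP prefix; with fabric-provided MAC
	 * addresses (FPMA) the FCoE MAC is formed as FC-MAP || FC_ID.
	 */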
	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches the config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
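	/* Record layout as assumed by this parser: byte 0 of the header
	 * word holds the record type, byte 1 holds rec_length (the number
	 * of 32-bit data words that follow); the remaining header bytes
	 * are not examined here.
	 */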
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
	       <= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23
 * and populates the driver data structures with those parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		     uint8_t *buff,
		     uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2 * sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}