/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <linux/blkdev.h>
25*4882a593Smuzhiyun #include <linux/delay.h>
26*4882a593Smuzhiyun #include <linux/dma-mapping.h>
27*4882a593Smuzhiyun #include <linux/idr.h>
28*4882a593Smuzhiyun #include <linux/interrupt.h>
29*4882a593Smuzhiyun #include <linux/kthread.h>
30*4882a593Smuzhiyun #include <linux/pci.h>
31*4882a593Smuzhiyun #include <linux/slab.h>
32*4882a593Smuzhiyun #include <linux/spinlock.h>
33*4882a593Smuzhiyun #include <linux/sched/signal.h>
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include <scsi/scsi.h>
36*4882a593Smuzhiyun #include <scsi/scsi_device.h>
37*4882a593Smuzhiyun #include <scsi/scsi_host.h>
38*4882a593Smuzhiyun #include <scsi/scsi_transport_fc.h>
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun #include "lpfc_hw4.h"
41*4882a593Smuzhiyun #include "lpfc_hw.h"
42*4882a593Smuzhiyun #include "lpfc_sli.h"
43*4882a593Smuzhiyun #include "lpfc_sli4.h"
44*4882a593Smuzhiyun #include "lpfc_nl.h"
45*4882a593Smuzhiyun #include "lpfc_disc.h"
46*4882a593Smuzhiyun #include "lpfc_scsi.h"
47*4882a593Smuzhiyun #include "lpfc.h"
48*4882a593Smuzhiyun #include "lpfc_logmsg.h"
49*4882a593Smuzhiyun #include "lpfc_crtn.h"
50*4882a593Smuzhiyun #include "lpfc_version.h"
51*4882a593Smuzhiyun #include "lpfc_vport.h"
52*4882a593Smuzhiyun
lpfc_vport_set_state(struct lpfc_vport * vport,enum fc_vport_state new_state)53*4882a593Smuzhiyun inline void lpfc_vport_set_state(struct lpfc_vport *vport,
54*4882a593Smuzhiyun enum fc_vport_state new_state)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun struct fc_vport *fc_vport = vport->fc_vport;
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun if (fc_vport) {
59*4882a593Smuzhiyun /*
60*4882a593Smuzhiyun * When the transport defines fc_vport_set state we will replace
61*4882a593Smuzhiyun * this code with the following line
62*4882a593Smuzhiyun */
63*4882a593Smuzhiyun /* fc_vport_set_state(fc_vport, new_state); */
64*4882a593Smuzhiyun if (new_state != FC_VPORT_INITIALIZING)
65*4882a593Smuzhiyun fc_vport->vport_last_state = fc_vport->vport_state;
66*4882a593Smuzhiyun fc_vport->vport_state = new_state;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun /* for all the error states we will set the invternal state to FAILED */
70*4882a593Smuzhiyun switch (new_state) {
71*4882a593Smuzhiyun case FC_VPORT_NO_FABRIC_SUPP:
72*4882a593Smuzhiyun case FC_VPORT_NO_FABRIC_RSCS:
73*4882a593Smuzhiyun case FC_VPORT_FABRIC_LOGOUT:
74*4882a593Smuzhiyun case FC_VPORT_FABRIC_REJ_WWN:
75*4882a593Smuzhiyun case FC_VPORT_FAILED:
76*4882a593Smuzhiyun vport->port_state = LPFC_VPORT_FAILED;
77*4882a593Smuzhiyun break;
78*4882a593Smuzhiyun case FC_VPORT_LINKDOWN:
79*4882a593Smuzhiyun vport->port_state = LPFC_VPORT_UNKNOWN;
80*4882a593Smuzhiyun break;
81*4882a593Smuzhiyun default:
82*4882a593Smuzhiyun /* do nothing */
83*4882a593Smuzhiyun break;
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun int
lpfc_alloc_vpi(struct lpfc_hba * phba)88*4882a593Smuzhiyun lpfc_alloc_vpi(struct lpfc_hba *phba)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun unsigned long vpi;
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
93*4882a593Smuzhiyun /* Start at bit 1 because vpi zero is reserved for the physical port */
94*4882a593Smuzhiyun vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
95*4882a593Smuzhiyun if (vpi > phba->max_vpi)
96*4882a593Smuzhiyun vpi = 0;
97*4882a593Smuzhiyun else
98*4882a593Smuzhiyun set_bit(vpi, phba->vpi_bmask);
99*4882a593Smuzhiyun if (phba->sli_rev == LPFC_SLI_REV4)
100*4882a593Smuzhiyun phba->sli4_hba.max_cfg_param.vpi_used++;
101*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
102*4882a593Smuzhiyun return vpi;
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun static void
lpfc_free_vpi(struct lpfc_hba * phba,int vpi)106*4882a593Smuzhiyun lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun if (vpi == 0)
109*4882a593Smuzhiyun return;
110*4882a593Smuzhiyun spin_lock_irq(&phba->hbalock);
111*4882a593Smuzhiyun clear_bit(vpi, phba->vpi_bmask);
112*4882a593Smuzhiyun if (phba->sli_rev == LPFC_SLI_REV4)
113*4882a593Smuzhiyun phba->sli4_hba.max_cfg_param.vpi_used--;
114*4882a593Smuzhiyun spin_unlock_irq(&phba->hbalock);
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun static int
lpfc_vport_sparm(struct lpfc_hba * phba,struct lpfc_vport * vport)118*4882a593Smuzhiyun lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
119*4882a593Smuzhiyun {
120*4882a593Smuzhiyun LPFC_MBOXQ_t *pmb;
121*4882a593Smuzhiyun MAILBOX_t *mb;
122*4882a593Smuzhiyun struct lpfc_dmabuf *mp;
123*4882a593Smuzhiyun int rc;
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
126*4882a593Smuzhiyun if (!pmb) {
127*4882a593Smuzhiyun return -ENOMEM;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun mb = &pmb->u.mb;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun rc = lpfc_read_sparam(phba, pmb, vport->vpi);
132*4882a593Smuzhiyun if (rc) {
133*4882a593Smuzhiyun mempool_free(pmb, phba->mbox_mem_pool);
134*4882a593Smuzhiyun return -ENOMEM;
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun /*
138*4882a593Smuzhiyun * Grab buffer pointer and clear context1 so we can use
139*4882a593Smuzhiyun * lpfc_sli_issue_box_wait
140*4882a593Smuzhiyun */
141*4882a593Smuzhiyun mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
142*4882a593Smuzhiyun pmb->ctx_buf = NULL;
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun pmb->vport = vport;
145*4882a593Smuzhiyun rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
146*4882a593Smuzhiyun if (rc != MBX_SUCCESS) {
147*4882a593Smuzhiyun if (signal_pending(current)) {
148*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
149*4882a593Smuzhiyun "1830 Signal aborted mbxCmd x%x\n",
150*4882a593Smuzhiyun mb->mbxCommand);
151*4882a593Smuzhiyun lpfc_mbuf_free(phba, mp->virt, mp->phys);
152*4882a593Smuzhiyun kfree(mp);
153*4882a593Smuzhiyun if (rc != MBX_TIMEOUT)
154*4882a593Smuzhiyun mempool_free(pmb, phba->mbox_mem_pool);
155*4882a593Smuzhiyun return -EINTR;
156*4882a593Smuzhiyun } else {
157*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
158*4882a593Smuzhiyun "1818 VPort failed init, mbxCmd x%x "
159*4882a593Smuzhiyun "READ_SPARM mbxStatus x%x, rc = x%x\n",
160*4882a593Smuzhiyun mb->mbxCommand, mb->mbxStatus, rc);
161*4882a593Smuzhiyun lpfc_mbuf_free(phba, mp->virt, mp->phys);
162*4882a593Smuzhiyun kfree(mp);
163*4882a593Smuzhiyun if (rc != MBX_TIMEOUT)
164*4882a593Smuzhiyun mempool_free(pmb, phba->mbox_mem_pool);
165*4882a593Smuzhiyun return -EIO;
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
170*4882a593Smuzhiyun memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
171*4882a593Smuzhiyun sizeof (struct lpfc_name));
172*4882a593Smuzhiyun memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
173*4882a593Smuzhiyun sizeof (struct lpfc_name));
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun lpfc_mbuf_free(phba, mp->virt, mp->phys);
176*4882a593Smuzhiyun kfree(mp);
177*4882a593Smuzhiyun mempool_free(pmb, phba->mbox_mem_pool);
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun return 0;
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun static int
lpfc_valid_wwn_format(struct lpfc_hba * phba,struct lpfc_name * wwn,const char * name_type)183*4882a593Smuzhiyun lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
184*4882a593Smuzhiyun const char *name_type)
185*4882a593Smuzhiyun {
186*4882a593Smuzhiyun /* ensure that IEEE format 1 addresses
187*4882a593Smuzhiyun * contain zeros in bits 59-48
188*4882a593Smuzhiyun */
189*4882a593Smuzhiyun if (!((wwn->u.wwn[0] >> 4) == 1 &&
190*4882a593Smuzhiyun ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
191*4882a593Smuzhiyun return 1;
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
194*4882a593Smuzhiyun "1822 Invalid %s: %02x:%02x:%02x:%02x:"
195*4882a593Smuzhiyun "%02x:%02x:%02x:%02x\n",
196*4882a593Smuzhiyun name_type,
197*4882a593Smuzhiyun wwn->u.wwn[0], wwn->u.wwn[1],
198*4882a593Smuzhiyun wwn->u.wwn[2], wwn->u.wwn[3],
199*4882a593Smuzhiyun wwn->u.wwn[4], wwn->u.wwn[5],
200*4882a593Smuzhiyun wwn->u.wwn[6], wwn->u.wwn[7]);
201*4882a593Smuzhiyun return 0;
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun static int
lpfc_unique_wwpn(struct lpfc_hba * phba,struct lpfc_vport * new_vport)205*4882a593Smuzhiyun lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun struct lpfc_vport *vport;
208*4882a593Smuzhiyun unsigned long flags;
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun spin_lock_irqsave(&phba->port_list_lock, flags);
211*4882a593Smuzhiyun list_for_each_entry(vport, &phba->port_list, listentry) {
212*4882a593Smuzhiyun if (vport == new_vport)
213*4882a593Smuzhiyun continue;
214*4882a593Smuzhiyun /* If they match, return not unique */
215*4882a593Smuzhiyun if (memcmp(&vport->fc_sparam.portName,
216*4882a593Smuzhiyun &new_vport->fc_sparam.portName,
217*4882a593Smuzhiyun sizeof(struct lpfc_name)) == 0) {
218*4882a593Smuzhiyun spin_unlock_irqrestore(&phba->port_list_lock, flags);
219*4882a593Smuzhiyun return 0;
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun spin_unlock_irqrestore(&phba->port_list_lock, flags);
223*4882a593Smuzhiyun return 1;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun /**
227*4882a593Smuzhiyun * lpfc_discovery_wait - Wait for driver discovery to quiesce
228*4882a593Smuzhiyun * @vport: The virtual port for which this call is being executed.
229*4882a593Smuzhiyun *
230*4882a593Smuzhiyun * This driver calls this routine specifically from lpfc_vport_delete
231*4882a593Smuzhiyun * to enforce a synchronous execution of vport
232*4882a593Smuzhiyun * delete relative to discovery activities. The
233*4882a593Smuzhiyun * lpfc_vport_delete routine should not return until it
234*4882a593Smuzhiyun * can reasonably guarantee that discovery has quiesced.
235*4882a593Smuzhiyun * Post FDISC LOGO, the driver must wait until its SAN teardown is
236*4882a593Smuzhiyun * complete and all resources recovered before allowing
237*4882a593Smuzhiyun * cleanup.
238*4882a593Smuzhiyun *
239*4882a593Smuzhiyun * This routine does not require any locks held.
240*4882a593Smuzhiyun **/
lpfc_discovery_wait(struct lpfc_vport * vport)241*4882a593Smuzhiyun static void lpfc_discovery_wait(struct lpfc_vport *vport)
242*4882a593Smuzhiyun {
243*4882a593Smuzhiyun struct lpfc_hba *phba = vport->phba;
244*4882a593Smuzhiyun uint32_t wait_flags = 0;
245*4882a593Smuzhiyun unsigned long wait_time_max;
246*4882a593Smuzhiyun unsigned long start_time;
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
249*4882a593Smuzhiyun FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun /*
252*4882a593Smuzhiyun * The time constraint on this loop is a balance between the
253*4882a593Smuzhiyun * fabric RA_TOV value and dev_loss tmo. The driver's
254*4882a593Smuzhiyun * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
255*4882a593Smuzhiyun */
256*4882a593Smuzhiyun wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
257*4882a593Smuzhiyun wait_time_max += jiffies;
258*4882a593Smuzhiyun start_time = jiffies;
259*4882a593Smuzhiyun while (time_before(jiffies, wait_time_max)) {
260*4882a593Smuzhiyun if ((vport->num_disc_nodes > 0) ||
261*4882a593Smuzhiyun (vport->fc_flag & wait_flags) ||
262*4882a593Smuzhiyun ((vport->port_state > LPFC_VPORT_FAILED) &&
263*4882a593Smuzhiyun (vport->port_state < LPFC_VPORT_READY))) {
264*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
265*4882a593Smuzhiyun "1833 Vport discovery quiesce Wait:"
266*4882a593Smuzhiyun " state x%x fc_flags x%x"
267*4882a593Smuzhiyun " num_nodes x%x, waiting 1000 msecs"
268*4882a593Smuzhiyun " total wait msecs x%x\n",
269*4882a593Smuzhiyun vport->port_state, vport->fc_flag,
270*4882a593Smuzhiyun vport->num_disc_nodes,
271*4882a593Smuzhiyun jiffies_to_msecs(jiffies - start_time));
272*4882a593Smuzhiyun msleep(1000);
273*4882a593Smuzhiyun } else {
274*4882a593Smuzhiyun /* Base case. Wait variants satisfied. Break out */
275*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
276*4882a593Smuzhiyun "1834 Vport discovery quiesced:"
277*4882a593Smuzhiyun " state x%x fc_flags x%x"
278*4882a593Smuzhiyun " wait msecs x%x\n",
279*4882a593Smuzhiyun vport->port_state, vport->fc_flag,
280*4882a593Smuzhiyun jiffies_to_msecs(jiffies
281*4882a593Smuzhiyun - start_time));
282*4882a593Smuzhiyun break;
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun if (time_after(jiffies, wait_time_max))
287*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
288*4882a593Smuzhiyun "1835 Vport discovery quiesce failed:"
289*4882a593Smuzhiyun " state x%x fc_flags x%x wait msecs x%x\n",
290*4882a593Smuzhiyun vport->port_state, vport->fc_flag,
291*4882a593Smuzhiyun jiffies_to_msecs(jiffies - start_time));
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun
/**
 * lpfc_vport_create - Create an NPIV virtual port on this HBA
 * @fc_vport: transport-level vport object created by the FC transport.
 * @disable: true to leave the new vport in the DISABLED state.
 *
 * Allocates a VPI and a driver vport, reads the service parameters,
 * installs and validates the caller-supplied WWNN/WWPN, and, when link
 * conditions allow, kicks off FDISC discovery for the new vport.
 *
 * Return: VPORT_OK on success; VPORT_INVAL, VPORT_NORESOURCES or
 * VPORT_ERROR on failure.
 */
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = fc_vport->shost;
	struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = pport->phba;
	struct lpfc_vport *vport = NULL;
	int instance;
	int vpi;
	int rc = VPORT_ERROR;
	int status;

	/* NPIV vports require SLI-3 or later plus the NPIV config option */
	if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1808 Create VPORT failed: "
				"NPIV is not enabled: SLImode:%d\n",
				phba->sli_rev);
		rc = VPORT_INVAL;
		goto error_out;
	}

	/* NPIV is not supported if HBA has NVME Target enabled */
	if (phba->nvmet_support) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3189 Create VPORT failed: "
				"NPIV is not supported on NVME Target\n");
		rc = VPORT_INVAL;
		goto error_out;
	}

	/* Reserve a VPI; 0 indicates the bitmap is exhausted */
	vpi = lpfc_alloc_vpi(phba);
	if (vpi == 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1809 Create VPORT failed: "
				"Max VPORTs (%d) exceeded\n",
				phba->max_vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	/* Assign an unused board number */
	if ((instance = lpfc_get_instance()) < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1810 Create VPORT failed: Cannot get "
				"instance number\n");
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	/* Allocate the driver vport and its SCSI host */
	vport = lpfc_create_port(phba, instance, &fc_vport->dev);
	if (!vport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1811 Create VPORT failed: vpi x%x\n", vpi);
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport->vpi = vpi;
	lpfc_debugfs_initialize(vport);

	/* Read service parameters; failure paths tear down vpi and port */
	if ((status = lpfc_vport_sparm(phba, vport))) {
		if (status == -EINTR) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "1831 Create VPORT Interrupted.\n");
			rc = VPORT_ERROR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "1813 Create VPORT failed. "
					 "Cannot get sparam\n");
			rc = VPORT_NORESOURCES;
		}
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		goto error_out;
	}

	/* Install the caller-supplied names over the values read above */
	u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
	u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

	memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
	memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

	/* Both names must be well-formed and the WWPN unique on this HBA */
	if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
	    !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "1821 Create VPORT failed. "
				 "Invalid WWN format\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	if (!lpfc_unique_wwpn(phba, vport)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "1823 Create VPORT failed. "
				 "Duplicate WWN on HBA\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	/* Create binary sysfs attribute for vport */
	lpfc_alloc_sysfs_attr(vport);

	/* Set the DFT_LUN_Q_DEPTH accordingly */
	vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;

	/* Only the physical port can support NVME for now */
	vport->cfg_enable_fc4_type = LPFC_ENABLE_FCP;

	/* Link the transport and driver vport objects to each other */
	*(struct lpfc_vport **)fc_vport->dd_data = vport;
	vport->fc_vport = fc_vport;

	/* At this point we are fully registered with SCSI Layer.  */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
		vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
	}

	/*
	 * In SLI4, the vpi must be activated before it can be used
	 * by the port.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (pport->fc_flag & FC_VFI_REGISTERED)) {
		rc = lpfc_sli4_init_vpi(vport);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1838 Failed to INIT_VPI on vpi %d "
					"status %d\n", vpi, rc);
			rc = VPORT_NORESOURCES;
			lpfc_free_vpi(phba, vpi);
			goto error_out;
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/*
		 * Driver cannot INIT_VPI now. Set the flags to
		 * init_vpi when reg_vfi complete.
		 */
		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	/* Link not fabric-ready yet: report LINKDOWN, discovery comes later */
	if ((phba->link_state < LPFC_LINK_UP) ||
	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	/* Caller asked for the vport to start out disabled */
	if (disable) {
		lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
		rc = VPORT_OK;
		goto out;
	}

	/* Use the Physical nodes Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0262 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}
	rc = VPORT_OK;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "1825 Vport Created.\n");
	lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
	return rc;
}
487*4882a593Smuzhiyun
/**
 * disable_vport - Take a vport offline and release its discovery state
 * @fc_vport: transport vport whose driver vport is to be disabled.
 *
 * Logs the vport out of the fabric (NPIV LOGO), recovers all remote
 * node state and RPIs, and unregisters the VPI with the HBA. Always
 * returns VPORT_OK.
 */
static int
disable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
	long timeout;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* If logged into the fabric, send an NPIV LOGO and wait (bounded
	 * by 2 * RA_TOV) for unreg_vpi_cmpl to leave VPORT_INVAL.
	 */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && phba->link_state >= LPFC_LINK_UP) {
		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	lpfc_sli_host_down(vport);

	/* Mark all nodes for discovery so we can remove them by
	 * calling lpfc_cleanup_rpis(vport, 1)
	 */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);
	}
	lpfc_cleanup_rpis(vport, 1);

	lpfc_stop_vport_timers(vport);
	lpfc_unreg_all_rpis(vport);
	lpfc_unreg_default_rpis(vport);
	/*
	 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
	 * scsi_host_put() to release the vport.
	 */
	lpfc_mbx_unreg_vpi(vport);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI4 VPIs must be re-initialized before they can be reused */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "1826 Vport Disabled.\n");
	return VPORT_OK;
}
541*4882a593Smuzhiyun
542*4882a593Smuzhiyun static int
enable_vport(struct fc_vport * fc_vport)543*4882a593Smuzhiyun enable_vport(struct fc_vport *fc_vport)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
546*4882a593Smuzhiyun struct lpfc_hba *phba = vport->phba;
547*4882a593Smuzhiyun struct lpfc_nodelist *ndlp = NULL;
548*4882a593Smuzhiyun struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
549*4882a593Smuzhiyun
550*4882a593Smuzhiyun if ((phba->link_state < LPFC_LINK_UP) ||
551*4882a593Smuzhiyun (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
552*4882a593Smuzhiyun lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
553*4882a593Smuzhiyun return VPORT_OK;
554*4882a593Smuzhiyun }
555*4882a593Smuzhiyun
556*4882a593Smuzhiyun spin_lock_irq(shost->host_lock);
557*4882a593Smuzhiyun vport->load_flag |= FC_LOADING;
558*4882a593Smuzhiyun if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
559*4882a593Smuzhiyun spin_unlock_irq(shost->host_lock);
560*4882a593Smuzhiyun lpfc_issue_init_vpi(vport);
561*4882a593Smuzhiyun goto out;
562*4882a593Smuzhiyun }
563*4882a593Smuzhiyun
564*4882a593Smuzhiyun vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
565*4882a593Smuzhiyun spin_unlock_irq(shost->host_lock);
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun /* Use the Physical nodes Fabric NDLP to determine if the link is
568*4882a593Smuzhiyun * up and ready to FDISC.
569*4882a593Smuzhiyun */
570*4882a593Smuzhiyun ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
571*4882a593Smuzhiyun if (ndlp && NLP_CHK_NODE_ACT(ndlp)
572*4882a593Smuzhiyun && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
573*4882a593Smuzhiyun if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
574*4882a593Smuzhiyun lpfc_set_disctmo(vport);
575*4882a593Smuzhiyun lpfc_initial_fdisc(vport);
576*4882a593Smuzhiyun } else {
577*4882a593Smuzhiyun lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
578*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
579*4882a593Smuzhiyun "0264 No NPIV Fabric support\n");
580*4882a593Smuzhiyun }
581*4882a593Smuzhiyun } else {
582*4882a593Smuzhiyun lpfc_vport_set_state(vport, FC_VPORT_FAILED);
583*4882a593Smuzhiyun }
584*4882a593Smuzhiyun
585*4882a593Smuzhiyun out:
586*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
587*4882a593Smuzhiyun "1827 Vport Enabled.\n");
588*4882a593Smuzhiyun return VPORT_OK;
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun
591*4882a593Smuzhiyun int
lpfc_vport_disable(struct fc_vport * fc_vport,bool disable)592*4882a593Smuzhiyun lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
593*4882a593Smuzhiyun {
594*4882a593Smuzhiyun if (disable)
595*4882a593Smuzhiyun return disable_vport(fc_vport);
596*4882a593Smuzhiyun else
597*4882a593Smuzhiyun return enable_vport(fc_vport);
598*4882a593Smuzhiyun }
599*4882a593Smuzhiyun
600*4882a593Smuzhiyun
601*4882a593Smuzhiyun int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	long timeout;
	bool ns_ndlp_referenced = false;

	/* The physical port cannot be deleted through the vport interface. */
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "1812 vport_delete failed: Cannot delete "
				 "physical host\n");
		return VPORT_ERROR;
	}

	/* If the vport is a static vport fail the deletion. */
	if ((vport->vport_flag & STATIC_VPORT) &&
	   !(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "1837 vport_delete failed: Cannot delete "
				 "static vport.\n");
		return VPORT_ERROR;
	}

	/* Mark this vport as being torn down before any teardown begins. */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * If we are not unloading the driver then prevent the vport_delete
	 * from happening until after this vport's discovery is finished.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		int check_count = 0;
		/* Poll once per second for up to (3 * fc_ratov) + 3 seconds
		 * while the vport is in an intermediate discovery state.
		 */
		while (check_count < ((phba->fc_ratov * 3) + 3) &&
		       vport->port_state > LPFC_VPORT_FAILED &&
		       vport->port_state < LPFC_VPORT_READY) {
			check_count++;
			msleep(1000);
		}
		/* NOTE(review): this exit returns -EAGAIN while every other
		 * path returns a VPORT_* code; presumably callers only test
		 * for nonzero — confirm against the fc_vport_ops contract.
		 */
		if (vport->port_state > LPFC_VPORT_FAILED &&
		    vport->port_state < LPFC_VPORT_READY)
			return -EAGAIN;
	}

	/*
	 * Take early refcount for outstanding I/O requests we schedule during
	 * delete processing for unreg_vpi. Always keep this before
	 * scsi_remove_host() as we can no longer obtain a reference through
	 * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL.
	 */
	if (!scsi_host_get(shost))
		return VPORT_INVAL;

	lpfc_free_sysfs_attr(vport);

	lpfc_debugfs_terminate(vport);

	/*
	 * The call to fc_remove_host might release the NameServer ndlp. Since
	 * we might need to use the ndlp to send the DA_ID CT command,
	 * increment the reference for the NameServer ndlp to prevent it from
	 * being released.
	 */
	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_nlp_get(ndlp);
		/* Remember to drop this reference at skip_logo. */
		ns_ndlp_referenced = true;
	}

	/* Remove FC host and then SCSI host with the vport */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Look up the physical port's Fabric ndlp to gate the logo decision */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);

	/* In case of driver unload, we shall not perform fabric logo as the
	 * worker thread already stopped at this stage and, in this case, we
	 * can safely skip the fabric logo.
	 */
	if (phba->pport->load_flag & FC_UNLOADING) {
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
		    phba->link_state >= LPFC_LINK_UP) {
			/* First look for the Fabric ndlp */
			ndlp = lpfc_findnode_did(vport, Fabric_DID);
			if (!ndlp)
				goto skip_logo;
			else if (!NLP_CHK_NODE_ACT(ndlp)) {
				/* Re-activate an inactive node before use */
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}
			/* Remove ndlp from vport npld list */
			lpfc_dequeue_node(vport, ndlp);

			/* Indicate free memory when release */
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Kick off release ndlp when it can be safely done */
			lpfc_nlp_put(ndlp);
		}
		goto skip_logo;
	}

	/* Otherwise, we will perform fabric logo as needed */
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
	    phba->link_state >= LPFC_LINK_UP &&
	    phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		if (vport->cfg_enable_da_id) {
			/* Issue DA_ID to remove this port's objects from the
			 * fabric name server, then wait up to 2 * fc_ratov
			 * for the CT completion to clear ct_flags.
			 */
			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
				while (vport->ct_flags && timeout)
					timeout = schedule_timeout(timeout);
			else
				lpfc_printf_log(vport->phba, KERN_WARNING,
						LOG_VPORT,
						"1829 CT command failed to "
						"delete objects on fabric\n");
		}
		/* First look for the Fabric ndlp */
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			/* Cannot find existing Fabric ndlp, allocate one */
			ndlp = lpfc_nlp_init(vport, Fabric_DID);
			if (!ndlp)
				goto skip_logo;
			/* Indicate free memory when release */
			NLP_SET_FREE_REQ(ndlp);
		} else {
			if (!NLP_CHK_NODE_ACT(ndlp)) {
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}

			/* Remove ndlp from vport list */
			lpfc_dequeue_node(vport, ndlp);
			spin_lock_irq(&phba->ndlp_lock);
			if (!NLP_CHK_FREE_REQ(ndlp))
				/* Indicate free memory when release */
				NLP_SET_FREE_REQ(ndlp);
			else {
				/* Skip this if ndlp is already in free mode */
				spin_unlock_irq(&phba->ndlp_lock);
				goto skip_logo;
			}
			spin_unlock_irq(&phba->ndlp_lock);
		}

		/*
		 * If the vpi is not registered, then a valid FDISC doesn't
		 * exist and there is no need for a ELS LOGO.  Just cleanup
		 * the ndlp.
		 */
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_nlp_put(ndlp);
			goto skip_logo;
		}

		/* Issue the NPIV LOGO and wait up to 2 * fc_ratov for the
		 * unreg_vpi completion handler to flip unreg_vpi_cmpl.
		 */
		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_discovery_wait(vport);

skip_logo:

	/*
	 * If the NameServer ndlp has been incremented to allow the DA_ID CT
	 * command to be sent, decrement the ndlp now.
	 */
	if (ns_ndlp_referenced) {
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		lpfc_nlp_put(ndlp);
	}

	lpfc_cleanup(vport);
	lpfc_sli_host_down(vport);

	lpfc_stop_vport_timers(vport);

	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_unreg_all_rpis(vport);
		lpfc_unreg_default_rpis(vport);
		/*
		 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
		 * does the scsi_host_put() to release the vport.
		 */
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
				lpfc_mbx_unreg_vpi(vport))
			scsi_host_put(shost);
	} else {
		/* No unreg_vpi completion will run; drop the early ref now */
		scsi_host_put(shost);
	}

	/* Return the vpi to the pool and detach from the adapter port list */
	lpfc_free_vpi(phba, vport->vpi);
	vport->work_port_events = 0;
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "1828 Vport Deleted.\n");
	/* Balance the reference the transport holds on the shost */
	scsi_host_put(shost);
	return VPORT_OK;
}
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba * phba)817*4882a593Smuzhiyun lpfc_create_vport_work_array(struct lpfc_hba *phba)
818*4882a593Smuzhiyun {
819*4882a593Smuzhiyun struct lpfc_vport *port_iterator;
820*4882a593Smuzhiyun struct lpfc_vport **vports;
821*4882a593Smuzhiyun int index = 0;
822*4882a593Smuzhiyun vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *),
823*4882a593Smuzhiyun GFP_KERNEL);
824*4882a593Smuzhiyun if (vports == NULL)
825*4882a593Smuzhiyun return NULL;
826*4882a593Smuzhiyun spin_lock_irq(&phba->port_list_lock);
827*4882a593Smuzhiyun list_for_each_entry(port_iterator, &phba->port_list, listentry) {
828*4882a593Smuzhiyun if (port_iterator->load_flag & FC_UNLOADING)
829*4882a593Smuzhiyun continue;
830*4882a593Smuzhiyun if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
831*4882a593Smuzhiyun lpfc_printf_vlog(port_iterator, KERN_ERR,
832*4882a593Smuzhiyun LOG_TRACE_EVENT,
833*4882a593Smuzhiyun "1801 Create vport work array FAILED: "
834*4882a593Smuzhiyun "cannot do scsi_host_get\n");
835*4882a593Smuzhiyun continue;
836*4882a593Smuzhiyun }
837*4882a593Smuzhiyun vports[index++] = port_iterator;
838*4882a593Smuzhiyun }
839*4882a593Smuzhiyun spin_unlock_irq(&phba->port_list_lock);
840*4882a593Smuzhiyun return vports;
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun
843*4882a593Smuzhiyun void
lpfc_destroy_vport_work_array(struct lpfc_hba * phba,struct lpfc_vport ** vports)844*4882a593Smuzhiyun lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
845*4882a593Smuzhiyun {
846*4882a593Smuzhiyun int i;
847*4882a593Smuzhiyun if (vports == NULL)
848*4882a593Smuzhiyun return;
849*4882a593Smuzhiyun for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
850*4882a593Smuzhiyun scsi_host_put(lpfc_shost_from_vport(vports[i]));
851*4882a593Smuzhiyun kfree(vports);
852*4882a593Smuzhiyun }
853*4882a593Smuzhiyun
854*4882a593Smuzhiyun
855*4882a593Smuzhiyun /**
856*4882a593Smuzhiyun * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
857*4882a593Smuzhiyun * @vport: Pointer to vport object.
858*4882a593Smuzhiyun *
859*4882a593Smuzhiyun * This function resets the statistical data for the vport. This function
860*4882a593Smuzhiyun * is called with the host_lock held
861*4882a593Smuzhiyun **/
862*4882a593Smuzhiyun void
lpfc_vport_reset_stat_data(struct lpfc_vport * vport)863*4882a593Smuzhiyun lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
864*4882a593Smuzhiyun {
865*4882a593Smuzhiyun struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
866*4882a593Smuzhiyun
867*4882a593Smuzhiyun list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
868*4882a593Smuzhiyun if (!NLP_CHK_NODE_ACT(ndlp))
869*4882a593Smuzhiyun continue;
870*4882a593Smuzhiyun if (ndlp->lat_data)
871*4882a593Smuzhiyun memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
872*4882a593Smuzhiyun sizeof(struct lpfc_scsicmd_bkt));
873*4882a593Smuzhiyun }
874*4882a593Smuzhiyun }
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun
877*4882a593Smuzhiyun /**
878*4882a593Smuzhiyun * lpfc_alloc_bucket - Allocate data buffer required for statistical data
879*4882a593Smuzhiyun * @vport: Pointer to vport object.
880*4882a593Smuzhiyun *
881*4882a593Smuzhiyun * This function allocates data buffer required for all the FC
882*4882a593Smuzhiyun * nodes of the vport to collect statistical data.
883*4882a593Smuzhiyun **/
884*4882a593Smuzhiyun void
lpfc_alloc_bucket(struct lpfc_vport * vport)885*4882a593Smuzhiyun lpfc_alloc_bucket(struct lpfc_vport *vport)
886*4882a593Smuzhiyun {
887*4882a593Smuzhiyun struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
890*4882a593Smuzhiyun if (!NLP_CHK_NODE_ACT(ndlp))
891*4882a593Smuzhiyun continue;
892*4882a593Smuzhiyun
893*4882a593Smuzhiyun kfree(ndlp->lat_data);
894*4882a593Smuzhiyun ndlp->lat_data = NULL;
895*4882a593Smuzhiyun
896*4882a593Smuzhiyun if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
897*4882a593Smuzhiyun ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
898*4882a593Smuzhiyun sizeof(struct lpfc_scsicmd_bkt),
899*4882a593Smuzhiyun GFP_ATOMIC);
900*4882a593Smuzhiyun
901*4882a593Smuzhiyun if (!ndlp->lat_data)
902*4882a593Smuzhiyun lpfc_printf_vlog(vport, KERN_ERR,
903*4882a593Smuzhiyun LOG_TRACE_EVENT,
904*4882a593Smuzhiyun "0287 lpfc_alloc_bucket failed to "
905*4882a593Smuzhiyun "allocate statistical data buffer DID "
906*4882a593Smuzhiyun "0x%x\n", ndlp->nlp_DID);
907*4882a593Smuzhiyun }
908*4882a593Smuzhiyun }
909*4882a593Smuzhiyun }
910*4882a593Smuzhiyun
911*4882a593Smuzhiyun /**
912*4882a593Smuzhiyun * lpfc_free_bucket - Free data buffer required for statistical data
913*4882a593Smuzhiyun * @vport: Pointer to vport object.
914*4882a593Smuzhiyun *
 * This function frees the statistical data buffers of all the FC
916*4882a593Smuzhiyun * nodes of the vport.
917*4882a593Smuzhiyun **/
918*4882a593Smuzhiyun void
lpfc_free_bucket(struct lpfc_vport * vport)919*4882a593Smuzhiyun lpfc_free_bucket(struct lpfc_vport *vport)
920*4882a593Smuzhiyun {
921*4882a593Smuzhiyun struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
922*4882a593Smuzhiyun
923*4882a593Smuzhiyun list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
924*4882a593Smuzhiyun if (!NLP_CHK_NODE_ACT(ndlp))
925*4882a593Smuzhiyun continue;
926*4882a593Smuzhiyun
927*4882a593Smuzhiyun kfree(ndlp->lat_data);
928*4882a593Smuzhiyun ndlp->lat_data = NULL;
929*4882a593Smuzhiyun }
930*4882a593Smuzhiyun }
931