xref: /OK3568_Linux_fs/kernel/drivers/scsi/lpfc/lpfc_scsi.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*******************************************************************
2*4882a593Smuzhiyun  * This file is part of the Emulex Linux Device Driver for         *
3*4882a593Smuzhiyun  * Fibre Channel Host Bus Adapters.                                *
4*4882a593Smuzhiyun  * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5*4882a593Smuzhiyun  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
6*4882a593Smuzhiyun  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7*4882a593Smuzhiyun  * EMULEX and SLI are trademarks of Emulex.                        *
8*4882a593Smuzhiyun  * www.broadcom.com                                                *
9*4882a593Smuzhiyun  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10*4882a593Smuzhiyun  *                                                                 *
11*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or   *
12*4882a593Smuzhiyun  * modify it under the terms of version 2 of the GNU General       *
13*4882a593Smuzhiyun  * Public License as published by the Free Software Foundation.    *
14*4882a593Smuzhiyun  * This program is distributed in the hope that it will be useful. *
15*4882a593Smuzhiyun  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16*4882a593Smuzhiyun  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18*4882a593Smuzhiyun  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19*4882a593Smuzhiyun  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20*4882a593Smuzhiyun  * more details, a copy of which can be found in the file COPYING  *
21*4882a593Smuzhiyun  * included with this package.                                     *
22*4882a593Smuzhiyun  *******************************************************************/
23*4882a593Smuzhiyun #include <linux/pci.h>
24*4882a593Smuzhiyun #include <linux/slab.h>
25*4882a593Smuzhiyun #include <linux/interrupt.h>
26*4882a593Smuzhiyun #include <linux/export.h>
27*4882a593Smuzhiyun #include <linux/delay.h>
28*4882a593Smuzhiyun #include <asm/unaligned.h>
29*4882a593Smuzhiyun #include <linux/t10-pi.h>
30*4882a593Smuzhiyun #include <linux/crc-t10dif.h>
31*4882a593Smuzhiyun #include <net/checksum.h>
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #include <scsi/scsi.h>
34*4882a593Smuzhiyun #include <scsi/scsi_device.h>
35*4882a593Smuzhiyun #include <scsi/scsi_eh.h>
36*4882a593Smuzhiyun #include <scsi/scsi_host.h>
37*4882a593Smuzhiyun #include <scsi/scsi_tcq.h>
38*4882a593Smuzhiyun #include <scsi/scsi_transport_fc.h>
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #include "lpfc_version.h"
41*4882a593Smuzhiyun #include "lpfc_hw4.h"
42*4882a593Smuzhiyun #include "lpfc_hw.h"
43*4882a593Smuzhiyun #include "lpfc_sli.h"
44*4882a593Smuzhiyun #include "lpfc_sli4.h"
45*4882a593Smuzhiyun #include "lpfc_nl.h"
46*4882a593Smuzhiyun #include "lpfc_disc.h"
47*4882a593Smuzhiyun #include "lpfc.h"
48*4882a593Smuzhiyun #include "lpfc_scsi.h"
49*4882a593Smuzhiyun #include "lpfc_logmsg.h"
50*4882a593Smuzhiyun #include "lpfc_crtn.h"
51*4882a593Smuzhiyun #include "lpfc_vport.h"
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun #define LPFC_RESET_WAIT  2
54*4882a593Smuzhiyun #define LPFC_ABORT_WAIT  2
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun static char *dif_op_str[] = {
57*4882a593Smuzhiyun 	"PROT_NORMAL",
58*4882a593Smuzhiyun 	"PROT_READ_INSERT",
59*4882a593Smuzhiyun 	"PROT_WRITE_STRIP",
60*4882a593Smuzhiyun 	"PROT_READ_STRIP",
61*4882a593Smuzhiyun 	"PROT_WRITE_INSERT",
62*4882a593Smuzhiyun 	"PROT_READ_PASS",
63*4882a593Smuzhiyun 	"PROT_WRITE_PASS",
64*4882a593Smuzhiyun };
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun struct scsi_dif_tuple {
67*4882a593Smuzhiyun 	__be16 guard_tag;       /* Checksum */
68*4882a593Smuzhiyun 	__be16 app_tag;         /* Opaque storage */
69*4882a593Smuzhiyun 	__be32 ref_tag;         /* Target LBA or indirect LBA */
70*4882a593Smuzhiyun };
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device * sdev)73*4882a593Smuzhiyun lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	if (vport->phba->cfg_fof)
78*4882a593Smuzhiyun 		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
79*4882a593Smuzhiyun 	else
80*4882a593Smuzhiyun 		return (struct lpfc_rport_data *)sdev->hostdata;
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun static void
84*4882a593Smuzhiyun lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
85*4882a593Smuzhiyun static void
86*4882a593Smuzhiyun lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87*4882a593Smuzhiyun static int
88*4882a593Smuzhiyun lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd * sc)91*4882a593Smuzhiyun lpfc_cmd_blksize(struct scsi_cmnd *sc)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun 	return sc->device->sector_size;
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun #define LPFC_CHECK_PROTECT_GUARD	1
97*4882a593Smuzhiyun #define LPFC_CHECK_PROTECT_REF		2
98*4882a593Smuzhiyun static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd * sc,int flag)99*4882a593Smuzhiyun lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun 	return 1;
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd * sc)105*4882a593Smuzhiyun lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun 	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
108*4882a593Smuzhiyun 		return 0;
109*4882a593Smuzhiyun 	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
110*4882a593Smuzhiyun 		return 1;
111*4882a593Smuzhiyun 	return 0;
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun /**
115*4882a593Smuzhiyun  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
116*4882a593Smuzhiyun  * @phba: Pointer to HBA object.
117*4882a593Smuzhiyun  * @lpfc_cmd: lpfc scsi command object pointer.
118*4882a593Smuzhiyun  *
119*4882a593Smuzhiyun  * This function is called from the lpfc_prep_task_mgmt_cmd function to
120*4882a593Smuzhiyun  * set the last bit in the response sge entry.
121*4882a593Smuzhiyun  **/
122*4882a593Smuzhiyun static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_cmd)123*4882a593Smuzhiyun lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
124*4882a593Smuzhiyun 				struct lpfc_io_buf *lpfc_cmd)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
127*4882a593Smuzhiyun 	if (sgl) {
128*4882a593Smuzhiyun 		sgl += 1;
129*4882a593Smuzhiyun 		sgl->word2 = le32_to_cpu(sgl->word2);
130*4882a593Smuzhiyun 		bf_set(lpfc_sli4_sge_last, sgl, 1);
131*4882a593Smuzhiyun 		sgl->word2 = cpu_to_le32(sgl->word2);
132*4882a593Smuzhiyun 	}
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun /**
136*4882a593Smuzhiyun  * lpfc_update_stats - Update statistical data for the command completion
137*4882a593Smuzhiyun  * @vport: The virtual port on which this call is executing.
138*4882a593Smuzhiyun  * @lpfc_cmd: lpfc scsi command object pointer.
139*4882a593Smuzhiyun  *
140*4882a593Smuzhiyun  * This function is called when there is a command completion and this
141*4882a593Smuzhiyun  * function updates the statistical data for the command completion.
142*4882a593Smuzhiyun  **/
143*4882a593Smuzhiyun static void
lpfc_update_stats(struct lpfc_vport * vport,struct lpfc_io_buf * lpfc_cmd)144*4882a593Smuzhiyun lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
147*4882a593Smuzhiyun 	struct lpfc_rport_data *rdata;
148*4882a593Smuzhiyun 	struct lpfc_nodelist *pnode;
149*4882a593Smuzhiyun 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
150*4882a593Smuzhiyun 	unsigned long flags;
151*4882a593Smuzhiyun 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
152*4882a593Smuzhiyun 	unsigned long latency;
153*4882a593Smuzhiyun 	int i;
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	if (!vport->stat_data_enabled ||
156*4882a593Smuzhiyun 	    vport->stat_data_blocked ||
157*4882a593Smuzhiyun 	    (cmd->result))
158*4882a593Smuzhiyun 		return;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
161*4882a593Smuzhiyun 	rdata = lpfc_cmd->rdata;
162*4882a593Smuzhiyun 	pnode = rdata->pnode;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
165*4882a593Smuzhiyun 	if (!pnode ||
166*4882a593Smuzhiyun 	    !pnode->lat_data ||
167*4882a593Smuzhiyun 	    (phba->bucket_type == LPFC_NO_BUCKET)) {
168*4882a593Smuzhiyun 		spin_unlock_irqrestore(shost->host_lock, flags);
169*4882a593Smuzhiyun 		return;
170*4882a593Smuzhiyun 	}
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
173*4882a593Smuzhiyun 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
174*4882a593Smuzhiyun 			phba->bucket_step;
175*4882a593Smuzhiyun 		/* check array subscript bounds */
176*4882a593Smuzhiyun 		if (i < 0)
177*4882a593Smuzhiyun 			i = 0;
178*4882a593Smuzhiyun 		else if (i >= LPFC_MAX_BUCKET_COUNT)
179*4882a593Smuzhiyun 			i = LPFC_MAX_BUCKET_COUNT - 1;
180*4882a593Smuzhiyun 	} else {
181*4882a593Smuzhiyun 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
182*4882a593Smuzhiyun 			if (latency <= (phba->bucket_base +
183*4882a593Smuzhiyun 				((1<<i)*phba->bucket_step)))
184*4882a593Smuzhiyun 				break;
185*4882a593Smuzhiyun 	}
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	pnode->lat_data[i].cmd_count++;
188*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun /**
192*4882a593Smuzhiyun  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
193*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
194*4882a593Smuzhiyun  *
195*4882a593Smuzhiyun  * This routine is called when there is resource error in driver or firmware.
196*4882a593Smuzhiyun  * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
197*4882a593Smuzhiyun  * posts at most 1 event each second. This routine wakes up worker thread of
198*4882a593Smuzhiyun  * @phba to process WORKER_RAM_DOWN_EVENT event.
199*4882a593Smuzhiyun  *
200*4882a593Smuzhiyun  * This routine should be called with no lock held.
201*4882a593Smuzhiyun  **/
202*4882a593Smuzhiyun void
lpfc_rampdown_queue_depth(struct lpfc_hba * phba)203*4882a593Smuzhiyun lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
204*4882a593Smuzhiyun {
205*4882a593Smuzhiyun 	unsigned long flags;
206*4882a593Smuzhiyun 	uint32_t evt_posted;
207*4882a593Smuzhiyun 	unsigned long expires;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, flags);
210*4882a593Smuzhiyun 	atomic_inc(&phba->num_rsrc_err);
211*4882a593Smuzhiyun 	phba->last_rsrc_error_time = jiffies;
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
214*4882a593Smuzhiyun 	if (time_after(expires, jiffies)) {
215*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, flags);
216*4882a593Smuzhiyun 		return;
217*4882a593Smuzhiyun 	}
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	phba->last_ramp_down_time = jiffies;
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, flags);
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
224*4882a593Smuzhiyun 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
225*4882a593Smuzhiyun 	if (!evt_posted)
226*4882a593Smuzhiyun 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
227*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	if (!evt_posted)
230*4882a593Smuzhiyun 		lpfc_worker_wake_up(phba);
231*4882a593Smuzhiyun 	return;
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun /**
235*4882a593Smuzhiyun  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
236*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
237*4882a593Smuzhiyun  *
238*4882a593Smuzhiyun  * This routine is called to  process WORKER_RAMP_DOWN_QUEUE event for worker
239*4882a593Smuzhiyun  * thread.This routine reduces queue depth for all scsi device on each vport
240*4882a593Smuzhiyun  * associated with @phba.
241*4882a593Smuzhiyun  **/
242*4882a593Smuzhiyun void
lpfc_ramp_down_queue_handler(struct lpfc_hba * phba)243*4882a593Smuzhiyun lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
244*4882a593Smuzhiyun {
245*4882a593Smuzhiyun 	struct lpfc_vport **vports;
246*4882a593Smuzhiyun 	struct Scsi_Host  *shost;
247*4882a593Smuzhiyun 	struct scsi_device *sdev;
248*4882a593Smuzhiyun 	unsigned long new_queue_depth;
249*4882a593Smuzhiyun 	unsigned long num_rsrc_err, num_cmd_success;
250*4882a593Smuzhiyun 	int i;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
253*4882a593Smuzhiyun 	num_cmd_success = atomic_read(&phba->num_cmd_success);
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	/*
256*4882a593Smuzhiyun 	 * The error and success command counters are global per
257*4882a593Smuzhiyun 	 * driver instance.  If another handler has already
258*4882a593Smuzhiyun 	 * operated on this error event, just exit.
259*4882a593Smuzhiyun 	 */
260*4882a593Smuzhiyun 	if (num_rsrc_err == 0)
261*4882a593Smuzhiyun 		return;
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	vports = lpfc_create_vport_work_array(phba);
264*4882a593Smuzhiyun 	if (vports != NULL)
265*4882a593Smuzhiyun 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
266*4882a593Smuzhiyun 			shost = lpfc_shost_from_vport(vports[i]);
267*4882a593Smuzhiyun 			shost_for_each_device(sdev, shost) {
268*4882a593Smuzhiyun 				new_queue_depth =
269*4882a593Smuzhiyun 					sdev->queue_depth * num_rsrc_err /
270*4882a593Smuzhiyun 					(num_rsrc_err + num_cmd_success);
271*4882a593Smuzhiyun 				if (!new_queue_depth)
272*4882a593Smuzhiyun 					new_queue_depth = sdev->queue_depth - 1;
273*4882a593Smuzhiyun 				else
274*4882a593Smuzhiyun 					new_queue_depth = sdev->queue_depth -
275*4882a593Smuzhiyun 								new_queue_depth;
276*4882a593Smuzhiyun 				scsi_change_queue_depth(sdev, new_queue_depth);
277*4882a593Smuzhiyun 			}
278*4882a593Smuzhiyun 		}
279*4882a593Smuzhiyun 	lpfc_destroy_vport_work_array(phba, vports);
280*4882a593Smuzhiyun 	atomic_set(&phba->num_rsrc_err, 0);
281*4882a593Smuzhiyun 	atomic_set(&phba->num_cmd_success, 0);
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun /**
285*4882a593Smuzhiyun  * lpfc_scsi_dev_block - set all scsi hosts to block state
286*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
287*4882a593Smuzhiyun  *
288*4882a593Smuzhiyun  * This function walks vport list and set each SCSI host to block state
289*4882a593Smuzhiyun  * by invoking fc_remote_port_delete() routine. This function is invoked
290*4882a593Smuzhiyun  * with EEH when device's PCI slot has been permanently disabled.
291*4882a593Smuzhiyun  **/
292*4882a593Smuzhiyun void
lpfc_scsi_dev_block(struct lpfc_hba * phba)293*4882a593Smuzhiyun lpfc_scsi_dev_block(struct lpfc_hba *phba)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun 	struct lpfc_vport **vports;
296*4882a593Smuzhiyun 	struct Scsi_Host  *shost;
297*4882a593Smuzhiyun 	struct scsi_device *sdev;
298*4882a593Smuzhiyun 	struct fc_rport *rport;
299*4882a593Smuzhiyun 	int i;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	vports = lpfc_create_vport_work_array(phba);
302*4882a593Smuzhiyun 	if (vports != NULL)
303*4882a593Smuzhiyun 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
304*4882a593Smuzhiyun 			shost = lpfc_shost_from_vport(vports[i]);
305*4882a593Smuzhiyun 			shost_for_each_device(sdev, shost) {
306*4882a593Smuzhiyun 				rport = starget_to_rport(scsi_target(sdev));
307*4882a593Smuzhiyun 				fc_remote_port_delete(rport);
308*4882a593Smuzhiyun 			}
309*4882a593Smuzhiyun 		}
310*4882a593Smuzhiyun 	lpfc_destroy_vport_work_array(phba, vports);
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun /**
314*4882a593Smuzhiyun  * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
315*4882a593Smuzhiyun  * @vport: The virtual port for which this call being executed.
316*4882a593Smuzhiyun  * @num_to_allocate: The requested number of buffers to allocate.
317*4882a593Smuzhiyun  *
318*4882a593Smuzhiyun  * This routine allocates a scsi buffer for device with SLI-3 interface spec,
319*4882a593Smuzhiyun  * the scsi buffer contains all the necessary information needed to initiate
320*4882a593Smuzhiyun  * a SCSI I/O. The non-DMAable buffer region contains information to build
321*4882a593Smuzhiyun  * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
322*4882a593Smuzhiyun  * and the initial BPL. In addition to allocating memory, the FCP CMND and
323*4882a593Smuzhiyun  * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
324*4882a593Smuzhiyun  *
325*4882a593Smuzhiyun  * Return codes:
326*4882a593Smuzhiyun  *   int - number of scsi buffers that were allocated.
327*4882a593Smuzhiyun  *   0 = failure, less than num_to_alloc is a partial failure.
328*4882a593Smuzhiyun  **/
329*4882a593Smuzhiyun static int
lpfc_new_scsi_buf_s3(struct lpfc_vport * vport,int num_to_alloc)330*4882a593Smuzhiyun lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
331*4882a593Smuzhiyun {
332*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
333*4882a593Smuzhiyun 	struct lpfc_io_buf *psb;
334*4882a593Smuzhiyun 	struct ulp_bde64 *bpl;
335*4882a593Smuzhiyun 	IOCB_t *iocb;
336*4882a593Smuzhiyun 	dma_addr_t pdma_phys_fcp_cmd;
337*4882a593Smuzhiyun 	dma_addr_t pdma_phys_fcp_rsp;
338*4882a593Smuzhiyun 	dma_addr_t pdma_phys_sgl;
339*4882a593Smuzhiyun 	uint16_t iotag;
340*4882a593Smuzhiyun 	int bcnt, bpl_size;
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	bpl_size = phba->cfg_sg_dma_buf_size -
343*4882a593Smuzhiyun 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
346*4882a593Smuzhiyun 			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
347*4882a593Smuzhiyun 			 num_to_alloc, phba->cfg_sg_dma_buf_size,
348*4882a593Smuzhiyun 			 (int)sizeof(struct fcp_cmnd),
349*4882a593Smuzhiyun 			 (int)sizeof(struct fcp_rsp), bpl_size);
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
352*4882a593Smuzhiyun 		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
353*4882a593Smuzhiyun 		if (!psb)
354*4882a593Smuzhiyun 			break;
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 		/*
357*4882a593Smuzhiyun 		 * Get memory from the pci pool to map the virt space to pci
358*4882a593Smuzhiyun 		 * bus space for an I/O.  The DMA buffer includes space for the
359*4882a593Smuzhiyun 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
360*4882a593Smuzhiyun 		 * necessary to support the sg_tablesize.
361*4882a593Smuzhiyun 		 */
362*4882a593Smuzhiyun 		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
363*4882a593Smuzhiyun 					GFP_KERNEL, &psb->dma_handle);
364*4882a593Smuzhiyun 		if (!psb->data) {
365*4882a593Smuzhiyun 			kfree(psb);
366*4882a593Smuzhiyun 			break;
367*4882a593Smuzhiyun 		}
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 		/* Allocate iotag for psb->cur_iocbq. */
371*4882a593Smuzhiyun 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
372*4882a593Smuzhiyun 		if (iotag == 0) {
373*4882a593Smuzhiyun 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
374*4882a593Smuzhiyun 				      psb->data, psb->dma_handle);
375*4882a593Smuzhiyun 			kfree(psb);
376*4882a593Smuzhiyun 			break;
377*4882a593Smuzhiyun 		}
378*4882a593Smuzhiyun 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun 		psb->fcp_cmnd = psb->data;
381*4882a593Smuzhiyun 		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
382*4882a593Smuzhiyun 		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
383*4882a593Smuzhiyun 			sizeof(struct fcp_rsp);
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 		/* Initialize local short-hand pointers. */
386*4882a593Smuzhiyun 		bpl = (struct ulp_bde64 *)psb->dma_sgl;
387*4882a593Smuzhiyun 		pdma_phys_fcp_cmd = psb->dma_handle;
388*4882a593Smuzhiyun 		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
389*4882a593Smuzhiyun 		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
390*4882a593Smuzhiyun 			sizeof(struct fcp_rsp);
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 		/*
393*4882a593Smuzhiyun 		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
394*4882a593Smuzhiyun 		 * are sg list bdes.  Initialize the first two and leave the
395*4882a593Smuzhiyun 		 * rest for queuecommand.
396*4882a593Smuzhiyun 		 */
397*4882a593Smuzhiyun 		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
398*4882a593Smuzhiyun 		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
399*4882a593Smuzhiyun 		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
400*4882a593Smuzhiyun 		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
401*4882a593Smuzhiyun 		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 		/* Setup the physical region for the FCP RSP */
404*4882a593Smuzhiyun 		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
405*4882a593Smuzhiyun 		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
406*4882a593Smuzhiyun 		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
407*4882a593Smuzhiyun 		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
408*4882a593Smuzhiyun 		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 		/*
411*4882a593Smuzhiyun 		 * Since the IOCB for the FCP I/O is built into this
412*4882a593Smuzhiyun 		 * lpfc_scsi_buf, initialize it with all known data now.
413*4882a593Smuzhiyun 		 */
414*4882a593Smuzhiyun 		iocb = &psb->cur_iocbq.iocb;
415*4882a593Smuzhiyun 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
416*4882a593Smuzhiyun 		if ((phba->sli_rev == 3) &&
417*4882a593Smuzhiyun 				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
418*4882a593Smuzhiyun 			/* fill in immediate fcp command BDE */
419*4882a593Smuzhiyun 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
420*4882a593Smuzhiyun 			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
421*4882a593Smuzhiyun 			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
422*4882a593Smuzhiyun 					unsli3.fcp_ext.icd);
423*4882a593Smuzhiyun 			iocb->un.fcpi64.bdl.addrHigh = 0;
424*4882a593Smuzhiyun 			iocb->ulpBdeCount = 0;
425*4882a593Smuzhiyun 			iocb->ulpLe = 0;
426*4882a593Smuzhiyun 			/* fill in response BDE */
427*4882a593Smuzhiyun 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
428*4882a593Smuzhiyun 							BUFF_TYPE_BDE_64;
429*4882a593Smuzhiyun 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
430*4882a593Smuzhiyun 				sizeof(struct fcp_rsp);
431*4882a593Smuzhiyun 			iocb->unsli3.fcp_ext.rbde.addrLow =
432*4882a593Smuzhiyun 				putPaddrLow(pdma_phys_fcp_rsp);
433*4882a593Smuzhiyun 			iocb->unsli3.fcp_ext.rbde.addrHigh =
434*4882a593Smuzhiyun 				putPaddrHigh(pdma_phys_fcp_rsp);
435*4882a593Smuzhiyun 		} else {
436*4882a593Smuzhiyun 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
437*4882a593Smuzhiyun 			iocb->un.fcpi64.bdl.bdeSize =
438*4882a593Smuzhiyun 					(2 * sizeof(struct ulp_bde64));
439*4882a593Smuzhiyun 			iocb->un.fcpi64.bdl.addrLow =
440*4882a593Smuzhiyun 					putPaddrLow(pdma_phys_sgl);
441*4882a593Smuzhiyun 			iocb->un.fcpi64.bdl.addrHigh =
442*4882a593Smuzhiyun 					putPaddrHigh(pdma_phys_sgl);
443*4882a593Smuzhiyun 			iocb->ulpBdeCount = 1;
444*4882a593Smuzhiyun 			iocb->ulpLe = 1;
445*4882a593Smuzhiyun 		}
446*4882a593Smuzhiyun 		iocb->ulpClass = CLASS3;
447*4882a593Smuzhiyun 		psb->status = IOSTAT_SUCCESS;
448*4882a593Smuzhiyun 		/* Put it back into the SCSI buffer list */
449*4882a593Smuzhiyun 		psb->cur_iocbq.context1  = psb;
450*4882a593Smuzhiyun 		spin_lock_init(&psb->buf_lock);
451*4882a593Smuzhiyun 		lpfc_release_scsi_buf_s3(phba, psb);
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	}
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun 	return bcnt;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun /**
459*4882a593Smuzhiyun  * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
460*4882a593Smuzhiyun  * @vport: pointer to lpfc vport data structure.
461*4882a593Smuzhiyun  *
462*4882a593Smuzhiyun  * This routine is invoked by the vport cleanup for deletions and the cleanup
463*4882a593Smuzhiyun  * for an ndlp on removal.
464*4882a593Smuzhiyun  **/
465*4882a593Smuzhiyun void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport * vport)466*4882a593Smuzhiyun lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
467*4882a593Smuzhiyun {
468*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
469*4882a593Smuzhiyun 	struct lpfc_io_buf *psb, *next_psb;
470*4882a593Smuzhiyun 	struct lpfc_sli4_hdw_queue *qp;
471*4882a593Smuzhiyun 	unsigned long iflag = 0;
472*4882a593Smuzhiyun 	int idx;
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
475*4882a593Smuzhiyun 		return;
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, iflag);
478*4882a593Smuzhiyun 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
479*4882a593Smuzhiyun 		qp = &phba->sli4_hba.hdwq[idx];
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 		spin_lock(&qp->abts_io_buf_list_lock);
482*4882a593Smuzhiyun 		list_for_each_entry_safe(psb, next_psb,
483*4882a593Smuzhiyun 					 &qp->lpfc_abts_io_buf_list, list) {
484*4882a593Smuzhiyun 			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
485*4882a593Smuzhiyun 				continue;
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 			if (psb->rdata && psb->rdata->pnode &&
488*4882a593Smuzhiyun 			    psb->rdata->pnode->vport == vport)
489*4882a593Smuzhiyun 				psb->rdata = NULL;
490*4882a593Smuzhiyun 		}
491*4882a593Smuzhiyun 		spin_unlock(&qp->abts_io_buf_list_lock);
492*4882a593Smuzhiyun 	}
493*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, iflag);
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun /**
497*4882a593Smuzhiyun  * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
498*4882a593Smuzhiyun  * @phba: pointer to lpfc hba data structure.
499*4882a593Smuzhiyun  * @axri: pointer to the fcp xri abort wcqe structure.
500*4882a593Smuzhiyun  *
501*4882a593Smuzhiyun  * This routine is invoked by the worker thread to process a SLI4 fast-path
502*4882a593Smuzhiyun  * FCP or NVME aborted xri.
503*4882a593Smuzhiyun  **/
504*4882a593Smuzhiyun void
lpfc_sli4_io_xri_aborted(struct lpfc_hba * phba,struct sli4_wcqe_xri_aborted * axri,int idx)505*4882a593Smuzhiyun lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
506*4882a593Smuzhiyun 			 struct sli4_wcqe_xri_aborted *axri, int idx)
507*4882a593Smuzhiyun {
508*4882a593Smuzhiyun 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
509*4882a593Smuzhiyun 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
510*4882a593Smuzhiyun 	struct lpfc_io_buf *psb, *next_psb;
511*4882a593Smuzhiyun 	struct lpfc_sli4_hdw_queue *qp;
512*4882a593Smuzhiyun 	unsigned long iflag = 0;
513*4882a593Smuzhiyun 	struct lpfc_iocbq *iocbq;
514*4882a593Smuzhiyun 	int i;
515*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
516*4882a593Smuzhiyun 	int rrq_empty = 0;
517*4882a593Smuzhiyun 	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
520*4882a593Smuzhiyun 		return;
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 	qp = &phba->sli4_hba.hdwq[idx];
523*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, iflag);
524*4882a593Smuzhiyun 	spin_lock(&qp->abts_io_buf_list_lock);
525*4882a593Smuzhiyun 	list_for_each_entry_safe(psb, next_psb,
526*4882a593Smuzhiyun 		&qp->lpfc_abts_io_buf_list, list) {
527*4882a593Smuzhiyun 		if (psb->cur_iocbq.sli4_xritag == xri) {
528*4882a593Smuzhiyun 			list_del_init(&psb->list);
529*4882a593Smuzhiyun 			psb->flags &= ~LPFC_SBUF_XBUSY;
530*4882a593Smuzhiyun 			psb->status = IOSTAT_SUCCESS;
531*4882a593Smuzhiyun 			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
532*4882a593Smuzhiyun 				qp->abts_nvme_io_bufs--;
533*4882a593Smuzhiyun 				spin_unlock(&qp->abts_io_buf_list_lock);
534*4882a593Smuzhiyun 				spin_unlock_irqrestore(&phba->hbalock, iflag);
535*4882a593Smuzhiyun 				lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
536*4882a593Smuzhiyun 				return;
537*4882a593Smuzhiyun 			}
538*4882a593Smuzhiyun 			qp->abts_scsi_io_bufs--;
539*4882a593Smuzhiyun 			spin_unlock(&qp->abts_io_buf_list_lock);
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 			if (psb->rdata && psb->rdata->pnode)
542*4882a593Smuzhiyun 				ndlp = psb->rdata->pnode;
543*4882a593Smuzhiyun 			else
544*4882a593Smuzhiyun 				ndlp = NULL;
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 			rrq_empty = list_empty(&phba->active_rrq_list);
547*4882a593Smuzhiyun 			spin_unlock_irqrestore(&phba->hbalock, iflag);
548*4882a593Smuzhiyun 			if (ndlp) {
549*4882a593Smuzhiyun 				lpfc_set_rrq_active(phba, ndlp,
550*4882a593Smuzhiyun 					psb->cur_iocbq.sli4_lxritag, rxid, 1);
551*4882a593Smuzhiyun 				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
552*4882a593Smuzhiyun 			}
553*4882a593Smuzhiyun 			lpfc_release_scsi_buf_s4(phba, psb);
554*4882a593Smuzhiyun 			if (rrq_empty)
555*4882a593Smuzhiyun 				lpfc_worker_wake_up(phba);
556*4882a593Smuzhiyun 			return;
557*4882a593Smuzhiyun 		}
558*4882a593Smuzhiyun 	}
559*4882a593Smuzhiyun 	spin_unlock(&qp->abts_io_buf_list_lock);
560*4882a593Smuzhiyun 	for (i = 1; i <= phba->sli.last_iotag; i++) {
561*4882a593Smuzhiyun 		iocbq = phba->sli.iocbq_lookup[i];
562*4882a593Smuzhiyun 
563*4882a593Smuzhiyun 		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
564*4882a593Smuzhiyun 		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
565*4882a593Smuzhiyun 			continue;
566*4882a593Smuzhiyun 		if (iocbq->sli4_xritag != xri)
567*4882a593Smuzhiyun 			continue;
568*4882a593Smuzhiyun 		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
569*4882a593Smuzhiyun 		psb->flags &= ~LPFC_SBUF_XBUSY;
570*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, iflag);
571*4882a593Smuzhiyun 		if (!list_empty(&pring->txq))
572*4882a593Smuzhiyun 			lpfc_worker_wake_up(phba);
573*4882a593Smuzhiyun 		return;
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	}
576*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, iflag);
577*4882a593Smuzhiyun }
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun /**
580*4882a593Smuzhiyun  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
581*4882a593Smuzhiyun  * @phba: The HBA for which this call is being executed.
582*4882a593Smuzhiyun  *
583*4882a593Smuzhiyun  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
584*4882a593Smuzhiyun  * and returns to caller.
585*4882a593Smuzhiyun  *
586*4882a593Smuzhiyun  * Return codes:
587*4882a593Smuzhiyun  *   NULL - Error
588*4882a593Smuzhiyun  *   Pointer to lpfc_scsi_buf - Success
589*4882a593Smuzhiyun  **/
590*4882a593Smuzhiyun static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp,struct scsi_cmnd * cmnd)591*4882a593Smuzhiyun lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
592*4882a593Smuzhiyun 		     struct scsi_cmnd *cmnd)
593*4882a593Smuzhiyun {
594*4882a593Smuzhiyun 	struct lpfc_io_buf *lpfc_cmd = NULL;
595*4882a593Smuzhiyun 	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
596*4882a593Smuzhiyun 	unsigned long iflag = 0;
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
599*4882a593Smuzhiyun 	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
600*4882a593Smuzhiyun 			 list);
601*4882a593Smuzhiyun 	if (!lpfc_cmd) {
602*4882a593Smuzhiyun 		spin_lock(&phba->scsi_buf_list_put_lock);
603*4882a593Smuzhiyun 		list_splice(&phba->lpfc_scsi_buf_list_put,
604*4882a593Smuzhiyun 			    &phba->lpfc_scsi_buf_list_get);
605*4882a593Smuzhiyun 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
606*4882a593Smuzhiyun 		list_remove_head(scsi_buf_list_get, lpfc_cmd,
607*4882a593Smuzhiyun 				 struct lpfc_io_buf, list);
608*4882a593Smuzhiyun 		spin_unlock(&phba->scsi_buf_list_put_lock);
609*4882a593Smuzhiyun 	}
610*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
613*4882a593Smuzhiyun 		atomic_inc(&ndlp->cmd_pending);
614*4882a593Smuzhiyun 		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
615*4882a593Smuzhiyun 	}
616*4882a593Smuzhiyun 	return  lpfc_cmd;
617*4882a593Smuzhiyun }
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the node the I/O is destined for; consulted for
 *        queue-depth accounting.
 * @cmnd: the SCSI command being issued; used to select the hardware queue
 *        when scheduling by hardware queue is configured. May be NULL.
 *
 * This routine removes a scsi buffer from head of @hdwq io_buf_list
 * and returns to caller. The buffer's FCP cmd/rsp SGEs and the embedded
 * IOCB are (re)initialized here since the buffer may have last been used
 * by another protocol.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

	/* Pick the hardware queue: either the one block-mq dispatched the
	 * command on, or the one mapped to the current CPU.
	 */
	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(cmnd->request);
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		/* Pool exhausted on this hdwq; count the miss for stats. */
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	/* Attach a per-hdwq FCP cmd/rsp DMA buffer; on failure the io_buf
	 * must be returned to the pool before bailing out.
	 */
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes. Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	/* word2 is manipulated in CPU order, then stored back little-endian */
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	/*
	 * Since the IOCB for the FCP I/O is built into this
	 * lpfc_io_buf, initialize it with all known data now.
	 */
	iocb = &lpfc_cmd->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
	/* setting the BLP size to 2 * sizeof BDE may not be correct.
	 * We are setting the bpl to point to out sgl. An sgl's
	 * entries are 16 bytes, a bpl entries are 12 bytes.
	 */
	iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
	iocb->ulpBdeCount = 1;
	iocb->ulpLe = 1;
	iocb->ulpClass = CLASS3;

	/* Bump the node's outstanding-command count when depth tracking
	 * applies; flag the buffer so release decrements it again.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
730*4882a593Smuzhiyun /**
731*4882a593Smuzhiyun  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
732*4882a593Smuzhiyun  * @phba: The HBA for which this call is being executed.
733*4882a593Smuzhiyun  *
734*4882a593Smuzhiyun  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
735*4882a593Smuzhiyun  * and returns to caller.
736*4882a593Smuzhiyun  *
737*4882a593Smuzhiyun  * Return codes:
738*4882a593Smuzhiyun  *   NULL - Error
739*4882a593Smuzhiyun  *   Pointer to lpfc_scsi_buf - Success
740*4882a593Smuzhiyun  **/
741*4882a593Smuzhiyun static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp,struct scsi_cmnd * cmnd)742*4882a593Smuzhiyun lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
743*4882a593Smuzhiyun 		  struct scsi_cmnd *cmnd)
744*4882a593Smuzhiyun {
745*4882a593Smuzhiyun 	return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
746*4882a593Smuzhiyun }
747*4882a593Smuzhiyun 
748*4882a593Smuzhiyun /**
749*4882a593Smuzhiyun  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
750*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
751*4882a593Smuzhiyun  * @psb: The scsi buffer which is being released.
752*4882a593Smuzhiyun  *
753*4882a593Smuzhiyun  * This routine releases @psb scsi buffer by adding it to tail of @phba
754*4882a593Smuzhiyun  * lpfc_scsi_buf_list list.
755*4882a593Smuzhiyun  **/
756*4882a593Smuzhiyun static void
lpfc_release_scsi_buf_s3(struct lpfc_hba * phba,struct lpfc_io_buf * psb)757*4882a593Smuzhiyun lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
758*4882a593Smuzhiyun {
759*4882a593Smuzhiyun 	unsigned long iflag = 0;
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun 	psb->seg_cnt = 0;
762*4882a593Smuzhiyun 	psb->prot_seg_cnt = 0;
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
765*4882a593Smuzhiyun 	psb->pCmd = NULL;
766*4882a593Smuzhiyun 	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
767*4882a593Smuzhiyun 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
768*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun /**
772*4882a593Smuzhiyun  * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
773*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
774*4882a593Smuzhiyun  * @psb: The scsi buffer which is being released.
775*4882a593Smuzhiyun  *
776*4882a593Smuzhiyun  * This routine releases @psb scsi buffer by adding it to tail of @hdwq
777*4882a593Smuzhiyun  * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer
778*4882a593Smuzhiyun  * and cannot be reused for at least RA_TOV amount of time if it was
779*4882a593Smuzhiyun  * aborted.
780*4882a593Smuzhiyun  **/
781*4882a593Smuzhiyun static void
lpfc_release_scsi_buf_s4(struct lpfc_hba * phba,struct lpfc_io_buf * psb)782*4882a593Smuzhiyun lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
783*4882a593Smuzhiyun {
784*4882a593Smuzhiyun 	struct lpfc_sli4_hdw_queue *qp;
785*4882a593Smuzhiyun 	unsigned long iflag = 0;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	psb->seg_cnt = 0;
788*4882a593Smuzhiyun 	psb->prot_seg_cnt = 0;
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	qp = psb->hdwq;
791*4882a593Smuzhiyun 	if (psb->flags & LPFC_SBUF_XBUSY) {
792*4882a593Smuzhiyun 		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
793*4882a593Smuzhiyun 		psb->pCmd = NULL;
794*4882a593Smuzhiyun 		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
795*4882a593Smuzhiyun 		qp->abts_scsi_io_bufs++;
796*4882a593Smuzhiyun 		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
797*4882a593Smuzhiyun 	} else {
798*4882a593Smuzhiyun 		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
799*4882a593Smuzhiyun 	}
800*4882a593Smuzhiyun }
801*4882a593Smuzhiyun 
802*4882a593Smuzhiyun /**
803*4882a593Smuzhiyun  * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
804*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
805*4882a593Smuzhiyun  * @psb: The scsi buffer which is being released.
806*4882a593Smuzhiyun  *
807*4882a593Smuzhiyun  * This routine releases @psb scsi buffer by adding it to tail of @phba
808*4882a593Smuzhiyun  * lpfc_scsi_buf_list list.
809*4882a593Smuzhiyun  **/
810*4882a593Smuzhiyun static void
lpfc_release_scsi_buf(struct lpfc_hba * phba,struct lpfc_io_buf * psb)811*4882a593Smuzhiyun lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
812*4882a593Smuzhiyun {
813*4882a593Smuzhiyun 	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
814*4882a593Smuzhiyun 		atomic_dec(&psb->ndlp->cmd_pending);
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
817*4882a593Smuzhiyun 	phba->lpfc_release_scsi_buf(phba, psb);
818*4882a593Smuzhiyun }
819*4882a593Smuzhiyun 
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and format the bde. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			/* More mappings than the driver advertised it can
			 * handle: log it, undo the mapping, and fail the prep.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				/* BDE fits in the extended IOCB itself. */
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				/* Otherwise append the BDE to the BPL. */
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			/* BPL lives past the cmd/rsp payloads and the two
			 * leading BDEs within the buffer's DMA region.
			 */
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	/* FCP data length is carried big-endian on the wire. */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
958*4882a593Smuzhiyun 
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* BlockGuard error-injection result flags (bit mask returned by
 * lpfc_bg_err_inject).
 */
/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun /**
974*4882a593Smuzhiyun  * lpfc_bg_err_inject - Determine if we should inject an error
975*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
976*4882a593Smuzhiyun  * @sc: The SCSI command to examine
977*4882a593Smuzhiyun  * @reftag: (out) BlockGuard reference tag for transmitted data
978*4882a593Smuzhiyun  * @apptag: (out) BlockGuard application tag for transmitted data
979*4882a593Smuzhiyun  * @new_guard (in) Value to replace CRC with if needed
980*4882a593Smuzhiyun  *
981*4882a593Smuzhiyun  * Returns BG_ERR_* bit mask or 0 if request ignored
982*4882a593Smuzhiyun  **/
983*4882a593Smuzhiyun static int
lpfc_bg_err_inject(struct lpfc_hba * phba,struct scsi_cmnd * sc,uint32_t * reftag,uint16_t * apptag,uint32_t new_guard)984*4882a593Smuzhiyun lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
985*4882a593Smuzhiyun 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
986*4882a593Smuzhiyun {
987*4882a593Smuzhiyun 	struct scatterlist *sgpe; /* s/g prot entry */
988*4882a593Smuzhiyun 	struct lpfc_io_buf *lpfc_cmd = NULL;
989*4882a593Smuzhiyun 	struct scsi_dif_tuple *src = NULL;
990*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
991*4882a593Smuzhiyun 	struct lpfc_rport_data *rdata;
992*4882a593Smuzhiyun 	uint32_t op = scsi_get_prot_op(sc);
993*4882a593Smuzhiyun 	uint32_t blksize;
994*4882a593Smuzhiyun 	uint32_t numblks;
995*4882a593Smuzhiyun 	sector_t lba;
996*4882a593Smuzhiyun 	int rc = 0;
997*4882a593Smuzhiyun 	int blockoff = 0;
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	if (op == SCSI_PROT_NORMAL)
1000*4882a593Smuzhiyun 		return 0;
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	sgpe = scsi_prot_sglist(sc);
1003*4882a593Smuzhiyun 	lba = scsi_get_lba(sc);
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	/* First check if we need to match the LBA */
1006*4882a593Smuzhiyun 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1007*4882a593Smuzhiyun 		blksize = lpfc_cmd_blksize(sc);
1008*4882a593Smuzhiyun 		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 		/* Make sure we have the right LBA if one is specified */
1011*4882a593Smuzhiyun 		if ((phba->lpfc_injerr_lba < lba) ||
1012*4882a593Smuzhiyun 			(phba->lpfc_injerr_lba >= (lba + numblks)))
1013*4882a593Smuzhiyun 			return 0;
1014*4882a593Smuzhiyun 		if (sgpe) {
1015*4882a593Smuzhiyun 			blockoff = phba->lpfc_injerr_lba - lba;
1016*4882a593Smuzhiyun 			numblks = sg_dma_len(sgpe) /
1017*4882a593Smuzhiyun 				sizeof(struct scsi_dif_tuple);
1018*4882a593Smuzhiyun 			if (numblks < blockoff)
1019*4882a593Smuzhiyun 				blockoff = numblks;
1020*4882a593Smuzhiyun 		}
1021*4882a593Smuzhiyun 	}
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun 	/* Next check if we need to match the remote NPortID or WWPN */
1024*4882a593Smuzhiyun 	rdata = lpfc_rport_data_from_scsi_device(sc->device);
1025*4882a593Smuzhiyun 	if (rdata && rdata->pnode) {
1026*4882a593Smuzhiyun 		ndlp = rdata->pnode;
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 		/* Make sure we have the right NPortID if one is specified */
1029*4882a593Smuzhiyun 		if (phba->lpfc_injerr_nportid  &&
1030*4882a593Smuzhiyun 			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1031*4882a593Smuzhiyun 			return 0;
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun 		/*
1034*4882a593Smuzhiyun 		 * Make sure we have the right WWPN if one is specified.
1035*4882a593Smuzhiyun 		 * wwn[0] should be a non-zero NAA in a good WWPN.
1036*4882a593Smuzhiyun 		 */
1037*4882a593Smuzhiyun 		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1038*4882a593Smuzhiyun 			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1039*4882a593Smuzhiyun 				sizeof(struct lpfc_name)) != 0))
1040*4882a593Smuzhiyun 			return 0;
1041*4882a593Smuzhiyun 	}
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	/* Setup a ptr to the protection data if the SCSI host provides it */
1044*4882a593Smuzhiyun 	if (sgpe) {
1045*4882a593Smuzhiyun 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1046*4882a593Smuzhiyun 		src += blockoff;
1047*4882a593Smuzhiyun 		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1048*4882a593Smuzhiyun 	}
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun 	/* Should we change the Reference Tag */
1051*4882a593Smuzhiyun 	if (reftag) {
1052*4882a593Smuzhiyun 		if (phba->lpfc_injerr_wref_cnt) {
1053*4882a593Smuzhiyun 			switch (op) {
1054*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_PASS:
1055*4882a593Smuzhiyun 				if (src) {
1056*4882a593Smuzhiyun 					/*
1057*4882a593Smuzhiyun 					 * For WRITE_PASS, force the error
1058*4882a593Smuzhiyun 					 * to be sent on the wire. It should
1059*4882a593Smuzhiyun 					 * be detected by the Target.
1060*4882a593Smuzhiyun 					 * If blockoff != 0 error will be
1061*4882a593Smuzhiyun 					 * inserted in middle of the IO.
1062*4882a593Smuzhiyun 					 */
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 					lpfc_printf_log(phba, KERN_ERR,
1065*4882a593Smuzhiyun 							LOG_TRACE_EVENT,
1066*4882a593Smuzhiyun 					"9076 BLKGRD: Injecting reftag error: "
1067*4882a593Smuzhiyun 					"write lba x%lx + x%x oldrefTag x%x\n",
1068*4882a593Smuzhiyun 					(unsigned long)lba, blockoff,
1069*4882a593Smuzhiyun 					be32_to_cpu(src->ref_tag));
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 					/*
1072*4882a593Smuzhiyun 					 * Save the old ref_tag so we can
1073*4882a593Smuzhiyun 					 * restore it on completion.
1074*4882a593Smuzhiyun 					 */
1075*4882a593Smuzhiyun 					if (lpfc_cmd) {
1076*4882a593Smuzhiyun 						lpfc_cmd->prot_data_type =
1077*4882a593Smuzhiyun 							LPFC_INJERR_REFTAG;
1078*4882a593Smuzhiyun 						lpfc_cmd->prot_data_segment =
1079*4882a593Smuzhiyun 							src;
1080*4882a593Smuzhiyun 						lpfc_cmd->prot_data =
1081*4882a593Smuzhiyun 							src->ref_tag;
1082*4882a593Smuzhiyun 					}
1083*4882a593Smuzhiyun 					src->ref_tag = cpu_to_be32(0xDEADBEEF);
1084*4882a593Smuzhiyun 					phba->lpfc_injerr_wref_cnt--;
1085*4882a593Smuzhiyun 					if (phba->lpfc_injerr_wref_cnt == 0) {
1086*4882a593Smuzhiyun 						phba->lpfc_injerr_nportid = 0;
1087*4882a593Smuzhiyun 						phba->lpfc_injerr_lba =
1088*4882a593Smuzhiyun 							LPFC_INJERR_LBA_OFF;
1089*4882a593Smuzhiyun 						memset(&phba->lpfc_injerr_wwpn,
1090*4882a593Smuzhiyun 						  0, sizeof(struct lpfc_name));
1091*4882a593Smuzhiyun 					}
1092*4882a593Smuzhiyun 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 					break;
1095*4882a593Smuzhiyun 				}
1096*4882a593Smuzhiyun 				fallthrough;
1097*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_INSERT:
1098*4882a593Smuzhiyun 				/*
1099*4882a593Smuzhiyun 				 * For WRITE_INSERT, force the error
1100*4882a593Smuzhiyun 				 * to be sent on the wire. It should be
1101*4882a593Smuzhiyun 				 * detected by the Target.
1102*4882a593Smuzhiyun 				 */
1103*4882a593Smuzhiyun 				/* DEADBEEF will be the reftag on the wire */
1104*4882a593Smuzhiyun 				*reftag = 0xDEADBEEF;
1105*4882a593Smuzhiyun 				phba->lpfc_injerr_wref_cnt--;
1106*4882a593Smuzhiyun 				if (phba->lpfc_injerr_wref_cnt == 0) {
1107*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1108*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1109*4882a593Smuzhiyun 					LPFC_INJERR_LBA_OFF;
1110*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1111*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1112*4882a593Smuzhiyun 				}
1113*4882a593Smuzhiyun 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1116*4882a593Smuzhiyun 					"9078 BLKGRD: Injecting reftag error: "
1117*4882a593Smuzhiyun 					"write lba x%lx\n", (unsigned long)lba);
1118*4882a593Smuzhiyun 				break;
1119*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_STRIP:
1120*4882a593Smuzhiyun 				/*
1121*4882a593Smuzhiyun 				 * For WRITE_STRIP and WRITE_PASS,
1122*4882a593Smuzhiyun 				 * force the error on data
1123*4882a593Smuzhiyun 				 * being copied from SLI-Host to SLI-Port.
1124*4882a593Smuzhiyun 				 */
1125*4882a593Smuzhiyun 				*reftag = 0xDEADBEEF;
1126*4882a593Smuzhiyun 				phba->lpfc_injerr_wref_cnt--;
1127*4882a593Smuzhiyun 				if (phba->lpfc_injerr_wref_cnt == 0) {
1128*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1129*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1130*4882a593Smuzhiyun 						LPFC_INJERR_LBA_OFF;
1131*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1132*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1133*4882a593Smuzhiyun 				}
1134*4882a593Smuzhiyun 				rc = BG_ERR_INIT;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1137*4882a593Smuzhiyun 					"9077 BLKGRD: Injecting reftag error: "
1138*4882a593Smuzhiyun 					"write lba x%lx\n", (unsigned long)lba);
1139*4882a593Smuzhiyun 				break;
1140*4882a593Smuzhiyun 			}
1141*4882a593Smuzhiyun 		}
1142*4882a593Smuzhiyun 		if (phba->lpfc_injerr_rref_cnt) {
1143*4882a593Smuzhiyun 			switch (op) {
1144*4882a593Smuzhiyun 			case SCSI_PROT_READ_INSERT:
1145*4882a593Smuzhiyun 			case SCSI_PROT_READ_STRIP:
1146*4882a593Smuzhiyun 			case SCSI_PROT_READ_PASS:
1147*4882a593Smuzhiyun 				/*
1148*4882a593Smuzhiyun 				 * For READ_STRIP and READ_PASS, force the
1149*4882a593Smuzhiyun 				 * error on data being read off the wire. It
1150*4882a593Smuzhiyun 				 * should force an IO error to the driver.
1151*4882a593Smuzhiyun 				 */
1152*4882a593Smuzhiyun 				*reftag = 0xDEADBEEF;
1153*4882a593Smuzhiyun 				phba->lpfc_injerr_rref_cnt--;
1154*4882a593Smuzhiyun 				if (phba->lpfc_injerr_rref_cnt == 0) {
1155*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1156*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1157*4882a593Smuzhiyun 						LPFC_INJERR_LBA_OFF;
1158*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1159*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1160*4882a593Smuzhiyun 				}
1161*4882a593Smuzhiyun 				rc = BG_ERR_INIT;
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1164*4882a593Smuzhiyun 					"9079 BLKGRD: Injecting reftag error: "
1165*4882a593Smuzhiyun 					"read lba x%lx\n", (unsigned long)lba);
1166*4882a593Smuzhiyun 				break;
1167*4882a593Smuzhiyun 			}
1168*4882a593Smuzhiyun 		}
1169*4882a593Smuzhiyun 	}
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	/* Should we change the Application Tag */
1172*4882a593Smuzhiyun 	if (apptag) {
1173*4882a593Smuzhiyun 		if (phba->lpfc_injerr_wapp_cnt) {
1174*4882a593Smuzhiyun 			switch (op) {
1175*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_PASS:
1176*4882a593Smuzhiyun 				if (src) {
1177*4882a593Smuzhiyun 					/*
1178*4882a593Smuzhiyun 					 * For WRITE_PASS, force the error
1179*4882a593Smuzhiyun 					 * to be sent on the wire. It should
1180*4882a593Smuzhiyun 					 * be detected by the Target.
1181*4882a593Smuzhiyun 					 * If blockoff != 0 error will be
1182*4882a593Smuzhiyun 					 * inserted in middle of the IO.
1183*4882a593Smuzhiyun 					 */
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 					lpfc_printf_log(phba, KERN_ERR,
1186*4882a593Smuzhiyun 							LOG_TRACE_EVENT,
1187*4882a593Smuzhiyun 					"9080 BLKGRD: Injecting apptag error: "
1188*4882a593Smuzhiyun 					"write lba x%lx + x%x oldappTag x%x\n",
1189*4882a593Smuzhiyun 					(unsigned long)lba, blockoff,
1190*4882a593Smuzhiyun 					be16_to_cpu(src->app_tag));
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 					/*
1193*4882a593Smuzhiyun 					 * Save the old app_tag so we can
1194*4882a593Smuzhiyun 					 * restore it on completion.
1195*4882a593Smuzhiyun 					 */
1196*4882a593Smuzhiyun 					if (lpfc_cmd) {
1197*4882a593Smuzhiyun 						lpfc_cmd->prot_data_type =
1198*4882a593Smuzhiyun 							LPFC_INJERR_APPTAG;
1199*4882a593Smuzhiyun 						lpfc_cmd->prot_data_segment =
1200*4882a593Smuzhiyun 							src;
1201*4882a593Smuzhiyun 						lpfc_cmd->prot_data =
1202*4882a593Smuzhiyun 							src->app_tag;
1203*4882a593Smuzhiyun 					}
1204*4882a593Smuzhiyun 					src->app_tag = cpu_to_be16(0xDEAD);
1205*4882a593Smuzhiyun 					phba->lpfc_injerr_wapp_cnt--;
1206*4882a593Smuzhiyun 					if (phba->lpfc_injerr_wapp_cnt == 0) {
1207*4882a593Smuzhiyun 						phba->lpfc_injerr_nportid = 0;
1208*4882a593Smuzhiyun 						phba->lpfc_injerr_lba =
1209*4882a593Smuzhiyun 							LPFC_INJERR_LBA_OFF;
1210*4882a593Smuzhiyun 						memset(&phba->lpfc_injerr_wwpn,
1211*4882a593Smuzhiyun 						  0, sizeof(struct lpfc_name));
1212*4882a593Smuzhiyun 					}
1213*4882a593Smuzhiyun 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1214*4882a593Smuzhiyun 					break;
1215*4882a593Smuzhiyun 				}
1216*4882a593Smuzhiyun 				fallthrough;
1217*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_INSERT:
1218*4882a593Smuzhiyun 				/*
1219*4882a593Smuzhiyun 				 * For WRITE_INSERT, force the
1220*4882a593Smuzhiyun 				 * error to be sent on the wire. It should be
1221*4882a593Smuzhiyun 				 * detected by the Target.
1222*4882a593Smuzhiyun 				 */
1223*4882a593Smuzhiyun 				/* DEAD will be the apptag on the wire */
1224*4882a593Smuzhiyun 				*apptag = 0xDEAD;
1225*4882a593Smuzhiyun 				phba->lpfc_injerr_wapp_cnt--;
1226*4882a593Smuzhiyun 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1227*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1228*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1229*4882a593Smuzhiyun 						LPFC_INJERR_LBA_OFF;
1230*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1231*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1232*4882a593Smuzhiyun 				}
1233*4882a593Smuzhiyun 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1236*4882a593Smuzhiyun 					"0813 BLKGRD: Injecting apptag error: "
1237*4882a593Smuzhiyun 					"write lba x%lx\n", (unsigned long)lba);
1238*4882a593Smuzhiyun 				break;
1239*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_STRIP:
1240*4882a593Smuzhiyun 				/*
1241*4882a593Smuzhiyun 				 * For WRITE_STRIP and WRITE_PASS,
1242*4882a593Smuzhiyun 				 * force the error on data
1243*4882a593Smuzhiyun 				 * being copied from SLI-Host to SLI-Port.
1244*4882a593Smuzhiyun 				 */
1245*4882a593Smuzhiyun 				*apptag = 0xDEAD;
1246*4882a593Smuzhiyun 				phba->lpfc_injerr_wapp_cnt--;
1247*4882a593Smuzhiyun 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1248*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1249*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1250*4882a593Smuzhiyun 						LPFC_INJERR_LBA_OFF;
1251*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1252*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1253*4882a593Smuzhiyun 				}
1254*4882a593Smuzhiyun 				rc = BG_ERR_INIT;
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1257*4882a593Smuzhiyun 					"0812 BLKGRD: Injecting apptag error: "
1258*4882a593Smuzhiyun 					"write lba x%lx\n", (unsigned long)lba);
1259*4882a593Smuzhiyun 				break;
1260*4882a593Smuzhiyun 			}
1261*4882a593Smuzhiyun 		}
1262*4882a593Smuzhiyun 		if (phba->lpfc_injerr_rapp_cnt) {
1263*4882a593Smuzhiyun 			switch (op) {
1264*4882a593Smuzhiyun 			case SCSI_PROT_READ_INSERT:
1265*4882a593Smuzhiyun 			case SCSI_PROT_READ_STRIP:
1266*4882a593Smuzhiyun 			case SCSI_PROT_READ_PASS:
1267*4882a593Smuzhiyun 				/*
1268*4882a593Smuzhiyun 				 * For READ_STRIP and READ_PASS, force the
1269*4882a593Smuzhiyun 				 * error on data being read off the wire. It
1270*4882a593Smuzhiyun 				 * should force an IO error to the driver.
1271*4882a593Smuzhiyun 				 */
1272*4882a593Smuzhiyun 				*apptag = 0xDEAD;
1273*4882a593Smuzhiyun 				phba->lpfc_injerr_rapp_cnt--;
1274*4882a593Smuzhiyun 				if (phba->lpfc_injerr_rapp_cnt == 0) {
1275*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1276*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1277*4882a593Smuzhiyun 						LPFC_INJERR_LBA_OFF;
1278*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1279*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1280*4882a593Smuzhiyun 				}
1281*4882a593Smuzhiyun 				rc = BG_ERR_INIT;
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1284*4882a593Smuzhiyun 					"0814 BLKGRD: Injecting apptag error: "
1285*4882a593Smuzhiyun 					"read lba x%lx\n", (unsigned long)lba);
1286*4882a593Smuzhiyun 				break;
1287*4882a593Smuzhiyun 			}
1288*4882a593Smuzhiyun 		}
1289*4882a593Smuzhiyun 	}
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	/* Should we change the Guard Tag */
1293*4882a593Smuzhiyun 	if (new_guard) {
1294*4882a593Smuzhiyun 		if (phba->lpfc_injerr_wgrd_cnt) {
1295*4882a593Smuzhiyun 			switch (op) {
1296*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_PASS:
1297*4882a593Smuzhiyun 				rc = BG_ERR_CHECK;
1298*4882a593Smuzhiyun 				fallthrough;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_INSERT:
1301*4882a593Smuzhiyun 				/*
1302*4882a593Smuzhiyun 				 * For WRITE_INSERT, force the
1303*4882a593Smuzhiyun 				 * error to be sent on the wire. It should be
1304*4882a593Smuzhiyun 				 * detected by the Target.
1305*4882a593Smuzhiyun 				 */
1306*4882a593Smuzhiyun 				phba->lpfc_injerr_wgrd_cnt--;
1307*4882a593Smuzhiyun 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1308*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1309*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1310*4882a593Smuzhiyun 						LPFC_INJERR_LBA_OFF;
1311*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1312*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1313*4882a593Smuzhiyun 				}
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 				rc |= BG_ERR_TGT | BG_ERR_SWAP;
1316*4882a593Smuzhiyun 				/* Signals the caller to swap CRC->CSUM */
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1319*4882a593Smuzhiyun 					"0817 BLKGRD: Injecting guard error: "
1320*4882a593Smuzhiyun 					"write lba x%lx\n", (unsigned long)lba);
1321*4882a593Smuzhiyun 				break;
1322*4882a593Smuzhiyun 			case SCSI_PROT_WRITE_STRIP:
1323*4882a593Smuzhiyun 				/*
1324*4882a593Smuzhiyun 				 * For WRITE_STRIP and WRITE_PASS,
1325*4882a593Smuzhiyun 				 * force the error on data
1326*4882a593Smuzhiyun 				 * being copied from SLI-Host to SLI-Port.
1327*4882a593Smuzhiyun 				 */
1328*4882a593Smuzhiyun 				phba->lpfc_injerr_wgrd_cnt--;
1329*4882a593Smuzhiyun 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1330*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1331*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1332*4882a593Smuzhiyun 						LPFC_INJERR_LBA_OFF;
1333*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1334*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1335*4882a593Smuzhiyun 				}
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1338*4882a593Smuzhiyun 				/* Signals the caller to swap CRC->CSUM */
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1341*4882a593Smuzhiyun 					"0816 BLKGRD: Injecting guard error: "
1342*4882a593Smuzhiyun 					"write lba x%lx\n", (unsigned long)lba);
1343*4882a593Smuzhiyun 				break;
1344*4882a593Smuzhiyun 			}
1345*4882a593Smuzhiyun 		}
1346*4882a593Smuzhiyun 		if (phba->lpfc_injerr_rgrd_cnt) {
1347*4882a593Smuzhiyun 			switch (op) {
1348*4882a593Smuzhiyun 			case SCSI_PROT_READ_INSERT:
1349*4882a593Smuzhiyun 			case SCSI_PROT_READ_STRIP:
1350*4882a593Smuzhiyun 			case SCSI_PROT_READ_PASS:
1351*4882a593Smuzhiyun 				/*
1352*4882a593Smuzhiyun 				 * For READ_STRIP and READ_PASS, force the
1353*4882a593Smuzhiyun 				 * error on data being read off the wire. It
1354*4882a593Smuzhiyun 				 * should force an IO error to the driver.
1355*4882a593Smuzhiyun 				 */
1356*4882a593Smuzhiyun 				phba->lpfc_injerr_rgrd_cnt--;
1357*4882a593Smuzhiyun 				if (phba->lpfc_injerr_rgrd_cnt == 0) {
1358*4882a593Smuzhiyun 					phba->lpfc_injerr_nportid = 0;
1359*4882a593Smuzhiyun 					phba->lpfc_injerr_lba =
1360*4882a593Smuzhiyun 						LPFC_INJERR_LBA_OFF;
1361*4882a593Smuzhiyun 					memset(&phba->lpfc_injerr_wwpn,
1362*4882a593Smuzhiyun 						0, sizeof(struct lpfc_name));
1363*4882a593Smuzhiyun 				}
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1366*4882a593Smuzhiyun 				/* Signals the caller to swap CRC->CSUM */
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1369*4882a593Smuzhiyun 					"0818 BLKGRD: Injecting guard error: "
1370*4882a593Smuzhiyun 					"read lba x%lx\n", (unsigned long)lba);
1371*4882a593Smuzhiyun 			}
1372*4882a593Smuzhiyun 		}
1373*4882a593Smuzhiyun 	}
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	return rc;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun #endif
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun /**
1380*4882a593Smuzhiyun  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1381*4882a593Smuzhiyun  * the specified SCSI command.
1382*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
1383*4882a593Smuzhiyun  * @sc: The SCSI command to examine
1384*4882a593Smuzhiyun  * @txop: (out) BlockGuard operation for transmitted data
1385*4882a593Smuzhiyun  * @rxop: (out) BlockGuard operation for received data
1386*4882a593Smuzhiyun  *
1387*4882a593Smuzhiyun  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1388*4882a593Smuzhiyun  *
1389*4882a593Smuzhiyun  **/
1390*4882a593Smuzhiyun static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba * phba,struct scsi_cmnd * sc,uint8_t * txop,uint8_t * rxop)1391*4882a593Smuzhiyun lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1392*4882a593Smuzhiyun 		uint8_t *txop, uint8_t *rxop)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun 	uint8_t ret = 0;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	if (lpfc_cmd_guard_csum(sc)) {
1397*4882a593Smuzhiyun 		switch (scsi_get_prot_op(sc)) {
1398*4882a593Smuzhiyun 		case SCSI_PROT_READ_INSERT:
1399*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_STRIP:
1400*4882a593Smuzhiyun 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1401*4882a593Smuzhiyun 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1402*4882a593Smuzhiyun 			break;
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 		case SCSI_PROT_READ_STRIP:
1405*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_INSERT:
1406*4882a593Smuzhiyun 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1407*4882a593Smuzhiyun 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1408*4882a593Smuzhiyun 			break;
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 		case SCSI_PROT_READ_PASS:
1411*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_PASS:
1412*4882a593Smuzhiyun 			*rxop = BG_OP_IN_CRC_OUT_CSUM;
1413*4882a593Smuzhiyun 			*txop = BG_OP_IN_CSUM_OUT_CRC;
1414*4882a593Smuzhiyun 			break;
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 		case SCSI_PROT_NORMAL:
1417*4882a593Smuzhiyun 		default:
1418*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1419*4882a593Smuzhiyun 				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1420*4882a593Smuzhiyun 					scsi_get_prot_op(sc));
1421*4882a593Smuzhiyun 			ret = 1;
1422*4882a593Smuzhiyun 			break;
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 		}
1425*4882a593Smuzhiyun 	} else {
1426*4882a593Smuzhiyun 		switch (scsi_get_prot_op(sc)) {
1427*4882a593Smuzhiyun 		case SCSI_PROT_READ_STRIP:
1428*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_INSERT:
1429*4882a593Smuzhiyun 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1430*4882a593Smuzhiyun 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1431*4882a593Smuzhiyun 			break;
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 		case SCSI_PROT_READ_PASS:
1434*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_PASS:
1435*4882a593Smuzhiyun 			*rxop = BG_OP_IN_CRC_OUT_CRC;
1436*4882a593Smuzhiyun 			*txop = BG_OP_IN_CRC_OUT_CRC;
1437*4882a593Smuzhiyun 			break;
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 		case SCSI_PROT_READ_INSERT:
1440*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_STRIP:
1441*4882a593Smuzhiyun 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1442*4882a593Smuzhiyun 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1443*4882a593Smuzhiyun 			break;
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 		case SCSI_PROT_NORMAL:
1446*4882a593Smuzhiyun 		default:
1447*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1448*4882a593Smuzhiyun 				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1449*4882a593Smuzhiyun 					scsi_get_prot_op(sc));
1450*4882a593Smuzhiyun 			ret = 1;
1451*4882a593Smuzhiyun 			break;
1452*4882a593Smuzhiyun 		}
1453*4882a593Smuzhiyun 	}
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	return ret;
1456*4882a593Smuzhiyun }
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1459*4882a593Smuzhiyun /**
1460*4882a593Smuzhiyun  * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
1461*4882a593Smuzhiyun  * the specified SCSI command in order to force a guard tag error.
1462*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
1463*4882a593Smuzhiyun  * @sc: The SCSI command to examine
1464*4882a593Smuzhiyun  * @txop: (out) BlockGuard operation for transmitted data
1465*4882a593Smuzhiyun  * @rxop: (out) BlockGuard operation for received data
1466*4882a593Smuzhiyun  *
1467*4882a593Smuzhiyun  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1468*4882a593Smuzhiyun  *
1469*4882a593Smuzhiyun  **/
1470*4882a593Smuzhiyun static int
lpfc_bg_err_opcodes(struct lpfc_hba * phba,struct scsi_cmnd * sc,uint8_t * txop,uint8_t * rxop)1471*4882a593Smuzhiyun lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1472*4882a593Smuzhiyun 		uint8_t *txop, uint8_t *rxop)
1473*4882a593Smuzhiyun {
1474*4882a593Smuzhiyun 	uint8_t ret = 0;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	if (lpfc_cmd_guard_csum(sc)) {
1477*4882a593Smuzhiyun 		switch (scsi_get_prot_op(sc)) {
1478*4882a593Smuzhiyun 		case SCSI_PROT_READ_INSERT:
1479*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_STRIP:
1480*4882a593Smuzhiyun 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1481*4882a593Smuzhiyun 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1482*4882a593Smuzhiyun 			break;
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 		case SCSI_PROT_READ_STRIP:
1485*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_INSERT:
1486*4882a593Smuzhiyun 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1487*4882a593Smuzhiyun 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1488*4882a593Smuzhiyun 			break;
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 		case SCSI_PROT_READ_PASS:
1491*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_PASS:
1492*4882a593Smuzhiyun 			*rxop = BG_OP_IN_CSUM_OUT_CRC;
1493*4882a593Smuzhiyun 			*txop = BG_OP_IN_CRC_OUT_CSUM;
1494*4882a593Smuzhiyun 			break;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 		case SCSI_PROT_NORMAL:
1497*4882a593Smuzhiyun 		default:
1498*4882a593Smuzhiyun 			break;
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 		}
1501*4882a593Smuzhiyun 	} else {
1502*4882a593Smuzhiyun 		switch (scsi_get_prot_op(sc)) {
1503*4882a593Smuzhiyun 		case SCSI_PROT_READ_STRIP:
1504*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_INSERT:
1505*4882a593Smuzhiyun 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1506*4882a593Smuzhiyun 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1507*4882a593Smuzhiyun 			break;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 		case SCSI_PROT_READ_PASS:
1510*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_PASS:
1511*4882a593Smuzhiyun 			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
1512*4882a593Smuzhiyun 			*txop = BG_OP_IN_CSUM_OUT_CSUM;
1513*4882a593Smuzhiyun 			break;
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 		case SCSI_PROT_READ_INSERT:
1516*4882a593Smuzhiyun 		case SCSI_PROT_WRITE_STRIP:
1517*4882a593Smuzhiyun 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1518*4882a593Smuzhiyun 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1519*4882a593Smuzhiyun 			break;
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 		case SCSI_PROT_NORMAL:
1522*4882a593Smuzhiyun 		default:
1523*4882a593Smuzhiyun 			break;
1524*4882a593Smuzhiyun 		}
1525*4882a593Smuzhiyun 	}
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	return ret;
1528*4882a593Smuzhiyun }
1529*4882a593Smuzhiyun #endif
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun /**
1532*4882a593Smuzhiyun  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1533*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
1534*4882a593Smuzhiyun  * @sc: pointer to scsi command we're working on
1535*4882a593Smuzhiyun  * @bpl: pointer to buffer list for protection groups
1536*4882a593Smuzhiyun  * @datasegcnt: number of segments of data that have been dma mapped
1537*4882a593Smuzhiyun  *
1538*4882a593Smuzhiyun  * This function sets up BPL buffer list for protection groups of
1539*4882a593Smuzhiyun  * type LPFC_PG_TYPE_NO_DIF
1540*4882a593Smuzhiyun  *
1541*4882a593Smuzhiyun  * This is usually used when the HBA is instructed to generate
1542*4882a593Smuzhiyun  * DIFs and insert them into data stream (or strip DIF from
1543*4882a593Smuzhiyun  * incoming data stream)
1544*4882a593Smuzhiyun  *
1545*4882a593Smuzhiyun  * The buffer list consists of just one protection group described
1546*4882a593Smuzhiyun  * below:
1547*4882a593Smuzhiyun  *                                +-------------------------+
1548*4882a593Smuzhiyun  *   start of prot group  -->     |          PDE_5          |
1549*4882a593Smuzhiyun  *                                +-------------------------+
1550*4882a593Smuzhiyun  *                                |          PDE_6          |
1551*4882a593Smuzhiyun  *                                +-------------------------+
1552*4882a593Smuzhiyun  *                                |         Data BDE        |
1553*4882a593Smuzhiyun  *                                +-------------------------+
1554*4882a593Smuzhiyun  *                                |more Data BDE's ... (opt)|
1555*4882a593Smuzhiyun  *                                +-------------------------+
1556*4882a593Smuzhiyun  *
1557*4882a593Smuzhiyun  *
1558*4882a593Smuzhiyun  * Note: Data s/g buffers have been dma mapped
1559*4882a593Smuzhiyun  *
1560*4882a593Smuzhiyun  * Returns the number of BDEs added to the BPL.
1561*4882a593Smuzhiyun  **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;	/* HBA-side guard/ref checking enabled */
	uint32_t reftag;
	uint8_t txop, rxop;

	/* Map the command's protection op to BlockGuard tx/rx opcodes;
	 * a nonzero status means an invalid op/guard combination, in which
	 * case no BDEs are produced (num_bde stays 0).
	 */
	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debug-only error injection: may corrupt reftag, swap the BG
	 * opcodes (CRC<->CSUM), and/or disable HBA-side checking so the
	 * injected error travels to the other end of the exchange.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	/* NOTE: word0 must be fully composed (bf_set above) before the
	 * little-endian conversion; do not reorder these statements.
	 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		/* Guard (ce) and reftag (re) checking are enabled per the
		 * command's protection flags, unless error injection
		 * cleared 'checking' above.
		 */
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);	/* auto-increment reftag per block */
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	/* Again: convert only after all bf_set composition is done */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	/* Emit one 64-bit BDE per mapped data segment; reads use the
	 * "input" BDE type (BUFF_TYPE_BDE_64I), writes the plain type.
	 */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	/* Returns the number of BDEs written into the BPL (0 on failure) */
	return num_bde;
}
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun /**
1664*4882a593Smuzhiyun  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1665*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
1666*4882a593Smuzhiyun  * @sc: pointer to scsi command we're working on
1667*4882a593Smuzhiyun  * @bpl: pointer to buffer list for protection groups
1668*4882a593Smuzhiyun  * @datacnt: number of segments of data that have been dma mapped
1669*4882a593Smuzhiyun  * @protcnt: number of segment of protection data that have been dma mapped
1670*4882a593Smuzhiyun  *
1671*4882a593Smuzhiyun  * This function sets up BPL buffer list for protection groups of
1672*4882a593Smuzhiyun  * type LPFC_PG_TYPE_DIF
1673*4882a593Smuzhiyun  *
1674*4882a593Smuzhiyun  * This is usually used when DIFs are in their own buffers,
1675*4882a593Smuzhiyun  * separate from the data. The HBA can then by instructed
1676*4882a593Smuzhiyun  * to place the DIFs in the outgoing stream.  For read operations,
1677*4882a593Smuzhiyun  * The HBA could extract the DIFs and place it in DIF buffers.
1678*4882a593Smuzhiyun  *
1679*4882a593Smuzhiyun  * The buffer list for this type consists of one or more of the
1680*4882a593Smuzhiyun  * protection groups described below:
1681*4882a593Smuzhiyun  *                                    +-------------------------+
1682*4882a593Smuzhiyun  *   start of first prot group  -->   |          PDE_5          |
1683*4882a593Smuzhiyun  *                                    +-------------------------+
1684*4882a593Smuzhiyun  *                                    |          PDE_6          |
1685*4882a593Smuzhiyun  *                                    +-------------------------+
1686*4882a593Smuzhiyun  *                                    |      PDE_7 (Prot BDE)   |
1687*4882a593Smuzhiyun  *                                    +-------------------------+
1688*4882a593Smuzhiyun  *                                    |        Data BDE         |
1689*4882a593Smuzhiyun  *                                    +-------------------------+
1690*4882a593Smuzhiyun  *                                    |more Data BDE's ... (opt)|
1691*4882a593Smuzhiyun  *                                    +-------------------------+
1692*4882a593Smuzhiyun  *   start of new  prot group  -->    |          PDE_5          |
1693*4882a593Smuzhiyun  *                                    +-------------------------+
1694*4882a593Smuzhiyun  *                                    |          ...            |
1695*4882a593Smuzhiyun  *                                    +-------------------------+
1696*4882a593Smuzhiyun  *
1697*4882a593Smuzhiyun  * Note: It is assumed that both data and protection s/g buffers have been
1698*4882a593Smuzhiyun  *       mapped for DMA
1699*4882a593Smuzhiyun  *
1700*4882a593Smuzhiyun  * Returns the number of BDEs added to the BPL.
1701*4882a593Smuzhiyun  **/
1702*4882a593Smuzhiyun static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba * phba,struct scsi_cmnd * sc,struct ulp_bde64 * bpl,int datacnt,int protcnt)1703*4882a593Smuzhiyun lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1704*4882a593Smuzhiyun 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
1705*4882a593Smuzhiyun {
1706*4882a593Smuzhiyun 	struct scatterlist *sgde = NULL; /* s/g data entry */
1707*4882a593Smuzhiyun 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
1708*4882a593Smuzhiyun 	struct lpfc_pde5 *pde5 = NULL;
1709*4882a593Smuzhiyun 	struct lpfc_pde6 *pde6 = NULL;
1710*4882a593Smuzhiyun 	struct lpfc_pde7 *pde7 = NULL;
1711*4882a593Smuzhiyun 	dma_addr_t dataphysaddr, protphysaddr;
1712*4882a593Smuzhiyun 	unsigned short curr_data = 0, curr_prot = 0;
1713*4882a593Smuzhiyun 	unsigned int split_offset;
1714*4882a593Smuzhiyun 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1715*4882a593Smuzhiyun 	unsigned int protgrp_blks, protgrp_bytes;
1716*4882a593Smuzhiyun 	unsigned int remainder, subtotal;
1717*4882a593Smuzhiyun 	int status;
1718*4882a593Smuzhiyun 	int datadir = sc->sc_data_direction;
1719*4882a593Smuzhiyun 	unsigned char pgdone = 0, alldone = 0;
1720*4882a593Smuzhiyun 	unsigned blksize;
1721*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1722*4882a593Smuzhiyun 	uint32_t rc;
1723*4882a593Smuzhiyun #endif
1724*4882a593Smuzhiyun 	uint32_t checking = 1;
1725*4882a593Smuzhiyun 	uint32_t reftag;
1726*4882a593Smuzhiyun 	uint8_t txop, rxop;
1727*4882a593Smuzhiyun 	int num_bde = 0;
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	sgpe = scsi_prot_sglist(sc);
1730*4882a593Smuzhiyun 	sgde = scsi_sglist(sc);
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	if (!sgpe || !sgde) {
1733*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1734*4882a593Smuzhiyun 				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
1735*4882a593Smuzhiyun 				sgpe, sgde);
1736*4882a593Smuzhiyun 		return 0;
1737*4882a593Smuzhiyun 	}
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1740*4882a593Smuzhiyun 	if (status)
1741*4882a593Smuzhiyun 		goto out;
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	/* extract some info from the scsi command */
1744*4882a593Smuzhiyun 	blksize = lpfc_cmd_blksize(sc);
1745*4882a593Smuzhiyun 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1748*4882a593Smuzhiyun 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1749*4882a593Smuzhiyun 	if (rc) {
1750*4882a593Smuzhiyun 		if (rc & BG_ERR_SWAP)
1751*4882a593Smuzhiyun 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1752*4882a593Smuzhiyun 		if (rc & BG_ERR_CHECK)
1753*4882a593Smuzhiyun 			checking = 0;
1754*4882a593Smuzhiyun 	}
1755*4882a593Smuzhiyun #endif
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun 	split_offset = 0;
1758*4882a593Smuzhiyun 	do {
1759*4882a593Smuzhiyun 		/* Check to see if we ran out of space */
1760*4882a593Smuzhiyun 		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1761*4882a593Smuzhiyun 			return num_bde + 3;
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 		/* setup PDE5 with what we have */
1764*4882a593Smuzhiyun 		pde5 = (struct lpfc_pde5 *) bpl;
1765*4882a593Smuzhiyun 		memset(pde5, 0, sizeof(struct lpfc_pde5));
1766*4882a593Smuzhiyun 		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 		/* Endianness conversion if necessary for PDE5 */
1769*4882a593Smuzhiyun 		pde5->word0 = cpu_to_le32(pde5->word0);
1770*4882a593Smuzhiyun 		pde5->reftag = cpu_to_le32(reftag);
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 		/* advance bpl and increment bde count */
1773*4882a593Smuzhiyun 		num_bde++;
1774*4882a593Smuzhiyun 		bpl++;
1775*4882a593Smuzhiyun 		pde6 = (struct lpfc_pde6 *) bpl;
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 		/* setup PDE6 with the rest of the info */
1778*4882a593Smuzhiyun 		memset(pde6, 0, sizeof(struct lpfc_pde6));
1779*4882a593Smuzhiyun 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1780*4882a593Smuzhiyun 		bf_set(pde6_optx, pde6, txop);
1781*4882a593Smuzhiyun 		bf_set(pde6_oprx, pde6, rxop);
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1784*4882a593Smuzhiyun 			bf_set(pde6_ce, pde6, checking);
1785*4882a593Smuzhiyun 		else
1786*4882a593Smuzhiyun 			bf_set(pde6_ce, pde6, 0);
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1789*4882a593Smuzhiyun 			bf_set(pde6_re, pde6, checking);
1790*4882a593Smuzhiyun 		else
1791*4882a593Smuzhiyun 			bf_set(pde6_re, pde6, 0);
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 		bf_set(pde6_ai, pde6, 1);
1794*4882a593Smuzhiyun 		bf_set(pde6_ae, pde6, 0);
1795*4882a593Smuzhiyun 		bf_set(pde6_apptagval, pde6, 0);
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 		/* Endianness conversion if necessary for PDE6 */
1798*4882a593Smuzhiyun 		pde6->word0 = cpu_to_le32(pde6->word0);
1799*4882a593Smuzhiyun 		pde6->word1 = cpu_to_le32(pde6->word1);
1800*4882a593Smuzhiyun 		pde6->word2 = cpu_to_le32(pde6->word2);
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 		/* advance bpl and increment bde count */
1803*4882a593Smuzhiyun 		num_bde++;
1804*4882a593Smuzhiyun 		bpl++;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 		/* setup the first BDE that points to protection buffer */
1807*4882a593Smuzhiyun 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1808*4882a593Smuzhiyun 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 		/* must be integer multiple of the DIF block length */
1811*4882a593Smuzhiyun 		BUG_ON(protgroup_len % 8);
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 		pde7 = (struct lpfc_pde7 *) bpl;
1814*4882a593Smuzhiyun 		memset(pde7, 0, sizeof(struct lpfc_pde7));
1815*4882a593Smuzhiyun 		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun 		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1818*4882a593Smuzhiyun 		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 		protgrp_blks = protgroup_len / 8;
1821*4882a593Smuzhiyun 		protgrp_bytes = protgrp_blks * blksize;
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 		/* check if this pde is crossing the 4K boundary; if so split */
1824*4882a593Smuzhiyun 		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1825*4882a593Smuzhiyun 			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1826*4882a593Smuzhiyun 			protgroup_offset += protgroup_remainder;
1827*4882a593Smuzhiyun 			protgrp_blks = protgroup_remainder / 8;
1828*4882a593Smuzhiyun 			protgrp_bytes = protgrp_blks * blksize;
1829*4882a593Smuzhiyun 		} else {
1830*4882a593Smuzhiyun 			protgroup_offset = 0;
1831*4882a593Smuzhiyun 			curr_prot++;
1832*4882a593Smuzhiyun 		}
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun 		num_bde++;
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 		/* setup BDE's for data blocks associated with DIF data */
1837*4882a593Smuzhiyun 		pgdone = 0;
1838*4882a593Smuzhiyun 		subtotal = 0; /* total bytes processed for current prot grp */
1839*4882a593Smuzhiyun 		while (!pgdone) {
1840*4882a593Smuzhiyun 			/* Check to see if we ran out of space */
1841*4882a593Smuzhiyun 			if (num_bde >= phba->cfg_total_seg_cnt)
1842*4882a593Smuzhiyun 				return num_bde + 1;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 			if (!sgde) {
1845*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1846*4882a593Smuzhiyun 					"9065 BLKGRD:%s Invalid data segment\n",
1847*4882a593Smuzhiyun 						__func__);
1848*4882a593Smuzhiyun 				return 0;
1849*4882a593Smuzhiyun 			}
1850*4882a593Smuzhiyun 			bpl++;
1851*4882a593Smuzhiyun 			dataphysaddr = sg_dma_address(sgde) + split_offset;
1852*4882a593Smuzhiyun 			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1853*4882a593Smuzhiyun 			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 			remainder = sg_dma_len(sgde) - split_offset;
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 			if ((subtotal + remainder) <= protgrp_bytes) {
1858*4882a593Smuzhiyun 				/* we can use this whole buffer */
1859*4882a593Smuzhiyun 				bpl->tus.f.bdeSize = remainder;
1860*4882a593Smuzhiyun 				split_offset = 0;
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 				if ((subtotal + remainder) == protgrp_bytes)
1863*4882a593Smuzhiyun 					pgdone = 1;
1864*4882a593Smuzhiyun 			} else {
1865*4882a593Smuzhiyun 				/* must split this buffer with next prot grp */
1866*4882a593Smuzhiyun 				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1867*4882a593Smuzhiyun 				split_offset += bpl->tus.f.bdeSize;
1868*4882a593Smuzhiyun 			}
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 			subtotal += bpl->tus.f.bdeSize;
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 			if (datadir == DMA_TO_DEVICE)
1873*4882a593Smuzhiyun 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1874*4882a593Smuzhiyun 			else
1875*4882a593Smuzhiyun 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1876*4882a593Smuzhiyun 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 			num_bde++;
1879*4882a593Smuzhiyun 			curr_data++;
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 			if (split_offset)
1882*4882a593Smuzhiyun 				break;
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 			/* Move to the next s/g segment if possible */
1885*4882a593Smuzhiyun 			sgde = sg_next(sgde);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 		}
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun 		if (protgroup_offset) {
1890*4882a593Smuzhiyun 			/* update the reference tag */
1891*4882a593Smuzhiyun 			reftag += protgrp_blks;
1892*4882a593Smuzhiyun 			bpl++;
1893*4882a593Smuzhiyun 			continue;
1894*4882a593Smuzhiyun 		}
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 		/* are we done ? */
1897*4882a593Smuzhiyun 		if (curr_prot == protcnt) {
1898*4882a593Smuzhiyun 			alldone = 1;
1899*4882a593Smuzhiyun 		} else if (curr_prot < protcnt) {
1900*4882a593Smuzhiyun 			/* advance to next prot buffer */
1901*4882a593Smuzhiyun 			sgpe = sg_next(sgpe);
1902*4882a593Smuzhiyun 			bpl++;
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 			/* update the reference tag */
1905*4882a593Smuzhiyun 			reftag += protgrp_blks;
1906*4882a593Smuzhiyun 		} else {
1907*4882a593Smuzhiyun 			/* if we're here, we have a bug */
1908*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1909*4882a593Smuzhiyun 					"9054 BLKGRD: bug in %s\n", __func__);
1910*4882a593Smuzhiyun 		}
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 	} while (!alldone);
1913*4882a593Smuzhiyun out:
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	return num_bde;
1916*4882a593Smuzhiyun }
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun /**
1919*4882a593Smuzhiyun  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1920*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
1921*4882a593Smuzhiyun  * @sc: pointer to scsi command we're working on
1922*4882a593Smuzhiyun  * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
1924*4882a593Smuzhiyun  *
1925*4882a593Smuzhiyun  * This function sets up SGL buffer list for protection groups of
1926*4882a593Smuzhiyun  * type LPFC_PG_TYPE_NO_DIF
1927*4882a593Smuzhiyun  *
1928*4882a593Smuzhiyun  * This is usually used when the HBA is instructed to generate
1929*4882a593Smuzhiyun  * DIFs and insert them into data stream (or strip DIF from
1930*4882a593Smuzhiyun  * incoming data stream)
1931*4882a593Smuzhiyun  *
1932*4882a593Smuzhiyun  * The buffer list consists of just one protection group described
1933*4882a593Smuzhiyun  * below:
1934*4882a593Smuzhiyun  *                                +-------------------------+
1935*4882a593Smuzhiyun  *   start of prot group  -->     |         DI_SEED         |
1936*4882a593Smuzhiyun  *                                +-------------------------+
1937*4882a593Smuzhiyun  *                                |         Data SGE        |
1938*4882a593Smuzhiyun  *                                +-------------------------+
1939*4882a593Smuzhiyun  *                                |more Data SGE's ... (opt)|
1940*4882a593Smuzhiyun  *                                +-------------------------+
1941*4882a593Smuzhiyun  *
1942*4882a593Smuzhiyun  *
1943*4882a593Smuzhiyun  * Note: Data s/g buffers have been dma mapped
1944*4882a593Smuzhiyun  *
1945*4882a593Smuzhiyun  * Returns the number of SGEs added to the SGL.
1946*4882a593Smuzhiyun  **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	int j;
	bool lsp_just_set = false;

	/* Translate the SCSI protection op into tx/rx BlockGuard opcodes */
	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optional debugfs-driven error injection: may swap the BG opcodes
	 * or disable checking to provoke protection errors on the wire.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	sgde = scsi_sglist(sc);
	/* NOTE(review): j counts SGE slots consumed in the current SGL page
	 * and starts at 3 — presumably accounting for entries already placed
	 * ahead of the data SGEs; confirm against the caller's SGL layout.
	 */
	j = 3;
	for (i = 0; i < datasegcnt; i++) {
		/* clear it */
		sgl->word2 = 0;

		/* do we need to expand the segment */
		/* If the next slot would land on the SGL page boundary and
		 * more data segments remain, chain to an extra SGL page with
		 * an LSP (link) entry instead of emitting a data entry here.
		 */
		if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
		    ((datasegcnt - 1) != i)) {
			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				/* No spare SGL page available: report zero
				 * segments mapped to the caller.
				 */
				lpfc_cmd->seg_cnt = 0;
				return 0;
			}
			sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						sgl_xtra->dma_phys_sgl));

		} else {
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		}

		if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
			/* Regular data SGE: fill in address and length from
			 * the current scatterlist entry; flag the final one.
			 */
			if ((datasegcnt - 1) == i)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			physaddr = sg_dma_address(sgde);
			dma_len = sg_dma_len(sgde);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));

			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			sgde = sg_next(sgde);

			sgl++;
			num_sge++;
			lsp_just_set = false;

		} else {
			/* An LSP entry was just written: finalize it and
			 * switch to the newly chained SGL page.  Re-run this
			 * loop index so the current data segment still gets
			 * its data SGE in the new page.
			 */
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			i = i - 1;

			lsp_just_set = true;
		}

		j++;

	}

out:
	return num_sge;
}
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun /**
2091*4882a593Smuzhiyun  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2092*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
2093*4882a593Smuzhiyun  * @sc: pointer to scsi command we're working on
2094*4882a593Smuzhiyun  * @sgl: pointer to buffer list for protection groups
2095*4882a593Smuzhiyun  * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
2097*4882a593Smuzhiyun  *
2098*4882a593Smuzhiyun  * This function sets up SGL buffer list for protection groups of
2099*4882a593Smuzhiyun  * type LPFC_PG_TYPE_DIF
2100*4882a593Smuzhiyun  *
2101*4882a593Smuzhiyun  * This is usually used when DIFs are in their own buffers,
2102*4882a593Smuzhiyun  * separate from the data. The HBA can then by instructed
2103*4882a593Smuzhiyun  * to place the DIFs in the outgoing stream.  For read operations,
2104*4882a593Smuzhiyun  * The HBA could extract the DIFs and place it in DIF buffers.
2105*4882a593Smuzhiyun  *
2106*4882a593Smuzhiyun  * The buffer list for this type consists of one or more of the
2107*4882a593Smuzhiyun  * protection groups described below:
2108*4882a593Smuzhiyun  *                                    +-------------------------+
2109*4882a593Smuzhiyun  *   start of first prot group  -->   |         DISEED          |
2110*4882a593Smuzhiyun  *                                    +-------------------------+
2111*4882a593Smuzhiyun  *                                    |      DIF (Prot SGE)     |
2112*4882a593Smuzhiyun  *                                    +-------------------------+
2113*4882a593Smuzhiyun  *                                    |        Data SGE         |
2114*4882a593Smuzhiyun  *                                    +-------------------------+
2115*4882a593Smuzhiyun  *                                    |more Data SGE's ... (opt)|
2116*4882a593Smuzhiyun  *                                    +-------------------------+
2117*4882a593Smuzhiyun  *   start of new  prot group  -->    |         DISEED          |
2118*4882a593Smuzhiyun  *                                    +-------------------------+
2119*4882a593Smuzhiyun  *                                    |          ...            |
2120*4882a593Smuzhiyun  *                                    +-------------------------+
2121*4882a593Smuzhiyun  *
2122*4882a593Smuzhiyun  * Note: It is assumed that both data and protection s/g buffers have been
2123*4882a593Smuzhiyun  *       mapped for DMA
2124*4882a593Smuzhiyun  *
2125*4882a593Smuzhiyun  * Returns the number of SGEs added to the SGL.
2126*4882a593Smuzhiyun  **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0, j = 2;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	/* Both data and protection scatterlists must be present for
	 * LPFC_PG_TYPE_DIF protection groups.
	 */
	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9082 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	/* Translate the SCSI protection op into tx/rx BlockGuard opcodes */
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optional debugfs-driven error injection: may swap the BG opcodes
	 * or disable checking to provoke protection errors on the wire.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* NOTE(review): j counts SGE slots consumed in the current SGL page
	 * and starts at 2 — presumably the slots already placed ahead of the
	 * first protection group; confirm against the caller's SGL layout.
	 */
	split_offset = 0;
	/* One DISEED + DIF + data-SGE protection group per loop iteration */
	do {
		/* Check to see if we ran out of space */
		if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
		    !(phba->cfg_xpsgl))
			return num_sge + 3;

		/* DISEED and DIF have to be together */
		/* If any of the next three slots would land on the SGL page
		 * boundary, chain to a fresh page now via an LSP (link)
		 * entry so the DISEED/DIF pair is never split across pages.
		 */
		if (!((j + 1) % phba->border_sge_num) ||
		    !((j + 2) % phba->border_sge_num) ||
		    !((j + 3) % phba->border_sge_num)) {
			sgl->word2 = 0;

			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				goto out;
			} else {
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));
			}

			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			/* Continue filling entries in the chained page */
			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			j = 0;
		}

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;

		sgl++;
		j++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;

		/* each 8 bytes of protection data cover one logical block */
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* whole protection buffer consumed by this group */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */

		sgl++;
		j++;

		while (!pgdone) {
			/* Check to see if we ran out of space */
			if ((num_sge >= phba->cfg_total_seg_cnt) &&
			    !phba->cfg_xpsgl)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}

			/* Chain to a fresh SGL page before hitting the page
			 * boundary; this slot becomes an LSP entry and the
			 * current data segment is handled next iteration.
			 */
			if (!((j + 1) % phba->border_sge_num)) {
				sgl->word2 = 0;

				/* set LSP type */
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_LSP);

				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
								 lpfc_cmd);

				if (unlikely(!sgl_xtra)) {
					goto out;
				} else {
					sgl->addr_lo = cpu_to_le32(
					  putPaddrLow(sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(
					  putPaddrHigh(sgl_xtra->dma_phys_sgl));
				}

				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			} else {
				dataphysaddr = sg_dma_address(sgde) +
								   split_offset;

				remainder = sg_dma_len(sgde) - split_offset;

				if ((subtotal + remainder) <= protgrp_bytes) {
					/* we can use this whole buffer */
					dma_len = remainder;
					split_offset = 0;

					if ((subtotal + remainder) ==
								  protgrp_bytes)
						pgdone = 1;
				} else {
					/* must split this buffer with next
					 * prot grp
					 */
					dma_len = protgrp_bytes - subtotal;
					split_offset += dma_len;
				}

				subtotal += dma_len;

				sgl->word2 = 0;
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
								 dataphysaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
								 dataphysaddr));
				bf_set(lpfc_sli4_sge_last, sgl, 0);
				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);

				sgl->sge_len = cpu_to_le32(dma_len);
				dma_offset += dma_len;

				num_sge++;
				curr_data++;

				if (split_offset) {
					/* this data segment continues into
					 * the next protection group
					 */
					sgl++;
					j++;
					break;
				}

				/* Move to the next s/g segment if possible */
				sgde = sg_next(sgde);

				sgl++;
			}

			j++;
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			/* mark the last SGL */
			sgl--;
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun /**
 * lpfc_prot_group_type - Get protection group type of SCSI command
2430*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
2431*4882a593Smuzhiyun  * @sc: pointer to scsi command we're working on
2432*4882a593Smuzhiyun  *
2433*4882a593Smuzhiyun  * Given a SCSI command that supports DIF, determine composition of protection
2434*4882a593Smuzhiyun  * groups involved in setting up buffer lists
2435*4882a593Smuzhiyun  *
2436*4882a593Smuzhiyun  * Returns: Protection group type (with or without DIF)
2437*4882a593Smuzhiyun  *
2438*4882a593Smuzhiyun  **/
2439*4882a593Smuzhiyun static int
lpfc_prot_group_type(struct lpfc_hba * phba,struct scsi_cmnd * sc)2440*4882a593Smuzhiyun lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2441*4882a593Smuzhiyun {
2442*4882a593Smuzhiyun 	int ret = LPFC_PG_TYPE_INVALID;
2443*4882a593Smuzhiyun 	unsigned char op = scsi_get_prot_op(sc);
2444*4882a593Smuzhiyun 
2445*4882a593Smuzhiyun 	switch (op) {
2446*4882a593Smuzhiyun 	case SCSI_PROT_READ_STRIP:
2447*4882a593Smuzhiyun 	case SCSI_PROT_WRITE_INSERT:
2448*4882a593Smuzhiyun 		ret = LPFC_PG_TYPE_NO_DIF;
2449*4882a593Smuzhiyun 		break;
2450*4882a593Smuzhiyun 	case SCSI_PROT_READ_INSERT:
2451*4882a593Smuzhiyun 	case SCSI_PROT_WRITE_STRIP:
2452*4882a593Smuzhiyun 	case SCSI_PROT_READ_PASS:
2453*4882a593Smuzhiyun 	case SCSI_PROT_WRITE_PASS:
2454*4882a593Smuzhiyun 		ret = LPFC_PG_TYPE_DIF_BUF;
2455*4882a593Smuzhiyun 		break;
2456*4882a593Smuzhiyun 	default:
2457*4882a593Smuzhiyun 		if (phba)
2458*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2459*4882a593Smuzhiyun 					"9021 Unsupported protection op:%d\n",
2460*4882a593Smuzhiyun 					op);
2461*4882a593Smuzhiyun 		break;
2462*4882a593Smuzhiyun 	}
2463*4882a593Smuzhiyun 	return ret;
2464*4882a593Smuzhiyun }
2465*4882a593Smuzhiyun 
2466*4882a593Smuzhiyun /**
2467*4882a593Smuzhiyun  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2468*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
2469*4882a593Smuzhiyun  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2470*4882a593Smuzhiyun  *
2471*4882a593Smuzhiyun  * Adjust the data length to account for how much data
2472*4882a593Smuzhiyun  * is actually on the wire.
2473*4882a593Smuzhiyun  *
2474*4882a593Smuzhiyun  * returns the adjusted data length
2475*4882a593Smuzhiyun  **/
2476*4882a593Smuzhiyun static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_cmd)2477*4882a593Smuzhiyun lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2478*4882a593Smuzhiyun 		       struct lpfc_io_buf *lpfc_cmd)
2479*4882a593Smuzhiyun {
2480*4882a593Smuzhiyun 	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2481*4882a593Smuzhiyun 	int fcpdl;
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 	fcpdl = scsi_bufflen(sc);
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun 	/* Check if there is protection data on the wire */
2486*4882a593Smuzhiyun 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2487*4882a593Smuzhiyun 		/* Read check for protection data */
2488*4882a593Smuzhiyun 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2489*4882a593Smuzhiyun 			return fcpdl;
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	} else {
2492*4882a593Smuzhiyun 		/* Write check for protection data */
2493*4882a593Smuzhiyun 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2494*4882a593Smuzhiyun 			return fcpdl;
2495*4882a593Smuzhiyun 	}
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 	/*
2498*4882a593Smuzhiyun 	 * If we are in DIF Type 1 mode every data block has a 8 byte
2499*4882a593Smuzhiyun 	 * DIF (trailer) attached to it. Must ajust FCP data length
2500*4882a593Smuzhiyun 	 * to account for the protection data.
2501*4882a593Smuzhiyun 	 */
2502*4882a593Smuzhiyun 	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2503*4882a593Smuzhiyun 
2504*4882a593Smuzhiyun 	return fcpdl;
2505*4882a593Smuzhiyun }
2506*4882a593Smuzhiyun 
2507*4882a593Smuzhiyun /**
2508*4882a593Smuzhiyun  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2509*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
2510*4882a593Smuzhiyun  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2511*4882a593Smuzhiyun  *
2512*4882a593Smuzhiyun  * This is the protection/DIF aware version of
2513*4882a593Smuzhiyun  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2514*4882a593Smuzhiyun  * two functions eventually, but for now, it's here.
2515*4882a593Smuzhiyun  * RETURNS 0 - SUCCESS,
2516*4882a593Smuzhiyun  *         1 - Failed DMA map, retry.
 *         2 - Invalid scsi cmd or prot-type. Do not retry.
2518*4882a593Smuzhiyun  **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* BDE list lives in the per-command DMA sgl region */
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;
	int ret = 1;	/* default: retryable failure */
	struct lpfc_vport *vport = phba->pport;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * and fcp_rsp regions to the first data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;	/* map failed - caller may retry */

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			ret = 2;	/* non-retryable: too many segments */
			goto err;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
				ret = 2;
				goto err;
			}

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				/* undo the data mapping taken above */
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimum of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2)) {
				ret = 2;
				goto err;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt)) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 2;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	/* Wire length includes DIF trailers where applicable */
	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	/*
	 * NOTE(review): be32_to_cpu here performs the same byte swap as
	 * cpu_to_be32 would; the direction name looks inverted - confirm
	 * intent against upstream, behavior is identical either way.
	 */
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * For First burst, we may need to adjust the initial transfer
	 * length for DIF
	 */
	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
	    (fcpdl < vport->cfg_first_burst_size))
		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;

	return 0;
err:
	/* Unwind any DMA mappings taken above before failing the prep */
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"9023 Cannot setup S/G List for HBA"
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return ret;
}
2677*4882a593Smuzhiyun 
/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using the CRC algorithm
 * provided by crc_t10dif.
 */
2683*4882a593Smuzhiyun static uint16_t
lpfc_bg_crc(uint8_t * data,int count)2684*4882a593Smuzhiyun lpfc_bg_crc(uint8_t *data, int count)
2685*4882a593Smuzhiyun {
2686*4882a593Smuzhiyun 	uint16_t crc = 0;
2687*4882a593Smuzhiyun 	uint16_t x;
2688*4882a593Smuzhiyun 
2689*4882a593Smuzhiyun 	crc = crc_t10dif(data, count);
2690*4882a593Smuzhiyun 	x = cpu_to_be16(crc);
2691*4882a593Smuzhiyun 	return x;
2692*4882a593Smuzhiyun }
2693*4882a593Smuzhiyun 
/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using the IP checksum algorithm
 * provided by ip_compute_csum.
 */
2699*4882a593Smuzhiyun static uint16_t
lpfc_bg_csum(uint8_t * data,int count)2700*4882a593Smuzhiyun lpfc_bg_csum(uint8_t *data, int count)
2701*4882a593Smuzhiyun {
2702*4882a593Smuzhiyun 	uint16_t ret;
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun 	ret = ip_compute_csum(data, count);
2705*4882a593Smuzhiyun 	return ret;
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun 
2708*4882a593Smuzhiyun /*
2709*4882a593Smuzhiyun  * This function examines the protection data to try to determine
2710*4882a593Smuzhiyun  * what type of T10-DIF error occurred.
2711*4882a593Smuzhiyun  */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	/*
	 * Default to a guard-tag error: if no specific mismatch is
	 * identified below, the out: path still reports a guard error
	 * (with sum/guard_tag of 0).
	 */
	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		/* Outer loop: walk each protection s/g segment */
		while (src && protsegcnt) {
			/* Inner loop: walk each DIF tuple in the segment */
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
				    (src->app_tag == T10_PI_APP_ESCAPE)) {
					/* escape tuple: skip all checks */
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking (currently disabled) */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				/* advance in lock-step through the data */
				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	/*
	 * Translate the detected mismatch into ILLEGAL_REQUEST sense data
	 * (ASC 0x10, ASCQ 1/2/3 for guard/app/ref) and bump the matching
	 * HBA error counter.
	 */
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}
2883*4882a593Smuzhiyun 
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun /*
2886*4882a593Smuzhiyun  * This function checks for BlockGuard errors detected by
2887*4882a593Smuzhiyun  * the HBA.  In case of errors, the ASC/ASCQ fields in the
2888*4882a593Smuzhiyun  * sense buffer will be set accordingly, paired with
2889*4882a593Smuzhiyun  * ILLEGAL_REQUEST to signal to the kernel that the HBA
2890*4882a593Smuzhiyun  * detected corruption.
2891*4882a593Smuzhiyun  *
2892*4882a593Smuzhiyun  * Returns:
2893*4882a593Smuzhiyun  *  0 - No error found
2894*4882a593Smuzhiyun  *  1 - BlockGuard error found
2895*4882a593Smuzhiyun  * -1 - Internal error (bad profile, ...etc)
2896*4882a593Smuzhiyun  */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
		  struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	/* BlockGuard status fields reported by the HBA in the IOCB */
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;	/* high water mark (wire byte count) */
	uint32_t bgstat = bgf->bgstat;	/* BlockGuard status bits */
	uint64_t failing_sector = 0;

	/* Invalid BG profile: internal error, fail the command outright */
	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	/* Uninitialized DIF block: also an internal error */
	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	/* Guard tag error: sense ILLEGAL_REQUEST / 0x10,0x1 */
	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	/* Reference tag error: sense ILLEGAL_REQUEST / 0x10,0x3 */
	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	/* Application tag error: sense ILLEGAL_REQUEST / 0x10,0x2 */
	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			/* data only on the wire: divide by sector size */
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			/* DIF tuples accompany each sector on the wire */
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
3031*4882a593Smuzhiyun 
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.  The first two
 * SGEs of the command SGL hold the FCP_CMND and FCP_RSP buffers; data SGEs
 * are filled in starting at the third entry.  When a command needs more SGEs
 * than fit in one SGL page and extended SGLs (cfg_xpsgl) are in use, extra
 * pages are chained in via LSP (Link Segment Pointer) type SGEs obtained
 * from lpfc_get_sgl_per_hdwq().
 *
 * Return codes:
 *	2 - Error - Do not retry
 *	1 - Error - Retry
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	struct ulp_bde64 *bde;
	bool lsp_just_set = false;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (!phba->cfg_xpsgl &&
		    lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			/* Without extended SGL support the mapping cannot be
			 * represented; undo the DMA map and fail permanently.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9074 BLKGRD:"
					" %s: Too many sg segments from "
					"dma_map_sg.  Config %d, seg_cnt %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */

		/* for tracking segment boundaries */
		sgel = scsi_sglist(scsi_cmnd);
		/* j counts SGEs consumed so far; entries 0 and 1 already hold
		 * the fcp_cmnd and fcp_rsp buffers, hence the start at 2.
		 */
		j = 2;
		for (i = 0; i < nseg; i++) {
			sgl->word2 = 0;
			/* num_bde stays 0 throughout this routine, so this
			 * branch only fires for a single-segment command.
			 */
			if ((num_bde + 1) == nseg) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* do we need to expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_cmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_cmd->seg_cnt = 0;
						scsi_dma_unmap(scsi_cmnd);
						return 1;
					}
					/* LSP SGE points at the chained
					 * SGL page, not at payload data.
					 */
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
				     LPFC_SGE_TYPE_LSP)) {
				/* Data SGE: fill in address/length for the
				 * current scatterlist segment.
				 */
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = sg_dma_address(sgel);
				dma_len = sg_dma_len(sgel);
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
							   physaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
							   physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				sgel = sg_next(sgel);

				sgl++;
				lsp_just_set = false;

			} else {
				/* An LSP SGE was just written: finish it with
				 * the chained page size, switch to the new SGL
				 * page, and redo this data segment there
				 * (i is decremented so the loop revisits it).
				 */
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}
		/*
		 * Setup the first Payload BDE. For FCoE we just key off
		 * Performance Hints, for FC we use lpfc_enable_pbde.
		 * We populate words 13-15 of IOCB/WQE.
		 */
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		/* No data: mark the fcp_rsp SGE as the last entry */
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;
	}

	return 0;
}
3244*4882a593Smuzhiyun 
3245*4882a593Smuzhiyun /**
3246*4882a593Smuzhiyun  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3247*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
3248*4882a593Smuzhiyun  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3249*4882a593Smuzhiyun  *
3250*4882a593Smuzhiyun  * This is the protection/DIF aware version of
3251*4882a593Smuzhiyun  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3252*4882a593Smuzhiyun  * two functions eventually, but for now, it's here
3253*4882a593Smuzhiyun  * Return codes:
3254*4882a593Smuzhiyun  *	2 - Error - Do not retry
3255*4882a593Smuzhiyun  *	1 - Error - Retry
3256*4882a593Smuzhiyun  *	0 - Success
3257*4882a593Smuzhiyun  **/
3258*4882a593Smuzhiyun static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_cmd)3259*4882a593Smuzhiyun lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3260*4882a593Smuzhiyun 		struct lpfc_io_buf *lpfc_cmd)
3261*4882a593Smuzhiyun {
3262*4882a593Smuzhiyun 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3263*4882a593Smuzhiyun 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3264*4882a593Smuzhiyun 	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3265*4882a593Smuzhiyun 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3266*4882a593Smuzhiyun 	uint32_t num_sge = 0;
3267*4882a593Smuzhiyun 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3268*4882a593Smuzhiyun 	int prot_group_type = 0;
3269*4882a593Smuzhiyun 	int fcpdl;
3270*4882a593Smuzhiyun 	int ret = 1;
3271*4882a593Smuzhiyun 	struct lpfc_vport *vport = phba->pport;
3272*4882a593Smuzhiyun 
3273*4882a593Smuzhiyun 	/*
3274*4882a593Smuzhiyun 	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3275*4882a593Smuzhiyun 	 *  fcp_rsp regions to the first data sge entry
3276*4882a593Smuzhiyun 	 */
3277*4882a593Smuzhiyun 	if (scsi_sg_count(scsi_cmnd)) {
3278*4882a593Smuzhiyun 		/*
3279*4882a593Smuzhiyun 		 * The driver stores the segment count returned from pci_map_sg
3280*4882a593Smuzhiyun 		 * because this a count of dma-mappings used to map the use_sg
3281*4882a593Smuzhiyun 		 * pages.  They are not guaranteed to be the same for those
3282*4882a593Smuzhiyun 		 * architectures that implement an IOMMU.
3283*4882a593Smuzhiyun 		 */
3284*4882a593Smuzhiyun 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
3285*4882a593Smuzhiyun 					scsi_sglist(scsi_cmnd),
3286*4882a593Smuzhiyun 					scsi_sg_count(scsi_cmnd), datadir);
3287*4882a593Smuzhiyun 		if (unlikely(!datasegcnt))
3288*4882a593Smuzhiyun 			return 1;
3289*4882a593Smuzhiyun 
3290*4882a593Smuzhiyun 		sgl += 1;
3291*4882a593Smuzhiyun 		/* clear the last flag in the fcp_rsp map entry */
3292*4882a593Smuzhiyun 		sgl->word2 = le32_to_cpu(sgl->word2);
3293*4882a593Smuzhiyun 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3294*4882a593Smuzhiyun 		sgl->word2 = cpu_to_le32(sgl->word2);
3295*4882a593Smuzhiyun 
3296*4882a593Smuzhiyun 		sgl += 1;
3297*4882a593Smuzhiyun 		lpfc_cmd->seg_cnt = datasegcnt;
3298*4882a593Smuzhiyun 
3299*4882a593Smuzhiyun 		/* First check if data segment count from SCSI Layer is good */
3300*4882a593Smuzhiyun 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3301*4882a593Smuzhiyun 		    !phba->cfg_xpsgl) {
3302*4882a593Smuzhiyun 			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3303*4882a593Smuzhiyun 			ret = 2;
3304*4882a593Smuzhiyun 			goto err;
3305*4882a593Smuzhiyun 		}
3306*4882a593Smuzhiyun 
3307*4882a593Smuzhiyun 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3308*4882a593Smuzhiyun 
3309*4882a593Smuzhiyun 		switch (prot_group_type) {
3310*4882a593Smuzhiyun 		case LPFC_PG_TYPE_NO_DIF:
3311*4882a593Smuzhiyun 			/* Here we need to add a DISEED to the count */
3312*4882a593Smuzhiyun 			if (((lpfc_cmd->seg_cnt + 1) >
3313*4882a593Smuzhiyun 					phba->cfg_total_seg_cnt) &&
3314*4882a593Smuzhiyun 			    !phba->cfg_xpsgl) {
3315*4882a593Smuzhiyun 				ret = 2;
3316*4882a593Smuzhiyun 				goto err;
3317*4882a593Smuzhiyun 			}
3318*4882a593Smuzhiyun 
3319*4882a593Smuzhiyun 			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3320*4882a593Smuzhiyun 					datasegcnt, lpfc_cmd);
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun 			/* we should have 2 or more entries in buffer list */
3323*4882a593Smuzhiyun 			if (num_sge < 2) {
3324*4882a593Smuzhiyun 				ret = 2;
3325*4882a593Smuzhiyun 				goto err;
3326*4882a593Smuzhiyun 			}
3327*4882a593Smuzhiyun 			break;
3328*4882a593Smuzhiyun 
3329*4882a593Smuzhiyun 		case LPFC_PG_TYPE_DIF_BUF:
3330*4882a593Smuzhiyun 			/*
3331*4882a593Smuzhiyun 			 * This type indicates that protection buffers are
3332*4882a593Smuzhiyun 			 * passed to the driver, so that needs to be prepared
3333*4882a593Smuzhiyun 			 * for DMA
3334*4882a593Smuzhiyun 			 */
3335*4882a593Smuzhiyun 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
3336*4882a593Smuzhiyun 					scsi_prot_sglist(scsi_cmnd),
3337*4882a593Smuzhiyun 					scsi_prot_sg_count(scsi_cmnd), datadir);
3338*4882a593Smuzhiyun 			if (unlikely(!protsegcnt)) {
3339*4882a593Smuzhiyun 				scsi_dma_unmap(scsi_cmnd);
3340*4882a593Smuzhiyun 				return 1;
3341*4882a593Smuzhiyun 			}
3342*4882a593Smuzhiyun 
3343*4882a593Smuzhiyun 			lpfc_cmd->prot_seg_cnt = protsegcnt;
3344*4882a593Smuzhiyun 			/*
3345*4882a593Smuzhiyun 			 * There is a minimun of 3 SGEs used for every
3346*4882a593Smuzhiyun 			 * protection data segment.
3347*4882a593Smuzhiyun 			 */
3348*4882a593Smuzhiyun 			if (((lpfc_cmd->prot_seg_cnt * 3) >
3349*4882a593Smuzhiyun 					(phba->cfg_total_seg_cnt - 2)) &&
3350*4882a593Smuzhiyun 			    !phba->cfg_xpsgl) {
3351*4882a593Smuzhiyun 				ret = 2;
3352*4882a593Smuzhiyun 				goto err;
3353*4882a593Smuzhiyun 			}
3354*4882a593Smuzhiyun 
3355*4882a593Smuzhiyun 			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3356*4882a593Smuzhiyun 					datasegcnt, protsegcnt, lpfc_cmd);
3357*4882a593Smuzhiyun 
3358*4882a593Smuzhiyun 			/* we should have 3 or more entries in buffer list */
3359*4882a593Smuzhiyun 			if (num_sge < 3 ||
3360*4882a593Smuzhiyun 			    (num_sge > phba->cfg_total_seg_cnt &&
3361*4882a593Smuzhiyun 			     !phba->cfg_xpsgl)) {
3362*4882a593Smuzhiyun 				ret = 2;
3363*4882a593Smuzhiyun 				goto err;
3364*4882a593Smuzhiyun 			}
3365*4882a593Smuzhiyun 			break;
3366*4882a593Smuzhiyun 
3367*4882a593Smuzhiyun 		case LPFC_PG_TYPE_INVALID:
3368*4882a593Smuzhiyun 		default:
3369*4882a593Smuzhiyun 			scsi_dma_unmap(scsi_cmnd);
3370*4882a593Smuzhiyun 			lpfc_cmd->seg_cnt = 0;
3371*4882a593Smuzhiyun 
3372*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3373*4882a593Smuzhiyun 					"9083 Unexpected protection group %i\n",
3374*4882a593Smuzhiyun 					prot_group_type);
3375*4882a593Smuzhiyun 			return 2;
3376*4882a593Smuzhiyun 		}
3377*4882a593Smuzhiyun 	}
3378*4882a593Smuzhiyun 
3379*4882a593Smuzhiyun 	switch (scsi_get_prot_op(scsi_cmnd)) {
3380*4882a593Smuzhiyun 	case SCSI_PROT_WRITE_STRIP:
3381*4882a593Smuzhiyun 	case SCSI_PROT_READ_STRIP:
3382*4882a593Smuzhiyun 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3383*4882a593Smuzhiyun 		break;
3384*4882a593Smuzhiyun 	case SCSI_PROT_WRITE_INSERT:
3385*4882a593Smuzhiyun 	case SCSI_PROT_READ_INSERT:
3386*4882a593Smuzhiyun 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3387*4882a593Smuzhiyun 		break;
3388*4882a593Smuzhiyun 	case SCSI_PROT_WRITE_PASS:
3389*4882a593Smuzhiyun 	case SCSI_PROT_READ_PASS:
3390*4882a593Smuzhiyun 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3391*4882a593Smuzhiyun 		break;
3392*4882a593Smuzhiyun 	}
3393*4882a593Smuzhiyun 
3394*4882a593Smuzhiyun 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3395*4882a593Smuzhiyun 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3396*4882a593Smuzhiyun 
3397*4882a593Smuzhiyun 	/*
3398*4882a593Smuzhiyun 	 * Due to difference in data length between DIF/non-DIF paths,
3399*4882a593Smuzhiyun 	 * we need to set word 4 of IOCB here
3400*4882a593Smuzhiyun 	 */
3401*4882a593Smuzhiyun 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3402*4882a593Smuzhiyun 
3403*4882a593Smuzhiyun 	/*
3404*4882a593Smuzhiyun 	 * For First burst, we may need to adjust the initial transfer
3405*4882a593Smuzhiyun 	 * length for DIF
3406*4882a593Smuzhiyun 	 */
3407*4882a593Smuzhiyun 	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3408*4882a593Smuzhiyun 	    (fcpdl < vport->cfg_first_burst_size))
3409*4882a593Smuzhiyun 		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3410*4882a593Smuzhiyun 
3411*4882a593Smuzhiyun 	/*
3412*4882a593Smuzhiyun 	 * If the OAS driver feature is enabled and the lun is enabled for
3413*4882a593Smuzhiyun 	 * OAS, set the oas iocb related flags.
3414*4882a593Smuzhiyun 	 */
3415*4882a593Smuzhiyun 	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3416*4882a593Smuzhiyun 		scsi_cmnd->device->hostdata)->oas_enabled)
3417*4882a593Smuzhiyun 		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3418*4882a593Smuzhiyun 
3419*4882a593Smuzhiyun 	return 0;
3420*4882a593Smuzhiyun err:
3421*4882a593Smuzhiyun 	if (lpfc_cmd->seg_cnt)
3422*4882a593Smuzhiyun 		scsi_dma_unmap(scsi_cmnd);
3423*4882a593Smuzhiyun 	if (lpfc_cmd->prot_seg_cnt)
3424*4882a593Smuzhiyun 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3425*4882a593Smuzhiyun 			     scsi_prot_sg_count(scsi_cmnd),
3426*4882a593Smuzhiyun 			     scsi_cmnd->sc_data_direction);
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3429*4882a593Smuzhiyun 			"9084 Cannot setup S/G List for HBA"
3430*4882a593Smuzhiyun 			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3431*4882a593Smuzhiyun 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3432*4882a593Smuzhiyun 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3433*4882a593Smuzhiyun 			prot_group_type, num_sge);
3434*4882a593Smuzhiyun 
3435*4882a593Smuzhiyun 	lpfc_cmd->seg_cnt = 0;
3436*4882a593Smuzhiyun 	lpfc_cmd->prot_seg_cnt = 0;
3437*4882a593Smuzhiyun 	return ret;
3438*4882a593Smuzhiyun }
3439*4882a593Smuzhiyun 
3440*4882a593Smuzhiyun /**
3441*4882a593Smuzhiyun  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3442*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
3443*4882a593Smuzhiyun  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3444*4882a593Smuzhiyun  *
3445*4882a593Smuzhiyun  * This routine wraps the actual DMA mapping function pointer from the
3446*4882a593Smuzhiyun  * lpfc_hba struct.
3447*4882a593Smuzhiyun  *
3448*4882a593Smuzhiyun  * Return codes:
3449*4882a593Smuzhiyun  *	1 - Error
3450*4882a593Smuzhiyun  *	0 - Success
3451*4882a593Smuzhiyun  **/
3452*4882a593Smuzhiyun static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_cmd)3453*4882a593Smuzhiyun lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3454*4882a593Smuzhiyun {
3455*4882a593Smuzhiyun 	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3456*4882a593Smuzhiyun }
3457*4882a593Smuzhiyun 
3458*4882a593Smuzhiyun /**
3459*4882a593Smuzhiyun  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3460*4882a593Smuzhiyun  * using BlockGuard.
3461*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
3462*4882a593Smuzhiyun  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3463*4882a593Smuzhiyun  *
3464*4882a593Smuzhiyun  * This routine wraps the actual DMA mapping function pointer from the
3465*4882a593Smuzhiyun  * lpfc_hba struct.
3466*4882a593Smuzhiyun  *
3467*4882a593Smuzhiyun  * Return codes:
3468*4882a593Smuzhiyun  *	1 - Error
3469*4882a593Smuzhiyun  *	0 - Success
3470*4882a593Smuzhiyun  **/
3471*4882a593Smuzhiyun static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_cmd)3472*4882a593Smuzhiyun lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3473*4882a593Smuzhiyun {
3474*4882a593Smuzhiyun 	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3475*4882a593Smuzhiyun }
3476*4882a593Smuzhiyun 
/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.  Three error classes generate an event:
 * queue-full/busy status, a check condition with sense data on READ_10/
 * WRITE_10, and an FCP read-check (residual/fcpi_parm) mismatch.  The
 * event is queued on phba->work_list and the worker thread is woken to
 * deliver it; anything else returns without posting.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* Nothing to report against a missing or inactive remote node */
	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		/* Check condition with valid sense data on a 10-byte
		 * read/write: report sense key/asc/ascq to the event.
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		/* Fixed-format sense: key in byte 2 (low nibble),
		 * asc/ascq in bytes 12/13.
		 */
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	/* Queue the event for the worker thread under hbalock */
	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}
3571*4882a593Smuzhiyun 
3572*4882a593Smuzhiyun /**
3573*4882a593Smuzhiyun  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3574*4882a593Smuzhiyun  * @phba: The HBA for which this call is being executed.
3575*4882a593Smuzhiyun  * @psb: The scsi buffer which is going to be un-mapped.
3576*4882a593Smuzhiyun  *
3577*4882a593Smuzhiyun  * This routine does DMA un-mapping of scatter gather list of scsi command
3578*4882a593Smuzhiyun  * field of @lpfc_cmd for device with SLI-3 interface spec.
3579*4882a593Smuzhiyun  **/
3580*4882a593Smuzhiyun static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba,struct lpfc_io_buf * psb)3581*4882a593Smuzhiyun lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3582*4882a593Smuzhiyun {
3583*4882a593Smuzhiyun 	/*
3584*4882a593Smuzhiyun 	 * There are only two special cases to consider.  (1) the scsi command
3585*4882a593Smuzhiyun 	 * requested scatter-gather usage or (2) the scsi command allocated
3586*4882a593Smuzhiyun 	 * a request buffer, but did not request use_sg.  There is a third
3587*4882a593Smuzhiyun 	 * case, but it does not require resource deallocation.
3588*4882a593Smuzhiyun 	 */
3589*4882a593Smuzhiyun 	if (psb->seg_cnt > 0)
3590*4882a593Smuzhiyun 		scsi_dma_unmap(psb->pCmd);
3591*4882a593Smuzhiyun 	if (psb->prot_seg_cnt > 0)
3592*4882a593Smuzhiyun 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3593*4882a593Smuzhiyun 				scsi_prot_sg_count(psb->pCmd),
3594*4882a593Smuzhiyun 				psb->pCmd->sc_data_direction);
3595*4882a593Smuzhiyun }
3596*4882a593Smuzhiyun 
3597*4882a593Smuzhiyun /**
 * lpfc_handle_fcp_err - FCP response handler
3599*4882a593Smuzhiyun  * @vport: The virtual port for which this call is being executed.
3600*4882a593Smuzhiyun  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3601*4882a593Smuzhiyun  * @rsp_iocb: The response IOCB which contains FCP error.
3602*4882a593Smuzhiyun  *
3603*4882a593Smuzhiyun  * This routine is called to process response IOCB with status field
3604*4882a593Smuzhiyun  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3605*4882a593Smuzhiyun  * based upon SCSI and FCP error.
3606*4882a593Smuzhiyun  **/
3607*4882a593Smuzhiyun static void
lpfc_handle_fcp_err(struct lpfc_vport * vport,struct lpfc_io_buf * lpfc_cmd,struct lpfc_iocbq * rsp_iocb)3608*4882a593Smuzhiyun lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3609*4882a593Smuzhiyun 		    struct lpfc_iocbq *rsp_iocb)
3610*4882a593Smuzhiyun {
3611*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
3612*4882a593Smuzhiyun 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3613*4882a593Smuzhiyun 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3614*4882a593Smuzhiyun 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3615*4882a593Smuzhiyun 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3616*4882a593Smuzhiyun 	uint32_t resp_info = fcprsp->rspStatus2;
3617*4882a593Smuzhiyun 	uint32_t scsi_status = fcprsp->rspStatus3;
3618*4882a593Smuzhiyun 	uint32_t *lp;
3619*4882a593Smuzhiyun 	uint32_t host_status = DID_OK;
3620*4882a593Smuzhiyun 	uint32_t rsplen = 0;
3621*4882a593Smuzhiyun 	uint32_t fcpDl;
3622*4882a593Smuzhiyun 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3623*4882a593Smuzhiyun 
3624*4882a593Smuzhiyun 
3625*4882a593Smuzhiyun 	/*
3626*4882a593Smuzhiyun 	 *  If this is a task management command, there is no
3627*4882a593Smuzhiyun 	 *  scsi packet associated with this lpfc_cmd.  The driver
3628*4882a593Smuzhiyun 	 *  consumes it.
3629*4882a593Smuzhiyun 	 */
3630*4882a593Smuzhiyun 	if (fcpcmd->fcpCntl2) {
3631*4882a593Smuzhiyun 		scsi_status = 0;
3632*4882a593Smuzhiyun 		goto out;
3633*4882a593Smuzhiyun 	}
3634*4882a593Smuzhiyun 
3635*4882a593Smuzhiyun 	if (resp_info & RSP_LEN_VALID) {
3636*4882a593Smuzhiyun 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
3637*4882a593Smuzhiyun 		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3638*4882a593Smuzhiyun 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3639*4882a593Smuzhiyun 					 "2719 Invalid response length: "
3640*4882a593Smuzhiyun 					 "tgt x%x lun x%llx cmnd x%x rsplen "
3641*4882a593Smuzhiyun 					 "x%x\n", cmnd->device->id,
3642*4882a593Smuzhiyun 					 cmnd->device->lun, cmnd->cmnd[0],
3643*4882a593Smuzhiyun 					 rsplen);
3644*4882a593Smuzhiyun 			host_status = DID_ERROR;
3645*4882a593Smuzhiyun 			goto out;
3646*4882a593Smuzhiyun 		}
3647*4882a593Smuzhiyun 		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3648*4882a593Smuzhiyun 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3649*4882a593Smuzhiyun 				 "2757 Protocol failure detected during "
3650*4882a593Smuzhiyun 				 "processing of FCP I/O op: "
3651*4882a593Smuzhiyun 				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3652*4882a593Smuzhiyun 				 cmnd->device->id,
3653*4882a593Smuzhiyun 				 cmnd->device->lun, cmnd->cmnd[0],
3654*4882a593Smuzhiyun 				 fcprsp->rspInfo3);
3655*4882a593Smuzhiyun 			host_status = DID_ERROR;
3656*4882a593Smuzhiyun 			goto out;
3657*4882a593Smuzhiyun 		}
3658*4882a593Smuzhiyun 	}
3659*4882a593Smuzhiyun 
3660*4882a593Smuzhiyun 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3661*4882a593Smuzhiyun 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3662*4882a593Smuzhiyun 		if (snslen > SCSI_SENSE_BUFFERSIZE)
3663*4882a593Smuzhiyun 			snslen = SCSI_SENSE_BUFFERSIZE;
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 		if (resp_info & RSP_LEN_VALID)
3666*4882a593Smuzhiyun 		  rsplen = be32_to_cpu(fcprsp->rspRspLen);
3667*4882a593Smuzhiyun 		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3668*4882a593Smuzhiyun 	}
3669*4882a593Smuzhiyun 	lp = (uint32_t *)cmnd->sense_buffer;
3670*4882a593Smuzhiyun 
3671*4882a593Smuzhiyun 	/* special handling for under run conditions */
3672*4882a593Smuzhiyun 	if (!scsi_status && (resp_info & RESID_UNDER)) {
3673*4882a593Smuzhiyun 		/* don't log under runs if fcp set... */
3674*4882a593Smuzhiyun 		if (vport->cfg_log_verbose & LOG_FCP)
3675*4882a593Smuzhiyun 			logit = LOG_FCP_ERROR;
3676*4882a593Smuzhiyun 		/* unless operator says so */
3677*4882a593Smuzhiyun 		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3678*4882a593Smuzhiyun 			logit = LOG_FCP_UNDER;
3679*4882a593Smuzhiyun 	}
3680*4882a593Smuzhiyun 
3681*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
3682*4882a593Smuzhiyun 			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3683*4882a593Smuzhiyun 			 "Data: x%x x%x x%x x%x x%x\n",
3684*4882a593Smuzhiyun 			 cmnd->cmnd[0], scsi_status,
3685*4882a593Smuzhiyun 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3686*4882a593Smuzhiyun 			 be32_to_cpu(fcprsp->rspResId),
3687*4882a593Smuzhiyun 			 be32_to_cpu(fcprsp->rspSnsLen),
3688*4882a593Smuzhiyun 			 be32_to_cpu(fcprsp->rspRspLen),
3689*4882a593Smuzhiyun 			 fcprsp->rspInfo3);
3690*4882a593Smuzhiyun 
3691*4882a593Smuzhiyun 	scsi_set_resid(cmnd, 0);
3692*4882a593Smuzhiyun 	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3693*4882a593Smuzhiyun 	if (resp_info & RESID_UNDER) {
3694*4882a593Smuzhiyun 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3695*4882a593Smuzhiyun 
3696*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3697*4882a593Smuzhiyun 				 "9025 FCP Underrun, expected %d, "
3698*4882a593Smuzhiyun 				 "residual %d Data: x%x x%x x%x\n",
3699*4882a593Smuzhiyun 				 fcpDl,
3700*4882a593Smuzhiyun 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3701*4882a593Smuzhiyun 				 cmnd->underflow);
3702*4882a593Smuzhiyun 
3703*4882a593Smuzhiyun 		/*
3704*4882a593Smuzhiyun 		 * If there is an under run, check if under run reported by
3705*4882a593Smuzhiyun 		 * storage array is same as the under run reported by HBA.
3706*4882a593Smuzhiyun 		 * If this is not same, there is a dropped frame.
3707*4882a593Smuzhiyun 		 */
3708*4882a593Smuzhiyun 		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3709*4882a593Smuzhiyun 			lpfc_printf_vlog(vport, KERN_WARNING,
3710*4882a593Smuzhiyun 					 LOG_FCP | LOG_FCP_ERROR,
3711*4882a593Smuzhiyun 					 "9026 FCP Read Check Error "
3712*4882a593Smuzhiyun 					 "and Underrun Data: x%x x%x x%x x%x\n",
3713*4882a593Smuzhiyun 					 fcpDl,
3714*4882a593Smuzhiyun 					 scsi_get_resid(cmnd), fcpi_parm,
3715*4882a593Smuzhiyun 					 cmnd->cmnd[0]);
3716*4882a593Smuzhiyun 			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3717*4882a593Smuzhiyun 			host_status = DID_ERROR;
3718*4882a593Smuzhiyun 		}
3719*4882a593Smuzhiyun 		/*
3720*4882a593Smuzhiyun 		 * The cmnd->underflow is the minimum number of bytes that must
3721*4882a593Smuzhiyun 		 * be transferred for this command.  Provided a sense condition
3722*4882a593Smuzhiyun 		 * is not present, make sure the actual amount transferred is at
3723*4882a593Smuzhiyun 		 * least the underflow value or fail.
3724*4882a593Smuzhiyun 		 */
3725*4882a593Smuzhiyun 		if (!(resp_info & SNS_LEN_VALID) &&
3726*4882a593Smuzhiyun 		    (scsi_status == SAM_STAT_GOOD) &&
3727*4882a593Smuzhiyun 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3728*4882a593Smuzhiyun 		     < cmnd->underflow)) {
3729*4882a593Smuzhiyun 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3730*4882a593Smuzhiyun 					 "9027 FCP command x%x residual "
3731*4882a593Smuzhiyun 					 "underrun converted to error "
3732*4882a593Smuzhiyun 					 "Data: x%x x%x x%x\n",
3733*4882a593Smuzhiyun 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
3734*4882a593Smuzhiyun 					 scsi_get_resid(cmnd), cmnd->underflow);
3735*4882a593Smuzhiyun 			host_status = DID_ERROR;
3736*4882a593Smuzhiyun 		}
3737*4882a593Smuzhiyun 	} else if (resp_info & RESID_OVER) {
3738*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3739*4882a593Smuzhiyun 				 "9028 FCP command x%x residual overrun error. "
3740*4882a593Smuzhiyun 				 "Data: x%x x%x\n", cmnd->cmnd[0],
3741*4882a593Smuzhiyun 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3742*4882a593Smuzhiyun 		host_status = DID_ERROR;
3743*4882a593Smuzhiyun 
3744*4882a593Smuzhiyun 	/*
3745*4882a593Smuzhiyun 	 * Check SLI validation that all the transfer was actually done
3746*4882a593Smuzhiyun 	 * (fcpi_parm should be zero). Apply check only to reads.
3747*4882a593Smuzhiyun 	 */
3748*4882a593Smuzhiyun 	} else if (fcpi_parm) {
3749*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3750*4882a593Smuzhiyun 				 "9029 FCP %s Check Error xri x%x  Data: "
3751*4882a593Smuzhiyun 				 "x%x x%x x%x x%x x%x\n",
3752*4882a593Smuzhiyun 				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3753*4882a593Smuzhiyun 				 "Read" : "Write"),
3754*4882a593Smuzhiyun 				 ((phba->sli_rev == LPFC_SLI_REV4) ?
3755*4882a593Smuzhiyun 				 lpfc_cmd->cur_iocbq.sli4_xritag :
3756*4882a593Smuzhiyun 				 rsp_iocb->iocb.ulpContext),
3757*4882a593Smuzhiyun 				 fcpDl, be32_to_cpu(fcprsp->rspResId),
3758*4882a593Smuzhiyun 				 fcpi_parm, cmnd->cmnd[0], scsi_status);
3759*4882a593Smuzhiyun 
3760*4882a593Smuzhiyun 		/* There is some issue with the LPe12000 that causes it
3761*4882a593Smuzhiyun 		 * to miscalculate the fcpi_parm and falsely trip this
3762*4882a593Smuzhiyun 		 * recovery logic.  Detect this case and don't error when true.
3763*4882a593Smuzhiyun 		 */
3764*4882a593Smuzhiyun 		if (fcpi_parm > fcpDl)
3765*4882a593Smuzhiyun 			goto out;
3766*4882a593Smuzhiyun 
3767*4882a593Smuzhiyun 		switch (scsi_status) {
3768*4882a593Smuzhiyun 		case SAM_STAT_GOOD:
3769*4882a593Smuzhiyun 		case SAM_STAT_CHECK_CONDITION:
3770*4882a593Smuzhiyun 			/* Fabric dropped a data frame. Fail any successful
3771*4882a593Smuzhiyun 			 * command in which we detected dropped frames.
3772*4882a593Smuzhiyun 			 * A status of good or some check conditions could
3773*4882a593Smuzhiyun 			 * be considered a successful command.
3774*4882a593Smuzhiyun 			 */
3775*4882a593Smuzhiyun 			host_status = DID_ERROR;
3776*4882a593Smuzhiyun 			break;
3777*4882a593Smuzhiyun 		}
3778*4882a593Smuzhiyun 		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3779*4882a593Smuzhiyun 	}
3780*4882a593Smuzhiyun 
3781*4882a593Smuzhiyun  out:
3782*4882a593Smuzhiyun 	cmnd->result = host_status << 16 | scsi_status;
3783*4882a593Smuzhiyun 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3784*4882a593Smuzhiyun }
3785*4882a593Smuzhiyun 
/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 *
 * The lpfc_cmd->buf_lock is held while the command result is computed and
 * pCmd is cleared, and is re-taken after the scsi_done() upcall to wake any
 * abort thread waiting on this command.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	int idx;
	uint32_t logit = LOG_FCP;

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_cmd->buf_lock);

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd || !phba) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2621 IO completion: Not an active IO\n");
		spin_unlock(&lpfc_cmd->buf_lock);
		return;
	}

	/* Per-hardware-queue completion statistics */
	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
	if (phba->sli4_hba.hdwq)
		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exhange busy status from HBA */
	if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif

	if (unlikely(lpfc_cmd->status)) {
		/* Normalize the IOCB status before dispatching on it */
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		/* Suppress log noise for plain underruns unless requested */
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9030 FCP cmd x%x failed <%d/%lld> "
			 "status: x%x result: x%x "
			 "sid: x%x did: x%x oxid: x%x "
			 "Data: x%x x%x\n",
			 cmd->cmnd[0],
			 cmd->device ? cmd->device->id : 0xffff,
			 cmd->device ? cmd->device->lun : 0xffff,
			 lpfc_cmd->status, lpfc_cmd->result,
			 vport->fc_myDID,
			 (pnode) ? pnode->nlp_DID : 0,
			 phba->sli_rev == LPFC_SLI_REV4 ?
			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
			 pIocbOut->iocb.ulpContext,
			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
			/* Queue a fabric-busy event for the worker thread */
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = DID_NO_CONNECT << 16;
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = DID_REQUEUE << 16;
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate and RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
			fallthrough;
		default:
			cmd->result = DID_ERROR << 16;
			break;
		}

		/* Node gone or unmapped: report transport disruption */
		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
				      SAM_STAT_BUSY;
	} else
		cmd->result = DID_OK << 16;

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd x%px, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(vport, lpfc_cmd);
	/* Ramp down the node queue depth for slow-completing READ/WRITE */
	if (vport->cfg_max_scsicmpl_time &&
	   time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* Mark the IO no longer active before releasing the lock */
	lpfc_cmd->pCmd = NULL;
	spin_unlock(&lpfc_cmd->buf_lock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->ts_cmd_start) {
		lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
		lpfc_cmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_cmd);
	}
#endif
	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	cmd->scsi_done(cmd);

	/*
	 * If there is an abort thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock(&lpfc_cmd->buf_lock);
	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock(&lpfc_cmd->buf_lock);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
4054*4882a593Smuzhiyun 
4055*4882a593Smuzhiyun /**
4056*4882a593Smuzhiyun  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4057*4882a593Smuzhiyun  * @data: A pointer to the immediate command data portion of the IOCB.
4058*4882a593Smuzhiyun  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4059*4882a593Smuzhiyun  *
4060*4882a593Smuzhiyun  * The routine copies the entire FCP command from @fcp_cmnd to @data while
4061*4882a593Smuzhiyun  * byte swapping the data to big endian format for transmission on the wire.
4062*4882a593Smuzhiyun  **/
4063*4882a593Smuzhiyun static void
lpfc_fcpcmd_to_iocb(uint8_t * data,struct fcp_cmnd * fcp_cmnd)4064*4882a593Smuzhiyun lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4065*4882a593Smuzhiyun {
4066*4882a593Smuzhiyun 	int i, j;
4067*4882a593Smuzhiyun 	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4068*4882a593Smuzhiyun 	     i += sizeof(uint32_t), j++) {
4069*4882a593Smuzhiyun 		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4070*4882a593Smuzhiyun 	}
4071*4882a593Smuzhiyun }
4072*4882a593Smuzhiyun 
/**
 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.  It fills in the FCP LUN
 * and CDB, selects the IOCB command (write/read/control) from the data
 * direction, applies first-burst settings when the node negotiated them,
 * and finishes the per-node IOCB fields (RPI context, FCP2 recovery, class,
 * completion handler, timeout).
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	struct lpfc_sli4_hdw_queue *hdwq = NULL;
	int datadir = scsi_cmnd->sc_data_direction;
	int idx;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	/* Nothing to prepare if the node is gone or inactive */
	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	/* Copy the CDB and zero-pad the remainder of the fixed CDB field */
	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
	idx = lpfc_cmd->hdwq_no;
	if (phba->sli4_hba.hdwq)
		hdwq = &phba->sli4_hba.hdwq[idx];

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			/* First burst: cap XFER_RDY to the negotiated size */
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			if (hdwq)
				hdwq->scsi_cstat.output_requests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			if (hdwq)
				hdwq->scsi_cstat.input_requests++;
		}
	} else {
		/* No data transfer: plain FCP command IOCB */
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		if (hdwq)
			hdwq->scsi_cstat.control_requests++;
	}
	/* SLI-3 without BlockGuard embeds the FCP_CMND in the IOCB itself */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
4182*4882a593Smuzhiyun 
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_io_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	/* TM commands can only be sent to an active, fully mapped node */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	/* fcpCntl2 carries the task management flags for the FCP_CMND IU */
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	/* SLI4 uses the driver-assigned RPI index, not the raw RPI */
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}
4252*4882a593Smuzhiyun 
4253*4882a593Smuzhiyun /**
4254*4882a593Smuzhiyun  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4255*4882a593Smuzhiyun  * @phba: The hba struct for which this call is being executed.
4256*4882a593Smuzhiyun  * @dev_grp: The HBA PCI-Device group number.
4257*4882a593Smuzhiyun  *
4258*4882a593Smuzhiyun  * This routine sets up the SCSI interface API function jump table in @phba
4259*4882a593Smuzhiyun  * struct.
4260*4882a593Smuzhiyun  * Returns: 0 - success, -ENODEV - failure.
4261*4882a593Smuzhiyun  **/
4262*4882a593Smuzhiyun int
lpfc_scsi_api_table_setup(struct lpfc_hba * phba,uint8_t dev_grp)4263*4882a593Smuzhiyun lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4264*4882a593Smuzhiyun {
4265*4882a593Smuzhiyun 
4266*4882a593Smuzhiyun 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4267*4882a593Smuzhiyun 	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4268*4882a593Smuzhiyun 
4269*4882a593Smuzhiyun 	switch (dev_grp) {
4270*4882a593Smuzhiyun 	case LPFC_PCI_DEV_LP:
4271*4882a593Smuzhiyun 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4272*4882a593Smuzhiyun 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4273*4882a593Smuzhiyun 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4274*4882a593Smuzhiyun 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4275*4882a593Smuzhiyun 		break;
4276*4882a593Smuzhiyun 	case LPFC_PCI_DEV_OC:
4277*4882a593Smuzhiyun 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4278*4882a593Smuzhiyun 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4279*4882a593Smuzhiyun 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4280*4882a593Smuzhiyun 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4281*4882a593Smuzhiyun 		break;
4282*4882a593Smuzhiyun 	default:
4283*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4284*4882a593Smuzhiyun 				"1418 Invalid HBA PCI-device group: 0x%x\n",
4285*4882a593Smuzhiyun 				dev_grp);
4286*4882a593Smuzhiyun 		return -ENODEV;
4287*4882a593Smuzhiyun 		break;
4288*4882a593Smuzhiyun 	}
4289*4882a593Smuzhiyun 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4290*4882a593Smuzhiyun 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4291*4882a593Smuzhiyun 	return 0;
4292*4882a593Smuzhiyun }
4293*4882a593Smuzhiyun 
4294*4882a593Smuzhiyun /**
4295*4882a593Smuzhiyun  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
4296*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
4297*4882a593Smuzhiyun  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4298*4882a593Smuzhiyun  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4299*4882a593Smuzhiyun  *
4300*4882a593Smuzhiyun  * This routine is IOCB completion routine for device reset and target reset
4301*4882a593Smuzhiyun  * routine. This routine release scsi buffer associated with lpfc_cmd.
4302*4882a593Smuzhiyun  **/
4303*4882a593Smuzhiyun static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocbq,struct lpfc_iocbq * rspiocbq)4304*4882a593Smuzhiyun lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4305*4882a593Smuzhiyun 			struct lpfc_iocbq *cmdiocbq,
4306*4882a593Smuzhiyun 			struct lpfc_iocbq *rspiocbq)
4307*4882a593Smuzhiyun {
4308*4882a593Smuzhiyun 	struct lpfc_io_buf *lpfc_cmd =
4309*4882a593Smuzhiyun 		(struct lpfc_io_buf *) cmdiocbq->context1;
4310*4882a593Smuzhiyun 	if (lpfc_cmd)
4311*4882a593Smuzhiyun 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4312*4882a593Smuzhiyun 	return;
4313*4882a593Smuzhiyun }
4314*4882a593Smuzhiyun 
4315*4882a593Smuzhiyun /**
4316*4882a593Smuzhiyun  * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
4317*4882a593Smuzhiyun  *                             if issuing a pci_bus_reset is possibly unsafe
4318*4882a593Smuzhiyun  * @phba: lpfc_hba pointer.
4319*4882a593Smuzhiyun  *
4320*4882a593Smuzhiyun  * Description:
4321*4882a593Smuzhiyun  * Walks the bus_list to ensure only PCI devices with Emulex
4322*4882a593Smuzhiyun  * vendor id, device ids that support hot reset, and only one occurrence
4323*4882a593Smuzhiyun  * of function 0.
4324*4882a593Smuzhiyun  *
4325*4882a593Smuzhiyun  * Returns:
4326*4882a593Smuzhiyun  * -EBADSLT,  detected invalid device
4327*4882a593Smuzhiyun  *      0,    successful
4328*4882a593Smuzhiyun  */
4329*4882a593Smuzhiyun int
lpfc_check_pci_resettable(struct lpfc_hba * phba)4330*4882a593Smuzhiyun lpfc_check_pci_resettable(struct lpfc_hba *phba)
4331*4882a593Smuzhiyun {
4332*4882a593Smuzhiyun 	const struct pci_dev *pdev = phba->pcidev;
4333*4882a593Smuzhiyun 	struct pci_dev *ptr = NULL;
4334*4882a593Smuzhiyun 	u8 counter = 0;
4335*4882a593Smuzhiyun 
4336*4882a593Smuzhiyun 	/* Walk the list of devices on the pci_dev's bus */
4337*4882a593Smuzhiyun 	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4338*4882a593Smuzhiyun 		/* Check for Emulex Vendor ID */
4339*4882a593Smuzhiyun 		if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4340*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4341*4882a593Smuzhiyun 					"8346 Non-Emulex vendor found: "
4342*4882a593Smuzhiyun 					"0x%04x\n", ptr->vendor);
4343*4882a593Smuzhiyun 			return -EBADSLT;
4344*4882a593Smuzhiyun 		}
4345*4882a593Smuzhiyun 
4346*4882a593Smuzhiyun 		/* Check for valid Emulex Device ID */
4347*4882a593Smuzhiyun 		switch (ptr->device) {
4348*4882a593Smuzhiyun 		case PCI_DEVICE_ID_LANCER_FC:
4349*4882a593Smuzhiyun 		case PCI_DEVICE_ID_LANCER_G6_FC:
4350*4882a593Smuzhiyun 		case PCI_DEVICE_ID_LANCER_G7_FC:
4351*4882a593Smuzhiyun 			break;
4352*4882a593Smuzhiyun 		default:
4353*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4354*4882a593Smuzhiyun 					"8347 Invalid device found: "
4355*4882a593Smuzhiyun 					"0x%04x\n", ptr->device);
4356*4882a593Smuzhiyun 			return -EBADSLT;
4357*4882a593Smuzhiyun 		}
4358*4882a593Smuzhiyun 
4359*4882a593Smuzhiyun 		/* Check for only one function 0 ID to ensure only one HBA on
4360*4882a593Smuzhiyun 		 * secondary bus
4361*4882a593Smuzhiyun 		 */
4362*4882a593Smuzhiyun 		if (ptr->devfn == 0) {
4363*4882a593Smuzhiyun 			if (++counter > 1) {
4364*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4365*4882a593Smuzhiyun 						"8348 More than one device on "
4366*4882a593Smuzhiyun 						"secondary bus found\n");
4367*4882a593Smuzhiyun 				return -EBADSLT;
4368*4882a593Smuzhiyun 			}
4369*4882a593Smuzhiyun 		}
4370*4882a593Smuzhiyun 	}
4371*4882a593Smuzhiyun 
4372*4882a593Smuzhiyun 	return 0;
4373*4882a593Smuzhiyun }
4374*4882a593Smuzhiyun 
4375*4882a593Smuzhiyun /**
4376*4882a593Smuzhiyun  * lpfc_info - Info entry point of scsi_host_template data structure
4377*4882a593Smuzhiyun  * @host: The scsi host for which this call is being executed.
4378*4882a593Smuzhiyun  *
4379*4882a593Smuzhiyun  * This routine provides module information about hba.
4380*4882a593Smuzhiyun  *
4381*4882a593Smuzhiyun  * Reutrn code:
4382*4882a593Smuzhiyun  *   Pointer to char - Success.
4383*4882a593Smuzhiyun  **/
4384*4882a593Smuzhiyun const char *
lpfc_info(struct Scsi_Host * host)4385*4882a593Smuzhiyun lpfc_info(struct Scsi_Host *host)
4386*4882a593Smuzhiyun {
4387*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4388*4882a593Smuzhiyun 	struct lpfc_hba   *phba = vport->phba;
4389*4882a593Smuzhiyun 	int link_speed = 0;
4390*4882a593Smuzhiyun 	static char lpfcinfobuf[384];
4391*4882a593Smuzhiyun 	char tmp[384] = {0};
4392*4882a593Smuzhiyun 
4393*4882a593Smuzhiyun 	memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
4394*4882a593Smuzhiyun 	if (phba && phba->pcidev){
4395*4882a593Smuzhiyun 		/* Model Description */
4396*4882a593Smuzhiyun 		scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
4397*4882a593Smuzhiyun 		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4398*4882a593Smuzhiyun 		    sizeof(lpfcinfobuf))
4399*4882a593Smuzhiyun 			goto buffer_done;
4400*4882a593Smuzhiyun 
4401*4882a593Smuzhiyun 		/* PCI Info */
4402*4882a593Smuzhiyun 		scnprintf(tmp, sizeof(tmp),
4403*4882a593Smuzhiyun 			  " on PCI bus %02x device %02x irq %d",
4404*4882a593Smuzhiyun 			  phba->pcidev->bus->number, phba->pcidev->devfn,
4405*4882a593Smuzhiyun 			  phba->pcidev->irq);
4406*4882a593Smuzhiyun 		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4407*4882a593Smuzhiyun 		    sizeof(lpfcinfobuf))
4408*4882a593Smuzhiyun 			goto buffer_done;
4409*4882a593Smuzhiyun 
4410*4882a593Smuzhiyun 		/* Port Number */
4411*4882a593Smuzhiyun 		if (phba->Port[0]) {
4412*4882a593Smuzhiyun 			scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
4413*4882a593Smuzhiyun 			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4414*4882a593Smuzhiyun 			    sizeof(lpfcinfobuf))
4415*4882a593Smuzhiyun 				goto buffer_done;
4416*4882a593Smuzhiyun 		}
4417*4882a593Smuzhiyun 
4418*4882a593Smuzhiyun 		/* Link Speed */
4419*4882a593Smuzhiyun 		link_speed = lpfc_sli_port_speed_get(phba);
4420*4882a593Smuzhiyun 		if (link_speed != 0) {
4421*4882a593Smuzhiyun 			scnprintf(tmp, sizeof(tmp),
4422*4882a593Smuzhiyun 				  " Logical Link Speed: %d Mbps", link_speed);
4423*4882a593Smuzhiyun 			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4424*4882a593Smuzhiyun 			    sizeof(lpfcinfobuf))
4425*4882a593Smuzhiyun 				goto buffer_done;
4426*4882a593Smuzhiyun 		}
4427*4882a593Smuzhiyun 
4428*4882a593Smuzhiyun 		/* PCI resettable */
4429*4882a593Smuzhiyun 		if (!lpfc_check_pci_resettable(phba)) {
4430*4882a593Smuzhiyun 			scnprintf(tmp, sizeof(tmp), " PCI resettable");
4431*4882a593Smuzhiyun 			strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
4432*4882a593Smuzhiyun 		}
4433*4882a593Smuzhiyun 	}
4434*4882a593Smuzhiyun 
4435*4882a593Smuzhiyun buffer_done:
4436*4882a593Smuzhiyun 	return lpfcinfobuf;
4437*4882a593Smuzhiyun }
4438*4882a593Smuzhiyun 
4439*4882a593Smuzhiyun /**
4440*4882a593Smuzhiyun  * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
4441*4882a593Smuzhiyun  * @phba: The Hba for which this call is being executed.
4442*4882a593Smuzhiyun  *
4443*4882a593Smuzhiyun  * This routine modifies fcp_poll_timer  field of @phba by cfg_poll_tmo.
4444*4882a593Smuzhiyun  * The default value of cfg_poll_tmo is 10 milliseconds.
4445*4882a593Smuzhiyun  **/
lpfc_poll_rearm_timer(struct lpfc_hba * phba)4446*4882a593Smuzhiyun static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4447*4882a593Smuzhiyun {
4448*4882a593Smuzhiyun 	unsigned long  poll_tmo_expires =
4449*4882a593Smuzhiyun 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4450*4882a593Smuzhiyun 
4451*4882a593Smuzhiyun 	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4452*4882a593Smuzhiyun 		mod_timer(&phba->fcp_poll_timer,
4453*4882a593Smuzhiyun 			  poll_tmo_expires);
4454*4882a593Smuzhiyun }
4455*4882a593Smuzhiyun 
/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba by (re)arming it via
 * lpfc_poll_rearm_timer.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}
4466*4882a593Smuzhiyun 
/**
 * lpfc_poll_timeout - Restart polling timer
 * @t: Timer handle from which the owning lpfc_hba is recovered.
 *
 * This routine restarts fcp_poll timer, when FCP ring polling is enabled
 * and FCP Ring interrupt is disabled. It services any fast-ring events
 * pending on the FCP ring, then re-arms the poll timer.
 **/

void lpfc_poll_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		/* keep polling while ring-interrupts stay disabled */
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
4487*4882a593Smuzhiyun 
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to the Scsi_Host this command was issued on.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from scsi command and provides to firmware.
 * The midlayer's scsi_done callback is invoked after driver finished
 * processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err, idx;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* capture submission time for ktime latency statistics */
	uint64_t start = 0L;

	if (phba->ktime_on)
		start = ktime_get_ns();
#endif

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* sanity check on references */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	/* Reject protected (DIF) commands if BlockGuard was never enabled */
	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	/* Per-node queue-depth throttling: push back on the midlayer when
	 * the node already has cmd_qdepth commands outstanding.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
					 "3377 Target Queue Full, scsi Id:%d "
					 "Qdepth:%d Pending command:%d"
					 " WWNN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x, "
					 " WWPN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x",
					 ndlp->nlp_sid, ndlp->cmd_qdepth,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->nlp_nodename.u.wwn[0],
					 ndlp->nlp_nodename.u.wwn[1],
					 ndlp->nlp_nodename.u.wwn[2],
					 ndlp->nlp_nodename.u.wwn[3],
					 ndlp->nlp_nodename.u.wwn[4],
					 ndlp->nlp_nodename.u.wwn[5],
					 ndlp->nlp_nodename.u.wwn[6],
					 ndlp->nlp_nodename.u.wwn[7],
					 ndlp->nlp_portname.u.wwn[0],
					 ndlp->nlp_portname.u.wwn[1],
					 ndlp->nlp_portname.u.wwn[2],
					 ndlp->nlp_portname.u.wwn[3],
					 ndlp->nlp_portname.u.wwn[4],
					 ndlp->nlp_portname.u.wwn[5],
					 ndlp->nlp_portname.u.wwn[6],
					 ndlp->nlp_portname.u.wwn[7]);
			goto out_tgt_busy;
		}
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->ndlp = ndlp;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	/* Map the data buffers; protected I/O takes the BlockGuard path */
	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (unlikely(err)) {
		/* err == 2: unrecoverable prep failure - fail the command
		 * back to the midlayer rather than retrying.
		 */
		if (err == 2) {
			cmnd->result = DID_ERROR << 16;
			goto out_fail_command_release_buf;
		}
		goto out_host_busy_free_buf;
	}

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_cmd->ts_cmd_start = start;
		lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
		lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
	} else {
		lpfc_cmd->ts_cmd_start = 0;
	}
#endif
	if (err) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		goto out_host_busy_free_buf;
	}
	/* In polled mode, service ring events now and keep the timer armed */
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);

	return 0;

 out_host_busy_free_buf:
	/* Undo DMA mapping and the per-hdwq I/O statistics bump before
	 * returning the buffer to the pool.
	 */
	idx = lpfc_cmd->hdwq_no;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	if (phba->sli4_hba.hdwq) {
		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
		case WRITE_DATA:
			phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
			break;
		case READ_DATA:
			phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
			break;
		default:
			phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
		}
	}
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command_release_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
4713*4882a593Smuzhiyun 
4714*4882a593Smuzhiyun 
4715*4882a593Smuzhiyun /**
4716*4882a593Smuzhiyun  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4717*4882a593Smuzhiyun  * @cmnd: Pointer to scsi_cmnd data structure.
4718*4882a593Smuzhiyun  *
4719*4882a593Smuzhiyun  * This routine aborts @cmnd pending in base driver.
4720*4882a593Smuzhiyun  *
4721*4882a593Smuzhiyun  * Return code :
4722*4882a593Smuzhiyun  *   0x2003 - Error
4723*4882a593Smuzhiyun  *   0x2002 - Success
4724*4882a593Smuzhiyun  **/
4725*4882a593Smuzhiyun static int
lpfc_abort_handler(struct scsi_cmnd * cmnd)4726*4882a593Smuzhiyun lpfc_abort_handler(struct scsi_cmnd *cmnd)
4727*4882a593Smuzhiyun {
4728*4882a593Smuzhiyun 	struct Scsi_Host  *shost = cmnd->device->host;
4729*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4730*4882a593Smuzhiyun 	struct lpfc_hba   *phba = vport->phba;
4731*4882a593Smuzhiyun 	struct lpfc_iocbq *iocb;
4732*4882a593Smuzhiyun 	struct lpfc_iocbq *abtsiocb;
4733*4882a593Smuzhiyun 	struct lpfc_io_buf *lpfc_cmd;
4734*4882a593Smuzhiyun 	IOCB_t *cmd, *icmd;
4735*4882a593Smuzhiyun 	int ret = SUCCESS, status = 0;
4736*4882a593Smuzhiyun 	struct lpfc_sli_ring *pring_s4 = NULL;
4737*4882a593Smuzhiyun 	int ret_val;
4738*4882a593Smuzhiyun 	unsigned long flags;
4739*4882a593Smuzhiyun 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4740*4882a593Smuzhiyun 
4741*4882a593Smuzhiyun 	status = fc_block_scsi_eh(cmnd);
4742*4882a593Smuzhiyun 	if (status != 0 && status != SUCCESS)
4743*4882a593Smuzhiyun 		return status;
4744*4882a593Smuzhiyun 
4745*4882a593Smuzhiyun 	lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
4746*4882a593Smuzhiyun 	if (!lpfc_cmd)
4747*4882a593Smuzhiyun 		return ret;
4748*4882a593Smuzhiyun 
4749*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, flags);
4750*4882a593Smuzhiyun 	/* driver queued commands are in process of being flushed */
4751*4882a593Smuzhiyun 	if (phba->hba_flag & HBA_IOQ_FLUSH) {
4752*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4753*4882a593Smuzhiyun 			"3168 SCSI Layer abort requested I/O has been "
4754*4882a593Smuzhiyun 			"flushed by LLD.\n");
4755*4882a593Smuzhiyun 		ret = FAILED;
4756*4882a593Smuzhiyun 		goto out_unlock;
4757*4882a593Smuzhiyun 	}
4758*4882a593Smuzhiyun 
4759*4882a593Smuzhiyun 	/* Guard against IO completion being called at same time */
4760*4882a593Smuzhiyun 	spin_lock(&lpfc_cmd->buf_lock);
4761*4882a593Smuzhiyun 
4762*4882a593Smuzhiyun 	if (!lpfc_cmd->pCmd) {
4763*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4764*4882a593Smuzhiyun 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4765*4882a593Smuzhiyun 			 "x%x ID %d LUN %llu\n",
4766*4882a593Smuzhiyun 			 SUCCESS, cmnd->device->id, cmnd->device->lun);
4767*4882a593Smuzhiyun 		goto out_unlock_buf;
4768*4882a593Smuzhiyun 	}
4769*4882a593Smuzhiyun 
4770*4882a593Smuzhiyun 	iocb = &lpfc_cmd->cur_iocbq;
4771*4882a593Smuzhiyun 	if (phba->sli_rev == LPFC_SLI_REV4) {
4772*4882a593Smuzhiyun 		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
4773*4882a593Smuzhiyun 		if (!pring_s4) {
4774*4882a593Smuzhiyun 			ret = FAILED;
4775*4882a593Smuzhiyun 			goto out_unlock_buf;
4776*4882a593Smuzhiyun 		}
4777*4882a593Smuzhiyun 		spin_lock(&pring_s4->ring_lock);
4778*4882a593Smuzhiyun 	}
4779*4882a593Smuzhiyun 	/* the command is in process of being cancelled */
4780*4882a593Smuzhiyun 	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4781*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4782*4882a593Smuzhiyun 			"3169 SCSI Layer abort requested I/O has been "
4783*4882a593Smuzhiyun 			"cancelled by LLD.\n");
4784*4882a593Smuzhiyun 		ret = FAILED;
4785*4882a593Smuzhiyun 		goto out_unlock_ring;
4786*4882a593Smuzhiyun 	}
4787*4882a593Smuzhiyun 	/*
4788*4882a593Smuzhiyun 	 * If pCmd field of the corresponding lpfc_io_buf structure
4789*4882a593Smuzhiyun 	 * points to a different SCSI command, then the driver has
4790*4882a593Smuzhiyun 	 * already completed this command, but the midlayer did not
4791*4882a593Smuzhiyun 	 * see the completion before the eh fired. Just return SUCCESS.
4792*4882a593Smuzhiyun 	 */
4793*4882a593Smuzhiyun 	if (lpfc_cmd->pCmd != cmnd) {
4794*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4795*4882a593Smuzhiyun 			"3170 SCSI Layer abort requested I/O has been "
4796*4882a593Smuzhiyun 			"completed by LLD.\n");
4797*4882a593Smuzhiyun 		goto out_unlock_ring;
4798*4882a593Smuzhiyun 	}
4799*4882a593Smuzhiyun 
4800*4882a593Smuzhiyun 	BUG_ON(iocb->context1 != lpfc_cmd);
4801*4882a593Smuzhiyun 
4802*4882a593Smuzhiyun 	/* abort issued in recovery is still in progress */
4803*4882a593Smuzhiyun 	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4804*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4805*4882a593Smuzhiyun 			 "3389 SCSI Layer I/O Abort Request is pending\n");
4806*4882a593Smuzhiyun 		if (phba->sli_rev == LPFC_SLI_REV4)
4807*4882a593Smuzhiyun 			spin_unlock(&pring_s4->ring_lock);
4808*4882a593Smuzhiyun 		spin_unlock(&lpfc_cmd->buf_lock);
4809*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, flags);
4810*4882a593Smuzhiyun 		goto wait_for_cmpl;
4811*4882a593Smuzhiyun 	}
4812*4882a593Smuzhiyun 
4813*4882a593Smuzhiyun 	abtsiocb = __lpfc_sli_get_iocbq(phba);
4814*4882a593Smuzhiyun 	if (abtsiocb == NULL) {
4815*4882a593Smuzhiyun 		ret = FAILED;
4816*4882a593Smuzhiyun 		goto out_unlock_ring;
4817*4882a593Smuzhiyun 	}
4818*4882a593Smuzhiyun 
4819*4882a593Smuzhiyun 	/* Indicate the IO is being aborted by the driver. */
4820*4882a593Smuzhiyun 	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
4821*4882a593Smuzhiyun 
4822*4882a593Smuzhiyun 	/*
4823*4882a593Smuzhiyun 	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
4825*4882a593Smuzhiyun 	 * is no need to search the txcmplq. Just send an abort to the FW.
4826*4882a593Smuzhiyun 	 */
4827*4882a593Smuzhiyun 
4828*4882a593Smuzhiyun 	cmd = &iocb->iocb;
4829*4882a593Smuzhiyun 	icmd = &abtsiocb->iocb;
4830*4882a593Smuzhiyun 	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4831*4882a593Smuzhiyun 	icmd->un.acxri.abortContextTag = cmd->ulpContext;
4832*4882a593Smuzhiyun 	if (phba->sli_rev == LPFC_SLI_REV4)
4833*4882a593Smuzhiyun 		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4834*4882a593Smuzhiyun 	else
4835*4882a593Smuzhiyun 		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4836*4882a593Smuzhiyun 
4837*4882a593Smuzhiyun 	icmd->ulpLe = 1;
4838*4882a593Smuzhiyun 	icmd->ulpClass = cmd->ulpClass;
4839*4882a593Smuzhiyun 
4840*4882a593Smuzhiyun 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
4841*4882a593Smuzhiyun 	abtsiocb->hba_wqidx = iocb->hba_wqidx;
4842*4882a593Smuzhiyun 	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4843*4882a593Smuzhiyun 	if (iocb->iocb_flag & LPFC_IO_FOF)
4844*4882a593Smuzhiyun 		abtsiocb->iocb_flag |= LPFC_IO_FOF;
4845*4882a593Smuzhiyun 
4846*4882a593Smuzhiyun 	if (lpfc_is_link_up(phba))
4847*4882a593Smuzhiyun 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
4848*4882a593Smuzhiyun 	else
4849*4882a593Smuzhiyun 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4850*4882a593Smuzhiyun 
4851*4882a593Smuzhiyun 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4852*4882a593Smuzhiyun 	abtsiocb->vport = vport;
4853*4882a593Smuzhiyun 	lpfc_cmd->waitq = &waitq;
4854*4882a593Smuzhiyun 	if (phba->sli_rev == LPFC_SLI_REV4) {
4855*4882a593Smuzhiyun 		/* Note: both hbalock and ring_lock must be set here */
4856*4882a593Smuzhiyun 		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4857*4882a593Smuzhiyun 						abtsiocb, 0);
4858*4882a593Smuzhiyun 		spin_unlock(&pring_s4->ring_lock);
4859*4882a593Smuzhiyun 	} else {
4860*4882a593Smuzhiyun 		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4861*4882a593Smuzhiyun 						abtsiocb, 0);
4862*4882a593Smuzhiyun 	}
4863*4882a593Smuzhiyun 
4864*4882a593Smuzhiyun 	if (ret_val == IOCB_ERROR) {
4865*4882a593Smuzhiyun 		/* Indicate the IO is not being aborted by the driver. */
4866*4882a593Smuzhiyun 		iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
4867*4882a593Smuzhiyun 		lpfc_cmd->waitq = NULL;
4868*4882a593Smuzhiyun 		spin_unlock(&lpfc_cmd->buf_lock);
4869*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, flags);
4870*4882a593Smuzhiyun 		lpfc_sli_release_iocbq(phba, abtsiocb);
4871*4882a593Smuzhiyun 		ret = FAILED;
4872*4882a593Smuzhiyun 		goto out;
4873*4882a593Smuzhiyun 	}
4874*4882a593Smuzhiyun 
4875*4882a593Smuzhiyun 	/* no longer need the lock after this point */
4876*4882a593Smuzhiyun 	spin_unlock(&lpfc_cmd->buf_lock);
4877*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, flags);
4878*4882a593Smuzhiyun 
4879*4882a593Smuzhiyun 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4880*4882a593Smuzhiyun 		lpfc_sli_handle_fast_ring_event(phba,
4881*4882a593Smuzhiyun 			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4882*4882a593Smuzhiyun 
4883*4882a593Smuzhiyun wait_for_cmpl:
4884*4882a593Smuzhiyun 	/* Wait for abort to complete */
4885*4882a593Smuzhiyun 	wait_event_timeout(waitq,
4886*4882a593Smuzhiyun 			  (lpfc_cmd->pCmd != cmnd),
4887*4882a593Smuzhiyun 			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4888*4882a593Smuzhiyun 
4889*4882a593Smuzhiyun 	spin_lock(&lpfc_cmd->buf_lock);
4890*4882a593Smuzhiyun 
4891*4882a593Smuzhiyun 	if (lpfc_cmd->pCmd == cmnd) {
4892*4882a593Smuzhiyun 		ret = FAILED;
4893*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4894*4882a593Smuzhiyun 				 "0748 abort handler timed out waiting "
4895*4882a593Smuzhiyun 				 "for aborting I/O (xri:x%x) to complete: "
4896*4882a593Smuzhiyun 				 "ret %#x, ID %d, LUN %llu\n",
4897*4882a593Smuzhiyun 				 iocb->sli4_xritag, ret,
4898*4882a593Smuzhiyun 				 cmnd->device->id, cmnd->device->lun);
4899*4882a593Smuzhiyun 	}
4900*4882a593Smuzhiyun 
4901*4882a593Smuzhiyun 	lpfc_cmd->waitq = NULL;
4902*4882a593Smuzhiyun 
4903*4882a593Smuzhiyun 	spin_unlock(&lpfc_cmd->buf_lock);
4904*4882a593Smuzhiyun 	goto out;
4905*4882a593Smuzhiyun 
4906*4882a593Smuzhiyun out_unlock_ring:
4907*4882a593Smuzhiyun 	if (phba->sli_rev == LPFC_SLI_REV4)
4908*4882a593Smuzhiyun 		spin_unlock(&pring_s4->ring_lock);
4909*4882a593Smuzhiyun out_unlock_buf:
4910*4882a593Smuzhiyun 	spin_unlock(&lpfc_cmd->buf_lock);
4911*4882a593Smuzhiyun out_unlock:
4912*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, flags);
4913*4882a593Smuzhiyun out:
4914*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4915*4882a593Smuzhiyun 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4916*4882a593Smuzhiyun 			 "LUN %llu\n", ret, cmnd->device->id,
4917*4882a593Smuzhiyun 			 cmnd->device->lun);
4918*4882a593Smuzhiyun 	return ret;
4919*4882a593Smuzhiyun }
4920*4882a593Smuzhiyun 
4921*4882a593Smuzhiyun static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)4922*4882a593Smuzhiyun lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4923*4882a593Smuzhiyun {
4924*4882a593Smuzhiyun 	switch (task_mgmt_cmd) {
4925*4882a593Smuzhiyun 	case FCP_ABORT_TASK_SET:
4926*4882a593Smuzhiyun 		return "ABORT_TASK_SET";
4927*4882a593Smuzhiyun 	case FCP_CLEAR_TASK_SET:
4928*4882a593Smuzhiyun 		return "FCP_CLEAR_TASK_SET";
4929*4882a593Smuzhiyun 	case FCP_BUS_RESET:
4930*4882a593Smuzhiyun 		return "FCP_BUS_RESET";
4931*4882a593Smuzhiyun 	case FCP_LUN_RESET:
4932*4882a593Smuzhiyun 		return "FCP_LUN_RESET";
4933*4882a593Smuzhiyun 	case FCP_TARGET_RESET:
4934*4882a593Smuzhiyun 		return "FCP_TARGET_RESET";
4935*4882a593Smuzhiyun 	case FCP_CLEAR_ACA:
4936*4882a593Smuzhiyun 		return "FCP_CLEAR_ACA";
4937*4882a593Smuzhiyun 	case FCP_TERMINATE_TASK:
4938*4882a593Smuzhiyun 		return "FCP_TERMINATE_TASK";
4939*4882a593Smuzhiyun 	default:
4940*4882a593Smuzhiyun 		return "unknown";
4941*4882a593Smuzhiyun 	}
4942*4882a593Smuzhiyun }
4943*4882a593Smuzhiyun 
4944*4882a593Smuzhiyun 
4945*4882a593Smuzhiyun /**
4946*4882a593Smuzhiyun  * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4947*4882a593Smuzhiyun  * @vport: The virtual port for which this call is being executed.
4948*4882a593Smuzhiyun  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4949*4882a593Smuzhiyun  *
4950*4882a593Smuzhiyun  * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded
4951*4882a593Smuzhiyun  *
4952*4882a593Smuzhiyun  * Return code :
4953*4882a593Smuzhiyun  *   0x2003 - Error
4954*4882a593Smuzhiyun  *   0x2002 - Success
4955*4882a593Smuzhiyun  **/
4956*4882a593Smuzhiyun static int
lpfc_check_fcp_rsp(struct lpfc_vport * vport,struct lpfc_io_buf * lpfc_cmd)4957*4882a593Smuzhiyun lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
4958*4882a593Smuzhiyun {
4959*4882a593Smuzhiyun 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4960*4882a593Smuzhiyun 	uint32_t rsp_info;
4961*4882a593Smuzhiyun 	uint32_t rsp_len;
4962*4882a593Smuzhiyun 	uint8_t  rsp_info_code;
4963*4882a593Smuzhiyun 	int ret = FAILED;
4964*4882a593Smuzhiyun 
4965*4882a593Smuzhiyun 
4966*4882a593Smuzhiyun 	if (fcprsp == NULL)
4967*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4968*4882a593Smuzhiyun 				 "0703 fcp_rsp is missing\n");
4969*4882a593Smuzhiyun 	else {
4970*4882a593Smuzhiyun 		rsp_info = fcprsp->rspStatus2;
4971*4882a593Smuzhiyun 		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4972*4882a593Smuzhiyun 		rsp_info_code = fcprsp->rspInfo3;
4973*4882a593Smuzhiyun 
4974*4882a593Smuzhiyun 
4975*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_INFO,
4976*4882a593Smuzhiyun 				 LOG_FCP,
4977*4882a593Smuzhiyun 				 "0706 fcp_rsp valid 0x%x,"
4978*4882a593Smuzhiyun 				 " rsp len=%d code 0x%x\n",
4979*4882a593Smuzhiyun 				 rsp_info,
4980*4882a593Smuzhiyun 				 rsp_len, rsp_info_code);
4981*4882a593Smuzhiyun 
4982*4882a593Smuzhiyun 		/* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
4983*4882a593Smuzhiyun 		 * field specifies the number of valid bytes of FCP_RSP_INFO.
4984*4882a593Smuzhiyun 		 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
4985*4882a593Smuzhiyun 		 */
4986*4882a593Smuzhiyun 		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
4987*4882a593Smuzhiyun 		    ((rsp_len == 8) || (rsp_len == 4))) {
4988*4882a593Smuzhiyun 			switch (rsp_info_code) {
4989*4882a593Smuzhiyun 			case RSP_NO_FAILURE:
4990*4882a593Smuzhiyun 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4991*4882a593Smuzhiyun 						 "0715 Task Mgmt No Failure\n");
4992*4882a593Smuzhiyun 				ret = SUCCESS;
4993*4882a593Smuzhiyun 				break;
4994*4882a593Smuzhiyun 			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
4995*4882a593Smuzhiyun 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4996*4882a593Smuzhiyun 						 "0716 Task Mgmt Target "
4997*4882a593Smuzhiyun 						"reject\n");
4998*4882a593Smuzhiyun 				break;
4999*4882a593Smuzhiyun 			case RSP_TM_NOT_COMPLETED: /* TM failed */
5000*4882a593Smuzhiyun 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5001*4882a593Smuzhiyun 						 "0717 Task Mgmt Target "
5002*4882a593Smuzhiyun 						"failed TM\n");
5003*4882a593Smuzhiyun 				break;
5004*4882a593Smuzhiyun 			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5005*4882a593Smuzhiyun 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5006*4882a593Smuzhiyun 						 "0718 Task Mgmt to invalid "
5007*4882a593Smuzhiyun 						"LUN\n");
5008*4882a593Smuzhiyun 				break;
5009*4882a593Smuzhiyun 			}
5010*4882a593Smuzhiyun 		}
5011*4882a593Smuzhiyun 	}
5012*4882a593Smuzhiyun 	return ret;
5013*4882a593Smuzhiyun }
5014*4882a593Smuzhiyun 
5015*4882a593Smuzhiyun 
/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @cmnd: Pointer to scsi_cmnd data structure (used to locate the rport).
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port and synchronously waits for its completion.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 *   TIMEOUT_ERROR - the TMF IOCB timed out waiting for completion.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
		   unsigned int tgt_id, uint64_t lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	int ret;
	int status;

	/* The remote node must exist and be active before building a TMF. */
	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;
	pnode = rdata->pnode;

	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->ndlp = pnode;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	/* A separate response iocbq is needed for the synchronous wait. */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	/* Issue the TMF and block until it completes or times out. */
	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		/* Log unless this is an FCP_RSP error, which is examined
		 * in detail by lpfc_check_fcp_rsp() below.
		 */
		if (status != IOCB_SUCCESS ||
		    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0727 TMF %s to TGT %d LUN %llu "
					 "failed (%d, %d) iocb_flag x%x\n",
					 lpfc_taskmgmt_name(task_mgmt_cmd),
					 tgt_id, lun_id,
					 iocbqrsp->iocb.ulpStatus,
					 iocbqrsp->iocb.un.ulpWord[4],
					 iocbq->iocb_flag);
		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/* NOTE(review): on timeout the scsi buffer is deliberately NOT
	 * released here — presumably the IOCB may still complete later and
	 * reference it; confirm against the completion path.
	 */
	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
5117*4882a593Smuzhiyun 
5118*4882a593Smuzhiyun /**
5119*4882a593Smuzhiyun  * lpfc_chk_tgt_mapped -
5120*4882a593Smuzhiyun  * @vport: The virtual port to check on
5121*4882a593Smuzhiyun  * @cmnd: Pointer to scsi_cmnd data structure.
5122*4882a593Smuzhiyun  *
5123*4882a593Smuzhiyun  * This routine delays until the scsi target (aka rport) for the
5124*4882a593Smuzhiyun  * command exists (is present and logged in) or we declare it non-existent.
5125*4882a593Smuzhiyun  *
5126*4882a593Smuzhiyun  * Return code :
5127*4882a593Smuzhiyun  *  0x2003 - Error
5128*4882a593Smuzhiyun  *  0x2002 - Success
5129*4882a593Smuzhiyun  **/
5130*4882a593Smuzhiyun static int
lpfc_chk_tgt_mapped(struct lpfc_vport * vport,struct scsi_cmnd * cmnd)5131*4882a593Smuzhiyun lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5132*4882a593Smuzhiyun {
5133*4882a593Smuzhiyun 	struct lpfc_rport_data *rdata;
5134*4882a593Smuzhiyun 	struct lpfc_nodelist *pnode;
5135*4882a593Smuzhiyun 	unsigned long later;
5136*4882a593Smuzhiyun 
5137*4882a593Smuzhiyun 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5138*4882a593Smuzhiyun 	if (!rdata) {
5139*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5140*4882a593Smuzhiyun 			"0797 Tgt Map rport failure: rdata x%px\n", rdata);
5141*4882a593Smuzhiyun 		return FAILED;
5142*4882a593Smuzhiyun 	}
5143*4882a593Smuzhiyun 	pnode = rdata->pnode;
5144*4882a593Smuzhiyun 	/*
5145*4882a593Smuzhiyun 	 * If target is not in a MAPPED state, delay until
5146*4882a593Smuzhiyun 	 * target is rediscovered or devloss timeout expires.
5147*4882a593Smuzhiyun 	 */
5148*4882a593Smuzhiyun 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5149*4882a593Smuzhiyun 	while (time_after(later, jiffies)) {
5150*4882a593Smuzhiyun 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5151*4882a593Smuzhiyun 			return FAILED;
5152*4882a593Smuzhiyun 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5153*4882a593Smuzhiyun 			return SUCCESS;
5154*4882a593Smuzhiyun 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5155*4882a593Smuzhiyun 		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5156*4882a593Smuzhiyun 		if (!rdata)
5157*4882a593Smuzhiyun 			return FAILED;
5158*4882a593Smuzhiyun 		pnode = rdata->pnode;
5159*4882a593Smuzhiyun 	}
5160*4882a593Smuzhiyun 	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5161*4882a593Smuzhiyun 	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5162*4882a593Smuzhiyun 		return FAILED;
5163*4882a593Smuzhiyun 	return SUCCESS;
5164*4882a593Smuzhiyun }
5165*4882a593Smuzhiyun 
5166*4882a593Smuzhiyun /**
5167*4882a593Smuzhiyun  * lpfc_reset_flush_io_context -
5168*4882a593Smuzhiyun  * @vport: The virtual port (scsi_host) for the flush context
5169*4882a593Smuzhiyun  * @tgt_id: If aborting by Target contect - specifies the target id
5170*4882a593Smuzhiyun  * @lun_id: If aborting by Lun context - specifies the lun id
5171*4882a593Smuzhiyun  * @context: specifies the context level to flush at.
5172*4882a593Smuzhiyun  *
5173*4882a593Smuzhiyun  * After a reset condition via TMF, we need to flush orphaned i/o
5174*4882a593Smuzhiyun  * contexts from the adapter. This routine aborts any contexts
5175*4882a593Smuzhiyun  * outstanding, then waits for their completions. The wait is
5176*4882a593Smuzhiyun  * bounded by devloss_tmo though.
5177*4882a593Smuzhiyun  *
5178*4882a593Smuzhiyun  * Return code :
5179*4882a593Smuzhiyun  *  0x2003 - Error
5180*4882a593Smuzhiyun  *  0x2002 - Success
5181*4882a593Smuzhiyun  **/
5182*4882a593Smuzhiyun static int
lpfc_reset_flush_io_context(struct lpfc_vport * vport,uint16_t tgt_id,uint64_t lun_id,lpfc_ctx_cmd context)5183*4882a593Smuzhiyun lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5184*4882a593Smuzhiyun 			uint64_t lun_id, lpfc_ctx_cmd context)
5185*4882a593Smuzhiyun {
5186*4882a593Smuzhiyun 	struct lpfc_hba   *phba = vport->phba;
5187*4882a593Smuzhiyun 	unsigned long later;
5188*4882a593Smuzhiyun 	int cnt;
5189*4882a593Smuzhiyun 
5190*4882a593Smuzhiyun 	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5191*4882a593Smuzhiyun 	if (cnt)
5192*4882a593Smuzhiyun 		lpfc_sli_abort_taskmgmt(vport,
5193*4882a593Smuzhiyun 					&phba->sli.sli3_ring[LPFC_FCP_RING],
5194*4882a593Smuzhiyun 					tgt_id, lun_id, context);
5195*4882a593Smuzhiyun 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5196*4882a593Smuzhiyun 	while (time_after(later, jiffies) && cnt) {
5197*4882a593Smuzhiyun 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5198*4882a593Smuzhiyun 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5199*4882a593Smuzhiyun 	}
5200*4882a593Smuzhiyun 	if (cnt) {
5201*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5202*4882a593Smuzhiyun 			"0724 I/O flush failure for context %s : cnt x%x\n",
5203*4882a593Smuzhiyun 			((context == LPFC_CTX_LUN) ? "LUN" :
5204*4882a593Smuzhiyun 			 ((context == LPFC_CTX_TGT) ? "TGT" :
5205*4882a593Smuzhiyun 			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5206*4882a593Smuzhiyun 			cnt);
5207*4882a593Smuzhiyun 		return FAILED;
5208*4882a593Smuzhiyun 	}
5209*4882a593Smuzhiyun 	return SUCCESS;
5210*4882a593Smuzhiyun }
5211*4882a593Smuzhiyun 
5212*4882a593Smuzhiyun /**
5213*4882a593Smuzhiyun  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5214*4882a593Smuzhiyun  * @cmnd: Pointer to scsi_cmnd data structure.
5215*4882a593Smuzhiyun  *
5216*4882a593Smuzhiyun  * This routine does a device reset by sending a LUN_RESET task management
5217*4882a593Smuzhiyun  * command.
5218*4882a593Smuzhiyun  *
5219*4882a593Smuzhiyun  * Return code :
5220*4882a593Smuzhiyun  *  0x2003 - Error
5221*4882a593Smuzhiyun  *  0x2002 - Success
5222*4882a593Smuzhiyun  **/
5223*4882a593Smuzhiyun static int
lpfc_device_reset_handler(struct scsi_cmnd * cmnd)5224*4882a593Smuzhiyun lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5225*4882a593Smuzhiyun {
5226*4882a593Smuzhiyun 	struct Scsi_Host  *shost = cmnd->device->host;
5227*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5228*4882a593Smuzhiyun 	struct lpfc_rport_data *rdata;
5229*4882a593Smuzhiyun 	struct lpfc_nodelist *pnode;
5230*4882a593Smuzhiyun 	unsigned tgt_id = cmnd->device->id;
5231*4882a593Smuzhiyun 	uint64_t lun_id = cmnd->device->lun;
5232*4882a593Smuzhiyun 	struct lpfc_scsi_event_header scsi_event;
5233*4882a593Smuzhiyun 	int status;
5234*4882a593Smuzhiyun 
5235*4882a593Smuzhiyun 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5236*4882a593Smuzhiyun 	if (!rdata || !rdata->pnode) {
5237*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5238*4882a593Smuzhiyun 				 "0798 Device Reset rdata failure: rdata x%px\n",
5239*4882a593Smuzhiyun 				 rdata);
5240*4882a593Smuzhiyun 		return FAILED;
5241*4882a593Smuzhiyun 	}
5242*4882a593Smuzhiyun 	pnode = rdata->pnode;
5243*4882a593Smuzhiyun 	status = fc_block_scsi_eh(cmnd);
5244*4882a593Smuzhiyun 	if (status != 0 && status != SUCCESS)
5245*4882a593Smuzhiyun 		return status;
5246*4882a593Smuzhiyun 
5247*4882a593Smuzhiyun 	status = lpfc_chk_tgt_mapped(vport, cmnd);
5248*4882a593Smuzhiyun 	if (status == FAILED) {
5249*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5250*4882a593Smuzhiyun 			"0721 Device Reset rport failure: rdata x%px\n", rdata);
5251*4882a593Smuzhiyun 		return FAILED;
5252*4882a593Smuzhiyun 	}
5253*4882a593Smuzhiyun 
5254*4882a593Smuzhiyun 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5255*4882a593Smuzhiyun 	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5256*4882a593Smuzhiyun 	scsi_event.lun = lun_id;
5257*4882a593Smuzhiyun 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5258*4882a593Smuzhiyun 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5259*4882a593Smuzhiyun 
5260*4882a593Smuzhiyun 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5261*4882a593Smuzhiyun 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5262*4882a593Smuzhiyun 
5263*4882a593Smuzhiyun 	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5264*4882a593Smuzhiyun 						FCP_LUN_RESET);
5265*4882a593Smuzhiyun 
5266*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5267*4882a593Smuzhiyun 			 "0713 SCSI layer issued Device Reset (%d, %llu) "
5268*4882a593Smuzhiyun 			 "return x%x\n", tgt_id, lun_id, status);
5269*4882a593Smuzhiyun 
5270*4882a593Smuzhiyun 	/*
5271*4882a593Smuzhiyun 	 * We have to clean up i/o as : they may be orphaned by the TMF;
5272*4882a593Smuzhiyun 	 * or if the TMF failed, they may be in an indeterminate state.
5273*4882a593Smuzhiyun 	 * So, continue on.
5274*4882a593Smuzhiyun 	 * We will report success if all the i/o aborts successfully.
5275*4882a593Smuzhiyun 	 */
5276*4882a593Smuzhiyun 	if (status == SUCCESS)
5277*4882a593Smuzhiyun 		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5278*4882a593Smuzhiyun 						LPFC_CTX_LUN);
5279*4882a593Smuzhiyun 
5280*4882a593Smuzhiyun 	return status;
5281*4882a593Smuzhiyun }
5282*4882a593Smuzhiyun 
5283*4882a593Smuzhiyun /**
5284*4882a593Smuzhiyun  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5285*4882a593Smuzhiyun  * @cmnd: Pointer to scsi_cmnd data structure.
5286*4882a593Smuzhiyun  *
5287*4882a593Smuzhiyun  * This routine does a target reset by sending a TARGET_RESET task management
5288*4882a593Smuzhiyun  * command.
5289*4882a593Smuzhiyun  *
5290*4882a593Smuzhiyun  * Return code :
5291*4882a593Smuzhiyun  *  0x2003 - Error
5292*4882a593Smuzhiyun  *  0x2002 - Success
5293*4882a593Smuzhiyun  **/
5294*4882a593Smuzhiyun static int
lpfc_target_reset_handler(struct scsi_cmnd * cmnd)5295*4882a593Smuzhiyun lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5296*4882a593Smuzhiyun {
5297*4882a593Smuzhiyun 	struct Scsi_Host  *shost = cmnd->device->host;
5298*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5299*4882a593Smuzhiyun 	struct lpfc_rport_data *rdata;
5300*4882a593Smuzhiyun 	struct lpfc_nodelist *pnode;
5301*4882a593Smuzhiyun 	unsigned tgt_id = cmnd->device->id;
5302*4882a593Smuzhiyun 	uint64_t lun_id = cmnd->device->lun;
5303*4882a593Smuzhiyun 	struct lpfc_scsi_event_header scsi_event;
5304*4882a593Smuzhiyun 	int status;
5305*4882a593Smuzhiyun 
5306*4882a593Smuzhiyun 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5307*4882a593Smuzhiyun 	if (!rdata || !rdata->pnode) {
5308*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5309*4882a593Smuzhiyun 				 "0799 Target Reset rdata failure: rdata x%px\n",
5310*4882a593Smuzhiyun 				 rdata);
5311*4882a593Smuzhiyun 		return FAILED;
5312*4882a593Smuzhiyun 	}
5313*4882a593Smuzhiyun 	pnode = rdata->pnode;
5314*4882a593Smuzhiyun 	status = fc_block_scsi_eh(cmnd);
5315*4882a593Smuzhiyun 	if (status != 0 && status != SUCCESS)
5316*4882a593Smuzhiyun 		return status;
5317*4882a593Smuzhiyun 
5318*4882a593Smuzhiyun 	status = lpfc_chk_tgt_mapped(vport, cmnd);
5319*4882a593Smuzhiyun 	if (status == FAILED) {
5320*4882a593Smuzhiyun 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5321*4882a593Smuzhiyun 			"0722 Target Reset rport failure: rdata x%px\n", rdata);
5322*4882a593Smuzhiyun 		if (pnode) {
5323*4882a593Smuzhiyun 			spin_lock_irq(shost->host_lock);
5324*4882a593Smuzhiyun 			pnode->nlp_flag &= ~NLP_NPR_ADISC;
5325*4882a593Smuzhiyun 			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5326*4882a593Smuzhiyun 			spin_unlock_irq(shost->host_lock);
5327*4882a593Smuzhiyun 		}
5328*4882a593Smuzhiyun 		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5329*4882a593Smuzhiyun 					  LPFC_CTX_TGT);
5330*4882a593Smuzhiyun 		return FAST_IO_FAIL;
5331*4882a593Smuzhiyun 	}
5332*4882a593Smuzhiyun 
5333*4882a593Smuzhiyun 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5334*4882a593Smuzhiyun 	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5335*4882a593Smuzhiyun 	scsi_event.lun = 0;
5336*4882a593Smuzhiyun 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5337*4882a593Smuzhiyun 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5338*4882a593Smuzhiyun 
5339*4882a593Smuzhiyun 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5340*4882a593Smuzhiyun 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5341*4882a593Smuzhiyun 
5342*4882a593Smuzhiyun 	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5343*4882a593Smuzhiyun 					FCP_TARGET_RESET);
5344*4882a593Smuzhiyun 
5345*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5346*4882a593Smuzhiyun 			 "0723 SCSI layer issued Target Reset (%d, %llu) "
5347*4882a593Smuzhiyun 			 "return x%x\n", tgt_id, lun_id, status);
5348*4882a593Smuzhiyun 
5349*4882a593Smuzhiyun 	/*
5350*4882a593Smuzhiyun 	 * We have to clean up i/o as : they may be orphaned by the TMF;
5351*4882a593Smuzhiyun 	 * or if the TMF failed, they may be in an indeterminate state.
5352*4882a593Smuzhiyun 	 * So, continue on.
5353*4882a593Smuzhiyun 	 * We will report success if all the i/o aborts successfully.
5354*4882a593Smuzhiyun 	 */
5355*4882a593Smuzhiyun 	if (status == SUCCESS)
5356*4882a593Smuzhiyun 		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5357*4882a593Smuzhiyun 					  LPFC_CTX_TGT);
5358*4882a593Smuzhiyun 	return status;
5359*4882a593Smuzhiyun }
5360*4882a593Smuzhiyun 
5361*4882a593Smuzhiyun /**
5362*4882a593Smuzhiyun  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5363*4882a593Smuzhiyun  * @cmnd: Pointer to scsi_cmnd data structure.
5364*4882a593Smuzhiyun  *
5365*4882a593Smuzhiyun  * This routine does target reset to all targets on @cmnd->device->host.
5366*4882a593Smuzhiyun  * This emulates Parallel SCSI Bus Reset Semantics.
5367*4882a593Smuzhiyun  *
5368*4882a593Smuzhiyun  * Return code :
5369*4882a593Smuzhiyun  *  0x2003 - Error
5370*4882a593Smuzhiyun  *  0x2002 - Success
5371*4882a593Smuzhiyun  **/
5372*4882a593Smuzhiyun static int
lpfc_bus_reset_handler(struct scsi_cmnd * cmnd)5373*4882a593Smuzhiyun lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5374*4882a593Smuzhiyun {
5375*4882a593Smuzhiyun 	struct Scsi_Host  *shost = cmnd->device->host;
5376*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5377*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp = NULL;
5378*4882a593Smuzhiyun 	struct lpfc_scsi_event_header scsi_event;
5379*4882a593Smuzhiyun 	int match;
5380*4882a593Smuzhiyun 	int ret = SUCCESS, status, i;
5381*4882a593Smuzhiyun 
5382*4882a593Smuzhiyun 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5383*4882a593Smuzhiyun 	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5384*4882a593Smuzhiyun 	scsi_event.lun = 0;
5385*4882a593Smuzhiyun 	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5386*4882a593Smuzhiyun 	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5387*4882a593Smuzhiyun 
5388*4882a593Smuzhiyun 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5389*4882a593Smuzhiyun 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5390*4882a593Smuzhiyun 
5391*4882a593Smuzhiyun 	status = fc_block_scsi_eh(cmnd);
5392*4882a593Smuzhiyun 	if (status != 0 && status != SUCCESS)
5393*4882a593Smuzhiyun 		return status;
5394*4882a593Smuzhiyun 
5395*4882a593Smuzhiyun 	/*
5396*4882a593Smuzhiyun 	 * Since the driver manages a single bus device, reset all
5397*4882a593Smuzhiyun 	 * targets known to the driver.  Should any target reset
5398*4882a593Smuzhiyun 	 * fail, this routine returns failure to the midlayer.
5399*4882a593Smuzhiyun 	 */
5400*4882a593Smuzhiyun 	for (i = 0; i < LPFC_MAX_TARGET; i++) {
5401*4882a593Smuzhiyun 		/* Search for mapped node by target ID */
5402*4882a593Smuzhiyun 		match = 0;
5403*4882a593Smuzhiyun 		spin_lock_irq(shost->host_lock);
5404*4882a593Smuzhiyun 		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5405*4882a593Smuzhiyun 			if (!NLP_CHK_NODE_ACT(ndlp))
5406*4882a593Smuzhiyun 				continue;
5407*4882a593Smuzhiyun 			if (vport->phba->cfg_fcp2_no_tgt_reset &&
5408*4882a593Smuzhiyun 			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5409*4882a593Smuzhiyun 				continue;
5410*4882a593Smuzhiyun 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5411*4882a593Smuzhiyun 			    ndlp->nlp_sid == i &&
5412*4882a593Smuzhiyun 			    ndlp->rport &&
5413*4882a593Smuzhiyun 			    ndlp->nlp_type & NLP_FCP_TARGET) {
5414*4882a593Smuzhiyun 				match = 1;
5415*4882a593Smuzhiyun 				break;
5416*4882a593Smuzhiyun 			}
5417*4882a593Smuzhiyun 		}
5418*4882a593Smuzhiyun 		spin_unlock_irq(shost->host_lock);
5419*4882a593Smuzhiyun 		if (!match)
5420*4882a593Smuzhiyun 			continue;
5421*4882a593Smuzhiyun 
5422*4882a593Smuzhiyun 		status = lpfc_send_taskmgmt(vport, cmnd,
5423*4882a593Smuzhiyun 					i, 0, FCP_TARGET_RESET);
5424*4882a593Smuzhiyun 
5425*4882a593Smuzhiyun 		if (status != SUCCESS) {
5426*4882a593Smuzhiyun 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5427*4882a593Smuzhiyun 					 "0700 Bus Reset on target %d failed\n",
5428*4882a593Smuzhiyun 					 i);
5429*4882a593Smuzhiyun 			ret = FAILED;
5430*4882a593Smuzhiyun 		}
5431*4882a593Smuzhiyun 	}
5432*4882a593Smuzhiyun 	/*
5433*4882a593Smuzhiyun 	 * We have to clean up i/o as : they may be orphaned by the TMFs
5434*4882a593Smuzhiyun 	 * above; or if any of the TMFs failed, they may be in an
5435*4882a593Smuzhiyun 	 * indeterminate state.
5436*4882a593Smuzhiyun 	 * We will report success if all the i/o aborts successfully.
5437*4882a593Smuzhiyun 	 */
5438*4882a593Smuzhiyun 
5439*4882a593Smuzhiyun 	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5440*4882a593Smuzhiyun 	if (status != SUCCESS)
5441*4882a593Smuzhiyun 		ret = FAILED;
5442*4882a593Smuzhiyun 
5443*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5444*4882a593Smuzhiyun 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5445*4882a593Smuzhiyun 	return ret;
5446*4882a593Smuzhiyun }
5447*4882a593Smuzhiyun 
5448*4882a593Smuzhiyun /**
5449*4882a593Smuzhiyun  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5450*4882a593Smuzhiyun  * @cmnd: Pointer to scsi_cmnd data structure.
5451*4882a593Smuzhiyun  *
5452*4882a593Smuzhiyun  * This routine does host reset to the adaptor port. It brings the HBA
5453*4882a593Smuzhiyun  * offline, performs a board restart, and then brings the board back online.
5454*4882a593Smuzhiyun  * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
5455*4882a593Smuzhiyun  * reject all outstanding SCSI commands to the host and error returned
5456*4882a593Smuzhiyun  * back to SCSI mid-level. As this will be SCSI mid-level's last resort
5457*4882a593Smuzhiyun  * of error handling, it will only return error if resetting of the adapter
5458*4882a593Smuzhiyun  * is not successful; in all other cases, will return success.
5459*4882a593Smuzhiyun  *
5460*4882a593Smuzhiyun  * Return code :
5461*4882a593Smuzhiyun  *  0x2003 - Error
5462*4882a593Smuzhiyun  *  0x2002 - Success
5463*4882a593Smuzhiyun  **/
5464*4882a593Smuzhiyun static int
lpfc_host_reset_handler(struct scsi_cmnd * cmnd)5465*4882a593Smuzhiyun lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5466*4882a593Smuzhiyun {
5467*4882a593Smuzhiyun 	struct Scsi_Host *shost = cmnd->device->host;
5468*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5469*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
5470*4882a593Smuzhiyun 	int rc, ret = SUCCESS;
5471*4882a593Smuzhiyun 
5472*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5473*4882a593Smuzhiyun 			 "3172 SCSI layer issued Host Reset Data:\n");
5474*4882a593Smuzhiyun 
5475*4882a593Smuzhiyun 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5476*4882a593Smuzhiyun 	lpfc_offline(phba);
5477*4882a593Smuzhiyun 	rc = lpfc_sli_brdrestart(phba);
5478*4882a593Smuzhiyun 	if (rc)
5479*4882a593Smuzhiyun 		goto error;
5480*4882a593Smuzhiyun 
5481*4882a593Smuzhiyun 	rc = lpfc_online(phba);
5482*4882a593Smuzhiyun 	if (rc)
5483*4882a593Smuzhiyun 		goto error;
5484*4882a593Smuzhiyun 
5485*4882a593Smuzhiyun 	lpfc_unblock_mgmt_io(phba);
5486*4882a593Smuzhiyun 
5487*4882a593Smuzhiyun 	return ret;
5488*4882a593Smuzhiyun error:
5489*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5490*4882a593Smuzhiyun 			 "3323 Failed host reset\n");
5491*4882a593Smuzhiyun 	lpfc_unblock_mgmt_io(phba);
5492*4882a593Smuzhiyun 	return FAILED;
5493*4882a593Smuzhiyun }
5494*4882a593Smuzhiyun 
/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. This routine also makes sure scsi
 * buffer is not allocated more than HBA limit conveyed to midlayer. This list
 * of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	/* No remote port, or the port is not ready for I/O. */
	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists.  If not, create one.
		 */

		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			/*
			 * Drop the lock across the allocation; re-take it
			 * before linking the new entry onto phba->luns.
			 */
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun,
							phba->cfg_XLanePriority,
							true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/* For SLI4, all IO buffers are pre-allocated */
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;

	/* This code path is now ONLY for SLI3 adapters */

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Cap the request so discovery exchanges remain available */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0708 Allocation request of %d "
					 "command buffers did not succeed.  "
					 "Allocated %d buffers.\n",
					 num_to_alloc, num_allocated);
	}
	/* Account only for what was actually obtained. */
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}
5610*4882a593Smuzhiyun 
5611*4882a593Smuzhiyun /**
5612*4882a593Smuzhiyun  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5613*4882a593Smuzhiyun  * @sdev: Pointer to scsi_device.
5614*4882a593Smuzhiyun  *
5615*4882a593Smuzhiyun  * This routine configures following items
5616*4882a593Smuzhiyun  *   - Tag command queuing support for @sdev if supported.
5617*4882a593Smuzhiyun  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5618*4882a593Smuzhiyun  *
5619*4882a593Smuzhiyun  * Return codes:
5620*4882a593Smuzhiyun  *   0 - Success
5621*4882a593Smuzhiyun  **/
5622*4882a593Smuzhiyun static int
lpfc_slave_configure(struct scsi_device * sdev)5623*4882a593Smuzhiyun lpfc_slave_configure(struct scsi_device *sdev)
5624*4882a593Smuzhiyun {
5625*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5626*4882a593Smuzhiyun 	struct lpfc_hba   *phba = vport->phba;
5627*4882a593Smuzhiyun 
5628*4882a593Smuzhiyun 	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5629*4882a593Smuzhiyun 
5630*4882a593Smuzhiyun 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5631*4882a593Smuzhiyun 		lpfc_sli_handle_fast_ring_event(phba,
5632*4882a593Smuzhiyun 			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5633*4882a593Smuzhiyun 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5634*4882a593Smuzhiyun 			lpfc_poll_rearm_timer(phba);
5635*4882a593Smuzhiyun 	}
5636*4882a593Smuzhiyun 
5637*4882a593Smuzhiyun 	return 0;
5638*4882a593Smuzhiyun }
5639*4882a593Smuzhiyun 
5640*4882a593Smuzhiyun /**
5641*4882a593Smuzhiyun  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5642*4882a593Smuzhiyun  * @sdev: Pointer to scsi_device.
5643*4882a593Smuzhiyun  *
5644*4882a593Smuzhiyun  * This routine sets @sdev hostatdata filed to null.
5645*4882a593Smuzhiyun  **/
5646*4882a593Smuzhiyun static void
lpfc_slave_destroy(struct scsi_device * sdev)5647*4882a593Smuzhiyun lpfc_slave_destroy(struct scsi_device *sdev)
5648*4882a593Smuzhiyun {
5649*4882a593Smuzhiyun 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5650*4882a593Smuzhiyun 	struct lpfc_hba   *phba = vport->phba;
5651*4882a593Smuzhiyun 	unsigned long flags;
5652*4882a593Smuzhiyun 	struct lpfc_device_data *device_data = sdev->hostdata;
5653*4882a593Smuzhiyun 
5654*4882a593Smuzhiyun 	atomic_dec(&phba->sdev_cnt);
5655*4882a593Smuzhiyun 	if ((phba->cfg_fof) && (device_data)) {
5656*4882a593Smuzhiyun 		spin_lock_irqsave(&phba->devicelock, flags);
5657*4882a593Smuzhiyun 		device_data->available = false;
5658*4882a593Smuzhiyun 		if (!device_data->oas_enabled)
5659*4882a593Smuzhiyun 			lpfc_delete_device_data(phba, device_data);
5660*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->devicelock, flags);
5661*4882a593Smuzhiyun 	}
5662*4882a593Smuzhiyun 	sdev->hostdata = NULL;
5663*4882a593Smuzhiyun 	return;
5664*4882a593Smuzhiyun }
5665*4882a593Smuzhiyun 
/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @pri: Priority assigned to the lun
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		  GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), state of OAS,
 * whether or not the corresponding lun is available by the system,
 * and pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			uint32_t pri, bool atomic_create)
{

	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */

	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data  = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->priority = pri;
	lun_info->available = false;
	return lun_info;
}
5718*4882a593Smuzhiyun 
/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine unlinks (if still listed) and frees the previously
 * allocated device data structure passed.
 *
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
			struct lpfc_device_data *lun_info)
{

	if (unlikely(!phba) || !lun_info  ||
	    !(phba->cfg_fof))
		return;

	/* Unlink from phba->luns before returning the entry to the pool */
	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
	return;
}
5741*4882a593Smuzhiyun 
/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Point to list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks, it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
		       struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t lun)
{

	struct lpfc_device_data *lun_info;

	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return NULL;

	/* Match on the (vport wwpn, target wwpn, lun) triple. */

	list_for_each_entry(lun_info, list, listentry) {
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
			return lun_info;
	}

	return NULL;
}
5783*4882a593Smuzhiyun 
/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 * @found_lun_pri: Pointer to priority of the found lun.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target.  If the vport wwpn contains
 * a zero value then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match.  If the
 * target wwpn contains a zero value then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned.  The function will also return the next lun if available.
 * If the next lun is not found, starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * Return codes:
 *   true - lun found
 *   false - lun not found
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status,
		       uint32_t *found_lun_pri)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for lun or the lun closet in value */

	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		/* A zero wwpn acts as a wildcard for vport and/or target. */
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				/* First match: report this lun's identity,
				 * status and priority to the caller.
				 */
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				*found_lun_pri = lun_info->priority;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				/* Next entry after the match: hand back its
				 * lun/wwpns as the continuation point.
				 */
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}
5885*4882a593Smuzhiyun 
/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority to assign to the lun
 *
 * This routine enables a lun for oas operations.  The routines does so by
 * doing the following :
 *
 *   1) Checks to see if the device data for the lun has been created.
 *   2) If found, sets the OAS enabled flag if not set and returns.
 *   3) Otherwise, creates a device data structure.
 *   4) If successfully created, indicates the device data is for an OAS lun,
 *   indicates the lun is not available and add to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		lun_info->priority = pri;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create an lun info structure and add to list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   pri, true);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->priority = pri;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}
5945*4882a593Smuzhiyun 
/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority to assign to the lun
 *
 * This routine disables a lun for oas operations.  The routines does so by
 * doing the following :
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available by the system, the device data is
 *   freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available. */
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		lun_info->oas_enabled = false;
		lun_info->priority = pri;
		/* Free the entry once no sdev holds it (see slave_alloc). */
		if (!lun_info->available)
			lpfc_delete_device_data(phba, lun_info);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}
5995*4882a593Smuzhiyun 
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	/*
	 * Stub queuecommand wired into lpfc_template_nvme below: that
	 * host exposes no SCSI devices, so any command that reaches us
	 * is bounced back to the midlayer for requeue.
	 */
	return SCSI_MLQUEUE_HOST_BUSY;
}
6001*4882a593Smuzhiyun 
static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	/*
	 * Stub error handler wired into lpfc_template_nvme below: SCSI
	 * error recovery has nothing to act on there, so always report
	 * failure to the midlayer.
	 */
	return FAILED;
}
6007*4882a593Smuzhiyun 
static int
lpfc_no_slave(struct scsi_device *sdev)
{
	/*
	 * Stub slave_alloc/slave_configure wired into lpfc_template_nvme
	 * below: reject SCSI device creation since that host has no SCSI
	 * LUNs to offer.
	 */
	return -ENODEV;
}
6013*4882a593Smuzhiyun 
/*
 * SCSI host template used when the port is configured for NVME only.
 * All SCSI entry points are routed to the lpfc_no_* stubs above, so the
 * SCSI midlayer gets busy/failure/-ENODEV responses rather than real
 * command processing through this host.
 */
struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler  = lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,	/* minimal — no real SCSI I/O occurs */
	.cmd_per_lun		= 1,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};
6036*4882a593Smuzhiyun 
/*
 * Default SCSI host template for FCP operation: full lpfc queuecommand,
 * error-handler, and slave callbacks, with queue-depth tracking enabled.
 */
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
6062