xref: /OK3568_Linux_fs/kernel/drivers/scsi/csiostor/csio_scsi.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_scsi.h"
#include "csio_init.h"

int csio_scsi_eqsize = 65536;
int csio_scsi_iqlen = 128;
int csio_scsi_ioreqs = 2048;
uint32_t csio_max_scan_tmo;
uint32_t csio_delta_scan_tmo = 5;
int csio_lun_qdepth = 32;

static int csio_ddp_descs = 128;

static int csio_do_abrt_cls(struct csio_hw *,
				      struct csio_ioreq *, bool);

static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);

/*
 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
 * @ioreq: The I/O request
 * @sld: Level information
 *
 * Should be called with lock held.
 *
 */
static bool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);

	switch (sld->level) {
	case CSIO_LEV_LUN:
		if (scmnd == NULL)
			return false;

		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode) &&
			((uint64_t)scmnd->device->lun == sld->oslun));

	case CSIO_LEV_RNODE:
		return ((ioreq->lnode == sld->lnode) &&
				(ioreq->rnode == sld->rnode));
	case CSIO_LEV_LNODE:
		return (ioreq->lnode == sld->lnode);
	case CSIO_LEV_ALL:
		return true;
	default:
		return false;
	}
}
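
/*
 * The match levels above form a hierarchy: a LUN-level match also
 * requires the lnode and rnode to match, an rnode-level match also
 * requires the lnode, and CSIO_LEV_ALL matches every active I/O on
 * the module.
 */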

/*
 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 * @scm: SCSI module
 * @sld: Level information
 * @dest: The queue where these I/Os have to be gathered.
 *
 * Should be called with lock held.
 */
static void
csio_scsi_gather_active_ios(struct csio_scsim *scm,
			    struct csio_scsi_level_data *sld,
			    struct list_head *dest)
{
	struct list_head *tmp, *next;

	if (list_empty(&scm->active_q))
		return;

	/* Just splice the entire active_q into dest */
	if (sld->level == CSIO_LEV_ALL) {
		list_splice_tail_init(&scm->active_q, dest);
		return;
	}

	list_for_each_safe(tmp, next, &scm->active_q) {
		if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
			list_del_init(tmp);
			list_add_tail(tmp, dest);
		}
	}
}

static inline bool
csio_scsi_itnexus_loss_error(uint16_t error)
{
	switch (error) {
	case FW_ERR_LINK_DOWN:
	case FW_RDEV_NOT_READY:
	case FW_ERR_RDEV_LOST:
	case FW_ERR_RDEV_LOGO:
	case FW_ERR_RDEV_IMPL_LOGO:
		return 1;
	}
	return 0;
}

/*
 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 *
 * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
 */
static inline void
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{
	struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	/* Check for Task Management */
	if (likely(scmnd->SCp.Message == 0)) {
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = 0;
		fcp_cmnd->fc_cmdref = 0;

		memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
		fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
		fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

		if (req->nsge)
			if (req->datadir == DMA_TO_DEVICE)
				fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
			else
				fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
		else
			fcp_cmnd->fc_flags = 0;
	} else {
		memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
	}
}
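
/*
 * csio_scsi_fcp_cmnd() overloads scmnd->SCp.Message: zero means a
 * regular SCSI command, while a non-zero value carries FCP task
 * management flags (such as FCP_TMF_LUN_RESET) that are copied into
 * fc_tm_flags, with no CDB and no data transfer length.
 */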

/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry)
 *
 * Wrapper for populating fw_scsi_cmd_wr.
 */
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
					  FW_SCSI_CMD_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
					    FW_WR_LEN16_V(
						DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	wr->r3 = 0;
	memset(&wr->r5, 0, 8);

	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r6 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r4_lo[0] = 0;
	wr->u.fcoe.r4_lo[1] = 0;

	/* Frame a FCP command */
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
				    sizeof(struct fw_scsi_cmd_wr)));
}

#define CSIO_SCSI_CMD_WR_SZ(_imm)					\
	(sizeof(struct fw_scsi_cmd_wr) +		/* WR size */	\
	 ALIGN((_imm), 16))				/* Immed data */

#define CSIO_SCSI_CMD_WR_SZ_16(_imm)					\
			(ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
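
/*
 * For illustration, assuming a 32-byte FCP_CMND as the immediate data
 * (i.e. proto_cmd_len == 32):
 *
 *	CSIO_SCSI_CMD_WR_SZ(32)    == sizeof(struct fw_scsi_cmd_wr) + 32
 *	CSIO_SCSI_CMD_WR_SZ_16(32) == the above, rounded up to a multiple
 *	of 16 bytes -- the unit counted by the LEN16 field in the WR
 *	header.
 */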

/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with SCSI CMD WR.
 *
 */
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (unlikely(req->drv_status != 0))
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_cmd_wr(req, wrp.addr1, size);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}
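
/*
 * The egress queue is a ring, so csio_wr_get() may return a slot that
 * wraps past the end of the queue (wrp.size1 < size). In that case the
 * WR is built in a contiguous scratch area (csio_q_eq_wrap()) and then
 * copied out in two pieces: addr1 first, addr2 for the remainder.
 * csio_scsi_read(), csio_scsi_write() and csio_scsi_abrt_cls() below
 * handle the wrap the same way.
 */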

/*
 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 * @hw: HW module
 * @req: IO request
 * @sgl: ULP TX SGL pointer.
 *
 */
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
			   struct ulptx_sgl *sgl)
{
	struct ulptx_sge_pair *sge_pair = NULL;
	struct scatterlist *sgel;
	uint32_t i = 0;
	uint32_t xfer_len;
	struct list_head *tmp;
	struct csio_dma_buf *dma_buf;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
				     ULPTX_NSGE_V(req->nsge));
	/* Now add the data SGLs */
	if (likely(!req->dcopy)) {
		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
				sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
				continue;
			}
			if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[1] = cpu_to_be32(
							sg_dma_len(sgel));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[0] = cpu_to_be32(
							sg_dma_len(sgel));
			}
		}
	} else {
		/* Program sg elements with driver's DDP buffer */
		xfer_len = scsi_bufflen(scmnd);
		list_for_each(tmp, &req->gen_list) {
			dma_buf = (struct csio_dma_buf *)tmp;
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(dma_buf->paddr);
				sgl->len0 = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
			} else if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[1] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[0] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
			}
			xfer_len -= min(xfer_len, dma_buf->len);
			i++;
		}
	}
}
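
/*
 * Resulting DSGL layout: the first SGE lives in the ulptx_sgl header
 * itself (addr0/len0), and the remaining SGEs are packed two at a time
 * into the ulptx_sge_pair array that immediately follows it. That is
 * why the (i - 1) & 0x1 test above alternates between slot 1 and
 * slot 0 of the current pair, advancing the pair pointer after slot 1.
 */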

/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 *
 * Wrapper for populating fw_scsi_read_wr.
 */
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
				     FW_SCSI_READ_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					sizeof(struct fw_scsi_read_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			      sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 *
 * Wrapper for populating fw_scsi_write_wr.
 */
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
				     FW_SCSI_WRITE_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					sizeof(struct fw_scsi_write_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			      sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				       \
do {									       \
	(sz) = sizeof(struct fw_scsi_##oper##_wr) +	/* WR size */          \
	       ALIGN((imm), 16) +			/* Immed data */       \
	       sizeof(struct ulptx_sgl);		/* ulptx_sgl */	       \
									       \
	if (unlikely((req)->nsge > 1))				               \
		(sz) += (sizeof(struct ulptx_sge_pair) *		       \
				(ALIGN(((req)->nsge - 1), 2) / 2));            \
							/* Data SGE */	       \
} while (0)
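
/*
 * For illustration, a data WR for a request with nsge == 5 needs:
 *
 *	sizeof(struct fw_scsi_read_wr or fw_scsi_write_wr)
 *	+ ALIGN(imm, 16)			(immediate FCP_CMND)
 *	+ sizeof(struct ulptx_sgl)		(holds SGE 0)
 *	+ 2 * sizeof(struct ulptx_sge_pair)	(SGEs 1-4, two per pair)
 *
 * since ALIGN(5 - 1, 2) / 2 == 2 pairs.
 */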

/*
 * csio_scsi_read - Create a SCSI READ WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI READ WR.
 *
 */
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_read_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_read_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI WRITE WR.
 *
 */
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_write_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_write_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_setup_ddp - Setup DDP buffers for Read request.
 * @scsim: SCSI module.
 * @req: IO req structure.
 *
 * Checks whether the SGLs/data buffers are virtually contiguous, as
 * required for DDP. If they are, the driver posts the SGLs directly in
 * the WR; otherwise it posts internal DDP buffers for the request.
 */
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
	struct csio_hw *hw = req->lnode->hwp;
#endif
	struct scatterlist *sgel = NULL;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
	uint64_t sg_addr = 0;
	uint32_t ddp_pagesz = 4096;
	uint32_t buf_off;
	struct csio_dma_buf *dma_buf = NULL;
	uint32_t alloc_len = 0;
	uint32_t xfer_len = 0;
	uint32_t sg_len = 0;
	uint32_t i;

	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
		sg_addr = sg_dma_address(sgel);
		sg_len	= sg_dma_len(sgel);

		buf_off = sg_addr & (ddp_pagesz - 1);

		/* Except the 1st buffer, all buffer addrs must be page aligned */
		if (i != 0 && buf_off) {
			csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
				 sg_addr, sg_len);
			goto unaligned;
		}

		/* Except the last buffer, all buffers must end on a page boundary */
		if ((i != (req->nsge - 1)) &&
			((buf_off + sg_len) & (ddp_pagesz - 1))) {
			csio_dbg(hw,
				 "SGL addr not ending on page boundary"
				 "(%llx:%d)\n", sg_addr, sg_len);
			goto unaligned;
		}
	}

	/* SGLs are virtually contiguous. HW will DDP to SGLs */
	req->dcopy = 0;
	csio_scsi_read(req);

	return;

unaligned:
	CSIO_INC_STATS(scsim, n_unaligned);
	/*
	 * For unaligned SGLs, the driver allocates internal DDP buffers.
	 * Once the command completes, data from the DDP buffers is
	 * copied into the SGLs.
	 */
	req->dcopy = 1;

	/* Use gen_list to store the DDP buffers */
	INIT_LIST_HEAD(&req->gen_list);
	xfer_len = scsi_bufflen(scmnd);

	i = 0;
	/* Allocate ddp buffers for this request */
	while (alloc_len < xfer_len) {
		dma_buf = csio_get_scsi_ddp(scsim);
		if (dma_buf == NULL || i > scsim->max_sge) {
			req->drv_status = -EBUSY;
			break;
		}
		alloc_len += dma_buf->len;
		/* Added to IO req */
		list_add_tail(&dma_buf->list, &req->gen_list);
		i++;
	}

	if (!req->drv_status) {
		/* set number of ddp bufs used */
		req->nsge = i;
		csio_scsi_read(req);
		return;
	}

	 /* release dma descs */
	if (i > 0)
		csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}
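
/*
 * Example of the rule enforced above (with ddp_pagesz == 4096): a
 * two-element SGL whose second element does not start on a 4K page
 * boundary cannot be mapped as one virtually-contiguous DDP region,
 * so the request takes the dcopy path with driver-owned DDP buffers
 * drawn from the scsim pool via csio_get_scsi_ddp().
 */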

/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR
 * @abort: abort OR close
 *
 * Wrapper for populating fw_scsi_abrt_cls_wr.
 */
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
			   bool abort)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
					    FW_WR_LEN16_V(
						DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	/* 0 for CHK_ALL_IO tells FW to look up t_cookie */
	wr->sub_opcode_to_chk_all_io =
				(FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
				 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
	wr->r3[0] = 0;
	wr->r3[1] = 0;
	wr->r3[2] = 0;
	wr->r3[3] = 0;
	/* Since we re-use the same ioreq for abort as well */
	wr->t_cookie = (uintptr_t) req;
}

static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (req->drv_status != 0)
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*****************************************************************************/
/* START: SCSI SM                                                            */
/*****************************************************************************/
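/*
 * I/O request state transitions, driven by the events handled below:
 *
 *	uninit --START_IO--> io_active --COMPLETED--> uninit
 *	uninit --START_TM--> tm_active --COMPLETED--> uninit
 *	io_active/tm_active --ABORT--> aborting --ABORTED--> uninit
 *	io_active/tm_active --CLOSE--> closing --CLOSED--> uninit
 *	io_active --COMPLETED, I-T nexus lost but rnode still ready-->
 *		shost_cmpl_await
 *
 * DRVCLEANUP forces a request from any state back to uninit.
 */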
702*4882a593Smuzhiyun static void
csio_scsis_uninit(struct csio_ioreq * req,enum csio_scsi_ev evt)703*4882a593Smuzhiyun csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
704*4882a593Smuzhiyun {
705*4882a593Smuzhiyun 	struct csio_hw *hw = req->lnode->hwp;
706*4882a593Smuzhiyun 	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	switch (evt) {
709*4882a593Smuzhiyun 	case CSIO_SCSIE_START_IO:
710*4882a593Smuzhiyun 
711*4882a593Smuzhiyun 		if (req->nsge) {
712*4882a593Smuzhiyun 			if (req->datadir == DMA_TO_DEVICE) {
713*4882a593Smuzhiyun 				req->dcopy = 0;
714*4882a593Smuzhiyun 				csio_scsi_write(req);
715*4882a593Smuzhiyun 			} else
716*4882a593Smuzhiyun 				csio_setup_ddp(scsim, req);
717*4882a593Smuzhiyun 		} else {
718*4882a593Smuzhiyun 			csio_scsi_cmd(req);
719*4882a593Smuzhiyun 		}
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 		if (likely(req->drv_status == 0)) {
722*4882a593Smuzhiyun 			/* change state and enqueue on active_q */
723*4882a593Smuzhiyun 			csio_set_state(&req->sm, csio_scsis_io_active);
724*4882a593Smuzhiyun 			list_add_tail(&req->sm.sm_list, &scsim->active_q);
725*4882a593Smuzhiyun 			csio_wr_issue(hw, req->eq_idx, false);
726*4882a593Smuzhiyun 			CSIO_INC_STATS(scsim, n_active);
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 			return;
729*4882a593Smuzhiyun 		}
730*4882a593Smuzhiyun 		break;
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun 	case CSIO_SCSIE_START_TM:
733*4882a593Smuzhiyun 		csio_scsi_cmd(req);
734*4882a593Smuzhiyun 		if (req->drv_status == 0) {
735*4882a593Smuzhiyun 			/*
736*4882a593Smuzhiyun 			 * NOTE: We collect the affected I/Os prior to issuing
737*4882a593Smuzhiyun 			 * LUN reset, and not after it. This is to prevent
738*4882a593Smuzhiyun 			 * aborting I/Os that get issued after the LUN reset,
739*4882a593Smuzhiyun 			 * but prior to LUN reset completion (in the event that
740*4882a593Smuzhiyun 			 * the host stack has not blocked I/Os to a LUN that is
741*4882a593Smuzhiyun 			 * being reset.
742*4882a593Smuzhiyun 			 */
743*4882a593Smuzhiyun 			csio_set_state(&req->sm, csio_scsis_tm_active);
744*4882a593Smuzhiyun 			list_add_tail(&req->sm.sm_list, &scsim->active_q);
745*4882a593Smuzhiyun 			csio_wr_issue(hw, req->eq_idx, false);
746*4882a593Smuzhiyun 			CSIO_INC_STATS(scsim, n_tm_active);
747*4882a593Smuzhiyun 		}
748*4882a593Smuzhiyun 		return;
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun 	case CSIO_SCSIE_ABORT:
751*4882a593Smuzhiyun 	case CSIO_SCSIE_CLOSE:
752*4882a593Smuzhiyun 		/*
753*4882a593Smuzhiyun 		 * NOTE:
754*4882a593Smuzhiyun 		 * We could get here due to  :
755*4882a593Smuzhiyun 		 * - a window in the cleanup path of the SCSI module
756*4882a593Smuzhiyun 		 *   (csio_scsi_abort_io()). Please see NOTE in this function.
757*4882a593Smuzhiyun 		 * - a window in the time we tried to issue an abort/close
758*4882a593Smuzhiyun 		 *   of a request to FW, and the FW completed the request
759*4882a593Smuzhiyun 		 *   itself.
760*4882a593Smuzhiyun 		 *   Print a message for now, and return INVAL either way.
761*4882a593Smuzhiyun 		 */
762*4882a593Smuzhiyun 		req->drv_status = -EINVAL;
763*4882a593Smuzhiyun 		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
764*4882a593Smuzhiyun 		break;
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 	default:
767*4882a593Smuzhiyun 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
768*4882a593Smuzhiyun 		CSIO_DB_ASSERT(0);
769*4882a593Smuzhiyun 	}
770*4882a593Smuzhiyun }
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun static void
csio_scsis_io_active(struct csio_ioreq * req,enum csio_scsi_ev evt)773*4882a593Smuzhiyun csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
774*4882a593Smuzhiyun {
775*4882a593Smuzhiyun 	struct csio_hw *hw = req->lnode->hwp;
776*4882a593Smuzhiyun 	struct csio_scsim *scm = csio_hw_to_scsim(hw);
777*4882a593Smuzhiyun 	struct csio_rnode *rn;
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 	switch (evt) {
780*4882a593Smuzhiyun 	case CSIO_SCSIE_COMPLETED:
781*4882a593Smuzhiyun 		CSIO_DEC_STATS(scm, n_active);
782*4882a593Smuzhiyun 		list_del_init(&req->sm.sm_list);
783*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
784*4882a593Smuzhiyun 		/*
785*4882a593Smuzhiyun 		 * In MSIX mode, with multiple queues, the SCSI compeltions
786*4882a593Smuzhiyun 		 * could reach us sooner than the FW events sent to indicate
787*4882a593Smuzhiyun 		 * I-T nexus loss (link down, remote device logo etc). We
788*4882a593Smuzhiyun 		 * dont want to be returning such I/Os to the upper layer
789*4882a593Smuzhiyun 		 * immediately, since we wouldnt have reported the I-T nexus
790*4882a593Smuzhiyun 		 * loss itself. This forces us to serialize such completions
791*4882a593Smuzhiyun 		 * with the reporting of the I-T nexus loss. Therefore, we
792*4882a593Smuzhiyun 		 * internally queue up such up such completions in the rnode.
793*4882a593Smuzhiyun 		 * The reporting of I-T nexus loss to the upper layer is then
794*4882a593Smuzhiyun 		 * followed by the returning of I/Os in this internal queue.
795*4882a593Smuzhiyun 		 * Having another state alongwith another queue helps us take
796*4882a593Smuzhiyun 		 * actions for events such as ABORT received while we are
797*4882a593Smuzhiyun 		 * in this rnode queue.
798*4882a593Smuzhiyun 		 */
799*4882a593Smuzhiyun 		if (unlikely(req->wr_status != FW_SUCCESS)) {
800*4882a593Smuzhiyun 			rn = req->rnode;
801*4882a593Smuzhiyun 			/*
802*4882a593Smuzhiyun 			 * FW says remote device is lost, but rnode
803*4882a593Smuzhiyun 			 * doesnt reflect it.
804*4882a593Smuzhiyun 			 */
805*4882a593Smuzhiyun 			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
806*4882a593Smuzhiyun 						csio_is_rnode_ready(rn)) {
807*4882a593Smuzhiyun 				csio_set_state(&req->sm,
808*4882a593Smuzhiyun 						csio_scsis_shost_cmpl_await);
809*4882a593Smuzhiyun 				list_add_tail(&req->sm.sm_list,
810*4882a593Smuzhiyun 					      &rn->host_cmpl_q);
811*4882a593Smuzhiyun 			}
812*4882a593Smuzhiyun 		}
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 		break;
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	case CSIO_SCSIE_ABORT:
817*4882a593Smuzhiyun 		csio_scsi_abrt_cls(req, SCSI_ABORT);
818*4882a593Smuzhiyun 		if (req->drv_status == 0) {
819*4882a593Smuzhiyun 			csio_wr_issue(hw, req->eq_idx, false);
820*4882a593Smuzhiyun 			csio_set_state(&req->sm, csio_scsis_aborting);
821*4882a593Smuzhiyun 		}
822*4882a593Smuzhiyun 		break;
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 	case CSIO_SCSIE_CLOSE:
825*4882a593Smuzhiyun 		csio_scsi_abrt_cls(req, SCSI_CLOSE);
826*4882a593Smuzhiyun 		if (req->drv_status == 0) {
827*4882a593Smuzhiyun 			csio_wr_issue(hw, req->eq_idx, false);
828*4882a593Smuzhiyun 			csio_set_state(&req->sm, csio_scsis_closing);
829*4882a593Smuzhiyun 		}
830*4882a593Smuzhiyun 		break;
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	case CSIO_SCSIE_DRVCLEANUP:
833*4882a593Smuzhiyun 		req->wr_status = FW_HOSTERROR;
834*4882a593Smuzhiyun 		CSIO_DEC_STATS(scm, n_active);
835*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
836*4882a593Smuzhiyun 		break;
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 	default:
839*4882a593Smuzhiyun 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
840*4882a593Smuzhiyun 		CSIO_DB_ASSERT(0);
841*4882a593Smuzhiyun 	}
842*4882a593Smuzhiyun }
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun static void
csio_scsis_tm_active(struct csio_ioreq * req,enum csio_scsi_ev evt)845*4882a593Smuzhiyun csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
846*4882a593Smuzhiyun {
847*4882a593Smuzhiyun 	struct csio_hw *hw = req->lnode->hwp;
848*4882a593Smuzhiyun 	struct csio_scsim *scm = csio_hw_to_scsim(hw);
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	switch (evt) {
851*4882a593Smuzhiyun 	case CSIO_SCSIE_COMPLETED:
852*4882a593Smuzhiyun 		CSIO_DEC_STATS(scm, n_tm_active);
853*4882a593Smuzhiyun 		list_del_init(&req->sm.sm_list);
854*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
855*4882a593Smuzhiyun 
856*4882a593Smuzhiyun 		break;
857*4882a593Smuzhiyun 
858*4882a593Smuzhiyun 	case CSIO_SCSIE_ABORT:
859*4882a593Smuzhiyun 		csio_scsi_abrt_cls(req, SCSI_ABORT);
860*4882a593Smuzhiyun 		if (req->drv_status == 0) {
861*4882a593Smuzhiyun 			csio_wr_issue(hw, req->eq_idx, false);
862*4882a593Smuzhiyun 			csio_set_state(&req->sm, csio_scsis_aborting);
863*4882a593Smuzhiyun 		}
864*4882a593Smuzhiyun 		break;
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 	case CSIO_SCSIE_CLOSE:
868*4882a593Smuzhiyun 		csio_scsi_abrt_cls(req, SCSI_CLOSE);
869*4882a593Smuzhiyun 		if (req->drv_status == 0) {
870*4882a593Smuzhiyun 			csio_wr_issue(hw, req->eq_idx, false);
871*4882a593Smuzhiyun 			csio_set_state(&req->sm, csio_scsis_closing);
872*4882a593Smuzhiyun 		}
873*4882a593Smuzhiyun 		break;
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	case CSIO_SCSIE_DRVCLEANUP:
876*4882a593Smuzhiyun 		req->wr_status = FW_HOSTERROR;
877*4882a593Smuzhiyun 		CSIO_DEC_STATS(scm, n_tm_active);
878*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
879*4882a593Smuzhiyun 		break;
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	default:
882*4882a593Smuzhiyun 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
883*4882a593Smuzhiyun 		CSIO_DB_ASSERT(0);
884*4882a593Smuzhiyun 	}
885*4882a593Smuzhiyun }
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun static void
csio_scsis_aborting(struct csio_ioreq * req,enum csio_scsi_ev evt)888*4882a593Smuzhiyun csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
889*4882a593Smuzhiyun {
890*4882a593Smuzhiyun 	struct csio_hw *hw = req->lnode->hwp;
891*4882a593Smuzhiyun 	struct csio_scsim *scm = csio_hw_to_scsim(hw);
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	switch (evt) {
894*4882a593Smuzhiyun 	case CSIO_SCSIE_COMPLETED:
895*4882a593Smuzhiyun 		csio_dbg(hw,
896*4882a593Smuzhiyun 			 "ioreq %p recvd cmpltd (wr_status:%d) "
897*4882a593Smuzhiyun 			 "in aborting st\n", req, req->wr_status);
898*4882a593Smuzhiyun 		/*
899*4882a593Smuzhiyun 		 * Use -ECANCELED to explicitly tell the ABORTED event that
900*4882a593Smuzhiyun 		 * the original I/O was returned to driver by FW.
901*4882a593Smuzhiyun 		 * We dont really care if the I/O was returned with success by
902*4882a593Smuzhiyun 		 * FW (because the ABORT and completion of the I/O crossed each
903*4882a593Smuzhiyun 		 * other), or any other return value. Once we are in aborting
904*4882a593Smuzhiyun 		 * state, the success or failure of the I/O is unimportant to
905*4882a593Smuzhiyun 		 * us.
906*4882a593Smuzhiyun 		 */
907*4882a593Smuzhiyun 		req->drv_status = -ECANCELED;
908*4882a593Smuzhiyun 		break;
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	case CSIO_SCSIE_ABORT:
911*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_abrt_dups);
912*4882a593Smuzhiyun 		break;
913*4882a593Smuzhiyun 
914*4882a593Smuzhiyun 	case CSIO_SCSIE_ABORTED:
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 		csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
917*4882a593Smuzhiyun 			 req, req->wr_status, req->drv_status);
918*4882a593Smuzhiyun 		/*
919*4882a593Smuzhiyun 		 * Check if original I/O WR completed before the Abort
920*4882a593Smuzhiyun 		 * completion.
921*4882a593Smuzhiyun 		 */
922*4882a593Smuzhiyun 		if (req->drv_status != -ECANCELED) {
923*4882a593Smuzhiyun 			csio_warn(hw,
924*4882a593Smuzhiyun 				  "Abort completed before original I/O,"
925*4882a593Smuzhiyun 				   " req:%p\n", req);
926*4882a593Smuzhiyun 			CSIO_DB_ASSERT(0);
927*4882a593Smuzhiyun 		}
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 		/*
930*4882a593Smuzhiyun 		 * There are the following possible scenarios:
931*4882a593Smuzhiyun 		 * 1. The abort completed successfully, FW returned FW_SUCCESS.
932*4882a593Smuzhiyun 		 * 2. The completion of an I/O and the receipt of
933*4882a593Smuzhiyun 		 *    abort for that I/O by the FW crossed each other.
934*4882a593Smuzhiyun 		 *    The FW returned FW_EINVAL. The original I/O would have
935*4882a593Smuzhiyun 		 *    returned with FW_SUCCESS or any other SCSI error.
936*4882a593Smuzhiyun 		 * 3. The FW couldn't sent the abort out on the wire, as there
937*4882a593Smuzhiyun 		 *    was an I-T nexus loss (link down, remote device logged
938*4882a593Smuzhiyun 		 *    out etc). FW sent back an appropriate IT nexus loss status
939*4882a593Smuzhiyun 		 *    for the abort.
940*4882a593Smuzhiyun 		 * 4. FW sent an abort, but abort timed out (remote device
941*4882a593Smuzhiyun 		 *    didnt respond). FW replied back with
942*4882a593Smuzhiyun 		 *    FW_SCSI_ABORT_TIMEDOUT.
943*4882a593Smuzhiyun 		 * 5. FW couldn't genuinely abort the request for some reason,
944*4882a593Smuzhiyun 		 *    and sent us an error.
945*4882a593Smuzhiyun 		 *
946*4882a593Smuzhiyun 		 * The first 3 scenarios are treated as  succesful abort
947*4882a593Smuzhiyun 		 * operations by the host, while the last 2 are failed attempts
948*4882a593Smuzhiyun 		 * to abort. Manipulate the return value of the request
949*4882a593Smuzhiyun 		 * appropriately, so that host can convey these results
950*4882a593Smuzhiyun 		 * back to the upper layer.
951*4882a593Smuzhiyun 		 */
952*4882a593Smuzhiyun 		if ((req->wr_status == FW_SUCCESS) ||
953*4882a593Smuzhiyun 		    (req->wr_status == FW_EINVAL) ||
954*4882a593Smuzhiyun 		    csio_scsi_itnexus_loss_error(req->wr_status))
955*4882a593Smuzhiyun 			req->wr_status = FW_SCSI_ABORT_REQUESTED;
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 		CSIO_DEC_STATS(scm, n_active);
958*4882a593Smuzhiyun 		list_del_init(&req->sm.sm_list);
959*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
960*4882a593Smuzhiyun 		break;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	case CSIO_SCSIE_DRVCLEANUP:
963*4882a593Smuzhiyun 		req->wr_status = FW_HOSTERROR;
964*4882a593Smuzhiyun 		CSIO_DEC_STATS(scm, n_active);
965*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
966*4882a593Smuzhiyun 		break;
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	case CSIO_SCSIE_CLOSE:
969*4882a593Smuzhiyun 		/*
970*4882a593Smuzhiyun 		 * We can receive this event from the module
971*4882a593Smuzhiyun 		 * cleanup paths, if the FW forgot to reply to the ABORT WR
972*4882a593Smuzhiyun 		 * and left this ioreq in this state. For now, just ignore
973*4882a593Smuzhiyun 		 * the event. The CLOSE event is sent to this state, as
974*4882a593Smuzhiyun 		 * the LINK may have already gone down.
975*4882a593Smuzhiyun 		 */
976*4882a593Smuzhiyun 		break;
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 	default:
979*4882a593Smuzhiyun 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
980*4882a593Smuzhiyun 		CSIO_DB_ASSERT(0);
981*4882a593Smuzhiyun 	}
982*4882a593Smuzhiyun }
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun static void
csio_scsis_closing(struct csio_ioreq * req,enum csio_scsi_ev evt)985*4882a593Smuzhiyun csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
986*4882a593Smuzhiyun {
987*4882a593Smuzhiyun 	struct csio_hw *hw = req->lnode->hwp;
988*4882a593Smuzhiyun 	struct csio_scsim *scm = csio_hw_to_scsim(hw);
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun 	switch (evt) {
991*4882a593Smuzhiyun 	case CSIO_SCSIE_COMPLETED:
992*4882a593Smuzhiyun 		csio_dbg(hw,
993*4882a593Smuzhiyun 			 "ioreq %p recvd cmpltd (wr_status:%d) "
994*4882a593Smuzhiyun 			 "in closing st\n", req, req->wr_status);
995*4882a593Smuzhiyun 		/*
996*4882a593Smuzhiyun 		 * Use -ECANCELED to explicitly tell the CLOSED event that
997*4882a593Smuzhiyun 		 * the original I/O was returned to driver by FW.
998*4882a593Smuzhiyun 		 * We dont really care if the I/O was returned with success by
999*4882a593Smuzhiyun 		 * FW (because the CLOSE and completion of the I/O crossed each
1000*4882a593Smuzhiyun 		 * other), or any other return value. Once we are in aborting
1001*4882a593Smuzhiyun 		 * state, the success or failure of the I/O is unimportant to
1002*4882a593Smuzhiyun 		 * us.
1003*4882a593Smuzhiyun 		 */
1004*4882a593Smuzhiyun 		req->drv_status = -ECANCELED;
1005*4882a593Smuzhiyun 		break;
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun 	case CSIO_SCSIE_CLOSED:
1008*4882a593Smuzhiyun 		/*
1009*4882a593Smuzhiyun 		 * Check if original I/O WR completed before the Close
1010*4882a593Smuzhiyun 		 * completion.
1011*4882a593Smuzhiyun 		 */
1012*4882a593Smuzhiyun 		if (req->drv_status != -ECANCELED) {
1013*4882a593Smuzhiyun 			csio_fatal(hw,
1014*4882a593Smuzhiyun 				   "Close completed before original I/O,"
1015*4882a593Smuzhiyun 				   " req:%p\n", req);
1016*4882a593Smuzhiyun 			CSIO_DB_ASSERT(0);
1017*4882a593Smuzhiyun 		}
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 		/*
1020*4882a593Smuzhiyun 		 * Either close succeeded, or we issued close to FW at the
1021*4882a593Smuzhiyun 		 * same time FW compelted it to us. Either way, the I/O
1022*4882a593Smuzhiyun 		 * is closed.
1023*4882a593Smuzhiyun 		 */
1024*4882a593Smuzhiyun 		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
1025*4882a593Smuzhiyun 					(req->wr_status == FW_EINVAL));
1026*4882a593Smuzhiyun 		req->wr_status = FW_SCSI_CLOSE_REQUESTED;
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 		CSIO_DEC_STATS(scm, n_active);
1029*4882a593Smuzhiyun 		list_del_init(&req->sm.sm_list);
1030*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
1031*4882a593Smuzhiyun 		break;
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun 	case CSIO_SCSIE_CLOSE:
1034*4882a593Smuzhiyun 		break;
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	case CSIO_SCSIE_DRVCLEANUP:
1037*4882a593Smuzhiyun 		req->wr_status = FW_HOSTERROR;
1038*4882a593Smuzhiyun 		CSIO_DEC_STATS(scm, n_active);
1039*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
1040*4882a593Smuzhiyun 		break;
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	default:
1043*4882a593Smuzhiyun 		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
1044*4882a593Smuzhiyun 		CSIO_DB_ASSERT(0);
1045*4882a593Smuzhiyun 	}
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun static void
csio_scsis_shost_cmpl_await(struct csio_ioreq * req,enum csio_scsi_ev evt)1049*4882a593Smuzhiyun csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
1050*4882a593Smuzhiyun {
1051*4882a593Smuzhiyun 	switch (evt) {
1052*4882a593Smuzhiyun 	case CSIO_SCSIE_ABORT:
1053*4882a593Smuzhiyun 	case CSIO_SCSIE_CLOSE:
1054*4882a593Smuzhiyun 		/*
1055*4882a593Smuzhiyun 		 * Just succeed the abort request, and hope that
1056*4882a593Smuzhiyun 		 * the remote device unregister path will cleanup
1057*4882a593Smuzhiyun 		 * this I/O to the upper layer within a sane
1058*4882a593Smuzhiyun 		 * amount of time.
1059*4882a593Smuzhiyun 		 */
1060*4882a593Smuzhiyun 		/*
1061*4882a593Smuzhiyun 		 * A close can come in during a LINK DOWN. The FW would have
1062*4882a593Smuzhiyun 		 * returned us the I/O back, but not the remote device lost
1063*4882a593Smuzhiyun 		 * FW event. In this interval, if the I/O times out at the upper
1064*4882a593Smuzhiyun 		 * layer, a close can come in. Take the same action as abort:
1065*4882a593Smuzhiyun 		 * return success, and hope that the remote device unregister
1066*4882a593Smuzhiyun 		 * path will cleanup this I/O. If the FW still doesnt send
1067*4882a593Smuzhiyun 		 * the msg, the close times out, and the upper layer resorts
1068*4882a593Smuzhiyun 		 * to the next level of error recovery.
1069*4882a593Smuzhiyun 		 */
1070*4882a593Smuzhiyun 		req->drv_status = 0;
1071*4882a593Smuzhiyun 		break;
1072*4882a593Smuzhiyun 	case CSIO_SCSIE_DRVCLEANUP:
1073*4882a593Smuzhiyun 		csio_set_state(&req->sm, csio_scsis_uninit);
1074*4882a593Smuzhiyun 		break;
1075*4882a593Smuzhiyun 	default:
1076*4882a593Smuzhiyun 		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
1077*4882a593Smuzhiyun 			 evt, req);
1078*4882a593Smuzhiyun 		CSIO_DB_ASSERT(0);
1079*4882a593Smuzhiyun 	}
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun /*
1083*4882a593Smuzhiyun  * csio_scsi_cmpl_handler - WR completion handler for SCSI.
1084*4882a593Smuzhiyun  * @hw: HW module.
1085*4882a593Smuzhiyun  * @wr: The completed WR from the ingress queue.
1086*4882a593Smuzhiyun  * @len: Length of the WR.
1087*4882a593Smuzhiyun  * @flb: Freelist buffer array.
1088*4882a593Smuzhiyun  * @priv: Private object
1089*4882a593Smuzhiyun  * @scsiwr: Pointer to SCSI WR.
1090*4882a593Smuzhiyun  *
1091*4882a593Smuzhiyun  * This is the WR completion handler called per completion from the
1092*4882a593Smuzhiyun  * ISR. It is called with lock held. It walks past the RSS and CPL message
1093*4882a593Smuzhiyun  * header where the actual WR is present.
1094*4882a593Smuzhiyun  * It then gets the status, the WR handle (ioreq pointer) and the length
1095*4882a593Smuzhiyun  * of the WR, based on the WR opcode. Only on a non-good status is the
1096*4882a593Smuzhiyun  * entire WR copied into the WR cache (ioreq->fw_wr).
1097*4882a593Smuzhiyun  * The ioreq corresponding to the WR is returned to the caller.
1098*4882a593Smuzhiyun  * NOTE: The SCSI queue doesn't allocate a freelist today, hence
1099*4882a593Smuzhiyun  * no freelist buffer is expected.
1100*4882a593Smuzhiyun  */
1101*4882a593Smuzhiyun struct csio_ioreq *
1102*4882a593Smuzhiyun csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
1103*4882a593Smuzhiyun 		     struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
1104*4882a593Smuzhiyun {
1105*4882a593Smuzhiyun 	struct csio_ioreq *ioreq = NULL;
1106*4882a593Smuzhiyun 	struct cpl_fw6_msg *cpl;
1107*4882a593Smuzhiyun 	uint8_t *tempwr;
1108*4882a593Smuzhiyun 	uint8_t	status;
1109*4882a593Smuzhiyun 	struct csio_scsim *scm = csio_hw_to_scsim(hw);
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	/* skip RSS header */
1112*4882a593Smuzhiyun 	cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
1115*4882a593Smuzhiyun 		csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
1116*4882a593Smuzhiyun 			  cpl->opcode);
1117*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_inval_cplop);
1118*4882a593Smuzhiyun 		return NULL;
1119*4882a593Smuzhiyun 	}
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	tempwr = (uint8_t *)(cpl->data);
1122*4882a593Smuzhiyun 	status = csio_wr_status(tempwr);
1123*4882a593Smuzhiyun 	*scsiwr = tempwr;
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	if (likely((*tempwr == FW_SCSI_READ_WR) ||
1126*4882a593Smuzhiyun 			(*tempwr == FW_SCSI_WRITE_WR) ||
1127*4882a593Smuzhiyun 			(*tempwr == FW_SCSI_CMD_WR))) {
1128*4882a593Smuzhiyun 		ioreq = (struct csio_ioreq *)((uintptr_t)
1129*4882a593Smuzhiyun 				 (((struct fw_scsi_read_wr *)tempwr)->cookie));
1130*4882a593Smuzhiyun 		CSIO_DB_ASSERT(virt_addr_valid(ioreq));
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 		ioreq->wr_status = status;
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 		return ioreq;
1135*4882a593Smuzhiyun 	}
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
1138*4882a593Smuzhiyun 		ioreq = (struct csio_ioreq *)((uintptr_t)
1139*4882a593Smuzhiyun 			 (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
1140*4882a593Smuzhiyun 		CSIO_DB_ASSERT(virt_addr_valid(ioreq));
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 		ioreq->wr_status = status;
1143*4882a593Smuzhiyun 		return ioreq;
1144*4882a593Smuzhiyun 	}
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
1147*4882a593Smuzhiyun 	CSIO_INC_STATS(scm, n_inval_scsiop);
1148*4882a593Smuzhiyun 	return NULL;
1149*4882a593Smuzhiyun }
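
/*
 * Illustrative sketch, not part of the original driver: the cookie that
 * csio_scsi_cmpl_handler() recovers above is simply the ioreq pointer that
 * the submission path stamped into the WR. A hypothetical submission-side
 * counterpart (the name example_stamp_cookie is an assumption for
 * illustration only) would look like this:
 */
static inline void __maybe_unused
example_stamp_cookie(struct fw_scsi_read_wr *wr, struct csio_ioreq *req)
{
	/* Round-trips back via ((struct fw_scsi_read_wr *)tempwr)->cookie */
	wr->cookie = (u64)(uintptr_t)req;
}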
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun /*
1152*4882a593Smuzhiyun  * csio_scsi_cleanup_io_q - Cleanup the given queue.
1153*4882a593Smuzhiyun  * @scm: SCSI module.
1154*4882a593Smuzhiyun  * @q: Queue to be cleaned up.
1155*4882a593Smuzhiyun  *
1156*4882a593Smuzhiyun  * Called with lock held. Has to exit with lock held.
1157*4882a593Smuzhiyun  */
1158*4882a593Smuzhiyun void
1159*4882a593Smuzhiyun csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
1160*4882a593Smuzhiyun {
1161*4882a593Smuzhiyun 	struct csio_hw *hw = scm->hw;
1162*4882a593Smuzhiyun 	struct csio_ioreq *ioreq;
1163*4882a593Smuzhiyun 	struct list_head *tmp, *next;
1164*4882a593Smuzhiyun 	struct scsi_cmnd *scmnd;
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 	/* Call back the completion routines of the active_q */
1167*4882a593Smuzhiyun 	list_for_each_safe(tmp, next, q) {
1168*4882a593Smuzhiyun 		ioreq = (struct csio_ioreq *)tmp;
1169*4882a593Smuzhiyun 		csio_scsi_drvcleanup(ioreq);
1170*4882a593Smuzhiyun 		list_del_init(&ioreq->sm.sm_list);
1171*4882a593Smuzhiyun 		scmnd = csio_scsi_cmnd(ioreq);
1172*4882a593Smuzhiyun 		spin_unlock_irq(&hw->lock);
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 		/*
1175*4882a593Smuzhiyun 		 * Upper layers may have cleared this command, hence this
1176*4882a593Smuzhiyun 		 * check to avoid accessing stale references.
1177*4882a593Smuzhiyun 		 */
1178*4882a593Smuzhiyun 		if (scmnd != NULL)
1179*4882a593Smuzhiyun 			ioreq->io_cbfn(hw, ioreq);
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 		spin_lock_irq(&scm->freelist_lock);
1182*4882a593Smuzhiyun 		csio_put_scsi_ioreq(scm, ioreq);
1183*4882a593Smuzhiyun 		spin_unlock_irq(&scm->freelist_lock);
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 		spin_lock_irq(&hw->lock);
1186*4882a593Smuzhiyun 	}
1187*4882a593Smuzhiyun }
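
/*
 * Minimal sketch of the lock-juggling pattern used above (assumed names;
 * illustration only, not driver code): the hw lock is dropped around the
 * completion upcall so the midlayer can re-enter the driver, and reacquired
 * before the next list entry is touched. The walk stays valid because the
 * current entry is unlinked before the lock is dropped; like the cleanup
 * path above, this assumes nobody else mutates the queue concurrently.
 * Entered and exited with the lock held.
 */
static void __maybe_unused
example_walk_with_upcall(spinlock_t *lock, struct list_head *q,
			 void (*upcall)(struct list_head *))
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, q) {
		list_del_init(tmp);	/* unlink while still holding lock */
		spin_unlock_irq(lock);
		upcall(tmp);		/* runs without the lock held */
		spin_lock_irq(lock);
	}
}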
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun #define CSIO_SCSI_ABORT_Q_POLL_MS		2000
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun static void
1192*4882a593Smuzhiyun csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	struct csio_lnode *ln = ioreq->lnode;
1195*4882a593Smuzhiyun 	struct csio_hw *hw = ln->hwp;
1196*4882a593Smuzhiyun 	int ready = 0;
1197*4882a593Smuzhiyun 	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
1198*4882a593Smuzhiyun 	int rv;
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	if (csio_scsi_cmnd(ioreq) != scmnd) {
1201*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_abrt_race_comp);
1202*4882a593Smuzhiyun 		return;
1203*4882a593Smuzhiyun 	}
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	ready = csio_is_lnode_ready(ln);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
1208*4882a593Smuzhiyun 	if (rv != 0) {
1209*4882a593Smuzhiyun 		if (ready)
1210*4882a593Smuzhiyun 			CSIO_INC_STATS(scsim, n_abrt_busy_error);
1211*4882a593Smuzhiyun 		else
1212*4882a593Smuzhiyun 			CSIO_INC_STATS(scsim, n_cls_busy_error);
1213*4882a593Smuzhiyun 	}
1214*4882a593Smuzhiyun }
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun /*
1217*4882a593Smuzhiyun  * csio_scsi_abort_io_q - Abort all I/Os on given queue
1218*4882a593Smuzhiyun  * @scm: SCSI module.
1219*4882a593Smuzhiyun  * @q: Queue to abort.
1220*4882a593Smuzhiyun  * @tmo: Timeout in ms
1221*4882a593Smuzhiyun  *
1222*4882a593Smuzhiyun  * Attempt to abort all I/Os on given queue, and wait for a max
1223*4882a593Smuzhiyun  * of tmo milliseconds for them to complete. Returns success
1224*4882a593Smuzhiyun  * if all I/Os are aborted. Else returns -ETIMEDOUT.
1225*4882a593Smuzhiyun  * Should be entered with lock held. Exits with lock held.
1226*4882a593Smuzhiyun  * NOTE:
1227*4882a593Smuzhiyun  * Lock has to be held across the loop that aborts I/Os, since dropping the lock
1228*4882a593Smuzhiyun  * in between can cause the list to be corrupted. As a result, the caller
1229*4882a593Smuzhiyun  * of this function has to ensure that the number of I/Os to be aborted
1230*4882a593Smuzhiyun  * is small enough not to cause lock-held-for-too-long issues.
1231*4882a593Smuzhiyun  */
1232*4882a593Smuzhiyun static int
1233*4882a593Smuzhiyun csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	struct csio_hw *hw = scm->hw;
1236*4882a593Smuzhiyun 	struct list_head *tmp, *next;
1237*4882a593Smuzhiyun 	int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
1238*4882a593Smuzhiyun 	struct scsi_cmnd *scmnd;
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	if (list_empty(q))
1241*4882a593Smuzhiyun 		return 0;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	csio_dbg(hw, "Aborting SCSI I/Os\n");
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	/* Now abort/close I/Os in the queue passed */
1246*4882a593Smuzhiyun 	list_for_each_safe(tmp, next, q) {
1247*4882a593Smuzhiyun 		scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
1248*4882a593Smuzhiyun 		csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
1249*4882a593Smuzhiyun 	}
1250*4882a593Smuzhiyun 
1251*4882a593Smuzhiyun 	/* Wait till all active I/Os are completed/aborted/closed */
1252*4882a593Smuzhiyun 	while (!list_empty(q) && count--) {
1253*4882a593Smuzhiyun 		spin_unlock_irq(&hw->lock);
1254*4882a593Smuzhiyun 		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
1255*4882a593Smuzhiyun 		spin_lock_irq(&hw->lock);
1256*4882a593Smuzhiyun 	}
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	/* all aborts completed */
1259*4882a593Smuzhiyun 	if (list_empty(q))
1260*4882a593Smuzhiyun 		return 0;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	return -ETIMEDOUT;
1263*4882a593Smuzhiyun }
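
/*
 * Worked example of the polling arithmetic above (illustration only): with
 * tmo = 30000 ms, as used by the callers in this file, and
 * CSIO_SCSI_ABORT_Q_POLL_MS = 2000, count = DIV_ROUND_UP(30000, 2000) = 15,
 * i.e. up to fifteen 2-second sleeps (with the lock dropped across each)
 * before the queue is declared timed out and -ETIMEDOUT is returned.
 */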
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun /*
1266*4882a593Smuzhiyun  * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
1267*4882a593Smuzhiyun  * @scm: SCSI module.
1268*4882a593Smuzhiyun  * @abort: abort required.
1269*4882a593Smuzhiyun  * Called with lock held, should exit with lock held.
1270*4882a593Smuzhiyun  * Can sleep when waiting for I/Os to complete.
1271*4882a593Smuzhiyun  */
1272*4882a593Smuzhiyun int
1273*4882a593Smuzhiyun csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun 	struct csio_hw *hw = scm->hw;
1276*4882a593Smuzhiyun 	int rv = 0;
1277*4882a593Smuzhiyun 	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	/* No I/Os pending */
1280*4882a593Smuzhiyun 	if (list_empty(&scm->active_q))
1281*4882a593Smuzhiyun 		return 0;
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	/* Wait until all active I/Os are completed */
1284*4882a593Smuzhiyun 	while (!list_empty(&scm->active_q) && count--) {
1285*4882a593Smuzhiyun 		spin_unlock_irq(&hw->lock);
1286*4882a593Smuzhiyun 		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
1287*4882a593Smuzhiyun 		spin_lock_irq(&hw->lock);
1288*4882a593Smuzhiyun 	}
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	/* all I/Os completed */
1291*4882a593Smuzhiyun 	if (list_empty(&scm->active_q))
1292*4882a593Smuzhiyun 		return 0;
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	/* Else abort */
1295*4882a593Smuzhiyun 	if (abort) {
1296*4882a593Smuzhiyun 		rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
1297*4882a593Smuzhiyun 		if (rv == 0)
1298*4882a593Smuzhiyun 			return rv;
1299*4882a593Smuzhiyun 		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
1300*4882a593Smuzhiyun 	}
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	csio_scsi_cleanup_io_q(scm, &scm->active_q);
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	CSIO_DB_ASSERT(list_empty(&scm->active_q));
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	return rv;
1307*4882a593Smuzhiyun }
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun /*
1310*4882a593Smuzhiyun  * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
1311*4882a593Smuzhiyun  * @scm: SCSI module.
1312*4882a593Smuzhiyun  * @lnode: lnode
1313*4882a593Smuzhiyun  *
1314*4882a593Smuzhiyun  * Called with lock held, should exit with lock held.
1315*4882a593Smuzhiyun  * Can sleep (with dropped lock) when waiting for I/Os to complete.
1316*4882a593Smuzhiyun  */
1317*4882a593Smuzhiyun int
1318*4882a593Smuzhiyun csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
1319*4882a593Smuzhiyun {
1320*4882a593Smuzhiyun 	struct csio_hw *hw = scm->hw;
1321*4882a593Smuzhiyun 	struct csio_scsi_level_data sld;
1322*4882a593Smuzhiyun 	int rv;
1323*4882a593Smuzhiyun 	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	sld.level = CSIO_LEV_LNODE;
1328*4882a593Smuzhiyun 	sld.lnode = ln;
1329*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ln->cmpl_q);
1330*4882a593Smuzhiyun 	csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	/* No I/Os pending on this lnode  */
1333*4882a593Smuzhiyun 	if (list_empty(&ln->cmpl_q))
1334*4882a593Smuzhiyun 		return 0;
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	/* Wait until all active I/Os on this lnode are completed */
1337*4882a593Smuzhiyun 	while (!list_empty(&ln->cmpl_q) && count--) {
1338*4882a593Smuzhiyun 		spin_unlock_irq(&hw->lock);
1339*4882a593Smuzhiyun 		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
1340*4882a593Smuzhiyun 		spin_lock_irq(&hw->lock);
1341*4882a593Smuzhiyun 	}
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	/* all I/Os completed */
1344*4882a593Smuzhiyun 	if (list_empty(&ln->cmpl_q))
1345*4882a593Smuzhiyun 		return 0;
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	/* I/Os are pending, abort them */
1350*4882a593Smuzhiyun 	rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
1351*4882a593Smuzhiyun 	if (rv != 0) {
1352*4882a593Smuzhiyun 		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
1353*4882a593Smuzhiyun 		csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	return rv;
1359*4882a593Smuzhiyun }
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun static ssize_t
1362*4882a593Smuzhiyun csio_show_hw_state(struct device *dev,
1363*4882a593Smuzhiyun 		   struct device_attribute *attr, char *buf)
1364*4882a593Smuzhiyun {
1365*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1366*4882a593Smuzhiyun 	struct csio_hw *hw = csio_lnode_to_hw(ln);
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	if (csio_is_hw_ready(hw))
1369*4882a593Smuzhiyun 		return snprintf(buf, PAGE_SIZE, "ready\n");
1370*4882a593Smuzhiyun 	else
1371*4882a593Smuzhiyun 		return snprintf(buf, PAGE_SIZE, "not ready\n");
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun /* Device reset */
1375*4882a593Smuzhiyun static ssize_t
1376*4882a593Smuzhiyun csio_device_reset(struct device *dev,
1377*4882a593Smuzhiyun 		   struct device_attribute *attr, const char *buf, size_t count)
1378*4882a593Smuzhiyun {
1379*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1380*4882a593Smuzhiyun 	struct csio_hw *hw = csio_lnode_to_hw(ln);
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	if (*buf != '1')
1383*4882a593Smuzhiyun 		return -EINVAL;
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	/* Delete NPIV lnodes */
1386*4882a593Smuzhiyun 	csio_lnodes_exit(hw, 1);
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	/* Block upper IOs */
1389*4882a593Smuzhiyun 	csio_lnodes_block_request(hw);
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	spin_lock_irq(&hw->lock);
1392*4882a593Smuzhiyun 	csio_hw_reset(hw);
1393*4882a593Smuzhiyun 	spin_unlock_irq(&hw->lock);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	/* Unblock upper IOs */
1396*4882a593Smuzhiyun 	csio_lnodes_unblock_request(hw);
1397*4882a593Smuzhiyun 	return count;
1398*4882a593Smuzhiyun }
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun /* disable port */
1401*4882a593Smuzhiyun static ssize_t
1402*4882a593Smuzhiyun csio_disable_port(struct device *dev,
1403*4882a593Smuzhiyun 		   struct device_attribute *attr, const char *buf, size_t count)
1404*4882a593Smuzhiyun {
1405*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1406*4882a593Smuzhiyun 	struct csio_hw *hw = csio_lnode_to_hw(ln);
1407*4882a593Smuzhiyun 	bool disable;
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	if (*buf == '1' || *buf == '0')
1410*4882a593Smuzhiyun 		disable = (*buf == '1');
1411*4882a593Smuzhiyun 	else
1412*4882a593Smuzhiyun 		return -EINVAL;
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	/* Block upper IOs */
1415*4882a593Smuzhiyun 	csio_lnodes_block_by_port(hw, ln->portid);
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	spin_lock_irq(&hw->lock);
1418*4882a593Smuzhiyun 	csio_disable_lnodes(hw, ln->portid, disable);
1419*4882a593Smuzhiyun 	spin_unlock_irq(&hw->lock);
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	/* Unblock upper IOs */
1422*4882a593Smuzhiyun 	csio_lnodes_unblock_by_port(hw, ln->portid);
1423*4882a593Smuzhiyun 	return count;
1424*4882a593Smuzhiyun }
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun /* Show debug level */
1427*4882a593Smuzhiyun static ssize_t
1428*4882a593Smuzhiyun csio_show_dbg_level(struct device *dev,
1429*4882a593Smuzhiyun 		   struct device_attribute *attr, char *buf)
1430*4882a593Smuzhiyun {
1431*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun /* Store debug level */
1437*4882a593Smuzhiyun static ssize_t
1438*4882a593Smuzhiyun csio_store_dbg_level(struct device *dev,
1439*4882a593Smuzhiyun 		   struct device_attribute *attr, const char *buf, size_t count)
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1442*4882a593Smuzhiyun 	struct csio_hw *hw = csio_lnode_to_hw(ln);
1443*4882a593Smuzhiyun 	uint32_t dbg_level = 0;
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 	if (!isdigit(buf[0]))
1446*4882a593Smuzhiyun 		return -EINVAL;
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	if (sscanf(buf, "%i", &dbg_level) != 1)
1449*4882a593Smuzhiyun 		return -EINVAL;
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	ln->params.log_level = dbg_level;
1452*4882a593Smuzhiyun 	hw->params.log_level = dbg_level;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	return count;
1455*4882a593Smuzhiyun }
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
1458*4882a593Smuzhiyun static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
1459*4882a593Smuzhiyun static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
1460*4882a593Smuzhiyun static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
1461*4882a593Smuzhiyun 		  csio_store_dbg_level);
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun static struct device_attribute *csio_fcoe_lport_attrs[] = {
1464*4882a593Smuzhiyun 	&dev_attr_hw_state,
1465*4882a593Smuzhiyun 	&dev_attr_device_reset,
1466*4882a593Smuzhiyun 	&dev_attr_disable_port,
1467*4882a593Smuzhiyun 	&dev_attr_dbg_level,
1468*4882a593Smuzhiyun 	NULL,
1469*4882a593Smuzhiyun };
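
/*
 * Usage note, illustration only (the host number host5 is an assumption):
 * the attributes above surface under the Scsi_Host sysfs directory, e.g.
 *
 *	cat /sys/class/scsi_host/host5/hw_state
 *	echo 1 > /sys/class/scsi_host/host5/device_reset
 *	echo 1 > /sys/class/scsi_host/host5/disable_port
 *	echo 3 > /sys/class/scsi_host/host5/dbg_level
 */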
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun static ssize_t
1472*4882a593Smuzhiyun csio_show_num_reg_rnodes(struct device *dev,
1473*4882a593Smuzhiyun 		     struct device_attribute *attr, char *buf)
1474*4882a593Smuzhiyun {
1475*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
1478*4882a593Smuzhiyun }
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun static struct device_attribute *csio_fcoe_vport_attrs[] = {
1483*4882a593Smuzhiyun 	&dev_attr_num_reg_rnodes,
1484*4882a593Smuzhiyun 	&dev_attr_dbg_level,
1485*4882a593Smuzhiyun 	NULL,
1486*4882a593Smuzhiyun };
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun static inline uint32_t
1489*4882a593Smuzhiyun csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
1490*4882a593Smuzhiyun {
1491*4882a593Smuzhiyun 	struct scsi_cmnd *scmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1492*4882a593Smuzhiyun 	struct scatterlist *sg;
1493*4882a593Smuzhiyun 	uint32_t bytes_left;
1494*4882a593Smuzhiyun 	uint32_t bytes_copy;
1495*4882a593Smuzhiyun 	uint32_t buf_off = 0;
1496*4882a593Smuzhiyun 	uint32_t start_off = 0;
1497*4882a593Smuzhiyun 	uint32_t sg_off = 0;
1498*4882a593Smuzhiyun 	void *sg_addr;
1499*4882a593Smuzhiyun 	void *buf_addr;
1500*4882a593Smuzhiyun 	struct csio_dma_buf *dma_buf;
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	bytes_left = scsi_bufflen(scmnd);
1503*4882a593Smuzhiyun 	sg = scsi_sglist(scmnd);
1504*4882a593Smuzhiyun 	dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 	/* Copy data from driver buffer to SGs of SCSI CMD */
1507*4882a593Smuzhiyun 	while (bytes_left > 0 && sg && dma_buf) {
1508*4882a593Smuzhiyun 		if (buf_off >= dma_buf->len) {
1509*4882a593Smuzhiyun 			buf_off = 0;
1510*4882a593Smuzhiyun 			dma_buf = (struct csio_dma_buf *)
1511*4882a593Smuzhiyun 					csio_list_next(dma_buf);
1512*4882a593Smuzhiyun 			continue;
1513*4882a593Smuzhiyun 		}
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 		if (start_off >= sg->length) {
1516*4882a593Smuzhiyun 			start_off -= sg->length;
1517*4882a593Smuzhiyun 			sg = sg_next(sg);
1518*4882a593Smuzhiyun 			continue;
1519*4882a593Smuzhiyun 		}
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 		buf_addr = dma_buf->vaddr + buf_off;
1522*4882a593Smuzhiyun 		sg_off = sg->offset + start_off;
1523*4882a593Smuzhiyun 		bytes_copy = min((dma_buf->len - buf_off),
1524*4882a593Smuzhiyun 				sg->length - start_off);
1525*4882a593Smuzhiyun 		bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
1526*4882a593Smuzhiyun 				 bytes_copy);
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 		sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
1529*4882a593Smuzhiyun 		if (!sg_addr) {
1530*4882a593Smuzhiyun 			csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
1531*4882a593Smuzhiyun 				sg, req);
1532*4882a593Smuzhiyun 			break;
1533*4882a593Smuzhiyun 		}
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 		csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
1536*4882a593Smuzhiyun 				sg_addr, sg_off, buf_addr, bytes_copy);
1537*4882a593Smuzhiyun 		memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
1538*4882a593Smuzhiyun 		kunmap_atomic(sg_addr);
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 		start_off +=  bytes_copy;
1541*4882a593Smuzhiyun 		buf_off += bytes_copy;
1542*4882a593Smuzhiyun 		bytes_left -= bytes_copy;
1543*4882a593Smuzhiyun 	}
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 	if (bytes_left > 0)
1546*4882a593Smuzhiyun 		return DID_ERROR;
1547*4882a593Smuzhiyun 	else
1548*4882a593Smuzhiyun 		return DID_OK;
1549*4882a593Smuzhiyun }
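
/*
 * Worked example of the double clamp above (illustration only, 4 KiB pages
 * assumed): with sg->offset = 0xff0 and start_off = 0, sg_off = 0xff0, so
 * PAGE_SIZE - (sg_off & ~PAGE_MASK) = 0x1000 - 0xff0 = 16. At most 16 bytes
 * are copied in this pass even if the DMA buffer and the SG entry both have
 * more room, because kmap_atomic() maps exactly one page; the next iteration
 * advances sg_off past the page boundary and maps sg_page(sg) + 1.
 */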
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun /*
1552*4882a593Smuzhiyun  * csio_scsi_err_handler - SCSI error handler.
1553*4882a593Smuzhiyun  * @hw: HW module.
1554*4882a593Smuzhiyun  * @req: IO request.
1555*4882a593Smuzhiyun  *
1556*4882a593Smuzhiyun  */
1557*4882a593Smuzhiyun static inline void
1558*4882a593Smuzhiyun csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
1559*4882a593Smuzhiyun {
1560*4882a593Smuzhiyun 	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1561*4882a593Smuzhiyun 	struct csio_scsim *scm = csio_hw_to_scsim(hw);
1562*4882a593Smuzhiyun 	struct fcp_resp_with_ext *fcp_resp;
1563*4882a593Smuzhiyun 	struct fcp_resp_rsp_info *rsp_info;
1564*4882a593Smuzhiyun 	struct csio_dma_buf *dma_buf;
1565*4882a593Smuzhiyun 	uint8_t flags, scsi_status = 0;
1566*4882a593Smuzhiyun 	uint32_t host_status = DID_OK;
1567*4882a593Smuzhiyun 	uint32_t rsp_len = 0, sns_len = 0;
1568*4882a593Smuzhiyun 	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	switch (req->wr_status) {
1572*4882a593Smuzhiyun 	case FW_HOSTERROR:
1573*4882a593Smuzhiyun 		if (unlikely(!csio_is_hw_ready(hw)))
1574*4882a593Smuzhiyun 			return;
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 		host_status = DID_ERROR;
1577*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_hosterror);
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 		break;
1580*4882a593Smuzhiyun 	case FW_SCSI_RSP_ERR:
1581*4882a593Smuzhiyun 		dma_buf = &req->dma_buf;
1582*4882a593Smuzhiyun 		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
1583*4882a593Smuzhiyun 		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
1584*4882a593Smuzhiyun 		flags = fcp_resp->resp.fr_flags;
1585*4882a593Smuzhiyun 		scsi_status = fcp_resp->resp.fr_status;
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 		if (flags & FCP_RSP_LEN_VAL) {
1588*4882a593Smuzhiyun 			rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
1589*4882a593Smuzhiyun 			if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
1590*4882a593Smuzhiyun 				(rsp_info->rsp_code != FCP_TMF_CMPL)) {
1591*4882a593Smuzhiyun 				host_status = DID_ERROR;
1592*4882a593Smuzhiyun 				goto out;
1593*4882a593Smuzhiyun 			}
1594*4882a593Smuzhiyun 		}
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 		if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
1597*4882a593Smuzhiyun 			sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
1598*4882a593Smuzhiyun 			if (sns_len > SCSI_SENSE_BUFFERSIZE)
1599*4882a593Smuzhiyun 				sns_len = SCSI_SENSE_BUFFERSIZE;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 			memcpy(cmnd->sense_buffer,
1602*4882a593Smuzhiyun 			       &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
1603*4882a593Smuzhiyun 			CSIO_INC_STATS(scm, n_autosense);
1604*4882a593Smuzhiyun 		}
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 		scsi_set_resid(cmnd, 0);
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 		/* Under run */
1609*4882a593Smuzhiyun 		if (flags & FCP_RESID_UNDER) {
1610*4882a593Smuzhiyun 			scsi_set_resid(cmnd,
1611*4882a593Smuzhiyun 				       be32_to_cpu(fcp_resp->ext.fr_resid));
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 			if (!(flags & FCP_SNS_LEN_VAL) &&
1614*4882a593Smuzhiyun 			    (scsi_status == SAM_STAT_GOOD) &&
1615*4882a593Smuzhiyun 			    ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
1616*4882a593Smuzhiyun 							< cmnd->underflow))
1617*4882a593Smuzhiyun 				host_status = DID_ERROR;
1618*4882a593Smuzhiyun 		} else if (flags & FCP_RESID_OVER)
1619*4882a593Smuzhiyun 			host_status = DID_ERROR;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_rsperror);
1622*4882a593Smuzhiyun 		break;
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	case FW_SCSI_OVER_FLOW_ERR:
1625*4882a593Smuzhiyun 		csio_warn(hw,
1626*4882a593Smuzhiyun 			  "Over-flow error, cmnd:0x%x expected len:0x%x"
1627*4882a593Smuzhiyun 			  " resid:0x%x\n", cmnd->cmnd[0],
1628*4882a593Smuzhiyun 			  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
1629*4882a593Smuzhiyun 		host_status = DID_ERROR;
1630*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_ovflerror);
1631*4882a593Smuzhiyun 		break;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	case FW_SCSI_UNDER_FLOW_ERR:
1634*4882a593Smuzhiyun 		csio_warn(hw,
1635*4882a593Smuzhiyun 			  "Under-flow error, cmnd:0x%x expected"
1636*4882a593Smuzhiyun 			  " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
1637*4882a593Smuzhiyun 			  cmnd->cmnd[0], scsi_bufflen(cmnd),
1638*4882a593Smuzhiyun 			  scsi_get_resid(cmnd), cmnd->device->lun,
1639*4882a593Smuzhiyun 			  rn->flowid);
1640*4882a593Smuzhiyun 		host_status = DID_ERROR;
1641*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_unflerror);
1642*4882a593Smuzhiyun 		break;
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	case FW_SCSI_ABORT_REQUESTED:
1645*4882a593Smuzhiyun 	case FW_SCSI_ABORTED:
1646*4882a593Smuzhiyun 	case FW_SCSI_CLOSE_REQUESTED:
1647*4882a593Smuzhiyun 		csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
1648*4882a593Smuzhiyun 			     cmnd->cmnd[0],
1649*4882a593Smuzhiyun 			    (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
1650*4882a593Smuzhiyun 			    "closed" : "aborted");
1651*4882a593Smuzhiyun 		/*
1652*4882a593Smuzhiyun 		 * csio_eh_abort_handler checks this value to
1653*4882a593Smuzhiyun 		 * succeed or fail the abort request.
1654*4882a593Smuzhiyun 		 */
1655*4882a593Smuzhiyun 		host_status = DID_REQUEUE;
1656*4882a593Smuzhiyun 		if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
1657*4882a593Smuzhiyun 			CSIO_INC_STATS(scm, n_closed);
1658*4882a593Smuzhiyun 		else
1659*4882a593Smuzhiyun 			CSIO_INC_STATS(scm, n_aborted);
1660*4882a593Smuzhiyun 		break;
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun 	case FW_SCSI_ABORT_TIMEDOUT:
1663*4882a593Smuzhiyun 		/* FW timed out the abort itself */
1664*4882a593Smuzhiyun 		csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
1665*4882a593Smuzhiyun 			 req, cmnd, req->wr_status);
1666*4882a593Smuzhiyun 		host_status = DID_ERROR;
1667*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_abrt_timedout);
1668*4882a593Smuzhiyun 		break;
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	case FW_RDEV_NOT_READY:
1671*4882a593Smuzhiyun 		/*
1672*4882a593Smuzhiyun 		 * In firmware, an RDEV can get into this state
1673*4882a593Smuzhiyun 		 * temporarily, before moving into the disappeared/lost
1674*4882a593Smuzhiyun 		 * state. So the driver completes the request as if the
1675*4882a593Smuzhiyun 		 * device had disappeared.
1676*4882a593Smuzhiyun 		 */
1677*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_rdev_nr_error);
1678*4882a593Smuzhiyun 		host_status = DID_ERROR;
1679*4882a593Smuzhiyun 		break;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	case FW_ERR_RDEV_LOST:
1682*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_rdev_lost_error);
1683*4882a593Smuzhiyun 		host_status = DID_ERROR;
1684*4882a593Smuzhiyun 		break;
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 	case FW_ERR_RDEV_LOGO:
1687*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_rdev_logo_error);
1688*4882a593Smuzhiyun 		host_status = DID_ERROR;
1689*4882a593Smuzhiyun 		break;
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	case FW_ERR_RDEV_IMPL_LOGO:
1692*4882a593Smuzhiyun 		host_status = DID_ERROR;
1693*4882a593Smuzhiyun 		break;
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 	case FW_ERR_LINK_DOWN:
1696*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_link_down_error);
1697*4882a593Smuzhiyun 		host_status = DID_ERROR;
1698*4882a593Smuzhiyun 		break;
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	case FW_FCOE_NO_XCHG:
1701*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_no_xchg_error);
1702*4882a593Smuzhiyun 		host_status = DID_ERROR;
1703*4882a593Smuzhiyun 		break;
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun 	default:
1706*4882a593Smuzhiyun 		csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
1707*4882a593Smuzhiyun 			    req->wr_status, req, cmnd);
1708*4882a593Smuzhiyun 		CSIO_DB_ASSERT(0);
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_unknown_error);
1711*4882a593Smuzhiyun 		host_status = DID_ERROR;
1712*4882a593Smuzhiyun 		break;
1713*4882a593Smuzhiyun 	}
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun out:
1716*4882a593Smuzhiyun 	if (req->nsge > 0) {
1717*4882a593Smuzhiyun 		scsi_dma_unmap(cmnd);
1718*4882a593Smuzhiyun 		if (req->dcopy && (host_status == DID_OK))
1719*4882a593Smuzhiyun 			host_status = csio_scsi_copy_to_sgl(hw, req);
1720*4882a593Smuzhiyun 	}
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	cmnd->result = (((host_status) << 16) | scsi_status);
1723*4882a593Smuzhiyun 	cmnd->scsi_done(cmnd);
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 	/* Wake up waiting threads */
1726*4882a593Smuzhiyun 	csio_scsi_cmnd(req) = NULL;
1727*4882a593Smuzhiyun 	complete(&req->cmplobj);
1728*4882a593Smuzhiyun }
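
/*
 * Layout note for the FW_SCSI_RSP_ERR case above (illustration only): the
 * FCP_RSP payload in the driver's DMA buffer is parsed as a
 * struct fcp_resp_with_ext immediately followed by a
 * struct fcp_resp_rsp_info. When FCP_RSP_LEN_VAL is set, rsp_len bytes of
 * response info precede the sense data, which is why the sense copy starts
 * at &rsp_info->_fr_resvd[0] + rsp_len and is clamped to
 * SCSI_SENSE_BUFFERSIZE.
 */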
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun /*
1731*4882a593Smuzhiyun  * csio_scsi_cbfn - SCSI callback function.
1732*4882a593Smuzhiyun  * @hw: HW module.
1733*4882a593Smuzhiyun  * @req: IO request.
1734*4882a593Smuzhiyun  *
1735*4882a593Smuzhiyun  */
1736*4882a593Smuzhiyun static void
1737*4882a593Smuzhiyun csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
1738*4882a593Smuzhiyun {
1739*4882a593Smuzhiyun 	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1740*4882a593Smuzhiyun 	uint8_t scsi_status = SAM_STAT_GOOD;
1741*4882a593Smuzhiyun 	uint32_t host_status = DID_OK;
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	if (likely(req->wr_status == FW_SUCCESS)) {
1744*4882a593Smuzhiyun 		if (req->nsge > 0) {
1745*4882a593Smuzhiyun 			scsi_dma_unmap(cmnd);
1746*4882a593Smuzhiyun 			if (req->dcopy)
1747*4882a593Smuzhiyun 				host_status = csio_scsi_copy_to_sgl(hw, req);
1748*4882a593Smuzhiyun 		}
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 		cmnd->result = (((host_status) << 16) | scsi_status);
1751*4882a593Smuzhiyun 		cmnd->scsi_done(cmnd);
1752*4882a593Smuzhiyun 		csio_scsi_cmnd(req) = NULL;
1753*4882a593Smuzhiyun 		CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
1754*4882a593Smuzhiyun 	} else {
1755*4882a593Smuzhiyun 		/* Error handling */
1756*4882a593Smuzhiyun 		csio_scsi_err_handler(hw, req);
1757*4882a593Smuzhiyun 	}
1758*4882a593Smuzhiyun }
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun /**
1761*4882a593Smuzhiyun  * csio_queuecommand - Entry point to kickstart an I/O request.
1762*4882a593Smuzhiyun  * @host:	The scsi_host pointer.
1763*4882a593Smuzhiyun  * @cmnd:	The I/O request from ML.
1764*4882a593Smuzhiyun  *
1765*4882a593Smuzhiyun  * This routine does the following:
1766*4882a593Smuzhiyun  *	- Checks for HW and Rnode module readiness.
1767*4882a593Smuzhiyun  *	- Gets a free ioreq structure (which is already initialized
1768*4882a593Smuzhiyun  *	  to uninit during its allocation).
1769*4882a593Smuzhiyun  *	- Maps SG elements.
1770*4882a593Smuzhiyun  *	- Initializes ioreq members.
1771*4882a593Smuzhiyun  *	- Kicks off the SCSI state machine for this IO.
1772*4882a593Smuzhiyun  *	- Returns busy status on error.
1773*4882a593Smuzhiyun  */
1774*4882a593Smuzhiyun static int
1775*4882a593Smuzhiyun csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
1776*4882a593Smuzhiyun {
1777*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(host);
1778*4882a593Smuzhiyun 	struct csio_hw *hw = csio_lnode_to_hw(ln);
1779*4882a593Smuzhiyun 	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
1780*4882a593Smuzhiyun 	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1781*4882a593Smuzhiyun 	struct csio_ioreq *ioreq = NULL;
1782*4882a593Smuzhiyun 	unsigned long flags;
1783*4882a593Smuzhiyun 	int nsge = 0;
1784*4882a593Smuzhiyun 	int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
1785*4882a593Smuzhiyun 	int retval;
1786*4882a593Smuzhiyun 	struct csio_scsi_qset *sqset;
1787*4882a593Smuzhiyun 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	nr = fc_remote_port_chkready(rport);
1792*4882a593Smuzhiyun 	if (nr) {
1793*4882a593Smuzhiyun 		cmnd->result = nr;
1794*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_rn_nr_error);
1795*4882a593Smuzhiyun 		goto err_done;
1796*4882a593Smuzhiyun 	}
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	if (unlikely(!csio_is_hw_ready(hw))) {
1799*4882a593Smuzhiyun 		cmnd->result = (DID_REQUEUE << 16);
1800*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_hw_nr_error);
1801*4882a593Smuzhiyun 		goto err_done;
1802*4882a593Smuzhiyun 	}
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun 	/* Get req->nsge, if there are SG elements to be mapped  */
1805*4882a593Smuzhiyun 	nsge = scsi_dma_map(cmnd);
1806*4882a593Smuzhiyun 	if (unlikely(nsge < 0)) {
1807*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_dmamap_error);
1808*4882a593Smuzhiyun 		goto err;
1809*4882a593Smuzhiyun 	}
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	/* Do we support so many mappings? */
1812*4882a593Smuzhiyun 	if (unlikely(nsge > scsim->max_sge)) {
1813*4882a593Smuzhiyun 		csio_warn(hw,
1814*4882a593Smuzhiyun 			  "More SGEs than can be supported."
1815*4882a593Smuzhiyun 			  " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
1816*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_unsupp_sge_error);
1817*4882a593Smuzhiyun 		goto err_dma_unmap;
1818*4882a593Smuzhiyun 	}
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	/* Get a free ioreq structure - SM is already set to uninit */
1821*4882a593Smuzhiyun 	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
1822*4882a593Smuzhiyun 	if (!ioreq) {
1823*4882a593Smuzhiyun 		csio_err(hw, "Out of I/O request elements. Active #:%d\n",
1824*4882a593Smuzhiyun 			 scsim->stats.n_active);
1825*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_no_req_error);
1826*4882a593Smuzhiyun 		goto err_dma_unmap;
1827*4882a593Smuzhiyun 	}
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	ioreq->nsge		= nsge;
1830*4882a593Smuzhiyun 	ioreq->lnode		= ln;
1831*4882a593Smuzhiyun 	ioreq->rnode		= rn;
1832*4882a593Smuzhiyun 	ioreq->iq_idx		= sqset->iq_idx;
1833*4882a593Smuzhiyun 	ioreq->eq_idx		= sqset->eq_idx;
1834*4882a593Smuzhiyun 	ioreq->wr_status	= 0;
1835*4882a593Smuzhiyun 	ioreq->drv_status	= 0;
1836*4882a593Smuzhiyun 	csio_scsi_cmnd(ioreq)	= (void *)cmnd;
1837*4882a593Smuzhiyun 	ioreq->tmo		= 0;
1838*4882a593Smuzhiyun 	ioreq->datadir		= cmnd->sc_data_direction;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
1841*4882a593Smuzhiyun 		CSIO_INC_STATS(ln, n_output_requests);
1842*4882a593Smuzhiyun 		ln->stats.n_output_bytes += scsi_bufflen(cmnd);
1843*4882a593Smuzhiyun 	} else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
1844*4882a593Smuzhiyun 		CSIO_INC_STATS(ln, n_input_requests);
1845*4882a593Smuzhiyun 		ln->stats.n_input_bytes += scsi_bufflen(cmnd);
1846*4882a593Smuzhiyun 	} else
1847*4882a593Smuzhiyun 		CSIO_INC_STATS(ln, n_control_requests);
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 	/* Set cbfn */
1850*4882a593Smuzhiyun 	ioreq->io_cbfn = csio_scsi_cbfn;
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	/* Needed during abort */
1853*4882a593Smuzhiyun 	cmnd->host_scribble = (unsigned char *)ioreq;
1854*4882a593Smuzhiyun 	cmnd->SCp.Message = 0;
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 	/* Kick off SCSI IO SM on the ioreq */
1857*4882a593Smuzhiyun 	spin_lock_irqsave(&hw->lock, flags);
1858*4882a593Smuzhiyun 	retval = csio_scsi_start_io(ioreq);
1859*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hw->lock, flags);
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	if (retval != 0) {
1862*4882a593Smuzhiyun 		csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
1863*4882a593Smuzhiyun 			 ioreq, retval);
1864*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_busy_error);
1865*4882a593Smuzhiyun 		goto err_put_req;
1866*4882a593Smuzhiyun 	}
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	return 0;
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun err_put_req:
1871*4882a593Smuzhiyun 	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
1872*4882a593Smuzhiyun err_dma_unmap:
1873*4882a593Smuzhiyun 	if (nsge > 0)
1874*4882a593Smuzhiyun 		scsi_dma_unmap(cmnd);
1875*4882a593Smuzhiyun err:
1876*4882a593Smuzhiyun 	return rv;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun err_done:
1879*4882a593Smuzhiyun 	cmnd->scsi_done(cmnd);
1880*4882a593Smuzhiyun 	return 0;
1881*4882a593Smuzhiyun }
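
/*
 * Return-convention note for csio_queuecommand() above (illustration only):
 * the SCSI midlayer expects either 0, with scsi_done() invoked when the I/O
 * eventually completes, or a SCSI_MLQUEUE_* busy code with the command left
 * untouched. Hence the err_done path completes the command immediately and
 * returns 0, while resource failures unwind the DMA mapping and return
 * SCSI_MLQUEUE_HOST_BUSY so that the midlayer retries the command later.
 */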
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun static int
1884*4882a593Smuzhiyun csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
1885*4882a593Smuzhiyun {
1886*4882a593Smuzhiyun 	int rv;
1887*4882a593Smuzhiyun 	int cpu = smp_processor_id();
1888*4882a593Smuzhiyun 	struct csio_lnode *ln = ioreq->lnode;
1889*4882a593Smuzhiyun 	struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
1892*4882a593Smuzhiyun 	/*
1893*4882a593Smuzhiyun 	 * Use current processor queue for posting the abort/close, but retain
1894*4882a593Smuzhiyun 	 * the ingress queue ID of the original I/O being aborted/closed - we
1895*4882a593Smuzhiyun 	 * need the abort/close completion to be received on the same queue
1896*4882a593Smuzhiyun 	 * as the original I/O.
1897*4882a593Smuzhiyun 	 */
1898*4882a593Smuzhiyun 	ioreq->eq_idx = sqset->eq_idx;
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	if (abort == SCSI_ABORT)
1901*4882a593Smuzhiyun 		rv = csio_scsi_abort(ioreq);
1902*4882a593Smuzhiyun 	else
1903*4882a593Smuzhiyun 		rv = csio_scsi_close(ioreq);
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 	return rv;
1906*4882a593Smuzhiyun }
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun static int
1909*4882a593Smuzhiyun csio_eh_abort_handler(struct scsi_cmnd *cmnd)
1910*4882a593Smuzhiyun {
1911*4882a593Smuzhiyun 	struct csio_ioreq *ioreq;
1912*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(cmnd->device->host);
1913*4882a593Smuzhiyun 	struct csio_hw *hw = csio_lnode_to_hw(ln);
1914*4882a593Smuzhiyun 	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
1915*4882a593Smuzhiyun 	int ready = 0, ret;
1916*4882a593Smuzhiyun 	unsigned long tmo = 0;
1917*4882a593Smuzhiyun 	int rv;
1918*4882a593Smuzhiyun 	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 	ret = fc_block_scsi_eh(cmnd);
1921*4882a593Smuzhiyun 	if (ret)
1922*4882a593Smuzhiyun 		return ret;
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	ioreq = (struct csio_ioreq *)cmnd->host_scribble;
1925*4882a593Smuzhiyun 	if (!ioreq)
1926*4882a593Smuzhiyun 		return SUCCESS;
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	if (!rn)
1929*4882a593Smuzhiyun 		return FAILED;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	csio_dbg(hw,
1932*4882a593Smuzhiyun 		 "Request to abort ioreq:%p cmd:%p cdb:%08llx"
1933*4882a593Smuzhiyun 		 " ssni:0x%x lun:%llu iq:0x%x\n",
1934*4882a593Smuzhiyun 		ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
1935*4882a593Smuzhiyun 		cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
1938*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_abrt_race_comp);
1939*4882a593Smuzhiyun 		return SUCCESS;
1940*4882a593Smuzhiyun 	}
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun 	ready = csio_is_lnode_ready(ln);
1943*4882a593Smuzhiyun 	tmo = CSIO_SCSI_ABRT_TMO_MS;
1944*4882a593Smuzhiyun 
1945*4882a593Smuzhiyun 	reinit_completion(&ioreq->cmplobj);
1946*4882a593Smuzhiyun 	spin_lock_irq(&hw->lock);
1947*4882a593Smuzhiyun 	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
1948*4882a593Smuzhiyun 	spin_unlock_irq(&hw->lock);
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	if (rv != 0) {
1951*4882a593Smuzhiyun 		if (rv == -EINVAL) {
1952*4882a593Smuzhiyun 			/* Return success, if abort/close request issued on
1953*4882a593Smuzhiyun 			 * already completed IO
1954*4882a593Smuzhiyun 			 */
1955*4882a593Smuzhiyun 			return SUCCESS;
1956*4882a593Smuzhiyun 		}
1957*4882a593Smuzhiyun 		if (ready)
1958*4882a593Smuzhiyun 			CSIO_INC_STATS(scsim, n_abrt_busy_error);
1959*4882a593Smuzhiyun 		else
1960*4882a593Smuzhiyun 			CSIO_INC_STATS(scsim, n_cls_busy_error);
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 		goto inval_scmnd;
1963*4882a593Smuzhiyun 	}
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 	wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	/* FW didn't respond to the abort within our timeout */
1968*4882a593Smuzhiyun 	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun 		csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
1971*4882a593Smuzhiyun 		CSIO_INC_STATS(scsim, n_abrt_timedout);
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun inval_scmnd:
1974*4882a593Smuzhiyun 		if (ioreq->nsge > 0)
1975*4882a593Smuzhiyun 			scsi_dma_unmap(cmnd);
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun 		spin_lock_irq(&hw->lock);
1978*4882a593Smuzhiyun 		csio_scsi_cmnd(ioreq) = NULL;
1979*4882a593Smuzhiyun 		spin_unlock_irq(&hw->lock);
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun 		cmnd->result = (DID_ERROR << 16);
1982*4882a593Smuzhiyun 		cmnd->scsi_done(cmnd);
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 		return FAILED;
1985*4882a593Smuzhiyun 	}
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 	/* FW successfully aborted the request */
1988*4882a593Smuzhiyun 	if (host_byte(cmnd->result) == DID_REQUEUE) {
1989*4882a593Smuzhiyun 		csio_info(hw,
1990*4882a593Smuzhiyun 			"Aborted SCSI command to (%d:%llu) tag %u\n",
1991*4882a593Smuzhiyun 			cmnd->device->id, cmnd->device->lun,
1992*4882a593Smuzhiyun 			cmnd->request->tag);
1993*4882a593Smuzhiyun 		return SUCCESS;
1994*4882a593Smuzhiyun 	} else {
1995*4882a593Smuzhiyun 		csio_info(hw,
1996*4882a593Smuzhiyun 			"Failed to abort SCSI command, (%d:%llu) tag %u\n",
1997*4882a593Smuzhiyun 			cmnd->device->id, cmnd->device->lun,
1998*4882a593Smuzhiyun 			cmnd->request->tag);
1999*4882a593Smuzhiyun 		return FAILED;
2000*4882a593Smuzhiyun 	}
2001*4882a593Smuzhiyun }
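
/*
 * Note on the DID_REQUEUE check above (illustration only): the completion
 * path in csio_scsi_err_handler() sets the host byte to DID_REQUEUE for
 * FW_SCSI_ABORTED and FW_SCSI_CLOSE_REQUESTED completions precisely so that
 * this handler can tell "the FW really aborted/closed the I/O" (SUCCESS)
 * apart from "the I/O completed some other way while we waited" (FAILED).
 */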
2002*4882a593Smuzhiyun 
2003*4882a593Smuzhiyun /*
2004*4882a593Smuzhiyun  * csio_tm_cbfn - TM callback function.
2005*4882a593Smuzhiyun  * @hw: HW module.
2006*4882a593Smuzhiyun  * @req: IO request.
2007*4882a593Smuzhiyun  *
2008*4882a593Smuzhiyun  * Cache the result in 'cmnd', since ioreq will be freed soon
2009*4882a593Smuzhiyun  * after we return from here, and the waiting thread shouldn't trust
2010*4882a593Smuzhiyun  * the ioreq contents.
2011*4882a593Smuzhiyun  */
2012*4882a593Smuzhiyun static void
2013*4882a593Smuzhiyun csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
2014*4882a593Smuzhiyun {
2015*4882a593Smuzhiyun 	struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
2016*4882a593Smuzhiyun 	struct csio_dma_buf *dma_buf;
2017*4882a593Smuzhiyun 	uint8_t flags = 0;
2018*4882a593Smuzhiyun 	struct fcp_resp_with_ext *fcp_resp;
2019*4882a593Smuzhiyun 	struct fcp_resp_rsp_info *rsp_info;
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
2022*4882a593Smuzhiyun 		      req, req->wr_status);
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 	/* Cache FW return status */
2025*4882a593Smuzhiyun 	cmnd->SCp.Status = req->wr_status;
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	/* Special handling based on FCP response */
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	/*
2030*4882a593Smuzhiyun 	 * FW returns us this error, if flags were set. FCP4 says
2031*4882a593Smuzhiyun 	 * FCP_RSP_LEN_VAL in flags shall be set for TM completions.
2032*4882a593Smuzhiyun 	 * So if a target were to set this bit, we expect that the
2033*4882a593Smuzhiyun 	 * rsp_code is set to FCP_TMF_CMPL for a successful TM
2034*4882a593Smuzhiyun 	 * completion. Any other rsp_code means TM operation failed.
2035*4882a593Smuzhiyun 	 * If a target were to just ignore setting flags, we treat
2036*4882a593Smuzhiyun 	 * the TM operation as success, and FW returns FW_SUCCESS.
2037*4882a593Smuzhiyun 	 */
2038*4882a593Smuzhiyun 	if (req->wr_status == FW_SCSI_RSP_ERR) {
2039*4882a593Smuzhiyun 		dma_buf = &req->dma_buf;
2040*4882a593Smuzhiyun 		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
2041*4882a593Smuzhiyun 		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 		flags = fcp_resp->resp.fr_flags;
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun 		/* Modify return status if flags indicate success */
2046*4882a593Smuzhiyun 		if (flags & FCP_RSP_LEN_VAL)
2047*4882a593Smuzhiyun 			if (rsp_info->rsp_code == FCP_TMF_CMPL)
2048*4882a593Smuzhiyun 				cmnd->SCp.Status = FW_SUCCESS;
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 		csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
2051*4882a593Smuzhiyun 	}
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 	/* Wake up the TM handler thread */
2054*4882a593Smuzhiyun 	csio_scsi_cmnd(req) = NULL;
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun static int
2058*4882a593Smuzhiyun csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
2059*4882a593Smuzhiyun {
2060*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(cmnd->device->host);
2061*4882a593Smuzhiyun 	struct csio_hw *hw = csio_lnode_to_hw(ln);
2062*4882a593Smuzhiyun 	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
2063*4882a593Smuzhiyun 	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
2064*4882a593Smuzhiyun 	struct csio_ioreq *ioreq = NULL;
2065*4882a593Smuzhiyun 	struct csio_scsi_qset *sqset;
2066*4882a593Smuzhiyun 	unsigned long flags;
2067*4882a593Smuzhiyun 	int retval;
2068*4882a593Smuzhiyun 	int count, ret;
2069*4882a593Smuzhiyun 	LIST_HEAD(local_q);
2070*4882a593Smuzhiyun 	struct csio_scsi_level_data sld;
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	if (!rn)
2073*4882a593Smuzhiyun 		goto fail;
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun 	csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
2076*4882a593Smuzhiyun 		      cmnd->device->lun, rn->flowid, rn->scsi_id);
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun 	if (!csio_is_lnode_ready(ln)) {
2079*4882a593Smuzhiyun 		csio_err(hw,
2080*4882a593Smuzhiyun 			 "LUN reset cannot be issued on non-ready"
2081*4882a593Smuzhiyun 			 " local node vnpi:0x%x (LUN:%llu)\n",
2082*4882a593Smuzhiyun 			 ln->vnp_flowid, cmnd->device->lun);
2083*4882a593Smuzhiyun 		goto fail;
2084*4882a593Smuzhiyun 	}
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	/* Lnode is ready, now wait on rport node readiness */
2087*4882a593Smuzhiyun 	ret = fc_block_scsi_eh(cmnd);
2088*4882a593Smuzhiyun 	if (ret)
2089*4882a593Smuzhiyun 		return ret;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	/*
2092*4882a593Smuzhiyun 	 * If we have blocked in the previous call, at this point, either the
2093*4882a593Smuzhiyun 	 * remote node has come back online, or device loss timer has fired
2094*4882a593Smuzhiyun 	 * and the remote node is destroyed. Allow the LUN reset only for
2095*4882a593Smuzhiyun 	 * the former case, since LUN reset is a TMF I/O on the wire, and we
2096*4882a593Smuzhiyun 	 * need a valid session to issue it.
2097*4882a593Smuzhiyun 	 */
2098*4882a593Smuzhiyun 	if (fc_remote_port_chkready(rn->rport)) {
2099*4882a593Smuzhiyun 		csio_err(hw,
2100*4882a593Smuzhiyun 			 "LUN reset cannot be issued on non-ready"
2101*4882a593Smuzhiyun 			 " remote node ssni:0x%x (LUN:%llu)\n",
2102*4882a593Smuzhiyun 			 rn->flowid, cmnd->device->lun);
2103*4882a593Smuzhiyun 		goto fail;
2104*4882a593Smuzhiyun 	}
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	/* Get a free ioreq structure - SM is already set to uninit */
2107*4882a593Smuzhiyun 	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	if (!ioreq) {
2110*4882a593Smuzhiyun 		csio_err(hw, "Out of IO request elements. Active # :%d\n",
2111*4882a593Smuzhiyun 			 scsim->stats.n_active);
2112*4882a593Smuzhiyun 		goto fail;
2113*4882a593Smuzhiyun 	}
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 	sqset			= &hw->sqset[ln->portid][smp_processor_id()];
2116*4882a593Smuzhiyun 	ioreq->nsge		= 0;
2117*4882a593Smuzhiyun 	ioreq->lnode		= ln;
2118*4882a593Smuzhiyun 	ioreq->rnode		= rn;
2119*4882a593Smuzhiyun 	ioreq->iq_idx		= sqset->iq_idx;
2120*4882a593Smuzhiyun 	ioreq->eq_idx		= sqset->eq_idx;
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	csio_scsi_cmnd(ioreq)	= cmnd;
2123*4882a593Smuzhiyun 	cmnd->host_scribble	= (unsigned char *)ioreq;
2124*4882a593Smuzhiyun 	cmnd->SCp.Status	= 0;
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	cmnd->SCp.Message	= FCP_TMF_LUN_RESET;
2127*4882a593Smuzhiyun 	ioreq->tmo		= CSIO_SCSI_LUNRST_TMO_MS / 1000;
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	/*
2130*4882a593Smuzhiyun 	 * FW times the LUN reset for ioreq->tmo, so we have to wait a little
2131*4882a593Smuzhiyun 	 * longer (10s for now) than that to allow FW to return the timed-out
2132*4882a593Smuzhiyun 	 * command.
2133*4882a593Smuzhiyun 	 */
2134*4882a593Smuzhiyun 	count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun 	/* Set cbfn */
2137*4882a593Smuzhiyun 	ioreq->io_cbfn = csio_tm_cbfn;
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	/* Save of the ioreq info for later use */
2140*4882a593Smuzhiyun 	sld.level = CSIO_LEV_LUN;
2141*4882a593Smuzhiyun 	sld.lnode = ioreq->lnode;
2142*4882a593Smuzhiyun 	sld.rnode = ioreq->rnode;
2143*4882a593Smuzhiyun 	sld.oslun = cmnd->device->lun;
2144*4882a593Smuzhiyun 
2145*4882a593Smuzhiyun 	spin_lock_irqsave(&hw->lock, flags);
2146*4882a593Smuzhiyun 	/* Kick off TM SM on the ioreq */
2147*4882a593Smuzhiyun 	retval = csio_scsi_start_tm(ioreq);
2148*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hw->lock, flags);
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	if (retval != 0) {
2151*4882a593Smuzhiyun 		csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
2152*4882a593Smuzhiyun 			    ioreq, retval);
2153*4882a593Smuzhiyun 		goto fail_ret_ioreq;
2154*4882a593Smuzhiyun 	}
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 	csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
2157*4882a593Smuzhiyun 		    count * (CSIO_SCSI_TM_POLL_MS / 1000));
2158*4882a593Smuzhiyun 	/* Wait for completion */
2159*4882a593Smuzhiyun 	while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
2160*4882a593Smuzhiyun 								&& count--)
2161*4882a593Smuzhiyun 		msleep(CSIO_SCSI_TM_POLL_MS);
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 	/* LUN reset timed-out */
2164*4882a593Smuzhiyun 	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
2165*4882a593Smuzhiyun 		csio_err(hw, "LUN reset (%d:%llu) timed out\n",
2166*4882a593Smuzhiyun 			 cmnd->device->id, cmnd->device->lun);
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 		spin_lock_irq(&hw->lock);
2169*4882a593Smuzhiyun 		csio_scsi_drvcleanup(ioreq);
2170*4882a593Smuzhiyun 		list_del_init(&ioreq->sm.sm_list);
2171*4882a593Smuzhiyun 		spin_unlock_irq(&hw->lock);
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 		goto fail_ret_ioreq;
2174*4882a593Smuzhiyun 	}
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun 	/* LUN reset returned, check cached status */
2177*4882a593Smuzhiyun 	if (cmnd->SCp.Status != FW_SUCCESS) {
2178*4882a593Smuzhiyun 		csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
2179*4882a593Smuzhiyun 			 cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
2180*4882a593Smuzhiyun 		goto fail;
2181*4882a593Smuzhiyun 	}
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	/* LUN reset succeeded, start aborting affected I/Os */
2184*4882a593Smuzhiyun 	/*
2185*4882a593Smuzhiyun 	 * Since the midlayer guarantees that no new I/Os are issued to
2186*4882a593Smuzhiyun 	 * the LUN until the LUN reset completes, we gather the pending
2187*4882a593Smuzhiyun 	 * I/Os only after the LUN reset has succeeded.
2188*4882a593Smuzhiyun 	 */
2189*4882a593Smuzhiyun 	spin_lock_irq(&hw->lock);
2190*4882a593Smuzhiyun 	csio_scsi_gather_active_ios(scsim, &sld, &local_q);
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
2193*4882a593Smuzhiyun 	spin_unlock_irq(&hw->lock);
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	/* Aborts may have timed out */
2196*4882a593Smuzhiyun 	if (retval != 0) {
2197*4882a593Smuzhiyun 		csio_err(hw,
2198*4882a593Smuzhiyun 			 "Attempt to abort I/Os during LUN reset of %llu"
2199*4882a593Smuzhiyun 			 " returned %d\n", cmnd->device->lun, retval);
2200*4882a593Smuzhiyun 		/* Return I/Os back to active_q */
2201*4882a593Smuzhiyun 		spin_lock_irq(&hw->lock);
2202*4882a593Smuzhiyun 		list_splice_tail_init(&local_q, &scsim->active_q);
2203*4882a593Smuzhiyun 		spin_unlock_irq(&hw->lock);
2204*4882a593Smuzhiyun 		goto fail;
2205*4882a593Smuzhiyun 	}
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	CSIO_INC_STATS(rn, n_lun_rst);
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	csio_info(hw, "LUN reset occurred (%d:%llu)\n",
2210*4882a593Smuzhiyun 		  cmnd->device->id, cmnd->device->lun);
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	return SUCCESS;
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun fail_ret_ioreq:
2215*4882a593Smuzhiyun 	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
2216*4882a593Smuzhiyun fail:
2217*4882a593Smuzhiyun 	CSIO_INC_STATS(rn, n_lun_rst_fail);
2218*4882a593Smuzhiyun 	return FAILED;
2219*4882a593Smuzhiyun }
2220*4882a593Smuzhiyun 
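/*
 * Note: as the SCSI EH device-reset callback, the handler above must
 * return SUCCESS or FAILED; on FAILED the midlayer escalates to the
 * next recovery level (target reset, then bus/host reset).
 */
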
2221*4882a593Smuzhiyun static int
2222*4882a593Smuzhiyun csio_slave_alloc(struct scsi_device *sdev)
2223*4882a593Smuzhiyun {
2224*4882a593Smuzhiyun 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	if (!rport || fc_remote_port_chkready(rport))
2227*4882a593Smuzhiyun 		return -ENXIO;
2228*4882a593Smuzhiyun 
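	/*
	 * rport->dd_data is set up at rport registration to hold a
	 * pointer to the owning csio_lnode; the double dereference
	 * below retrieves it for use as the device hostdata.
	 */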
2229*4882a593Smuzhiyun 	sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun 	return 0;
2232*4882a593Smuzhiyun }
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun static int
2235*4882a593Smuzhiyun csio_slave_configure(struct scsi_device *sdev)
2236*4882a593Smuzhiyun {
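	/* Apply the driver's configured default per-LUN queue depth */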
2237*4882a593Smuzhiyun 	scsi_change_queue_depth(sdev, csio_lun_qdepth);
2238*4882a593Smuzhiyun 	return 0;
2239*4882a593Smuzhiyun }
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun static void
2242*4882a593Smuzhiyun csio_slave_destroy(struct scsi_device *sdev)
2243*4882a593Smuzhiyun {
2244*4882a593Smuzhiyun 	sdev->hostdata = NULL;
2245*4882a593Smuzhiyun }
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun static int
2248*4882a593Smuzhiyun csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
2249*4882a593Smuzhiyun {
2250*4882a593Smuzhiyun 	struct csio_lnode *ln = shost_priv(shost);
2251*4882a593Smuzhiyun 	int rv = 1;
2252*4882a593Smuzhiyun 
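	/*
	 * Default to "scan finished" (rv = 1) when the lnode is gone;
	 * otherwise let csio_scan_done() decide from the elapsed time
	 * versus the csio_max_scan_tmo/csio_delta_scan_tmo window.
	 */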
2253*4882a593Smuzhiyun 	spin_lock_irq(shost->host_lock);
2254*4882a593Smuzhiyun 	if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
2255*4882a593Smuzhiyun 		goto out;
2256*4882a593Smuzhiyun 
2257*4882a593Smuzhiyun 	rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
2258*4882a593Smuzhiyun 			    csio_delta_scan_tmo * HZ);
2259*4882a593Smuzhiyun out:
2260*4882a593Smuzhiyun 	spin_unlock_irq(shost->host_lock);
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	return rv;
2263*4882a593Smuzhiyun }
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun struct scsi_host_template csio_fcoe_shost_template = {
2266*4882a593Smuzhiyun 	.module			= THIS_MODULE,
2267*4882a593Smuzhiyun 	.name			= CSIO_DRV_DESC,
2268*4882a593Smuzhiyun 	.proc_name		= KBUILD_MODNAME,
2269*4882a593Smuzhiyun 	.queuecommand		= csio_queuecommand,
2270*4882a593Smuzhiyun 	.eh_timed_out		= fc_eh_timed_out,
2271*4882a593Smuzhiyun 	.eh_abort_handler	= csio_eh_abort_handler,
2272*4882a593Smuzhiyun 	.eh_device_reset_handler = csio_eh_lun_reset_handler,
2273*4882a593Smuzhiyun 	.slave_alloc		= csio_slave_alloc,
2274*4882a593Smuzhiyun 	.slave_configure	= csio_slave_configure,
2275*4882a593Smuzhiyun 	.slave_destroy		= csio_slave_destroy,
2276*4882a593Smuzhiyun 	.scan_finished		= csio_scan_finished,
2277*4882a593Smuzhiyun 	.this_id		= -1,
2278*4882a593Smuzhiyun 	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
2279*4882a593Smuzhiyun 	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
2280*4882a593Smuzhiyun 	.shost_attrs		= csio_fcoe_lport_attrs,
2281*4882a593Smuzhiyun 	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
2282*4882a593Smuzhiyun };
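/*
 * A minimal registration sketch (illustrative only; the driver's real
 * attach path lives elsewhere, and "pdev" here is hypothetical):
 *
 *	struct Scsi_Host *shost;
 *
 *	shost = scsi_host_alloc(&csio_fcoe_shost_template,
 *				sizeof(struct csio_lnode));
 *	if (shost && scsi_add_host(shost, &pdev->dev) == 0)
 *		scsi_scan_host(shost);
 *
 * With .scan_finished set, scsi_scan_host() performs an asynchronous
 * scan that the midlayer completes by polling csio_scan_finished().
 */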
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun struct scsi_host_template csio_fcoe_shost_vport_template = {
2285*4882a593Smuzhiyun 	.module			= THIS_MODULE,
2286*4882a593Smuzhiyun 	.name			= CSIO_DRV_DESC,
2287*4882a593Smuzhiyun 	.proc_name		= KBUILD_MODNAME,
2288*4882a593Smuzhiyun 	.queuecommand		= csio_queuecommand,
2289*4882a593Smuzhiyun 	.eh_timed_out		= fc_eh_timed_out,
2290*4882a593Smuzhiyun 	.eh_abort_handler	= csio_eh_abort_handler,
2291*4882a593Smuzhiyun 	.eh_device_reset_handler = csio_eh_lun_reset_handler,
2292*4882a593Smuzhiyun 	.slave_alloc		= csio_slave_alloc,
2293*4882a593Smuzhiyun 	.slave_configure	= csio_slave_configure,
2294*4882a593Smuzhiyun 	.slave_destroy		= csio_slave_destroy,
2295*4882a593Smuzhiyun 	.scan_finished		= csio_scan_finished,
2296*4882a593Smuzhiyun 	.this_id		= -1,
2297*4882a593Smuzhiyun 	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
2298*4882a593Smuzhiyun 	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
2299*4882a593Smuzhiyun 	.shost_attrs		= csio_fcoe_vport_attrs,
2300*4882a593Smuzhiyun 	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
2301*4882a593Smuzhiyun };
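/*
 * The vport template is identical to the physical-port template above
 * except for .shost_attrs, which points at the vport attribute set.
 */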
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun /*
2304*4882a593Smuzhiyun  * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
2305*4882a593Smuzhiyun  * @scm: SCSI Module
2306*4882a593Smuzhiyun  * @hw: HW device.
2307*4882a593Smuzhiyun  * @buf_size: buffer size
2308*4882a593Smuzhiyun  * @num_buf: Number of buffers.
2309*4882a593Smuzhiyun  *
2310*4882a593Smuzhiyun  * This routine allocates the DMA buffers required for SCSI data
2311*4882a593Smuzhiyun  * transfer when the SGL buffers of a SCSI read request posted by the
2312*4882a593Smuzhiyun  * midlayer are not virtually contiguous.
2313*4882a593Smuzhiyun  */
2314*4882a593Smuzhiyun static int
2315*4882a593Smuzhiyun csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
2316*4882a593Smuzhiyun 			 int buf_size, int num_buf)
2317*4882a593Smuzhiyun {
2318*4882a593Smuzhiyun 	int n = 0;
2319*4882a593Smuzhiyun 	struct list_head *tmp;
2320*4882a593Smuzhiyun 	struct csio_dma_buf *ddp_desc = NULL;
2321*4882a593Smuzhiyun 	uint32_t unit_size = 0;
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	/* Init the freelist first; a later free must never see it uninit */
2324*4882a593Smuzhiyun 	INIT_LIST_HEAD(&scm->ddp_freelist);
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun 	if (!num_buf)
2327*4882a593Smuzhiyun 		return 0;
2328*4882a593Smuzhiyun 	if (!buf_size)
2329*4882a593Smuzhiyun 		return -EINVAL;
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun 	/* Round buf_size up to a multiple of PAGE_SIZE */
2332*4882a593Smuzhiyun 	buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
2333*4882a593Smuzhiyun 	/* Initialize dma descriptors */
2334*4882a593Smuzhiyun 	for (n = 0; n < num_buf; n++) {
2335*4882a593Smuzhiyun 		/* Set unit size to request size */
2336*4882a593Smuzhiyun 		unit_size = buf_size;
2337*4882a593Smuzhiyun 		ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
2338*4882a593Smuzhiyun 		if (!ddp_desc) {
2339*4882a593Smuzhiyun 			csio_err(hw,
2340*4882a593Smuzhiyun 				 "Failed to allocate ddp descriptors,"
2341*4882a593Smuzhiyun 				 " Num allocated = %d.\n",
2342*4882a593Smuzhiyun 				 scm->stats.n_free_ddp);
2343*4882a593Smuzhiyun 			goto no_mem;
2344*4882a593Smuzhiyun 		}
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 		/* Allocate Dma buffers for DDP */
2347*4882a593Smuzhiyun 		ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
2348*4882a593Smuzhiyun 				&ddp_desc->paddr, GFP_KERNEL);
2349*4882a593Smuzhiyun 		if (!ddp_desc->vaddr) {
2350*4882a593Smuzhiyun 			csio_err(hw,
2351*4882a593Smuzhiyun 				 "SCSI response DMA buffer (ddp) allocation"
2352*4882a593Smuzhiyun 				 " failed!\n");
2353*4882a593Smuzhiyun 			kfree(ddp_desc);
2354*4882a593Smuzhiyun 			goto no_mem;
2355*4882a593Smuzhiyun 		}
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 		ddp_desc->len = unit_size;
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 		/* Add it to the scsi ddp freelist */
2360*4882a593Smuzhiyun 		list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
2361*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_free_ddp);
2362*4882a593Smuzhiyun 	}
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 	return 0;
2365*4882a593Smuzhiyun no_mem:
2366*4882a593Smuzhiyun 	/* Unwind: free the DMA memory and descriptors allocated so far */
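	/*
	 * list_for_each() is made deletion-safe here by stepping tmp
	 * back to the previous node before the current entry is
	 * unlinked and freed.
	 */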
2367*4882a593Smuzhiyun 	list_for_each(tmp, &scm->ddp_freelist) {
2368*4882a593Smuzhiyun 		ddp_desc = (struct csio_dma_buf *) tmp;
2369*4882a593Smuzhiyun 		tmp = csio_list_prev(tmp);
2370*4882a593Smuzhiyun 		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
2371*4882a593Smuzhiyun 				  ddp_desc->vaddr, ddp_desc->paddr);
2372*4882a593Smuzhiyun 		list_del_init(&ddp_desc->list);
2373*4882a593Smuzhiyun 		kfree(ddp_desc);
2374*4882a593Smuzhiyun 	}
2375*4882a593Smuzhiyun 	scm->stats.n_free_ddp = 0;
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	return -ENOMEM;
2378*4882a593Smuzhiyun }
2379*4882a593Smuzhiyun 
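/*
 * A hedged consumption sketch (not driver code; the lock shown is an
 * assumption -- the I/O path may serialize freelist access differently):
 *
 *	struct csio_dma_buf *buf = NULL;
 *
 *	spin_lock_irq(&scm->freelist_lock);
 *	if (!list_empty(&scm->ddp_freelist)) {
 *		buf = list_first_entry(&scm->ddp_freelist,
 *				       struct csio_dma_buf, list);
 *		list_del_init(&buf->list);
 *		CSIO_DEC_STATS(scm, n_free_ddp);
 *	}
 *	spin_unlock_irq(&scm->freelist_lock);
 *
 * Buffers are returned with list_add_tail() plus the matching
 * CSIO_INC_STATS(scm, n_free_ddp), mirroring the allocator above.
 */
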
2380*4882a593Smuzhiyun /*
2381*4882a593Smuzhiyun  * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
2382*4882a593Smuzhiyun  * @scm: SCSI Module
2383*4882a593Smuzhiyun  * @hw: HW device.
2384*4882a593Smuzhiyun  *
2385*4882a593Smuzhiyun  * This routine frees ddp buffers.
2386*4882a593Smuzhiyun  */
2387*4882a593Smuzhiyun static void
2388*4882a593Smuzhiyun csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
2389*4882a593Smuzhiyun {
2390*4882a593Smuzhiyun 	struct list_head *tmp;
2391*4882a593Smuzhiyun 	struct csio_dma_buf *ddp_desc;
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun 	/* Walk the ddp freelist, freeing DMA memory and descriptors */
2394*4882a593Smuzhiyun 	list_for_each(tmp, &scm->ddp_freelist) {
2395*4882a593Smuzhiyun 		ddp_desc = (struct csio_dma_buf *) tmp;
2396*4882a593Smuzhiyun 		tmp = csio_list_prev(tmp);
2397*4882a593Smuzhiyun 		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
2398*4882a593Smuzhiyun 				  ddp_desc->vaddr, ddp_desc->paddr);
2399*4882a593Smuzhiyun 		list_del_init(&ddp_desc->list);
2400*4882a593Smuzhiyun 		kfree(ddp_desc);
2401*4882a593Smuzhiyun 	}
2402*4882a593Smuzhiyun 	scm->stats.n_free_ddp = 0;
2403*4882a593Smuzhiyun }
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun /**
2406*4882a593Smuzhiyun  * csio_scsim_init - Initialize SCSI Module
2407*4882a593Smuzhiyun  * @scm:	SCSI Module
2408*4882a593Smuzhiyun  * @hw:		HW module
2409*4882a593Smuzhiyun  *
2410*4882a593Smuzhiyun  */
2411*4882a593Smuzhiyun int
2412*4882a593Smuzhiyun csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
2413*4882a593Smuzhiyun {
2414*4882a593Smuzhiyun 	int i;
2415*4882a593Smuzhiyun 	struct csio_ioreq *ioreq;
2416*4882a593Smuzhiyun 	struct csio_dma_buf *dma_buf;
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 	INIT_LIST_HEAD(&scm->active_q);
2419*4882a593Smuzhiyun 	scm->hw = hw;
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 	scm->proto_cmd_len = sizeof(struct fcp_cmnd);
2422*4882a593Smuzhiyun 	scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
2423*4882a593Smuzhiyun 	scm->max_sge = CSIO_SCSI_MAX_SGE;
2424*4882a593Smuzhiyun 
2425*4882a593Smuzhiyun 	spin_lock_init(&scm->freelist_lock);
2426*4882a593Smuzhiyun 
2427*4882a593Smuzhiyun 	/* Pre-allocate ioreqs and initialize them */
2428*4882a593Smuzhiyun 	INIT_LIST_HEAD(&scm->ioreq_freelist);
2429*4882a593Smuzhiyun 	for (i = 0; i < csio_scsi_ioreqs; i++) {
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 		ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
2432*4882a593Smuzhiyun 		if (!ioreq) {
2433*4882a593Smuzhiyun 			csio_err(hw,
2434*4882a593Smuzhiyun 				 "I/O request element allocation failed, "
2435*4882a593Smuzhiyun 				 " Num allocated = %d.\n",
2436*4882a593Smuzhiyun 				 scm->stats.n_free_ioreq);
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun 			goto free_ioreq;
2439*4882a593Smuzhiyun 		}
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 		/* Allocate Dma buffers for Response Payload */
2442*4882a593Smuzhiyun 		dma_buf = &ioreq->dma_buf;
2443*4882a593Smuzhiyun 		dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
2444*4882a593Smuzhiyun 						&dma_buf->paddr);
2445*4882a593Smuzhiyun 		if (!dma_buf->vaddr) {
2446*4882a593Smuzhiyun 			csio_err(hw,
2447*4882a593Smuzhiyun 				 "SCSI response DMA buffer allocation"
2448*4882a593Smuzhiyun 				 " failed!\n");
2449*4882a593Smuzhiyun 			kfree(ioreq);
2450*4882a593Smuzhiyun 			goto free_ioreq;
2451*4882a593Smuzhiyun 		}
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 		dma_buf->len = scm->proto_rsp_len;
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun 		/* Set state to uninit */
2456*4882a593Smuzhiyun 		csio_init_state(&ioreq->sm, csio_scsis_uninit);
2457*4882a593Smuzhiyun 		INIT_LIST_HEAD(&ioreq->gen_list);
2458*4882a593Smuzhiyun 		init_completion(&ioreq->cmplobj);
2459*4882a593Smuzhiyun 
2460*4882a593Smuzhiyun 		list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
2461*4882a593Smuzhiyun 		CSIO_INC_STATS(scm, n_free_ioreq);
2462*4882a593Smuzhiyun 	}
2463*4882a593Smuzhiyun 
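	/* Pre-allocate page-sized DDP buffers (csio_ddp_descs of them) */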
2464*4882a593Smuzhiyun 	if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
2465*4882a593Smuzhiyun 		goto free_ioreq;
2466*4882a593Smuzhiyun 
2467*4882a593Smuzhiyun 	return 0;
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun free_ioreq:
2470*4882a593Smuzhiyun 	/*
2471*4882a593Smuzhiyun 	 * Free up existing allocations, since an error
2472*4882a593Smuzhiyun 	 * from here means we are returning for good
2473*4882a593Smuzhiyun 	 */
2474*4882a593Smuzhiyun 	while (!list_empty(&scm->ioreq_freelist)) {
2475*4882a593Smuzhiyun 		struct csio_sm *tmp;
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 		tmp = list_first_entry(&scm->ioreq_freelist,
2478*4882a593Smuzhiyun 				       struct csio_sm, sm_list);
2479*4882a593Smuzhiyun 		list_del_init(&tmp->sm_list);
2480*4882a593Smuzhiyun 		ioreq = (struct csio_ioreq *)tmp;
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 		dma_buf = &ioreq->dma_buf;
2483*4882a593Smuzhiyun 		dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
2484*4882a593Smuzhiyun 			      dma_buf->paddr);
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun 		kfree(ioreq);
2487*4882a593Smuzhiyun 	}
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun 	scm->stats.n_free_ioreq = 0;
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	return -ENOMEM;
2492*4882a593Smuzhiyun }
2493*4882a593Smuzhiyun 
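/*
 * A hedged pairing sketch (illustrative; csio_hw_to_scsim() is assumed
 * to be the driver's accessor for the SCSI module embedded in the HW
 * structure): a probe-time caller would do roughly
 *
 *	if (csio_scsim_init(csio_hw_to_scsim(hw), hw))
 *		goto err_out;
 *	...
 *	csio_scsim_exit(csio_hw_to_scsim(hw));
 */
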
2494*4882a593Smuzhiyun /**
2495*4882a593Smuzhiyun  * csio_scsim_exit - Uninitialize SCSI Module
2496*4882a593Smuzhiyun  * @scm: SCSI Module
2497*4882a593Smuzhiyun  *
2498*4882a593Smuzhiyun  */
2499*4882a593Smuzhiyun void
2500*4882a593Smuzhiyun csio_scsim_exit(struct csio_scsim *scm)
2501*4882a593Smuzhiyun {
2502*4882a593Smuzhiyun 	struct csio_ioreq *ioreq;
2503*4882a593Smuzhiyun 	struct csio_dma_buf *dma_buf;
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun 	while (!list_empty(&scm->ioreq_freelist)) {
2506*4882a593Smuzhiyun 		struct csio_sm *tmp;
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 		tmp = list_first_entry(&scm->ioreq_freelist,
2509*4882a593Smuzhiyun 				       struct csio_sm, sm_list);
2510*4882a593Smuzhiyun 		list_del_init(&tmp->sm_list);
2511*4882a593Smuzhiyun 		ioreq = (struct csio_ioreq *)tmp;
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun 		dma_buf = &ioreq->dma_buf;
2514*4882a593Smuzhiyun 		dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
2515*4882a593Smuzhiyun 			      dma_buf->paddr);
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 		kfree(ioreq);
2518*4882a593Smuzhiyun 	}
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	scm->stats.n_free_ioreq = 0;
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun 	csio_scsi_free_ddp_bufs(scm, scm->hw);
2523*4882a593Smuzhiyun }
2524