xref: /OK3568_Linux_fs/kernel/drivers/scsi/qedf/qedf_io.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  QLogic FCoE Offload Driver
4*4882a593Smuzhiyun  *  Copyright (c) 2016-2018 Cavium Inc.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #include <linux/spinlock.h>
7*4882a593Smuzhiyun #include <linux/vmalloc.h>
8*4882a593Smuzhiyun #include "qedf.h"
9*4882a593Smuzhiyun #include <scsi/scsi_tcq.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
12*4882a593Smuzhiyun 	unsigned int timer_msec)
13*4882a593Smuzhiyun {
14*4882a593Smuzhiyun 	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
15*4882a593Smuzhiyun 	    msecs_to_jiffies(timer_msec));
16*4882a593Smuzhiyun }
17*4882a593Smuzhiyun 
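/*
 * Delayed-work handler invoked when a command's timeout_work fires.
 * Handles timed-out ABTS, ELS and sequence-cleanup requests by issuing a
 * firmware cleanup task and then completing/releasing the original request.
 */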
18*4882a593Smuzhiyun static void qedf_cmd_timeout(struct work_struct *work)
19*4882a593Smuzhiyun {
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun 	struct qedf_ioreq *io_req =
22*4882a593Smuzhiyun 	    container_of(work, struct qedf_ioreq, timeout_work.work);
23*4882a593Smuzhiyun 	struct qedf_ctx *qedf;
24*4882a593Smuzhiyun 	struct qedf_rport *fcport;
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun 	if (io_req == NULL) {
27*4882a593Smuzhiyun 		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
28*4882a593Smuzhiyun 		return;
29*4882a593Smuzhiyun 	}
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 	fcport = io_req->fcport;
32*4882a593Smuzhiyun 	if (io_req->fcport == NULL) {
33*4882a593Smuzhiyun 		QEDF_INFO(NULL, QEDF_LOG_IO,  "fcport is NULL.\n");
34*4882a593Smuzhiyun 		return;
35*4882a593Smuzhiyun 	}
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	qedf = fcport->qedf;
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 	switch (io_req->cmd_type) {
40*4882a593Smuzhiyun 	case QEDF_ABTS:
41*4882a593Smuzhiyun 		if (qedf == NULL) {
42*4882a593Smuzhiyun 			QEDF_INFO(NULL, QEDF_LOG_IO,
43*4882a593Smuzhiyun 				  "qedf is NULL for ABTS xid=0x%x.\n",
44*4882a593Smuzhiyun 				  io_req->xid);
45*4882a593Smuzhiyun 			return;
46*4882a593Smuzhiyun 		}
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
49*4882a593Smuzhiyun 		    io_req->xid);
50*4882a593Smuzhiyun 		/* Cleanup timed out ABTS */
51*4882a593Smuzhiyun 		qedf_initiate_cleanup(io_req, true);
52*4882a593Smuzhiyun 		complete(&io_req->abts_done);
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 		/*
55*4882a593Smuzhiyun 		 * Need to call kref_put for reference taken when initiate_abts
56*4882a593Smuzhiyun 		 * was called since abts_compl won't be called now that we've
57*4882a593Smuzhiyun 		 * cleaned up the task.
58*4882a593Smuzhiyun 		 */
59*4882a593Smuzhiyun 		kref_put(&io_req->refcount, qedf_release_cmd);
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 		/* Clear in abort bit now that we're done with the command */
62*4882a593Smuzhiyun 		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 		/*
65*4882a593Smuzhiyun 		 * Now that the original I/O and the ABTS are complete see
66*4882a593Smuzhiyun 		 * if we need to reconnect to the target.
67*4882a593Smuzhiyun 		 */
68*4882a593Smuzhiyun 		qedf_restart_rport(fcport);
69*4882a593Smuzhiyun 		break;
70*4882a593Smuzhiyun 	case QEDF_ELS:
71*4882a593Smuzhiyun 		if (!qedf) {
72*4882a593Smuzhiyun 			QEDF_INFO(NULL, QEDF_LOG_IO,
73*4882a593Smuzhiyun 				  "qedf is NULL for ELS xid=0x%x.\n",
74*4882a593Smuzhiyun 				  io_req->xid);
75*4882a593Smuzhiyun 			return;
76*4882a593Smuzhiyun 		}
77*4882a593Smuzhiyun 		/* ELS request no longer outstanding since it timed out */
78*4882a593Smuzhiyun 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 		kref_get(&io_req->refcount);
81*4882a593Smuzhiyun 		/*
82*4882a593Smuzhiyun 		 * Don't attempt to clean an ELS timeout as any subsequent
83*4882a593Smuzhiyun 		 * ABTS or cleanup requests just hang.  For now just free
84*4882a593Smuzhiyun 		 * the resources of the original I/O and the RRQ
85*4882a593Smuzhiyun 		 */
86*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
87*4882a593Smuzhiyun 			  io_req->xid);
88*4882a593Smuzhiyun 		qedf_initiate_cleanup(io_req, true);
89*4882a593Smuzhiyun 		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
90*4882a593Smuzhiyun 		/* Call callback function to complete command */
91*4882a593Smuzhiyun 		if (io_req->cb_func && io_req->cb_arg) {
92*4882a593Smuzhiyun 			io_req->cb_func(io_req->cb_arg);
93*4882a593Smuzhiyun 			io_req->cb_arg = NULL;
94*4882a593Smuzhiyun 		}
95*4882a593Smuzhiyun 		kref_put(&io_req->refcount, qedf_release_cmd);
96*4882a593Smuzhiyun 		break;
97*4882a593Smuzhiyun 	case QEDF_SEQ_CLEANUP:
98*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
99*4882a593Smuzhiyun 		    "xid=0x%x.\n", io_req->xid);
100*4882a593Smuzhiyun 		qedf_initiate_cleanup(io_req, true);
101*4882a593Smuzhiyun 		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
102*4882a593Smuzhiyun 		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
103*4882a593Smuzhiyun 		break;
104*4882a593Smuzhiyun 	default:
105*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
106*4882a593Smuzhiyun 			  "Hit default case, xid=0x%x.\n", io_req->xid);
107*4882a593Smuzhiyun 		break;
108*4882a593Smuzhiyun 	}
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun 
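/*
 * Tear down the command manager: free every per-command BD table, the
 * io_bdt pointer pool, the preallocated sense buffers and firmware task
 * parameter structures, and finally the manager allocation itself.  Also
 * used as the error-unwind path of qedf_cmd_mgr_alloc().
 */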
111*4882a593Smuzhiyun void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun 	struct io_bdt *bdt_info;
114*4882a593Smuzhiyun 	struct qedf_ctx *qedf = cmgr->qedf;
115*4882a593Smuzhiyun 	size_t bd_tbl_sz;
116*4882a593Smuzhiyun 	u16 min_xid = 0;
117*4882a593Smuzhiyun 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
118*4882a593Smuzhiyun 	int num_ios;
119*4882a593Smuzhiyun 	int i;
120*4882a593Smuzhiyun 	struct qedf_ioreq *io_req;
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	num_ios = max_xid - min_xid + 1;
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	/* Free fcoe_bdt_ctx structures */
125*4882a593Smuzhiyun 	if (!cmgr->io_bdt_pool) {
126*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
127*4882a593Smuzhiyun 		goto free_cmd_pool;
128*4882a593Smuzhiyun 	}
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
131*4882a593Smuzhiyun 	for (i = 0; i < num_ios; i++) {
132*4882a593Smuzhiyun 		bdt_info = cmgr->io_bdt_pool[i];
133*4882a593Smuzhiyun 		if (bdt_info->bd_tbl) {
134*4882a593Smuzhiyun 			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
135*4882a593Smuzhiyun 			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
136*4882a593Smuzhiyun 			bdt_info->bd_tbl = NULL;
137*4882a593Smuzhiyun 		}
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	/* Destroy io_bdt pool */
141*4882a593Smuzhiyun 	for (i = 0; i < num_ios; i++) {
142*4882a593Smuzhiyun 		kfree(cmgr->io_bdt_pool[i]);
143*4882a593Smuzhiyun 		cmgr->io_bdt_pool[i] = NULL;
144*4882a593Smuzhiyun 	}
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	kfree(cmgr->io_bdt_pool);
147*4882a593Smuzhiyun 	cmgr->io_bdt_pool = NULL;
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun free_cmd_pool:
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	for (i = 0; i < num_ios; i++) {
152*4882a593Smuzhiyun 		io_req = &cmgr->cmds[i];
153*4882a593Smuzhiyun 		kfree(io_req->sgl_task_params);
154*4882a593Smuzhiyun 		kfree(io_req->task_params);
155*4882a593Smuzhiyun 		/* Make sure we free per command sense buffer */
156*4882a593Smuzhiyun 		if (io_req->sense_buffer)
157*4882a593Smuzhiyun 			dma_free_coherent(&qedf->pdev->dev,
158*4882a593Smuzhiyun 			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
159*4882a593Smuzhiyun 			    io_req->sense_buffer_dma);
160*4882a593Smuzhiyun 		cancel_delayed_work_sync(&io_req->rrq_work);
161*4882a593Smuzhiyun 	}
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	/* Free command manager itself */
164*4882a593Smuzhiyun 	vfree(cmgr);
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun 
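/*
 * Delayed-work handler that marks the request as RRQ-active and sends the
 * RRQ ELS for it.
 */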
167*4882a593Smuzhiyun static void qedf_handle_rrq(struct work_struct *work)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun 	struct qedf_ioreq *io_req =
170*4882a593Smuzhiyun 	    container_of(work, struct qedf_ioreq, rrq_work.work);
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
173*4882a593Smuzhiyun 	qedf_send_rrq(io_req);
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun 
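/*
 * Allocate the command manager along with one qedf_ioreq per task ID
 * (0 .. FCOE_PARAMS_NUM_TASKS - 1).  Each request gets a DMA-coherent
 * sense buffer, firmware task/SGL parameter structures and an io_bdt
 * entry holding its DMA-able BD table.
 */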
177*4882a593Smuzhiyun struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun 	struct qedf_cmd_mgr *cmgr;
180*4882a593Smuzhiyun 	struct io_bdt *bdt_info;
181*4882a593Smuzhiyun 	struct qedf_ioreq *io_req;
182*4882a593Smuzhiyun 	u16 xid;
183*4882a593Smuzhiyun 	int i;
184*4882a593Smuzhiyun 	int num_ios;
185*4882a593Smuzhiyun 	u16 min_xid = 0;
186*4882a593Smuzhiyun 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	/* Make sure num_queues is already set before calling this function */
189*4882a593Smuzhiyun 	if (!qedf->num_queues) {
190*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
191*4882a593Smuzhiyun 		return NULL;
192*4882a593Smuzhiyun 	}
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
195*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
196*4882a593Smuzhiyun 			   "max_xid 0x%x.\n", min_xid, max_xid);
197*4882a593Smuzhiyun 		return NULL;
198*4882a593Smuzhiyun 	}
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
201*4882a593Smuzhiyun 		   "0x%x.\n", min_xid, max_xid);
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	num_ios = max_xid - min_xid + 1;
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
206*4882a593Smuzhiyun 	if (!cmgr) {
207*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
208*4882a593Smuzhiyun 		return NULL;
209*4882a593Smuzhiyun 	}
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	cmgr->qedf = qedf;
212*4882a593Smuzhiyun 	spin_lock_init(&cmgr->lock);
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	/*
215*4882a593Smuzhiyun 	 * Initialize I/O request fields.
216*4882a593Smuzhiyun 	 */
217*4882a593Smuzhiyun 	xid = 0;
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	for (i = 0; i < num_ios; i++) {
220*4882a593Smuzhiyun 		io_req = &cmgr->cmds[i];
221*4882a593Smuzhiyun 		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 		io_req->xid = xid++;
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 		/* Allocate DMA memory to hold sense buffer */
228*4882a593Smuzhiyun 		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
229*4882a593Smuzhiyun 		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
230*4882a593Smuzhiyun 		    GFP_KERNEL);
231*4882a593Smuzhiyun 		if (!io_req->sense_buffer) {
232*4882a593Smuzhiyun 			QEDF_ERR(&qedf->dbg_ctx,
233*4882a593Smuzhiyun 				 "Failed to alloc sense buffer.\n");
234*4882a593Smuzhiyun 			goto mem_err;
235*4882a593Smuzhiyun 		}
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 		/* Allocate task parameters to pass to f/w init functions */
238*4882a593Smuzhiyun 		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
239*4882a593Smuzhiyun 					      GFP_KERNEL);
240*4882a593Smuzhiyun 		if (!io_req->task_params) {
241*4882a593Smuzhiyun 			QEDF_ERR(&(qedf->dbg_ctx),
242*4882a593Smuzhiyun 				 "Failed to allocate task_params for xid=0x%x\n",
243*4882a593Smuzhiyun 				 i);
244*4882a593Smuzhiyun 			goto mem_err;
245*4882a593Smuzhiyun 		}
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 		/*
248*4882a593Smuzhiyun 		 * Allocate scatter/gather list info to pass to f/w init
249*4882a593Smuzhiyun 		 * functions.
250*4882a593Smuzhiyun 		 */
251*4882a593Smuzhiyun 		io_req->sgl_task_params = kzalloc(
252*4882a593Smuzhiyun 		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
253*4882a593Smuzhiyun 		if (!io_req->sgl_task_params) {
254*4882a593Smuzhiyun 			QEDF_ERR(&(qedf->dbg_ctx),
255*4882a593Smuzhiyun 				 "Failed to allocate sgl_task_params for xid=0x%x\n",
256*4882a593Smuzhiyun 				 i);
257*4882a593Smuzhiyun 			goto mem_err;
258*4882a593Smuzhiyun 		}
259*4882a593Smuzhiyun 	}
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	/* Allocate pool of io_bdts - one for each qedf_ioreq */
262*4882a593Smuzhiyun 	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
263*4882a593Smuzhiyun 	    GFP_KERNEL);
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	if (!cmgr->io_bdt_pool) {
266*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
267*4882a593Smuzhiyun 		goto mem_err;
268*4882a593Smuzhiyun 	}
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	for (i = 0; i < num_ios; i++) {
271*4882a593Smuzhiyun 		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
272*4882a593Smuzhiyun 		    GFP_KERNEL);
273*4882a593Smuzhiyun 		if (!cmgr->io_bdt_pool[i]) {
274*4882a593Smuzhiyun 			QEDF_WARN(&(qedf->dbg_ctx),
275*4882a593Smuzhiyun 				  "Failed to alloc io_bdt_pool[%d].\n", i);
276*4882a593Smuzhiyun 			goto mem_err;
277*4882a593Smuzhiyun 		}
278*4882a593Smuzhiyun 	}
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun 	for (i = 0; i < num_ios; i++) {
281*4882a593Smuzhiyun 		bdt_info = cmgr->io_bdt_pool[i];
282*4882a593Smuzhiyun 		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
283*4882a593Smuzhiyun 		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
284*4882a593Smuzhiyun 		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
285*4882a593Smuzhiyun 		if (!bdt_info->bd_tbl) {
286*4882a593Smuzhiyun 			QEDF_WARN(&(qedf->dbg_ctx),
287*4882a593Smuzhiyun 				  "Failed to alloc bdt_tbl[%d].\n", i);
288*4882a593Smuzhiyun 			goto mem_err;
289*4882a593Smuzhiyun 		}
290*4882a593Smuzhiyun 	}
291*4882a593Smuzhiyun 	atomic_set(&cmgr->free_list_cnt, num_ios);
292*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
293*4882a593Smuzhiyun 	    "cmgr->free_list_cnt=%d.\n",
294*4882a593Smuzhiyun 	    atomic_read(&cmgr->free_list_cnt));
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	return cmgr;
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun mem_err:
299*4882a593Smuzhiyun 	qedf_cmd_mgr_free(cmgr);
300*4882a593Smuzhiyun 	return NULL;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun 
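/*
 * Allocate a free qedf_ioreq for the fcport.  The allocation is throttled
 * by the per-port free SQE count, the number of active I/Os per connection
 * and the globally reserved task IDs; a round-robin scan under
 * cmd_mgr->lock then picks the next request whose 'alloc' flag is clear.
 */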
303*4882a593Smuzhiyun struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
304*4882a593Smuzhiyun {
305*4882a593Smuzhiyun 	struct qedf_ctx *qedf = fcport->qedf;
306*4882a593Smuzhiyun 	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
307*4882a593Smuzhiyun 	struct qedf_ioreq *io_req = NULL;
308*4882a593Smuzhiyun 	struct io_bdt *bd_tbl;
309*4882a593Smuzhiyun 	u16 xid;
310*4882a593Smuzhiyun 	uint32_t free_sqes;
311*4882a593Smuzhiyun 	int i;
312*4882a593Smuzhiyun 	unsigned long flags;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	free_sqes = atomic_read(&fcport->free_sqes);
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	if (!free_sqes) {
317*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
318*4882a593Smuzhiyun 		    "Returning NULL, free_sqes=%d.\n ",
319*4882a593Smuzhiyun 		    free_sqes);
320*4882a593Smuzhiyun 		goto out_failed;
321*4882a593Smuzhiyun 	}
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	/* Limit the number of outstanding R/W tasks */
324*4882a593Smuzhiyun 	if ((atomic_read(&fcport->num_active_ios) >=
325*4882a593Smuzhiyun 	    NUM_RW_TASKS_PER_CONNECTION)) {
326*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
327*4882a593Smuzhiyun 		    "Returning NULL, num_active_ios=%d.\n",
328*4882a593Smuzhiyun 		    atomic_read(&fcport->num_active_ios));
329*4882a593Smuzhiyun 		goto out_failed;
330*4882a593Smuzhiyun 	}
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	/* Limit global TIDs for certain tasks */
333*4882a593Smuzhiyun 	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
334*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
335*4882a593Smuzhiyun 		    "Returning NULL, free_list_cnt=%d.\n",
336*4882a593Smuzhiyun 		    atomic_read(&cmd_mgr->free_list_cnt));
337*4882a593Smuzhiyun 		goto out_failed;
338*4882a593Smuzhiyun 	}
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	spin_lock_irqsave(&cmd_mgr->lock, flags);
341*4882a593Smuzhiyun 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
342*4882a593Smuzhiyun 		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
343*4882a593Smuzhiyun 		cmd_mgr->idx++;
344*4882a593Smuzhiyun 		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
345*4882a593Smuzhiyun 			cmd_mgr->idx = 0;
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 		/* Check to make sure command was previously freed */
348*4882a593Smuzhiyun 		if (!io_req->alloc)
349*4882a593Smuzhiyun 			break;
350*4882a593Smuzhiyun 	}
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 	if (i == FCOE_PARAMS_NUM_TASKS) {
353*4882a593Smuzhiyun 		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
354*4882a593Smuzhiyun 		goto out_failed;
355*4882a593Smuzhiyun 	}
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
358*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
359*4882a593Smuzhiyun 			 "io_req found to be dirty ox_id = 0x%x.\n",
360*4882a593Smuzhiyun 			 io_req->xid);
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	/* Clear any flags now that we've reallocated the xid */
363*4882a593Smuzhiyun 	io_req->flags = 0;
364*4882a593Smuzhiyun 	io_req->alloc = 1;
365*4882a593Smuzhiyun 	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	atomic_inc(&fcport->num_active_ios);
368*4882a593Smuzhiyun 	atomic_dec(&fcport->free_sqes);
369*4882a593Smuzhiyun 	xid = io_req->xid;
370*4882a593Smuzhiyun 	atomic_dec(&cmd_mgr->free_list_cnt);
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	io_req->cmd_mgr = cmd_mgr;
373*4882a593Smuzhiyun 	io_req->fcport = fcport;
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	/* Clear any stale sc_cmd back pointer */
376*4882a593Smuzhiyun 	io_req->sc_cmd = NULL;
377*4882a593Smuzhiyun 	io_req->lun = -1;
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	/* Hold the io_req against deletion */
380*4882a593Smuzhiyun 	kref_init(&io_req->refcount);	/* ID: 001 */
381*4882a593Smuzhiyun 	atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	/* Bind io_bdt for this io_req */
384*4882a593Smuzhiyun 	/* Have a static link between io_req and io_bdt_pool */
385*4882a593Smuzhiyun 	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
386*4882a593Smuzhiyun 	if (bd_tbl == NULL) {
387*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
388*4882a593Smuzhiyun 		kref_put(&io_req->refcount, qedf_release_cmd);
389*4882a593Smuzhiyun 		goto out_failed;
390*4882a593Smuzhiyun 	}
391*4882a593Smuzhiyun 	bd_tbl->io_req = io_req;
392*4882a593Smuzhiyun 	io_req->cmd_type = cmd_type;
393*4882a593Smuzhiyun 	io_req->tm_flags = 0;
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	/* Reset sequence offset data */
396*4882a593Smuzhiyun 	io_req->rx_buf_off = 0;
397*4882a593Smuzhiyun 	io_req->tx_buf_off = 0;
398*4882a593Smuzhiyun 	io_req->rx_id = 0xffff; /* No RX_ID yet */
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	return io_req;
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun out_failed:
403*4882a593Smuzhiyun 	/* Record failure for stats and return NULL to caller */
404*4882a593Smuzhiyun 	qedf->alloc_failures++;
405*4882a593Smuzhiyun 	return NULL;
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun 
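/* Free the DMA-coherent midpath request/response BDs and buffers. */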
408*4882a593Smuzhiyun static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
409*4882a593Smuzhiyun {
410*4882a593Smuzhiyun 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
411*4882a593Smuzhiyun 	struct qedf_ctx *qedf = io_req->fcport->qedf;
412*4882a593Smuzhiyun 	uint64_t sz = sizeof(struct scsi_sge);
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 	/* clear tm flags */
415*4882a593Smuzhiyun 	if (mp_req->mp_req_bd) {
416*4882a593Smuzhiyun 		dma_free_coherent(&qedf->pdev->dev, sz,
417*4882a593Smuzhiyun 		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
418*4882a593Smuzhiyun 		mp_req->mp_req_bd = NULL;
419*4882a593Smuzhiyun 	}
420*4882a593Smuzhiyun 	if (mp_req->mp_resp_bd) {
421*4882a593Smuzhiyun 		dma_free_coherent(&qedf->pdev->dev, sz,
422*4882a593Smuzhiyun 		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
423*4882a593Smuzhiyun 		mp_req->mp_resp_bd = NULL;
424*4882a593Smuzhiyun 	}
425*4882a593Smuzhiyun 	if (mp_req->req_buf) {
426*4882a593Smuzhiyun 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
427*4882a593Smuzhiyun 		    mp_req->req_buf, mp_req->req_buf_dma);
428*4882a593Smuzhiyun 		mp_req->req_buf = NULL;
429*4882a593Smuzhiyun 	}
430*4882a593Smuzhiyun 	if (mp_req->resp_buf) {
431*4882a593Smuzhiyun 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
432*4882a593Smuzhiyun 		    mp_req->resp_buf, mp_req->resp_buf_dma);
433*4882a593Smuzhiyun 		mp_req->resp_buf = NULL;
434*4882a593Smuzhiyun 	}
435*4882a593Smuzhiyun }
436*4882a593Smuzhiyun 
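/*
 * kref release callback for a qedf_ioreq: frees midpath resources for ELS
 * and task-management commands, returns the request to the free pool and
 * bumps the task retry identifier for the next use of this task ID.
 */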
437*4882a593Smuzhiyun void qedf_release_cmd(struct kref *ref)
438*4882a593Smuzhiyun {
439*4882a593Smuzhiyun 	struct qedf_ioreq *io_req =
440*4882a593Smuzhiyun 	    container_of(ref, struct qedf_ioreq, refcount);
441*4882a593Smuzhiyun 	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
442*4882a593Smuzhiyun 	struct qedf_rport *fcport = io_req->fcport;
443*4882a593Smuzhiyun 	unsigned long flags;
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	if (io_req->cmd_type == QEDF_SCSI_CMD) {
446*4882a593Smuzhiyun 		QEDF_WARN(&fcport->qedf->dbg_ctx,
447*4882a593Smuzhiyun 			  "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
448*4882a593Smuzhiyun 			  io_req, io_req->xid);
449*4882a593Smuzhiyun 		WARN_ON(io_req->sc_cmd);
450*4882a593Smuzhiyun 	}
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	if (io_req->cmd_type == QEDF_ELS ||
453*4882a593Smuzhiyun 	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
454*4882a593Smuzhiyun 		qedf_free_mp_resc(io_req);
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	atomic_inc(&cmd_mgr->free_list_cnt);
457*4882a593Smuzhiyun 	atomic_dec(&fcport->num_active_ios);
458*4882a593Smuzhiyun 	atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
459*4882a593Smuzhiyun 	if (atomic_read(&fcport->num_active_ios) < 0) {
460*4882a593Smuzhiyun 		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
461*4882a593Smuzhiyun 		WARN_ON(1);
462*4882a593Smuzhiyun 	}
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 	/* Increment task retry identifier now that the request is released */
465*4882a593Smuzhiyun 	io_req->task_retry_identifier++;
466*4882a593Smuzhiyun 	io_req->fcport = NULL;
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun 	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
469*4882a593Smuzhiyun 	io_req->cpu = 0;
470*4882a593Smuzhiyun 	spin_lock_irqsave(&cmd_mgr->lock, flags);
471*4882a593Smuzhiyun 	io_req->fcport = NULL;
472*4882a593Smuzhiyun 	io_req->alloc = 0;
473*4882a593Smuzhiyun 	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun 
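/*
 * DMA-map the SCSI scatter/gather list and translate it into the firmware
 * BD table for this request.  The request is classified as a fast SGE when
 * it is a read or has at most 8 elements; writes with an intermediate
 * element shorter than a page fall back to the slow SGE path.
 */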
476*4882a593Smuzhiyun static int qedf_map_sg(struct qedf_ioreq *io_req)
477*4882a593Smuzhiyun {
478*4882a593Smuzhiyun 	struct scsi_cmnd *sc = io_req->sc_cmd;
479*4882a593Smuzhiyun 	struct Scsi_Host *host = sc->device->host;
480*4882a593Smuzhiyun 	struct fc_lport *lport = shost_priv(host);
481*4882a593Smuzhiyun 	struct qedf_ctx *qedf = lport_priv(lport);
482*4882a593Smuzhiyun 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
483*4882a593Smuzhiyun 	struct scatterlist *sg;
484*4882a593Smuzhiyun 	int byte_count = 0;
485*4882a593Smuzhiyun 	int sg_count = 0;
486*4882a593Smuzhiyun 	int bd_count = 0;
487*4882a593Smuzhiyun 	u32 sg_len;
488*4882a593Smuzhiyun 	u64 addr;
489*4882a593Smuzhiyun 	int i = 0;
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
492*4882a593Smuzhiyun 	    scsi_sg_count(sc), sc->sc_data_direction);
493*4882a593Smuzhiyun 	sg = scsi_sglist(sc);
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun 	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
498*4882a593Smuzhiyun 		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 	scsi_for_each_sg(sc, sg, sg_count, i) {
501*4882a593Smuzhiyun 		sg_len = (u32)sg_dma_len(sg);
502*4882a593Smuzhiyun 		addr = (u64)sg_dma_address(sg);
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 		/*
505*4882a593Smuzhiyun 		 * For writes with more than 8 scatter/gather elements, any
506*4882a593Smuzhiyun 		 * intermediate element shorter than a page forces the
507*4882a593Smuzhiyun 		 * request onto the slow SGE path.
508*4882a593Smuzhiyun 		 */
509*4882a593Smuzhiyun 		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
510*4882a593Smuzhiyun 		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
511*4882a593Smuzhiyun 			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
514*4882a593Smuzhiyun 		bd[bd_count].sge_addr.hi  = cpu_to_le32(U64_HI(addr));
515*4882a593Smuzhiyun 		bd[bd_count].sge_len = cpu_to_le32(sg_len);
516*4882a593Smuzhiyun 
517*4882a593Smuzhiyun 		bd_count++;
518*4882a593Smuzhiyun 		byte_count += sg_len;
519*4882a593Smuzhiyun 	}
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	/* If neither FAST nor SLOW was set above, default to FAST */
522*4882a593Smuzhiyun 	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
523*4882a593Smuzhiyun 		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun 	if (byte_count != scsi_bufflen(sc))
526*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
527*4882a593Smuzhiyun 			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
528*4882a593Smuzhiyun 			   scsi_bufflen(sc), io_req->xid);
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 	return bd_count;
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
534*4882a593Smuzhiyun {
535*4882a593Smuzhiyun 	struct scsi_cmnd *sc = io_req->sc_cmd;
536*4882a593Smuzhiyun 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
537*4882a593Smuzhiyun 	int bd_count;
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 	if (scsi_sg_count(sc)) {
540*4882a593Smuzhiyun 		bd_count = qedf_map_sg(io_req);
541*4882a593Smuzhiyun 		if (bd_count == 0)
542*4882a593Smuzhiyun 			return -ENOMEM;
543*4882a593Smuzhiyun 	} else {
544*4882a593Smuzhiyun 		bd_count = 0;
545*4882a593Smuzhiyun 		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
546*4882a593Smuzhiyun 		bd[0].sge_len = 0;
547*4882a593Smuzhiyun 	}
548*4882a593Smuzhiyun 	io_req->bd_tbl->bd_valid = bd_count;
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	return 0;
551*4882a593Smuzhiyun }
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
554*4882a593Smuzhiyun 				  struct fcp_cmnd *fcp_cmnd)
555*4882a593Smuzhiyun {
556*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	/* fcp_cmnd is 32 bytes */
559*4882a593Smuzhiyun 	memset(fcp_cmnd, 0, FCP_CMND_LEN);
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun 	/* 8 bytes: SCSI LUN info */
562*4882a593Smuzhiyun 	int_to_scsilun(sc_cmd->device->lun,
563*4882a593Smuzhiyun 			(struct scsi_lun *)&fcp_cmnd->fc_lun);
564*4882a593Smuzhiyun 
565*4882a593Smuzhiyun 	/* 4 bytes: flag info */
566*4882a593Smuzhiyun 	fcp_cmnd->fc_pri_ta = 0;
567*4882a593Smuzhiyun 	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
568*4882a593Smuzhiyun 	fcp_cmnd->fc_flags = io_req->io_req_flags;
569*4882a593Smuzhiyun 	fcp_cmnd->fc_cmdref = 0;
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	/* Populate data direction */
572*4882a593Smuzhiyun 	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
573*4882a593Smuzhiyun 		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
574*4882a593Smuzhiyun 	} else {
575*4882a593Smuzhiyun 		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
576*4882a593Smuzhiyun 			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
577*4882a593Smuzhiyun 		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
578*4882a593Smuzhiyun 			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
579*4882a593Smuzhiyun 	}
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun 	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
582*4882a593Smuzhiyun 
583*4882a593Smuzhiyun 	/* 16 bytes: CDB information */
584*4882a593Smuzhiyun 	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
585*4882a593Smuzhiyun 		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun 	/* 4 bytes: FCP data length */
588*4882a593Smuzhiyun 	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun 
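/*
 * Initialize the firmware task context for a SCSI read/write: fill in
 * fcoe_task_params and the SGL parameters from the BD table, build the
 * FCP_CMND IU (byte-swapped to big endian) and hand everything to
 * init_initiator_rw_fcoe_task().
 */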
591*4882a593Smuzhiyun static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
592*4882a593Smuzhiyun 	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
593*4882a593Smuzhiyun 	struct fcoe_wqe *sqe)
594*4882a593Smuzhiyun {
595*4882a593Smuzhiyun 	enum fcoe_task_type task_type;
596*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
597*4882a593Smuzhiyun 	struct io_bdt *bd_tbl = io_req->bd_tbl;
598*4882a593Smuzhiyun 	u8 fcp_cmnd[32];
599*4882a593Smuzhiyun 	u32 tmp_fcp_cmnd[8];
600*4882a593Smuzhiyun 	int bd_count = 0;
601*4882a593Smuzhiyun 	struct qedf_ctx *qedf = fcport->qedf;
602*4882a593Smuzhiyun 	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
603*4882a593Smuzhiyun 	struct regpair sense_data_buffer_phys_addr;
604*4882a593Smuzhiyun 	u32 tx_io_size = 0;
605*4882a593Smuzhiyun 	u32 rx_io_size = 0;
606*4882a593Smuzhiyun 	int i, cnt;
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun 	/* Note init_initiator_rw_fcoe_task memsets the task context */
609*4882a593Smuzhiyun 	io_req->task = task_ctx;
610*4882a593Smuzhiyun 	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
611*4882a593Smuzhiyun 	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
612*4882a593Smuzhiyun 	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun 	/* Set task type based on DMA direction of command */
615*4882a593Smuzhiyun 	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
616*4882a593Smuzhiyun 		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
617*4882a593Smuzhiyun 	} else {
618*4882a593Smuzhiyun 		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
619*4882a593Smuzhiyun 			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
620*4882a593Smuzhiyun 			tx_io_size = io_req->data_xfer_len;
621*4882a593Smuzhiyun 		} else {
622*4882a593Smuzhiyun 			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
623*4882a593Smuzhiyun 			rx_io_size = io_req->data_xfer_len;
624*4882a593Smuzhiyun 		}
625*4882a593Smuzhiyun 	}
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun 	/* Setup the fields for fcoe_task_params */
628*4882a593Smuzhiyun 	io_req->task_params->context = task_ctx;
629*4882a593Smuzhiyun 	io_req->task_params->sqe = sqe;
630*4882a593Smuzhiyun 	io_req->task_params->task_type = task_type;
631*4882a593Smuzhiyun 	io_req->task_params->tx_io_size = tx_io_size;
632*4882a593Smuzhiyun 	io_req->task_params->rx_io_size = rx_io_size;
633*4882a593Smuzhiyun 	io_req->task_params->conn_cid = fcport->fw_cid;
634*4882a593Smuzhiyun 	io_req->task_params->itid = io_req->xid;
635*4882a593Smuzhiyun 	io_req->task_params->cq_rss_number = cq_idx;
636*4882a593Smuzhiyun 	io_req->task_params->is_tape_device = fcport->dev_type;
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 	/* Fill in information for scatter/gather list */
639*4882a593Smuzhiyun 	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
640*4882a593Smuzhiyun 		bd_count = bd_tbl->bd_valid;
641*4882a593Smuzhiyun 		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
642*4882a593Smuzhiyun 		io_req->sgl_task_params->sgl_phys_addr.lo =
643*4882a593Smuzhiyun 			U64_LO(bd_tbl->bd_tbl_dma);
644*4882a593Smuzhiyun 		io_req->sgl_task_params->sgl_phys_addr.hi =
645*4882a593Smuzhiyun 			U64_HI(bd_tbl->bd_tbl_dma);
646*4882a593Smuzhiyun 		io_req->sgl_task_params->num_sges = bd_count;
647*4882a593Smuzhiyun 		io_req->sgl_task_params->total_buffer_size =
648*4882a593Smuzhiyun 		    scsi_bufflen(io_req->sc_cmd);
649*4882a593Smuzhiyun 		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
650*4882a593Smuzhiyun 			io_req->sgl_task_params->small_mid_sge = 1;
651*4882a593Smuzhiyun 		else
652*4882a593Smuzhiyun 			io_req->sgl_task_params->small_mid_sge = 0;
653*4882a593Smuzhiyun 	}
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	/* Fill in physical address of sense buffer */
656*4882a593Smuzhiyun 	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
657*4882a593Smuzhiyun 	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	/* fill FCP_CMND IU */
660*4882a593Smuzhiyun 	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	/* Swap fcp_cmnd since FC is big endian */
663*4882a593Smuzhiyun 	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
664*4882a593Smuzhiyun 	for (i = 0; i < cnt; i++) {
665*4882a593Smuzhiyun 		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
666*4882a593Smuzhiyun 	}
667*4882a593Smuzhiyun 	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 	init_initiator_rw_fcoe_task(io_req->task_params,
670*4882a593Smuzhiyun 				    io_req->sgl_task_params,
671*4882a593Smuzhiyun 				    sense_data_buffer_phys_addr,
672*4882a593Smuzhiyun 				    io_req->task_retry_identifier, fcp_cmnd);
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun 	/* Increment SGL type counters */
675*4882a593Smuzhiyun 	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
676*4882a593Smuzhiyun 		qedf->slow_sge_ios++;
677*4882a593Smuzhiyun 	else
678*4882a593Smuzhiyun 		qedf->fast_sge_ios++;
679*4882a593Smuzhiyun }
680*4882a593Smuzhiyun 
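/*
 * Initialize a midpath (ELS/CT) task: single-SGE request and response
 * buffers, FC header fields taken from the prebuilt request header, and
 * completion steered to CQ 0.
 */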
681*4882a593Smuzhiyun void qedf_init_mp_task(struct qedf_ioreq *io_req,
682*4882a593Smuzhiyun 	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
683*4882a593Smuzhiyun {
684*4882a593Smuzhiyun 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
685*4882a593Smuzhiyun 	struct qedf_rport *fcport = io_req->fcport;
686*4882a593Smuzhiyun 	struct qedf_ctx *qedf = io_req->fcport->qedf;
687*4882a593Smuzhiyun 	struct fc_frame_header *fc_hdr;
688*4882a593Smuzhiyun 	struct fcoe_tx_mid_path_params task_fc_hdr;
689*4882a593Smuzhiyun 	struct scsi_sgl_task_params tx_sgl_task_params;
690*4882a593Smuzhiyun 	struct scsi_sgl_task_params rx_sgl_task_params;
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
693*4882a593Smuzhiyun 		  "Initializing MP task for cmd_type=%d\n",
694*4882a593Smuzhiyun 		  io_req->cmd_type);
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	qedf->control_requests++;
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
699*4882a593Smuzhiyun 	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
700*4882a593Smuzhiyun 	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
701*4882a593Smuzhiyun 	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	/* Setup the task from io_req for easy reference */
704*4882a593Smuzhiyun 	io_req->task = task_ctx;
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	/* Setup the fields for fcoe_task_params */
707*4882a593Smuzhiyun 	io_req->task_params->context = task_ctx;
708*4882a593Smuzhiyun 	io_req->task_params->sqe = sqe;
709*4882a593Smuzhiyun 	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
710*4882a593Smuzhiyun 	io_req->task_params->tx_io_size = io_req->data_xfer_len;
711*4882a593Smuzhiyun 	/* rx_io_size tells the f/w how large a response buffer we have */
712*4882a593Smuzhiyun 	io_req->task_params->rx_io_size = PAGE_SIZE;
713*4882a593Smuzhiyun 	io_req->task_params->conn_cid = fcport->fw_cid;
714*4882a593Smuzhiyun 	io_req->task_params->itid = io_req->xid;
715*4882a593Smuzhiyun 	/* Return middle path commands on CQ 0 */
716*4882a593Smuzhiyun 	io_req->task_params->cq_rss_number = 0;
717*4882a593Smuzhiyun 	io_req->task_params->is_tape_device = fcport->dev_type;
718*4882a593Smuzhiyun 
719*4882a593Smuzhiyun 	fc_hdr = &(mp_req->req_fc_hdr);
720*4882a593Smuzhiyun 	/* Set OX_ID and RX_ID based on driver task id */
721*4882a593Smuzhiyun 	fc_hdr->fh_ox_id = io_req->xid;
722*4882a593Smuzhiyun 	fc_hdr->fh_rx_id = htons(0xffff);
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 	/* Set up FC header information */
725*4882a593Smuzhiyun 	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
726*4882a593Smuzhiyun 	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
727*4882a593Smuzhiyun 	task_fc_hdr.type = fc_hdr->fh_type;
728*4882a593Smuzhiyun 	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
729*4882a593Smuzhiyun 	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
730*4882a593Smuzhiyun 	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
731*4882a593Smuzhiyun 	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun 	/* Set up s/g list parameters for request buffer */
734*4882a593Smuzhiyun 	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
735*4882a593Smuzhiyun 	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
736*4882a593Smuzhiyun 	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
737*4882a593Smuzhiyun 	tx_sgl_task_params.num_sges = 1;
738*4882a593Smuzhiyun 	/* The single request SGE covers the full midpath payload */
739*4882a593Smuzhiyun 	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
740*4882a593Smuzhiyun 	tx_sgl_task_params.small_mid_sge = 0;
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun 	/* Set up s/g list parameters for response buffer */
743*4882a593Smuzhiyun 	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
744*4882a593Smuzhiyun 	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
745*4882a593Smuzhiyun 	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
746*4882a593Smuzhiyun 	rx_sgl_task_params.num_sges = 1;
747*4882a593Smuzhiyun 	/* The single response SGE is one page, matching rx_io_size above */
748*4882a593Smuzhiyun 	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
749*4882a593Smuzhiyun 	rx_sgl_task_params.small_mid_sge = 0;
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 	/*
753*4882a593Smuzhiyun 	 * Last arg is 0 as previous code did not set that we wanted the
754*4882a593Smuzhiyun 	 * fc header information.
755*4882a593Smuzhiyun 	 */
756*4882a593Smuzhiyun 	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
757*4882a593Smuzhiyun 						     &task_fc_hdr,
758*4882a593Smuzhiyun 						     &tx_sgl_task_params,
759*4882a593Smuzhiyun 						     &rx_sgl_task_params, 0);
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun 
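/*
 * Return the current send-queue producer index and advance both the driver
 * and firmware producer indices; the driver index wraps at the ring size.
 */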
762*4882a593Smuzhiyun /* Presumed that fcport->rport_lock is held */
763*4882a593Smuzhiyun u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
764*4882a593Smuzhiyun {
765*4882a593Smuzhiyun 	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
766*4882a593Smuzhiyun 	u16 rval;
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	rval = fcport->sq_prod_idx;
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	/* Adjust ring index */
771*4882a593Smuzhiyun 	fcport->sq_prod_idx++;
772*4882a593Smuzhiyun 	fcport->fw_sq_prod_idx++;
773*4882a593Smuzhiyun 	if (fcport->sq_prod_idx == total_sqe)
774*4882a593Smuzhiyun 		fcport->sq_prod_idx = 0;
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	return rval;
777*4882a593Smuzhiyun }
778*4882a593Smuzhiyun 
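/*
 * Notify the firmware of new SQEs by writing the current firmware SQ
 * producer index to the connection's doorbell register.
 */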
779*4882a593Smuzhiyun void qedf_ring_doorbell(struct qedf_rport *fcport)
780*4882a593Smuzhiyun {
781*4882a593Smuzhiyun 	struct fcoe_db_data dbell = { 0 };
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	dbell.agg_flags = 0;
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
786*4882a593Smuzhiyun 	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
787*4882a593Smuzhiyun 	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
788*4882a593Smuzhiyun 	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	dbell.sq_prod = fcport->fw_sq_prod_idx;
791*4882a593Smuzhiyun 	/* wmb makes sure that the BD data is updated before updating the
792*4882a593Smuzhiyun 	 * producer, otherwise FW may read old data from the BDs.
793*4882a593Smuzhiyun 	 */
794*4882a593Smuzhiyun 	wmb();
795*4882a593Smuzhiyun 	barrier();
796*4882a593Smuzhiyun 	writel(*(u32 *)&dbell, fcport->p_doorbell);
797*4882a593Smuzhiyun 	/*
798*4882a593Smuzhiyun 	 * Fence required to flush the write combined buffer, since another
799*4882a593Smuzhiyun 	 * CPU may write to the same doorbell address and data may be lost
800*4882a593Smuzhiyun 	 * due to relaxed order nature of write combined bar.
801*4882a593Smuzhiyun 	 */
802*4882a593Smuzhiyun 	wmb();
803*4882a593Smuzhiyun }
804*4882a593Smuzhiyun 
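/*
 * Record an entry in the driver's circular I/O trace buffer (request or
 * response direction) under io_trace_lock for later debugging.
 */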
805*4882a593Smuzhiyun static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
806*4882a593Smuzhiyun 			  int8_t direction)
807*4882a593Smuzhiyun {
808*4882a593Smuzhiyun 	struct qedf_ctx *qedf = fcport->qedf;
809*4882a593Smuzhiyun 	struct qedf_io_log *io_log;
810*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
811*4882a593Smuzhiyun 	unsigned long flags;
812*4882a593Smuzhiyun 	uint8_t op;
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 	spin_lock_irqsave(&qedf->io_trace_lock, flags);
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
817*4882a593Smuzhiyun 	io_log->direction = direction;
818*4882a593Smuzhiyun 	io_log->task_id = io_req->xid;
819*4882a593Smuzhiyun 	io_log->port_id = fcport->rdata->ids.port_id;
820*4882a593Smuzhiyun 	io_log->lun = sc_cmd->device->lun;
821*4882a593Smuzhiyun 	io_log->op = op = sc_cmd->cmnd[0];
822*4882a593Smuzhiyun 	io_log->lba[0] = sc_cmd->cmnd[2];
823*4882a593Smuzhiyun 	io_log->lba[1] = sc_cmd->cmnd[3];
824*4882a593Smuzhiyun 	io_log->lba[2] = sc_cmd->cmnd[4];
825*4882a593Smuzhiyun 	io_log->lba[3] = sc_cmd->cmnd[5];
826*4882a593Smuzhiyun 	io_log->bufflen = scsi_bufflen(sc_cmd);
827*4882a593Smuzhiyun 	io_log->sg_count = scsi_sg_count(sc_cmd);
828*4882a593Smuzhiyun 	io_log->result = sc_cmd->result;
829*4882a593Smuzhiyun 	io_log->jiffies = jiffies;
830*4882a593Smuzhiyun 	io_log->refcount = kref_read(&io_req->refcount);
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	if (direction == QEDF_IO_TRACE_REQ) {
833*4882a593Smuzhiyun 		/* For requests we only care about the submission CPU */
834*4882a593Smuzhiyun 		io_log->req_cpu = io_req->cpu;
835*4882a593Smuzhiyun 		io_log->int_cpu = 0;
836*4882a593Smuzhiyun 		io_log->rsp_cpu = 0;
837*4882a593Smuzhiyun 	} else if (direction == QEDF_IO_TRACE_RSP) {
838*4882a593Smuzhiyun 		io_log->req_cpu = io_req->cpu;
839*4882a593Smuzhiyun 		io_log->int_cpu = io_req->int_cpu;
840*4882a593Smuzhiyun 		io_log->rsp_cpu = smp_processor_id();
841*4882a593Smuzhiyun 	}
842*4882a593Smuzhiyun 
843*4882a593Smuzhiyun 	io_log->sge_type = io_req->sge_type;
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun 	qedf->io_trace_idx++;
846*4882a593Smuzhiyun 	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
847*4882a593Smuzhiyun 		qedf->io_trace_idx = 0;
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun 	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
850*4882a593Smuzhiyun }
851*4882a593Smuzhiyun 
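/*
 * Post an initialized SCSI command to the firmware: build the BD list from
 * the scatter/gather list, grab the next SQE and task context, initialize
 * the task and ring the doorbell.  Called with fcport->rport_lock held by
 * qedf_queuecommand().
 */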
852*4882a593Smuzhiyun int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
853*4882a593Smuzhiyun {
854*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
855*4882a593Smuzhiyun 	struct Scsi_Host *host = sc_cmd->device->host;
856*4882a593Smuzhiyun 	struct fc_lport *lport = shost_priv(host);
857*4882a593Smuzhiyun 	struct qedf_ctx *qedf = lport_priv(lport);
858*4882a593Smuzhiyun 	struct e4_fcoe_task_context *task_ctx;
859*4882a593Smuzhiyun 	u16 xid;
860*4882a593Smuzhiyun 	struct fcoe_wqe *sqe;
861*4882a593Smuzhiyun 	u16 sqe_idx;
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	/* Initialize rest of io_req fields */
864*4882a593Smuzhiyun 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
865*4882a593Smuzhiyun 	sc_cmd->SCp.ptr = (char *)io_req;
866*4882a593Smuzhiyun 	io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
867*4882a593Smuzhiyun 
868*4882a593Smuzhiyun 	/* Record which cpu this request is associated with */
869*4882a593Smuzhiyun 	io_req->cpu = smp_processor_id();
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
872*4882a593Smuzhiyun 		io_req->io_req_flags = QEDF_READ;
873*4882a593Smuzhiyun 		qedf->input_requests++;
874*4882a593Smuzhiyun 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
875*4882a593Smuzhiyun 		io_req->io_req_flags = QEDF_WRITE;
876*4882a593Smuzhiyun 		qedf->output_requests++;
877*4882a593Smuzhiyun 	} else {
878*4882a593Smuzhiyun 		io_req->io_req_flags = 0;
879*4882a593Smuzhiyun 		qedf->control_requests++;
880*4882a593Smuzhiyun 	}
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun 	xid = io_req->xid;
883*4882a593Smuzhiyun 
884*4882a593Smuzhiyun 	/* Build buffer descriptor list for firmware from sg list */
885*4882a593Smuzhiyun 	if (qedf_build_bd_list_from_sg(io_req)) {
886*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
887*4882a593Smuzhiyun 		/* Release cmd will release io_req, but sc_cmd is assigned */
888*4882a593Smuzhiyun 		io_req->sc_cmd = NULL;
889*4882a593Smuzhiyun 		kref_put(&io_req->refcount, qedf_release_cmd);
890*4882a593Smuzhiyun 		return -EAGAIN;
891*4882a593Smuzhiyun 	}
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
894*4882a593Smuzhiyun 	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
895*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
896*4882a593Smuzhiyun 		/* Release cmd will release io_req, but sc_cmd is assigned */
897*4882a593Smuzhiyun 		io_req->sc_cmd = NULL;
898*4882a593Smuzhiyun 		kref_put(&io_req->refcount, qedf_release_cmd);
899*4882a593Smuzhiyun 		return -EINVAL;
900*4882a593Smuzhiyun 	}
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 	/* Record LUN number for later use if we need it */
903*4882a593Smuzhiyun 	io_req->lun = (int)sc_cmd->device->lun;
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 	/* Obtain free SQE */
906*4882a593Smuzhiyun 	sqe_idx = qedf_get_sqe_idx(fcport);
907*4882a593Smuzhiyun 	sqe = &fcport->sq[sqe_idx];
908*4882a593Smuzhiyun 	memset(sqe, 0, sizeof(struct fcoe_wqe));
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	/* Get the task context */
911*4882a593Smuzhiyun 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
912*4882a593Smuzhiyun 	if (!task_ctx) {
913*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
914*4882a593Smuzhiyun 			   xid);
915*4882a593Smuzhiyun 		/* Release cmd will release io_req, but sc_cmd is assigned */
916*4882a593Smuzhiyun 		io_req->sc_cmd = NULL;
917*4882a593Smuzhiyun 		kref_put(&io_req->refcount, qedf_release_cmd);
918*4882a593Smuzhiyun 		return -EINVAL;
919*4882a593Smuzhiyun 	}
920*4882a593Smuzhiyun 
921*4882a593Smuzhiyun 	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
922*4882a593Smuzhiyun 
923*4882a593Smuzhiyun 	/* Ring doorbell */
924*4882a593Smuzhiyun 	qedf_ring_doorbell(fcport);
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	/* Set that command is with the firmware now */
927*4882a593Smuzhiyun 	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	if (qedf_io_tracing && io_req->sc_cmd)
930*4882a593Smuzhiyun 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	return false;
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun 
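/*
 * SCSI host template .queuecommand entry point.  Fails the command with
 * DID_NO_CONNECT when the driver is unloading or MSI-X is unavailable, and
 * returns SCSI_MLQUEUE_* busy codes to flow off the midlayer while the
 * link or session is not ready or a retry delay is in effect.
 */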
935*4882a593Smuzhiyun int
936*4882a593Smuzhiyun qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
937*4882a593Smuzhiyun {
938*4882a593Smuzhiyun 	struct fc_lport *lport = shost_priv(host);
939*4882a593Smuzhiyun 	struct qedf_ctx *qedf = lport_priv(lport);
940*4882a593Smuzhiyun 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
941*4882a593Smuzhiyun 	struct fc_rport_libfc_priv *rp = rport->dd_data;
942*4882a593Smuzhiyun 	struct qedf_rport *fcport;
943*4882a593Smuzhiyun 	struct qedf_ioreq *io_req;
944*4882a593Smuzhiyun 	int rc = 0;
945*4882a593Smuzhiyun 	int rval;
946*4882a593Smuzhiyun 	unsigned long flags = 0;
947*4882a593Smuzhiyun 	int num_sgs = 0;
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun 	num_sgs = scsi_sg_count(sc_cmd);
950*4882a593Smuzhiyun 	if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
951*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
952*4882a593Smuzhiyun 			 "Number of SG elements %d exceeds the hardware limitation of %d.\n",
953*4882a593Smuzhiyun 			 num_sgs, QEDF_MAX_BDS_PER_CMD);
954*4882a593Smuzhiyun 		sc_cmd->result = DID_ERROR << 16;
955*4882a593Smuzhiyun 		sc_cmd->scsi_done(sc_cmd);
956*4882a593Smuzhiyun 		return 0;
957*4882a593Smuzhiyun 	}
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
960*4882a593Smuzhiyun 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
961*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
962*4882a593Smuzhiyun 			  "Returning DNC as unloading or stop io, flags 0x%lx.\n",
963*4882a593Smuzhiyun 			  qedf->flags);
964*4882a593Smuzhiyun 		sc_cmd->result = DID_NO_CONNECT << 16;
965*4882a593Smuzhiyun 		sc_cmd->scsi_done(sc_cmd);
966*4882a593Smuzhiyun 		return 0;
967*4882a593Smuzhiyun 	}
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 	if (!qedf->pdev->msix_enabled) {
970*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
971*4882a593Smuzhiyun 		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
972*4882a593Smuzhiyun 		    sc_cmd);
973*4882a593Smuzhiyun 		sc_cmd->result = DID_NO_CONNECT << 16;
974*4882a593Smuzhiyun 		sc_cmd->scsi_done(sc_cmd);
975*4882a593Smuzhiyun 		return 0;
976*4882a593Smuzhiyun 	}
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 	rval = fc_remote_port_chkready(rport);
979*4882a593Smuzhiyun 	if (rval) {
980*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
981*4882a593Smuzhiyun 			  "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
982*4882a593Smuzhiyun 			  rval, rport->port_id);
983*4882a593Smuzhiyun 		sc_cmd->result = rval;
984*4882a593Smuzhiyun 		sc_cmd->scsi_done(sc_cmd);
985*4882a593Smuzhiyun 		return 0;
986*4882a593Smuzhiyun 	}
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	/* Retry command if we are doing a qed drain operation */
989*4882a593Smuzhiyun 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
990*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
991*4882a593Smuzhiyun 		rc = SCSI_MLQUEUE_HOST_BUSY;
992*4882a593Smuzhiyun 		goto exit_qcmd;
993*4882a593Smuzhiyun 	}
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun 	if (lport->state != LPORT_ST_READY ||
996*4882a593Smuzhiyun 	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
997*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
998*4882a593Smuzhiyun 		rc = SCSI_MLQUEUE_HOST_BUSY;
999*4882a593Smuzhiyun 		goto exit_qcmd;
1000*4882a593Smuzhiyun 	}
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	/* rport and tgt are allocated together, so tgt should be non-NULL */
1003*4882a593Smuzhiyun 	fcport = (struct qedf_rport *)&rp[1];
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1006*4882a593Smuzhiyun 	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1007*4882a593Smuzhiyun 		/*
1008*4882a593Smuzhiyun 		 * Session is not offloaded yet. Let SCSI-ml retry
1009*4882a593Smuzhiyun 		 * the command.
1010*4882a593Smuzhiyun 		 */
1011*4882a593Smuzhiyun 		rc = SCSI_MLQUEUE_TARGET_BUSY;
1012*4882a593Smuzhiyun 		goto exit_qcmd;
1013*4882a593Smuzhiyun 	}
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	atomic_inc(&fcport->ios_to_queue);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	if (fcport->retry_delay_timestamp) {
1018*4882a593Smuzhiyun 		/* Take fcport->rport_lock for resetting the delay_timestamp */
1019*4882a593Smuzhiyun 		spin_lock_irqsave(&fcport->rport_lock, flags);
1020*4882a593Smuzhiyun 		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1021*4882a593Smuzhiyun 			fcport->retry_delay_timestamp = 0;
1022*4882a593Smuzhiyun 		} else {
1023*4882a593Smuzhiyun 			spin_unlock_irqrestore(&fcport->rport_lock, flags);
1024*4882a593Smuzhiyun 			/* If retry_delay timer is active, flow off the ML */
1025*4882a593Smuzhiyun 			rc = SCSI_MLQUEUE_TARGET_BUSY;
1026*4882a593Smuzhiyun 			atomic_dec(&fcport->ios_to_queue);
1027*4882a593Smuzhiyun 			goto exit_qcmd;
1028*4882a593Smuzhiyun 		}
1029*4882a593Smuzhiyun 		spin_unlock_irqrestore(&fcport->rport_lock, flags);
1030*4882a593Smuzhiyun 	}
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1033*4882a593Smuzhiyun 	if (!io_req) {
1034*4882a593Smuzhiyun 		rc = SCSI_MLQUEUE_HOST_BUSY;
1035*4882a593Smuzhiyun 		atomic_dec(&fcport->ios_to_queue);
1036*4882a593Smuzhiyun 		goto exit_qcmd;
1037*4882a593Smuzhiyun 	}
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	io_req->sc_cmd = sc_cmd;
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 	/* Take fcport->rport_lock for posting to fcport send queue */
1042*4882a593Smuzhiyun 	spin_lock_irqsave(&fcport->rport_lock, flags);
1043*4882a593Smuzhiyun 	if (qedf_post_io_req(fcport, io_req)) {
1044*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1045*4882a593Smuzhiyun 		/* Return SQE to pool */
1046*4882a593Smuzhiyun 		atomic_inc(&fcport->free_sqes);
1047*4882a593Smuzhiyun 		rc = SCSI_MLQUEUE_HOST_BUSY;
1048*4882a593Smuzhiyun 	}
1049*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1050*4882a593Smuzhiyun 	atomic_dec(&fcport->ios_to_queue);
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun exit_qcmd:
1053*4882a593Smuzhiyun 	return rc;
1054*4882a593Smuzhiyun }
1055*4882a593Smuzhiyun 
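/*
 * Decode the FCP_RSP IU from the completion: residual count, SCSI status,
 * optional response-info code (task management) and sense data, which is
 * copied into the midlayer's sense buffer.
 */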
1056*4882a593Smuzhiyun static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1057*4882a593Smuzhiyun 				 struct fcoe_cqe_rsp_info *fcp_rsp)
1058*4882a593Smuzhiyun {
1059*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1060*4882a593Smuzhiyun 	struct qedf_ctx *qedf = io_req->fcport->qedf;
1061*4882a593Smuzhiyun 	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1062*4882a593Smuzhiyun 	int fcp_sns_len = 0;
1063*4882a593Smuzhiyun 	int fcp_rsp_len = 0;
1064*4882a593Smuzhiyun 	uint8_t *rsp_info, *sense_data;
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun 	io_req->fcp_status = FC_GOOD;
1067*4882a593Smuzhiyun 	io_req->fcp_resid = 0;
1068*4882a593Smuzhiyun 	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1069*4882a593Smuzhiyun 	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1070*4882a593Smuzhiyun 		io_req->fcp_resid = fcp_rsp->fcp_resid;
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	io_req->scsi_comp_flags = rsp_flags;
1073*4882a593Smuzhiyun 	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1074*4882a593Smuzhiyun 	    fcp_rsp->scsi_status_code;
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 	if (rsp_flags &
1077*4882a593Smuzhiyun 	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1078*4882a593Smuzhiyun 		fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	if (rsp_flags &
1081*4882a593Smuzhiyun 	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1082*4882a593Smuzhiyun 		fcp_sns_len = fcp_rsp->fcp_sns_len;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	io_req->fcp_rsp_len = fcp_rsp_len;
1085*4882a593Smuzhiyun 	io_req->fcp_sns_len = fcp_sns_len;
1086*4882a593Smuzhiyun 	rsp_info = sense_data = io_req->sense_buffer;
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	/* fetch fcp_rsp_code */
1089*4882a593Smuzhiyun 	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1090*4882a593Smuzhiyun 		/* Only for task management function */
1091*4882a593Smuzhiyun 		io_req->fcp_rsp_code = rsp_info[3];
1092*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1093*4882a593Smuzhiyun 		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1094*4882a593Smuzhiyun 		/* Adjust sense-data location. */
1095*4882a593Smuzhiyun 		sense_data += fcp_rsp_len;
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1099*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1100*4882a593Smuzhiyun 		    "Truncating sense buffer\n");
1101*4882a593Smuzhiyun 		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1102*4882a593Smuzhiyun 	}
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	/* The sense buffer can be NULL for TMF commands */
1105*4882a593Smuzhiyun 	if (sc_cmd->sense_buffer) {
1106*4882a593Smuzhiyun 		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1107*4882a593Smuzhiyun 		if (fcp_sns_len)
1108*4882a593Smuzhiyun 			memcpy(sc_cmd->sense_buffer, sense_data,
1109*4882a593Smuzhiyun 			    fcp_sns_len);
1110*4882a593Smuzhiyun 	}
1111*4882a593Smuzhiyun }
1112*4882a593Smuzhiyun 
qedf_unmap_sg_list(struct qedf_ctx * qedf,struct qedf_ioreq * io_req)1113*4882a593Smuzhiyun static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1114*4882a593Smuzhiyun {
1115*4882a593Smuzhiyun 	struct scsi_cmnd *sc = io_req->sc_cmd;
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1118*4882a593Smuzhiyun 		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1119*4882a593Smuzhiyun 		    scsi_sg_count(sc), sc->sc_data_direction);
1120*4882a593Smuzhiyun 		io_req->bd_tbl->bd_valid = 0;
1121*4882a593Smuzhiyun 	}
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun 
qedf_scsi_completion(struct qedf_ctx * qedf,struct fcoe_cqe * cqe,struct qedf_ioreq * io_req)1124*4882a593Smuzhiyun void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1125*4882a593Smuzhiyun 	struct qedf_ioreq *io_req)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd;
1128*4882a593Smuzhiyun 	struct fcoe_cqe_rsp_info *fcp_rsp;
1129*4882a593Smuzhiyun 	struct qedf_rport *fcport;
1130*4882a593Smuzhiyun 	int refcount;
1131*4882a593Smuzhiyun 	u16 scope, qualifier = 0;
1132*4882a593Smuzhiyun 	u8 fw_residual_flag = 0;
1133*4882a593Smuzhiyun 	unsigned long flags = 0;
1134*4882a593Smuzhiyun 	u16 chk_scope = 0;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	if (!io_req)
1137*4882a593Smuzhiyun 		return;
1138*4882a593Smuzhiyun 	if (!cqe)
1139*4882a593Smuzhiyun 		return;
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1142*4882a593Smuzhiyun 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1143*4882a593Smuzhiyun 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1144*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1145*4882a593Smuzhiyun 			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1146*4882a593Smuzhiyun 			 io_req->xid);
1147*4882a593Smuzhiyun 		return;
1148*4882a593Smuzhiyun 	}
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	sc_cmd = io_req->sc_cmd;
1151*4882a593Smuzhiyun 	fcp_rsp = &cqe->cqe_info.rsp_info;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	if (!sc_cmd) {
1154*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1155*4882a593Smuzhiyun 		return;
1156*4882a593Smuzhiyun 	}
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 	if (!sc_cmd->SCp.ptr) {
1159*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1160*4882a593Smuzhiyun 		    "another context.\n");
1161*4882a593Smuzhiyun 		return;
1162*4882a593Smuzhiyun 	}
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	if (!sc_cmd->device) {
1165*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1166*4882a593Smuzhiyun 			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
1167*4882a593Smuzhiyun 		return;
1168*4882a593Smuzhiyun 	}
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	if (!sc_cmd->request) {
1171*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1172*4882a593Smuzhiyun 		    "sc_cmd=%p.\n", sc_cmd);
1173*4882a593Smuzhiyun 		return;
1174*4882a593Smuzhiyun 	}
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	if (!sc_cmd->request->q) {
1177*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1178*4882a593Smuzhiyun 		   "is not valid, sc_cmd=%p.\n", sc_cmd);
1179*4882a593Smuzhiyun 		return;
1180*4882a593Smuzhiyun 	}
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	fcport = io_req->fcport;
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	/*
1185*4882a593Smuzhiyun 	 * When flush is active, let the cmds be completed from the cleanup
1186*4882a593Smuzhiyun 	 * context
1187*4882a593Smuzhiyun 	 */
1188*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1189*4882a593Smuzhiyun 	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1190*4882a593Smuzhiyun 	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1191*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1192*4882a593Smuzhiyun 			  "Dropping good completion xid=0x%x as fcport is flushing",
1193*4882a593Smuzhiyun 			  io_req->xid);
1194*4882a593Smuzhiyun 		return;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	qedf_unmap_sg_list(qedf, io_req);
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	/* Check for FCP transport error */
1202*4882a593Smuzhiyun 	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1203*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx),
1204*4882a593Smuzhiyun 		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1205*4882a593Smuzhiyun 		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1206*4882a593Smuzhiyun 		    io_req->fcp_rsp_code);
1207*4882a593Smuzhiyun 		sc_cmd->result = DID_BUS_BUSY << 16;
1208*4882a593Smuzhiyun 		goto out;
1209*4882a593Smuzhiyun 	}
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1212*4882a593Smuzhiyun 	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1213*4882a593Smuzhiyun 	if (fw_residual_flag) {
1214*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1215*4882a593Smuzhiyun 			 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1216*4882a593Smuzhiyun 			 io_req->xid, fcp_rsp->rsp_flags.flags,
1217*4882a593Smuzhiyun 			 io_req->fcp_resid,
1218*4882a593Smuzhiyun 			 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1219*4882a593Smuzhiyun 			 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 		if (io_req->cdb_status == 0)
1222*4882a593Smuzhiyun 			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1223*4882a593Smuzhiyun 		else
1224*4882a593Smuzhiyun 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 		/*
1227*4882a593Smuzhiyun 		 * Set resid to the whole buffer length so we won't try to reuse
1228*4882a593Smuzhiyun 		 * any previously read data.
1229*4882a593Smuzhiyun 		 */
1230*4882a593Smuzhiyun 		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1231*4882a593Smuzhiyun 		goto out;
1232*4882a593Smuzhiyun 	}
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	switch (io_req->fcp_status) {
1235*4882a593Smuzhiyun 	case FC_GOOD:
1236*4882a593Smuzhiyun 		if (io_req->cdb_status == 0) {
1237*4882a593Smuzhiyun 			/* Good I/O completion */
1238*4882a593Smuzhiyun 			sc_cmd->result = DID_OK << 16;
1239*4882a593Smuzhiyun 		} else {
1240*4882a593Smuzhiyun 			refcount = kref_read(&io_req->refcount);
1241*4882a593Smuzhiyun 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1242*4882a593Smuzhiyun 			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1243*4882a593Smuzhiyun 			    "lba=%02x%02x%02x%02x cdb_status=%d "
1244*4882a593Smuzhiyun 			    "fcp_resid=0x%x refcount=%d.\n",
1245*4882a593Smuzhiyun 			    qedf->lport->host->host_no, sc_cmd->device->id,
1246*4882a593Smuzhiyun 			    sc_cmd->device->lun, io_req->xid,
1247*4882a593Smuzhiyun 			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1248*4882a593Smuzhiyun 			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1249*4882a593Smuzhiyun 			    io_req->cdb_status, io_req->fcp_resid,
1250*4882a593Smuzhiyun 			    refcount);
1251*4882a593Smuzhiyun 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1254*4882a593Smuzhiyun 			    io_req->cdb_status == SAM_STAT_BUSY) {
1255*4882a593Smuzhiyun 				/*
1256*4882a593Smuzhiyun 				 * Check whether we need to set retry_delay at
1257*4882a593Smuzhiyun 				 * all based on retry_delay module parameter
1258*4882a593Smuzhiyun 				 * and the status qualifier.
1259*4882a593Smuzhiyun 				 */
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 				/* Upper 2 bits */
1262*4882a593Smuzhiyun 				scope = fcp_rsp->retry_delay_timer & 0xC000;
1263*4882a593Smuzhiyun 				/* Lower 14 bits */
1264*4882a593Smuzhiyun 				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
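				/*
				 * The lower 14 bits give the retry delay
				 * qualifier in 100 ms units; it is converted
				 * to jiffies below as qualifier * HZ / 10
				 * when setting fcport->retry_delay_timestamp.
				 */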
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 				if (qedf_retry_delay)
1267*4882a593Smuzhiyun 					chk_scope = 1;
1268*4882a593Smuzhiyun 				/* Record stats */
1269*4882a593Smuzhiyun 				if (io_req->cdb_status ==
1270*4882a593Smuzhiyun 				    SAM_STAT_TASK_SET_FULL)
1271*4882a593Smuzhiyun 					qedf->task_set_fulls++;
1272*4882a593Smuzhiyun 				else
1273*4882a593Smuzhiyun 					qedf->busy++;
1274*4882a593Smuzhiyun 			}
1275*4882a593Smuzhiyun 		}
1276*4882a593Smuzhiyun 		if (io_req->fcp_resid)
1277*4882a593Smuzhiyun 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 		if (chk_scope == 1) {
1280*4882a593Smuzhiyun 			if ((scope == 1 || scope == 2) &&
1281*4882a593Smuzhiyun 			    (qualifier > 0 && qualifier <= 0x3FEF)) {
1282*4882a593Smuzhiyun 				/* Check we don't go over the max */
1283*4882a593Smuzhiyun 				if (qualifier > QEDF_RETRY_DELAY_MAX) {
1284*4882a593Smuzhiyun 					qualifier = QEDF_RETRY_DELAY_MAX;
1285*4882a593Smuzhiyun 					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1286*4882a593Smuzhiyun 						  "qualifier = %d\n",
1287*4882a593Smuzhiyun 						  (fcp_rsp->retry_delay_timer &
1288*4882a593Smuzhiyun 						  0x3FFF));
1289*4882a593Smuzhiyun 				}
1290*4882a593Smuzhiyun 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1291*4882a593Smuzhiyun 					  "Scope = %d and qualifier = %d",
1292*4882a593Smuzhiyun 					  scope, qualifier);
1293*4882a593Smuzhiyun 				/*  Take fcport->rport_lock to
1294*4882a593Smuzhiyun 				 *  update the retry_delay_timestamp
1295*4882a593Smuzhiyun 				 */
1296*4882a593Smuzhiyun 				spin_lock_irqsave(&fcport->rport_lock, flags);
1297*4882a593Smuzhiyun 				fcport->retry_delay_timestamp =
1298*4882a593Smuzhiyun 					jiffies + (qualifier * HZ / 10);
1299*4882a593Smuzhiyun 				spin_unlock_irqrestore(&fcport->rport_lock,
1300*4882a593Smuzhiyun 						       flags);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 			} else {
1303*4882a593Smuzhiyun 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1304*4882a593Smuzhiyun 					  "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1305*4882a593Smuzhiyun 					  scope, qualifier);
1306*4882a593Smuzhiyun 			}
1307*4882a593Smuzhiyun 		}
1308*4882a593Smuzhiyun 		break;
1309*4882a593Smuzhiyun 	default:
1310*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1311*4882a593Smuzhiyun 			   io_req->fcp_status);
1312*4882a593Smuzhiyun 		break;
1313*4882a593Smuzhiyun 	}
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun out:
1316*4882a593Smuzhiyun 	if (qedf_io_tracing)
1317*4882a593Smuzhiyun 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 	/*
1320*4882a593Smuzhiyun 	 * We wait till the end of the function to clear the
1321*4882a593Smuzhiyun 	 * outstanding bit in case we need to send an abort
1322*4882a593Smuzhiyun 	 */
1323*4882a593Smuzhiyun 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	io_req->sc_cmd = NULL;
1326*4882a593Smuzhiyun 	sc_cmd->SCp.ptr =  NULL;
1327*4882a593Smuzhiyun 	sc_cmd->scsi_done(sc_cmd);
1328*4882a593Smuzhiyun 	kref_put(&io_req->refcount, qedf_release_cmd);
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun /* Return a SCSI command in some other context besides a normal completion */
qedf_scsi_done(struct qedf_ctx * qedf,struct qedf_ioreq * io_req,int result)1332*4882a593Smuzhiyun void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1333*4882a593Smuzhiyun 	int result)
1334*4882a593Smuzhiyun {
1335*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd;
1336*4882a593Smuzhiyun 	int refcount;
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	if (!io_req) {
1339*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1340*4882a593Smuzhiyun 		return;
1341*4882a593Smuzhiyun 	}
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1344*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1345*4882a593Smuzhiyun 			  "io_req:%p scsi_done handling already done\n",
1346*4882a593Smuzhiyun 			  io_req);
1347*4882a593Smuzhiyun 		return;
1348*4882a593Smuzhiyun 	}
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	/*
1351*4882a593Smuzhiyun 	 * We will be done with this command after this call so clear the
1352*4882a593Smuzhiyun 	 * outstanding bit.
1353*4882a593Smuzhiyun 	 */
1354*4882a593Smuzhiyun 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	sc_cmd = io_req->sc_cmd;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (!sc_cmd) {
1359*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1360*4882a593Smuzhiyun 		return;
1361*4882a593Smuzhiyun 	}
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	if (!virt_addr_valid(sc_cmd)) {
1364*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1365*4882a593Smuzhiyun 		goto bad_scsi_ptr;
1366*4882a593Smuzhiyun 	}
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	if (!sc_cmd->SCp.ptr) {
1369*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1370*4882a593Smuzhiyun 		    "another context.\n");
1371*4882a593Smuzhiyun 		return;
1372*4882a593Smuzhiyun 	}
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	if (!sc_cmd->device) {
1375*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1376*4882a593Smuzhiyun 			 sc_cmd);
1377*4882a593Smuzhiyun 		goto bad_scsi_ptr;
1378*4882a593Smuzhiyun 	}
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	if (!virt_addr_valid(sc_cmd->device)) {
1381*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1382*4882a593Smuzhiyun 			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1383*4882a593Smuzhiyun 		goto bad_scsi_ptr;
1384*4882a593Smuzhiyun 	}
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	if (!sc_cmd->sense_buffer) {
1387*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1388*4882a593Smuzhiyun 			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1389*4882a593Smuzhiyun 			 sc_cmd);
1390*4882a593Smuzhiyun 		goto bad_scsi_ptr;
1391*4882a593Smuzhiyun 	}
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1394*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1395*4882a593Smuzhiyun 			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1396*4882a593Smuzhiyun 			 sc_cmd);
1397*4882a593Smuzhiyun 		goto bad_scsi_ptr;
1398*4882a593Smuzhiyun 	}
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	if (!sc_cmd->scsi_done) {
1401*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1402*4882a593Smuzhiyun 			 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
1403*4882a593Smuzhiyun 			 sc_cmd);
1404*4882a593Smuzhiyun 		goto bad_scsi_ptr;
1405*4882a593Smuzhiyun 	}
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	qedf_unmap_sg_list(qedf, io_req);
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	sc_cmd->result = result << 16;
1410*4882a593Smuzhiyun 	refcount = kref_read(&io_req->refcount);
1411*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1412*4882a593Smuzhiyun 	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1413*4882a593Smuzhiyun 	    "allowed=%d retries=%d refcount=%d.\n",
1414*4882a593Smuzhiyun 	    qedf->lport->host->host_no, sc_cmd->device->id,
1415*4882a593Smuzhiyun 	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1416*4882a593Smuzhiyun 	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1417*4882a593Smuzhiyun 	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1418*4882a593Smuzhiyun 	    refcount);
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	/*
1421*4882a593Smuzhiyun 	 * Set resid to the whole buffer length so we won't try to reuse any
1422*4882a593Smuzhiyun 	 * previously read data
1423*4882a593Smuzhiyun 	 */
1424*4882a593Smuzhiyun 	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	if (qedf_io_tracing)
1427*4882a593Smuzhiyun 		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	io_req->sc_cmd = NULL;
1430*4882a593Smuzhiyun 	sc_cmd->SCp.ptr = NULL;
1431*4882a593Smuzhiyun 	sc_cmd->scsi_done(sc_cmd);
1432*4882a593Smuzhiyun 	kref_put(&io_req->refcount, qedf_release_cmd);
1433*4882a593Smuzhiyun 	return;
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun bad_scsi_ptr:
1436*4882a593Smuzhiyun 	/*
1437*4882a593Smuzhiyun 	 * Clear the io_req->sc_cmd backpointer so we don't try to process
1438*4882a593Smuzhiyun 	 * this again
1439*4882a593Smuzhiyun 	 */
1440*4882a593Smuzhiyun 	io_req->sc_cmd = NULL;
1441*4882a593Smuzhiyun 	kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun /*
1445*4882a593Smuzhiyun  * Handle warning type CQE completions. This is mainly used for REC timer
1446*4882a593Smuzhiyun  * popping.
1447*4882a593Smuzhiyun  */
qedf_process_warning_compl(struct qedf_ctx * qedf,struct fcoe_cqe * cqe,struct qedf_ioreq * io_req)1448*4882a593Smuzhiyun void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1449*4882a593Smuzhiyun 	struct qedf_ioreq *io_req)
1450*4882a593Smuzhiyun {
1451*4882a593Smuzhiyun 	int rval, i;
1452*4882a593Smuzhiyun 	struct qedf_rport *fcport = io_req->fcport;
1453*4882a593Smuzhiyun 	u64 err_warn_bit_map;
1454*4882a593Smuzhiyun 	u8 err_warn = 0xff;
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	if (!cqe) {
1457*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1458*4882a593Smuzhiyun 			  "cqe is NULL for io_req %p xid=0x%x\n",
1459*4882a593Smuzhiyun 			  io_req, io_req->xid);
1460*4882a593Smuzhiyun 		return;
1461*4882a593Smuzhiyun 	}
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1464*4882a593Smuzhiyun 		  "xid=0x%x\n", io_req->xid);
1465*4882a593Smuzhiyun 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1466*4882a593Smuzhiyun 		  "err_warn_bitmap=%08x:%08x\n",
1467*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1468*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1469*4882a593Smuzhiyun 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1470*4882a593Smuzhiyun 		  "rx_buff_off=%08x, rx_id=%04x\n",
1471*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1472*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1473*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	/* Normalize the error bitmap into a single 64-bit value */
1476*4882a593Smuzhiyun 	err_warn_bit_map = (u64)
1477*4882a593Smuzhiyun 	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1478*4882a593Smuzhiyun 	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1479*4882a593Smuzhiyun 	for (i = 0; i < 64; i++) {
1480*4882a593Smuzhiyun 		if (err_warn_bit_map & (u64)((u64)1 << i)) {
1481*4882a593Smuzhiyun 			err_warn = i;
1482*4882a593Smuzhiyun 			break;
1483*4882a593Smuzhiyun 		}
1484*4882a593Smuzhiyun 	}
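	/* err_warn now holds the lowest set bit index, or 0xff if none was set. */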
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	/* Check if REC TOV expired if this is a tape device */
1487*4882a593Smuzhiyun 	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1488*4882a593Smuzhiyun 		if (err_warn ==
1489*4882a593Smuzhiyun 		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1490*4882a593Smuzhiyun 			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1491*4882a593Smuzhiyun 			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1492*4882a593Smuzhiyun 				io_req->rx_buf_off =
1493*4882a593Smuzhiyun 				    cqe->cqe_info.err_info.rx_buf_off;
1494*4882a593Smuzhiyun 				io_req->tx_buf_off =
1495*4882a593Smuzhiyun 				    cqe->cqe_info.err_info.tx_buf_off;
1496*4882a593Smuzhiyun 				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1497*4882a593Smuzhiyun 				rval = qedf_send_rec(io_req);
1498*4882a593Smuzhiyun 				/*
1499*4882a593Smuzhiyun 				 * We only want to abort the io_req if we
1500*4882a593Smuzhiyun 				 * can't queue the REC command as we want to
1501*4882a593Smuzhiyun 				 * keep the exchange open for recovery.
1502*4882a593Smuzhiyun 				 */
1503*4882a593Smuzhiyun 				if (rval)
1504*4882a593Smuzhiyun 					goto send_abort;
1505*4882a593Smuzhiyun 			}
1506*4882a593Smuzhiyun 			return;
1507*4882a593Smuzhiyun 		}
1508*4882a593Smuzhiyun 	}
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun send_abort:
1511*4882a593Smuzhiyun 	init_completion(&io_req->abts_done);
1512*4882a593Smuzhiyun 	rval = qedf_initiate_abts(io_req, true);
1513*4882a593Smuzhiyun 	if (rval)
1514*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun /* Cleanup a command when we receive an error detection completion */
qedf_process_error_detect(struct qedf_ctx * qedf,struct fcoe_cqe * cqe,struct qedf_ioreq * io_req)1518*4882a593Smuzhiyun void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1519*4882a593Smuzhiyun 	struct qedf_ioreq *io_req)
1520*4882a593Smuzhiyun {
1521*4882a593Smuzhiyun 	int rval;
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	if (io_req == NULL) {
1524*4882a593Smuzhiyun 		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1525*4882a593Smuzhiyun 		return;
1526*4882a593Smuzhiyun 	}
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	if (io_req->fcport == NULL) {
1529*4882a593Smuzhiyun 		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1530*4882a593Smuzhiyun 		return;
1531*4882a593Smuzhiyun 	}
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	if (!cqe) {
1534*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1535*4882a593Smuzhiyun 			"cqe is NULL for io_req %p\n", io_req);
1536*4882a593Smuzhiyun 		return;
1537*4882a593Smuzhiyun 	}
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1540*4882a593Smuzhiyun 		  "xid=0x%x\n", io_req->xid);
1541*4882a593Smuzhiyun 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1542*4882a593Smuzhiyun 		  "err_warn_bitmap=%08x:%08x\n",
1543*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1544*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1545*4882a593Smuzhiyun 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1546*4882a593Smuzhiyun 		  "rx_buff_off=%08x, rx_id=%04x\n",
1547*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1548*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1549*4882a593Smuzhiyun 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 	/* When flush is active, let the cmds be flushed out from the cleanup context */
1552*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1553*4882a593Smuzhiyun 		(test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1554*4882a593Smuzhiyun 		 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1555*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1556*4882a593Smuzhiyun 			"Dropping EQE for xid=0x%x as fcport is flushing",
1557*4882a593Smuzhiyun 			io_req->xid);
1558*4882a593Smuzhiyun 		return;
1559*4882a593Smuzhiyun 	}
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	if (qedf->stop_io_on_error) {
1562*4882a593Smuzhiyun 		qedf_stop_all_io(qedf);
1563*4882a593Smuzhiyun 		return;
1564*4882a593Smuzhiyun 	}
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	init_completion(&io_req->abts_done);
1567*4882a593Smuzhiyun 	rval = qedf_initiate_abts(io_req, true);
1568*4882a593Smuzhiyun 	if (rval)
1569*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1570*4882a593Smuzhiyun }
1571*4882a593Smuzhiyun 
qedf_flush_els_req(struct qedf_ctx * qedf,struct qedf_ioreq * els_req)1572*4882a593Smuzhiyun static void qedf_flush_els_req(struct qedf_ctx *qedf,
1573*4882a593Smuzhiyun 	struct qedf_ioreq *els_req)
1574*4882a593Smuzhiyun {
1575*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1576*4882a593Smuzhiyun 	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1577*4882a593Smuzhiyun 	    kref_read(&els_req->refcount));
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	/*
1580*4882a593Smuzhiyun 	 * Need to distinguish this from a timeout when calling the
1581*4882a593Smuzhiyun 	 * els_req->cb_func.
1582*4882a593Smuzhiyun 	 */
1583*4882a593Smuzhiyun 	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	/* Cancel the timer */
1588*4882a593Smuzhiyun 	cancel_delayed_work_sync(&els_req->timeout_work);
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 	/* Call callback function to complete command */
1591*4882a593Smuzhiyun 	if (els_req->cb_func && els_req->cb_arg) {
1592*4882a593Smuzhiyun 		els_req->cb_func(els_req->cb_arg);
1593*4882a593Smuzhiyun 		els_req->cb_arg = NULL;
1594*4882a593Smuzhiyun 	}
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	/* Release kref for original initiate_els */
1597*4882a593Smuzhiyun 	kref_put(&els_req->refcount, qedf_release_cmd);
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun /* A value of -1 for lun is a wild card that means flush all
1601*4882a593Smuzhiyun  * active SCSI I/Os for the target.
1602*4882a593Smuzhiyun  */
qedf_flush_active_ios(struct qedf_rport * fcport,int lun)1603*4882a593Smuzhiyun void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun 	struct qedf_ioreq *io_req;
1606*4882a593Smuzhiyun 	struct qedf_ctx *qedf;
1607*4882a593Smuzhiyun 	struct qedf_cmd_mgr *cmd_mgr;
1608*4882a593Smuzhiyun 	int i, rc;
1609*4882a593Smuzhiyun 	unsigned long flags;
1610*4882a593Smuzhiyun 	int flush_cnt = 0;
1611*4882a593Smuzhiyun 	int wait_cnt = 100;
1612*4882a593Smuzhiyun 	int refcount = 0;
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	if (!fcport) {
1615*4882a593Smuzhiyun 		QEDF_ERR(NULL, "fcport is NULL\n");
1616*4882a593Smuzhiyun 		return;
1617*4882a593Smuzhiyun 	}
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 	/* Check that fcport is still offloaded */
1620*4882a593Smuzhiyun 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1621*4882a593Smuzhiyun 		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1622*4882a593Smuzhiyun 		return;
1623*4882a593Smuzhiyun 	}
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	qedf = fcport->qedf;
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	if (!qedf) {
1628*4882a593Smuzhiyun 		QEDF_ERR(NULL, "qedf is NULL.\n");
1629*4882a593Smuzhiyun 		return;
1630*4882a593Smuzhiyun 	}
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 	/* Only wait for all commands to be queued in the Upload context */
1633*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1634*4882a593Smuzhiyun 	    (lun == -1)) {
1635*4882a593Smuzhiyun 		while (atomic_read(&fcport->ios_to_queue)) {
1636*4882a593Smuzhiyun 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1637*4882a593Smuzhiyun 				  "Waiting for %d I/Os to be queued\n",
1638*4882a593Smuzhiyun 				  atomic_read(&fcport->ios_to_queue));
1639*4882a593Smuzhiyun 			if (wait_cnt == 0) {
1640*4882a593Smuzhiyun 				QEDF_ERR(NULL,
1641*4882a593Smuzhiyun 					 "%d IOs request could not be queued\n",
1642*4882a593Smuzhiyun 					 atomic_read(&fcport->ios_to_queue));
1643*4882a593Smuzhiyun 			}
1644*4882a593Smuzhiyun 			msleep(20);
1645*4882a593Smuzhiyun 			wait_cnt--;
1646*4882a593Smuzhiyun 		}
1647*4882a593Smuzhiyun 	}
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	cmd_mgr = qedf->cmd_mgr;
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1652*4882a593Smuzhiyun 		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1653*4882a593Smuzhiyun 		  atomic_read(&fcport->num_active_ios), fcport,
1654*4882a593Smuzhiyun 		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1655*4882a593Smuzhiyun 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	mutex_lock(&qedf->flush_mutex);
1658*4882a593Smuzhiyun 	if (lun == -1) {
1659*4882a593Smuzhiyun 		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1660*4882a593Smuzhiyun 	} else {
1661*4882a593Smuzhiyun 		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1662*4882a593Smuzhiyun 		fcport->lun_reset_lun = lun;
1663*4882a593Smuzhiyun 	}
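	/*
	 * With the reset bit set, the normal completion paths (e.g.
	 * qedf_scsi_completion() and qedf_process_error_detect()) drop
	 * completions for this fcport/LUN, so the commands scanned below
	 * are returned only from this flush context.
	 */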
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1666*4882a593Smuzhiyun 		io_req = &cmd_mgr->cmds[i];
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 		if (!io_req)
1669*4882a593Smuzhiyun 			continue;
1670*4882a593Smuzhiyun 		if (!io_req->fcport)
1671*4882a593Smuzhiyun 			continue;
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 		spin_lock_irqsave(&cmd_mgr->lock, flags);
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 		if (io_req->alloc) {
1676*4882a593Smuzhiyun 			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1677*4882a593Smuzhiyun 				if (io_req->cmd_type == QEDF_SCSI_CMD)
1678*4882a593Smuzhiyun 					QEDF_ERR(&qedf->dbg_ctx,
1679*4882a593Smuzhiyun 						 "Allocated but not queued, xid=0x%x\n",
1680*4882a593Smuzhiyun 						 io_req->xid);
1681*4882a593Smuzhiyun 			}
1682*4882a593Smuzhiyun 			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1683*4882a593Smuzhiyun 		} else {
1684*4882a593Smuzhiyun 			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1685*4882a593Smuzhiyun 			continue;
1686*4882a593Smuzhiyun 		}
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 		if (io_req->fcport != fcport)
1689*4882a593Smuzhiyun 			continue;
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1692*4882a593Smuzhiyun 		 * but RRQ is still pending.
1693*4882a593Smuzhiyun 		 * Workaround: Within qedf_send_rrq, we check if the fcport is
1694*4882a593Smuzhiyun 		 * NULL, and we drop the ref on the io_req to clean it up.
1695*4882a593Smuzhiyun 		 */
1696*4882a593Smuzhiyun 		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1697*4882a593Smuzhiyun 			refcount = kref_read(&io_req->refcount);
1698*4882a593Smuzhiyun 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1699*4882a593Smuzhiyun 				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1700*4882a593Smuzhiyun 				  io_req->xid, io_req->cmd_type, refcount);
1701*4882a593Smuzhiyun 			/* If RRQ work has been queued, try to cancel it and
1702*4882a593Smuzhiyun 			 * free the io_req
1703*4882a593Smuzhiyun 			 */
1704*4882a593Smuzhiyun 			if (atomic_read(&io_req->state) ==
1705*4882a593Smuzhiyun 			    QEDFC_CMD_ST_RRQ_WAIT) {
1706*4882a593Smuzhiyun 				if (cancel_delayed_work_sync
1707*4882a593Smuzhiyun 				    (&io_req->rrq_work)) {
1708*4882a593Smuzhiyun 					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1709*4882a593Smuzhiyun 						  "Putting reference for pending RRQ work xid=0x%x.\n",
1710*4882a593Smuzhiyun 						  io_req->xid);
1711*4882a593Smuzhiyun 					/* ID: 003 */
1712*4882a593Smuzhiyun 					kref_put(&io_req->refcount,
1713*4882a593Smuzhiyun 						 qedf_release_cmd);
1714*4882a593Smuzhiyun 				}
1715*4882a593Smuzhiyun 			}
1716*4882a593Smuzhiyun 			continue;
1717*4882a593Smuzhiyun 		}
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 		/* Only consider flushing ELS during target reset */
1720*4882a593Smuzhiyun 		if (io_req->cmd_type == QEDF_ELS &&
1721*4882a593Smuzhiyun 		    lun == -1) {
1722*4882a593Smuzhiyun 			rc = kref_get_unless_zero(&io_req->refcount);
1723*4882a593Smuzhiyun 			if (!rc) {
1724*4882a593Smuzhiyun 				QEDF_ERR(&(qedf->dbg_ctx),
1725*4882a593Smuzhiyun 				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1726*4882a593Smuzhiyun 				    io_req, io_req->xid);
1727*4882a593Smuzhiyun 				continue;
1728*4882a593Smuzhiyun 			}
1729*4882a593Smuzhiyun 			qedf_initiate_cleanup(io_req, false);
1730*4882a593Smuzhiyun 			flush_cnt++;
1731*4882a593Smuzhiyun 			qedf_flush_els_req(qedf, io_req);
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 			/*
1734*4882a593Smuzhiyun 			 * Release the kref and go back to the top of the
1735*4882a593Smuzhiyun 			 * loop.
1736*4882a593Smuzhiyun 			 */
1737*4882a593Smuzhiyun 			goto free_cmd;
1738*4882a593Smuzhiyun 		}
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 		if (io_req->cmd_type == QEDF_ABTS) {
1741*4882a593Smuzhiyun 			/* ID: 004 */
1742*4882a593Smuzhiyun 			rc = kref_get_unless_zero(&io_req->refcount);
1743*4882a593Smuzhiyun 			if (!rc) {
1744*4882a593Smuzhiyun 				QEDF_ERR(&(qedf->dbg_ctx),
1745*4882a593Smuzhiyun 				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1746*4882a593Smuzhiyun 				    io_req, io_req->xid);
1747*4882a593Smuzhiyun 				continue;
1748*4882a593Smuzhiyun 			}
1749*4882a593Smuzhiyun 			if (lun != -1 && io_req->lun != lun)
1750*4882a593Smuzhiyun 				goto free_cmd;
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1753*4882a593Smuzhiyun 			    "Flushing abort xid=0x%x.\n", io_req->xid);
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1756*4882a593Smuzhiyun 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1757*4882a593Smuzhiyun 					  "Putting ref for cancelled RRQ work xid=0x%x.\n",
1758*4882a593Smuzhiyun 					  io_req->xid);
1759*4882a593Smuzhiyun 				kref_put(&io_req->refcount, qedf_release_cmd);
1760*4882a593Smuzhiyun 			}
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 			if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1763*4882a593Smuzhiyun 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1764*4882a593Smuzhiyun 					  "Putting ref for cancelled tmo work xid=0x%x.\n",
1765*4882a593Smuzhiyun 					  io_req->xid);
1766*4882a593Smuzhiyun 				qedf_initiate_cleanup(io_req, true);
1767*4882a593Smuzhiyun 				/* Notify eh_abort handler that ABTS is
1768*4882a593Smuzhiyun 				 * complete
1769*4882a593Smuzhiyun 				 */
1770*4882a593Smuzhiyun 				complete(&io_req->abts_done);
1771*4882a593Smuzhiyun 				clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1772*4882a593Smuzhiyun 				/* ID: 002 */
1773*4882a593Smuzhiyun 				kref_put(&io_req->refcount, qedf_release_cmd);
1774*4882a593Smuzhiyun 			}
1775*4882a593Smuzhiyun 			flush_cnt++;
1776*4882a593Smuzhiyun 			goto free_cmd;
1777*4882a593Smuzhiyun 		}
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 		if (!io_req->sc_cmd)
1780*4882a593Smuzhiyun 			continue;
1781*4882a593Smuzhiyun 		if (!io_req->sc_cmd->device) {
1782*4882a593Smuzhiyun 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1783*4882a593Smuzhiyun 				  "Device backpointer NULL for sc_cmd=%p.\n",
1784*4882a593Smuzhiyun 				  io_req->sc_cmd);
1785*4882a593Smuzhiyun 			/* Put reference for non-existent scsi_cmnd */
1786*4882a593Smuzhiyun 			io_req->sc_cmd = NULL;
1787*4882a593Smuzhiyun 			qedf_initiate_cleanup(io_req, false);
1788*4882a593Smuzhiyun 			kref_put(&io_req->refcount, qedf_release_cmd);
1789*4882a593Smuzhiyun 			continue;
1790*4882a593Smuzhiyun 		}
1791*4882a593Smuzhiyun 		if (lun > -1) {
1792*4882a593Smuzhiyun 			if (io_req->lun != lun)
1793*4882a593Smuzhiyun 				continue;
1794*4882a593Smuzhiyun 		}
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 		/*
1797*4882a593Smuzhiyun 		 * Use kref_get_unless_zero in the unlikely case the command
1798*4882a593Smuzhiyun 		 * we're about to flush was completed in the normal SCSI path
1799*4882a593Smuzhiyun 		 */
1800*4882a593Smuzhiyun 		rc = kref_get_unless_zero(&io_req->refcount);
1801*4882a593Smuzhiyun 		if (!rc) {
1802*4882a593Smuzhiyun 			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1803*4882a593Smuzhiyun 			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1804*4882a593Smuzhiyun 			continue;
1805*4882a593Smuzhiyun 		}
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1808*4882a593Smuzhiyun 		    "Cleanup xid=0x%x.\n", io_req->xid);
1809*4882a593Smuzhiyun 		flush_cnt++;
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 		/* Clean up the task and return the I/O to the mid-layer */
1812*4882a593Smuzhiyun 		qedf_initiate_cleanup(io_req, true);
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun free_cmd:
1815*4882a593Smuzhiyun 		kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 004 */
1816*4882a593Smuzhiyun 	}
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	wait_cnt = 60;
1819*4882a593Smuzhiyun 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1820*4882a593Smuzhiyun 		  "Flushed 0x%x I/Os, active=0x%x.\n",
1821*4882a593Smuzhiyun 		  flush_cnt, atomic_read(&fcport->num_active_ios));
1822*4882a593Smuzhiyun 	/* Only wait for all commands to complete in the Upload context */
1823*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1824*4882a593Smuzhiyun 	    (lun == -1)) {
1825*4882a593Smuzhiyun 		while (atomic_read(&fcport->num_active_ios)) {
1826*4882a593Smuzhiyun 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1827*4882a593Smuzhiyun 				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1828*4882a593Smuzhiyun 				  flush_cnt,
1829*4882a593Smuzhiyun 				  atomic_read(&fcport->num_active_ios),
1830*4882a593Smuzhiyun 				  wait_cnt);
1831*4882a593Smuzhiyun 			if (wait_cnt == 0) {
1832*4882a593Smuzhiyun 				QEDF_ERR(&qedf->dbg_ctx,
1833*4882a593Smuzhiyun 					 "Flushed %d I/Os, active=%d.\n",
1834*4882a593Smuzhiyun 					 flush_cnt,
1835*4882a593Smuzhiyun 					 atomic_read(&fcport->num_active_ios));
1836*4882a593Smuzhiyun 				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1837*4882a593Smuzhiyun 					io_req = &cmd_mgr->cmds[i];
1838*4882a593Smuzhiyun 					if (io_req->fcport &&
1839*4882a593Smuzhiyun 					    io_req->fcport == fcport) {
1840*4882a593Smuzhiyun 						refcount =
1841*4882a593Smuzhiyun 						kref_read(&io_req->refcount);
1842*4882a593Smuzhiyun 						set_bit(QEDF_CMD_DIRTY,
1843*4882a593Smuzhiyun 							&io_req->flags);
1844*4882a593Smuzhiyun 						QEDF_ERR(&qedf->dbg_ctx,
1845*4882a593Smuzhiyun 							 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1846*4882a593Smuzhiyun 							 io_req, io_req->xid,
1847*4882a593Smuzhiyun 							 io_req->flags,
1848*4882a593Smuzhiyun 							 io_req->sc_cmd,
1849*4882a593Smuzhiyun 							 refcount,
1850*4882a593Smuzhiyun 							 io_req->cmd_type);
1851*4882a593Smuzhiyun 					}
1852*4882a593Smuzhiyun 				}
1853*4882a593Smuzhiyun 				WARN_ON(1);
1854*4882a593Smuzhiyun 				break;
1855*4882a593Smuzhiyun 			}
1856*4882a593Smuzhiyun 			msleep(500);
1857*4882a593Smuzhiyun 			wait_cnt--;
1858*4882a593Smuzhiyun 		}
1859*4882a593Smuzhiyun 	}
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1862*4882a593Smuzhiyun 	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1863*4882a593Smuzhiyun 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1864*4882a593Smuzhiyun 	mutex_unlock(&qedf->flush_mutex);
1865*4882a593Smuzhiyun }
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun /*
1868*4882a593Smuzhiyun  * Initiate an ABTS middle path command. Note that we don't have to initialize
1869*4882a593Smuzhiyun  * the task context for an ABTS task.
1870*4882a593Smuzhiyun  */
qedf_initiate_abts(struct qedf_ioreq * io_req,bool return_scsi_cmd_on_abts)1871*4882a593Smuzhiyun int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1872*4882a593Smuzhiyun {
1873*4882a593Smuzhiyun 	struct fc_lport *lport;
1874*4882a593Smuzhiyun 	struct qedf_rport *fcport = io_req->fcport;
1875*4882a593Smuzhiyun 	struct fc_rport_priv *rdata;
1876*4882a593Smuzhiyun 	struct qedf_ctx *qedf;
1877*4882a593Smuzhiyun 	u16 xid;
1878*4882a593Smuzhiyun 	int rc = 0;
1879*4882a593Smuzhiyun 	unsigned long flags;
1880*4882a593Smuzhiyun 	struct fcoe_wqe *sqe;
1881*4882a593Smuzhiyun 	u16 sqe_idx;
1882*4882a593Smuzhiyun 	int refcount = 0;
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 	/* Sanity check qedf_rport before dereferencing any pointers */
1885*4882a593Smuzhiyun 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1886*4882a593Smuzhiyun 		QEDF_ERR(NULL, "tgt not offloaded\n");
1887*4882a593Smuzhiyun 		rc = 1;
1888*4882a593Smuzhiyun 		goto out;
1889*4882a593Smuzhiyun 	}
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	qedf = fcport->qedf;
1892*4882a593Smuzhiyun 	rdata = fcport->rdata;
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1895*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1896*4882a593Smuzhiyun 		rc = 1;
1897*4882a593Smuzhiyun 		goto out;
1898*4882a593Smuzhiyun 	}
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	lport = qedf->lport;
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1903*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1904*4882a593Smuzhiyun 		rc = 1;
1905*4882a593Smuzhiyun 		goto drop_rdata_kref;
1906*4882a593Smuzhiyun 	}
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1909*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1910*4882a593Smuzhiyun 		rc = 1;
1911*4882a593Smuzhiyun 		goto drop_rdata_kref;
1912*4882a593Smuzhiyun 	}
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	/* Ensure room on SQ */
1915*4882a593Smuzhiyun 	if (!atomic_read(&fcport->free_sqes)) {
1916*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1917*4882a593Smuzhiyun 		rc = 1;
1918*4882a593Smuzhiyun 		goto drop_rdata_kref;
1919*4882a593Smuzhiyun 	}
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1922*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1923*4882a593Smuzhiyun 		rc = 1;
1924*4882a593Smuzhiyun 		goto drop_rdata_kref;
1925*4882a593Smuzhiyun 	}
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1928*4882a593Smuzhiyun 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1929*4882a593Smuzhiyun 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1930*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
1931*4882a593Smuzhiyun 			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1932*4882a593Smuzhiyun 			 io_req->xid, io_req->sc_cmd);
1933*4882a593Smuzhiyun 		rc = 1;
1934*4882a593Smuzhiyun 		goto drop_rdata_kref;
1935*4882a593Smuzhiyun 	}
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	kref_get(&io_req->refcount);
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	xid = io_req->xid;
1940*4882a593Smuzhiyun 	qedf->control_requests++;
1941*4882a593Smuzhiyun 	qedf->packet_aborts++;
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun 	/* Set the command type to abort */
1944*4882a593Smuzhiyun 	io_req->cmd_type = QEDF_ABTS;
1945*4882a593Smuzhiyun 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1948*4882a593Smuzhiyun 	refcount = kref_read(&io_req->refcount);
1949*4882a593Smuzhiyun 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1950*4882a593Smuzhiyun 		  "ABTS io_req xid = 0x%x refcount=%d\n",
1951*4882a593Smuzhiyun 		  xid, refcount);
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun 	spin_lock_irqsave(&fcport->rport_lock, flags);
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	sqe_idx = qedf_get_sqe_idx(fcport);
1958*4882a593Smuzhiyun 	sqe = &fcport->sq[sqe_idx];
1959*4882a593Smuzhiyun 	memset(sqe, 0, sizeof(struct fcoe_wqe));
1960*4882a593Smuzhiyun 	io_req->task_params->sqe = sqe;
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	init_initiator_abort_fcoe_task(io_req->task_params);
1963*4882a593Smuzhiyun 	qedf_ring_doorbell(fcport);
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun drop_rdata_kref:
1968*4882a593Smuzhiyun 	kref_put(&rdata->kref, fc_rport_destroy);
1969*4882a593Smuzhiyun out:
1970*4882a593Smuzhiyun 	return rc;
1971*4882a593Smuzhiyun }
1972*4882a593Smuzhiyun 
qedf_process_abts_compl(struct qedf_ctx * qedf,struct fcoe_cqe * cqe,struct qedf_ioreq * io_req)1973*4882a593Smuzhiyun void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1974*4882a593Smuzhiyun 	struct qedf_ioreq *io_req)
1975*4882a593Smuzhiyun {
1976*4882a593Smuzhiyun 	uint32_t r_ctl;
1977*4882a593Smuzhiyun 	int rc;
1978*4882a593Smuzhiyun 	struct qedf_rport *fcport = io_req->fcport;
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1981*4882a593Smuzhiyun 		   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	r_ctl = cqe->cqe_info.abts_info.r_ctl;
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 	/* This was added at a point when we were scheduling abts_compl &
1986*4882a593Smuzhiyun 	 * cleanup_compl on different CPUs and there was a possibility of
1987*4882a593Smuzhiyun 	 * the io_req being freed from the other context before we got here.
1988*4882a593Smuzhiyun 	 */
1989*4882a593Smuzhiyun 	if (!fcport) {
1990*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1991*4882a593Smuzhiyun 			  "Dropping ABTS completion xid=0x%x as fcport is NULL",
1992*4882a593Smuzhiyun 			  io_req->xid);
1993*4882a593Smuzhiyun 		return;
1994*4882a593Smuzhiyun 	}
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	/*
1997*4882a593Smuzhiyun 	 * When flush is active, let the cmds be completed from the cleanup
1998*4882a593Smuzhiyun 	 * context
1999*4882a593Smuzhiyun 	 */
2000*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
2001*4882a593Smuzhiyun 	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
2002*4882a593Smuzhiyun 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2003*4882a593Smuzhiyun 			  "Dropping ABTS completion xid=0x%x as fcport is flushing",
2004*4882a593Smuzhiyun 			  io_req->xid);
2005*4882a593Smuzhiyun 		return;
2006*4882a593Smuzhiyun 	}
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	if (!cancel_delayed_work(&io_req->timeout_work)) {
2009*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
2010*4882a593Smuzhiyun 			 "Wasn't able to cancel abts timeout work.\n");
2011*4882a593Smuzhiyun 	}
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	switch (r_ctl) {
2014*4882a593Smuzhiyun 	case FC_RCTL_BA_ACC:
2015*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2016*4882a593Smuzhiyun 		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
2017*4882a593Smuzhiyun 		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2018*4882a593Smuzhiyun 		rc = kref_get_unless_zero(&io_req->refcount);	/* ID: 003 */
2019*4882a593Smuzhiyun 		if (!rc) {
2020*4882a593Smuzhiyun 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2021*4882a593Smuzhiyun 				  "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
2022*4882a593Smuzhiyun 				  io_req->xid);
2023*4882a593Smuzhiyun 			return;
2024*4882a593Smuzhiyun 		}
2025*4882a593Smuzhiyun 		/*
2026*4882a593Smuzhiyun 		 * Don't release this cmd yet. It will be released
2027*4882a593Smuzhiyun 		 * after we get the RRQ response.
2028*4882a593Smuzhiyun 		 */
2029*4882a593Smuzhiyun 		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2030*4882a593Smuzhiyun 		    msecs_to_jiffies(qedf->lport->r_a_tov));
2031*4882a593Smuzhiyun 		atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2032*4882a593Smuzhiyun 		break;
2033*4882a593Smuzhiyun 	/* For error cases let the cleanup return the command */
2034*4882a593Smuzhiyun 	case FC_RCTL_BA_RJT:
2035*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2036*4882a593Smuzhiyun 		   "ABTS response - RJT\n");
2037*4882a593Smuzhiyun 		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2038*4882a593Smuzhiyun 		break;
2039*4882a593Smuzhiyun 	default:
2040*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
2041*4882a593Smuzhiyun 		break;
2042*4882a593Smuzhiyun 	}
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	if (io_req->sc_cmd) {
2047*4882a593Smuzhiyun 		if (!io_req->return_scsi_cmd_on_abts)
2048*4882a593Smuzhiyun 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2049*4882a593Smuzhiyun 				  "Not call scsi_done for xid=0x%x.\n",
2050*4882a593Smuzhiyun 				  io_req->xid);
2051*4882a593Smuzhiyun 		if (io_req->return_scsi_cmd_on_abts)
2052*4882a593Smuzhiyun 			qedf_scsi_done(qedf, io_req, DID_ERROR);
2053*4882a593Smuzhiyun 	}
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun 	/* Notify eh_abort handler that ABTS is complete */
2056*4882a593Smuzhiyun 	complete(&io_req->abts_done);
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	kref_put(&io_req->refcount, qedf_release_cmd);
2059*4882a593Smuzhiyun }
2060*4882a593Smuzhiyun 
qedf_init_mp_req(struct qedf_ioreq * io_req)2061*4882a593Smuzhiyun int qedf_init_mp_req(struct qedf_ioreq *io_req)
2062*4882a593Smuzhiyun {
2063*4882a593Smuzhiyun 	struct qedf_mp_req *mp_req;
2064*4882a593Smuzhiyun 	struct scsi_sge *mp_req_bd;
2065*4882a593Smuzhiyun 	struct scsi_sge *mp_resp_bd;
2066*4882a593Smuzhiyun 	struct qedf_ctx *qedf = io_req->fcport->qedf;
2067*4882a593Smuzhiyun 	dma_addr_t addr;
2068*4882a593Smuzhiyun 	uint64_t sz;
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2073*4882a593Smuzhiyun 	memset(mp_req, 0, sizeof(struct qedf_mp_req));
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun 	if (io_req->cmd_type != QEDF_ELS) {
2076*4882a593Smuzhiyun 		mp_req->req_len = sizeof(struct fcp_cmnd);
2077*4882a593Smuzhiyun 		io_req->data_xfer_len = mp_req->req_len;
2078*4882a593Smuzhiyun 	} else
2079*4882a593Smuzhiyun 		mp_req->req_len = io_req->data_xfer_len;
2080*4882a593Smuzhiyun 
2081*4882a593Smuzhiyun 	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2082*4882a593Smuzhiyun 	    &mp_req->req_buf_dma, GFP_KERNEL);
2083*4882a593Smuzhiyun 	if (!mp_req->req_buf) {
2084*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2085*4882a593Smuzhiyun 		qedf_free_mp_resc(io_req);
2086*4882a593Smuzhiyun 		return -ENOMEM;
2087*4882a593Smuzhiyun 	}
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2090*4882a593Smuzhiyun 	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2091*4882a593Smuzhiyun 	if (!mp_req->resp_buf) {
2092*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2093*4882a593Smuzhiyun 			  "buffer\n");
2094*4882a593Smuzhiyun 		qedf_free_mp_resc(io_req);
2095*4882a593Smuzhiyun 		return -ENOMEM;
2096*4882a593Smuzhiyun 	}
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	/* Allocate and map mp_req_bd and mp_resp_bd */
2099*4882a593Smuzhiyun 	sz = sizeof(struct scsi_sge);
2100*4882a593Smuzhiyun 	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2101*4882a593Smuzhiyun 	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
2102*4882a593Smuzhiyun 	if (!mp_req->mp_req_bd) {
2103*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2104*4882a593Smuzhiyun 		qedf_free_mp_resc(io_req);
2105*4882a593Smuzhiyun 		return -ENOMEM;
2106*4882a593Smuzhiyun 	}
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun 	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2109*4882a593Smuzhiyun 	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2110*4882a593Smuzhiyun 	if (!mp_req->mp_resp_bd) {
2111*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2112*4882a593Smuzhiyun 		qedf_free_mp_resc(io_req);
2113*4882a593Smuzhiyun 		return -ENOMEM;
2114*4882a593Smuzhiyun 	}
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	/* Fill bd table */
2117*4882a593Smuzhiyun 	addr = mp_req->req_buf_dma;
2118*4882a593Smuzhiyun 	mp_req_bd = mp_req->mp_req_bd;
2119*4882a593Smuzhiyun 	mp_req_bd->sge_addr.lo = U64_LO(addr);
2120*4882a593Smuzhiyun 	mp_req_bd->sge_addr.hi = U64_HI(addr);
2121*4882a593Smuzhiyun 	mp_req_bd->sge_len = QEDF_PAGE_SIZE;
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	/*
2124*4882a593Smuzhiyun 	 * MP buffer is either a task mgmt command or an ELS.
2125*4882a593Smuzhiyun 	 * So the assumption is that it consumes a single bd
2126*4882a593Smuzhiyun 	 * entry in the bd table
2127*4882a593Smuzhiyun 	 */
2128*4882a593Smuzhiyun 	mp_resp_bd = mp_req->mp_resp_bd;
2129*4882a593Smuzhiyun 	addr = mp_req->resp_buf_dma;
2130*4882a593Smuzhiyun 	mp_resp_bd->sge_addr.lo = U64_LO(addr);
2131*4882a593Smuzhiyun 	mp_resp_bd->sge_addr.hi = U64_HI(addr);
2132*4882a593Smuzhiyun 	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun 	return 0;
2135*4882a593Smuzhiyun }
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun /*
2138*4882a593Smuzhiyun  * Last ditch effort to clear the port if it's stuck. Used only after a
2139*4882a593Smuzhiyun  * cleanup task times out.
2140*4882a593Smuzhiyun  */
qedf_drain_request(struct qedf_ctx * qedf)2141*4882a593Smuzhiyun static void qedf_drain_request(struct qedf_ctx *qedf)
2142*4882a593Smuzhiyun {
2143*4882a593Smuzhiyun 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2144*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2145*4882a593Smuzhiyun 		return;
2146*4882a593Smuzhiyun 	}
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	/* Set bit to return all queuecommand requests as busy */
2149*4882a593Smuzhiyun 	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	/* Call qed drain request for function. Should be synchronous */
2152*4882a593Smuzhiyun 	qed_ops->common->drain(qedf->cdev);
2153*4882a593Smuzhiyun 
2154*4882a593Smuzhiyun 	/* Settle time for CQEs to be returned */
2155*4882a593Smuzhiyun 	msleep(100);
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun 	/* Unplug and continue */
2158*4882a593Smuzhiyun 	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2159*4882a593Smuzhiyun }
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun /*
2162*4882a593Smuzhiyun  * Returns SUCCESS if the cleanup task does not timeout, otherwise return
2163*4882a593Smuzhiyun  * FAILURE.
2164*4882a593Smuzhiyun  */
qedf_initiate_cleanup(struct qedf_ioreq * io_req,bool return_scsi_cmd_on_abts)2165*4882a593Smuzhiyun int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2166*4882a593Smuzhiyun 	bool return_scsi_cmd_on_abts)
2167*4882a593Smuzhiyun {
2168*4882a593Smuzhiyun 	struct qedf_rport *fcport;
2169*4882a593Smuzhiyun 	struct qedf_ctx *qedf;
2170*4882a593Smuzhiyun 	int tmo = 0;
2171*4882a593Smuzhiyun 	int rc = SUCCESS;
2172*4882a593Smuzhiyun 	unsigned long flags;
2173*4882a593Smuzhiyun 	struct fcoe_wqe *sqe;
2174*4882a593Smuzhiyun 	u16 sqe_idx;
2175*4882a593Smuzhiyun 	int refcount = 0;
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	fcport = io_req->fcport;
2178*4882a593Smuzhiyun 	if (!fcport) {
2179*4882a593Smuzhiyun 		QEDF_ERR(NULL, "fcport is NULL.\n");
2180*4882a593Smuzhiyun 		return SUCCESS;
2181*4882a593Smuzhiyun 	}
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	/* Sanity check qedf_rport before dereferencing any pointers */
2184*4882a593Smuzhiyun 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2185*4882a593Smuzhiyun 		QEDF_ERR(NULL, "tgt not offloaded\n");
2186*4882a593Smuzhiyun 		return SUCCESS;
2187*4882a593Smuzhiyun 	}
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	qedf = fcport->qedf;
2190*4882a593Smuzhiyun 	if (!qedf) {
2191*4882a593Smuzhiyun 		QEDF_ERR(NULL, "qedf is NULL.\n");
2192*4882a593Smuzhiyun 		return SUCCESS;
2193*4882a593Smuzhiyun 	}
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	if (io_req->cmd_type == QEDF_ELS) {
2196*4882a593Smuzhiyun 		goto process_els;
2197*4882a593Smuzhiyun 	}
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2200*4882a593Smuzhiyun 	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2201*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2202*4882a593Smuzhiyun 			  "cleanup processing or already completed.\n",
2203*4882a593Smuzhiyun 			  io_req->xid);
2204*4882a593Smuzhiyun 		return SUCCESS;
2205*4882a593Smuzhiyun 	}
2206*4882a593Smuzhiyun 	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
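	/* Note: test_and_set_bit() above has already set QEDF_CMD_IN_CLEANUP,
	 * so this set_bit() is redundant (but harmless).
	 */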
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun process_els:
2209*4882a593Smuzhiyun 	/* Ensure room on SQ */
2210*4882a593Smuzhiyun 	if (!atomic_read(&fcport->free_sqes)) {
2211*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2212*4882a593Smuzhiyun 		/* Need to make sure we clear the flag since it was set */
2213*4882a593Smuzhiyun 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2214*4882a593Smuzhiyun 		return FAILED;
2215*4882a593Smuzhiyun 	}
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	if (io_req->cmd_type == QEDF_CLEANUP) {
2218*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
2219*4882a593Smuzhiyun 			 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2220*4882a593Smuzhiyun 			 io_req->xid, io_req->cmd_type);
2221*4882a593Smuzhiyun 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2222*4882a593Smuzhiyun 		return SUCCESS;
2223*4882a593Smuzhiyun 	}
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	refcount = kref_read(&io_req->refcount);
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2228*4882a593Smuzhiyun 		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2229*4882a593Smuzhiyun 		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2230*4882a593Smuzhiyun 		  refcount, fcport, fcport->rdata->ids.port_id);
2231*4882a593Smuzhiyun 
2232*4882a593Smuzhiyun 	/* Cleanup cmds re-use the same TID as the original I/O */
2233*4882a593Smuzhiyun 	io_req->cmd_type = QEDF_CLEANUP;
2234*4882a593Smuzhiyun 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	init_completion(&io_req->cleanup_done);
2237*4882a593Smuzhiyun 
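	/* Post the cleanup WQE on the send queue under the rport lock and
	 * ring the doorbell so the firmware picks it up.
	 */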
2238*4882a593Smuzhiyun 	spin_lock_irqsave(&fcport->rport_lock, flags);
2239*4882a593Smuzhiyun 
2240*4882a593Smuzhiyun 	sqe_idx = qedf_get_sqe_idx(fcport);
2241*4882a593Smuzhiyun 	sqe = &fcport->sq[sqe_idx];
2242*4882a593Smuzhiyun 	memset(sqe, 0, sizeof(struct fcoe_wqe));
2243*4882a593Smuzhiyun 	io_req->task_params->sqe = sqe;
2244*4882a593Smuzhiyun 
2245*4882a593Smuzhiyun 	init_initiator_cleanup_fcoe_task(io_req->task_params);
2246*4882a593Smuzhiyun 	qedf_ring_doorbell(fcport);
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
2249*4882a593Smuzhiyun 
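	/* qedf_process_cleanup_compl() signals cleanup_done once the
	 * firmware posts the cleanup completion CQE.
	 */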
2250*4882a593Smuzhiyun 	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2251*4882a593Smuzhiyun 					  QEDF_CLEANUP_TIMEOUT * HZ);
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	if (!tmo) {
2254*4882a593Smuzhiyun 		rc = FAILED;
2255*4882a593Smuzhiyun 		/* Timeout case */
2256*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
2257*4882a593Smuzhiyun 			  "xid=%x.\n", io_req->xid);
2258*4882a593Smuzhiyun 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2259*4882a593Smuzhiyun 		/* Issue a drain request if cleanup task times out */
2260*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2261*4882a593Smuzhiyun 		qedf_drain_request(qedf);
2262*4882a593Smuzhiyun 	}
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun 	/* If this is a task management request, handle it here; the
2265*4882a593Smuzhiyun 	 * reference will be decreased in qedf_execute_tmf()
2266*4882a593Smuzhiyun 	 */
2267*4882a593Smuzhiyun 	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2268*4882a593Smuzhiyun 	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
2269*4882a593Smuzhiyun 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2270*4882a593Smuzhiyun 		io_req->sc_cmd = NULL;
2271*4882a593Smuzhiyun 		kref_put(&io_req->refcount, qedf_release_cmd);
2272*4882a593Smuzhiyun 		complete(&io_req->tm_done);
2273*4882a593Smuzhiyun 	}
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 	if (io_req->sc_cmd) {
2276*4882a593Smuzhiyun 		if (!io_req->return_scsi_cmd_on_abts)
2277*4882a593Smuzhiyun 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2278*4882a593Smuzhiyun 				  "Not call scsi_done for xid=0x%x.\n",
2279*4882a593Smuzhiyun 				  io_req->xid);
2280*4882a593Smuzhiyun 		if (io_req->return_scsi_cmd_on_abts)
2281*4882a593Smuzhiyun 			qedf_scsi_done(qedf, io_req, DID_ERROR);
2282*4882a593Smuzhiyun 	}
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	if (rc == SUCCESS)
2285*4882a593Smuzhiyun 		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2286*4882a593Smuzhiyun 	else
2287*4882a593Smuzhiyun 		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun 	return rc;
2290*4882a593Smuzhiyun }
2291*4882a593Smuzhiyun 
qedf_process_cleanup_compl(struct qedf_ctx * qedf,struct fcoe_cqe * cqe,struct qedf_ioreq * io_req)2292*4882a593Smuzhiyun void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2293*4882a593Smuzhiyun 	struct qedf_ioreq *io_req)
2294*4882a593Smuzhiyun {
2295*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
2296*4882a593Smuzhiyun 		   io_req->xid);
2297*4882a593Smuzhiyun 
2298*4882a593Smuzhiyun 	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	/* Complete so we can finish cleaning up the I/O */
2301*4882a593Smuzhiyun 	complete(&io_req->cleanup_done);
2302*4882a593Smuzhiyun }
2303*4882a593Smuzhiyun 
qedf_execute_tmf(struct qedf_rport * fcport,struct scsi_cmnd * sc_cmd,uint8_t tm_flags)2304*4882a593Smuzhiyun static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2305*4882a593Smuzhiyun 	uint8_t tm_flags)
2306*4882a593Smuzhiyun {
2307*4882a593Smuzhiyun 	struct qedf_ioreq *io_req;
2308*4882a593Smuzhiyun 	struct e4_fcoe_task_context *task;
2309*4882a593Smuzhiyun 	struct qedf_ctx *qedf = fcport->qedf;
2310*4882a593Smuzhiyun 	struct fc_lport *lport = qedf->lport;
2311*4882a593Smuzhiyun 	int rc = 0;
2312*4882a593Smuzhiyun 	uint16_t xid;
2313*4882a593Smuzhiyun 	int tmo = 0;
2314*4882a593Smuzhiyun 	int lun = 0;
2315*4882a593Smuzhiyun 	unsigned long flags;
2316*4882a593Smuzhiyun 	struct fcoe_wqe *sqe;
2317*4882a593Smuzhiyun 	u16 sqe_idx;
2318*4882a593Smuzhiyun 
2319*4882a593Smuzhiyun 	if (!sc_cmd) {
2320*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
2321*4882a593Smuzhiyun 		return FAILED;
2322*4882a593Smuzhiyun 	}
2323*4882a593Smuzhiyun 
2324*4882a593Smuzhiyun 	lun = (int)sc_cmd->device->lun;
2325*4882a593Smuzhiyun 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2326*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2327*4882a593Smuzhiyun 		rc = FAILED;
2328*4882a593Smuzhiyun 		goto no_flush;
2329*4882a593Smuzhiyun 	}
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun 	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2332*4882a593Smuzhiyun 	if (!io_req) {
2333*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
2334*4882a593Smuzhiyun 		rc = -EAGAIN;
2335*4882a593Smuzhiyun 		goto no_flush;
2336*4882a593Smuzhiyun 	}
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 	if (tm_flags == FCP_TMF_LUN_RESET)
2339*4882a593Smuzhiyun 		qedf->lun_resets++;
2340*4882a593Smuzhiyun 	else if (tm_flags == FCP_TMF_TGT_RESET)
2341*4882a593Smuzhiyun 		qedf->target_resets++;
2342*4882a593Smuzhiyun 
2343*4882a593Smuzhiyun 	/* Initialize rest of io_req fields */
2344*4882a593Smuzhiyun 	io_req->sc_cmd = sc_cmd;
2345*4882a593Smuzhiyun 	io_req->fcport = fcport;
2346*4882a593Smuzhiyun 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	/* Record which cpu this request is associated with */
2349*4882a593Smuzhiyun 	io_req->cpu = smp_processor_id();
2350*4882a593Smuzhiyun 
2351*4882a593Smuzhiyun 	/* Set TM flags */
2352*4882a593Smuzhiyun 	io_req->io_req_flags = QEDF_READ;
2353*4882a593Smuzhiyun 	io_req->data_xfer_len = 0;
2354*4882a593Smuzhiyun 	io_req->tm_flags = tm_flags;
2355*4882a593Smuzhiyun 
2356*4882a593Smuzhiyun 	/* Do not return the SCSI command to the midlayer if this TMF is cleaned up */
2357*4882a593Smuzhiyun 	io_req->return_scsi_cmd_on_abts = false;
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 	/* Obtain exchange id */
2360*4882a593Smuzhiyun 	xid = io_req->xid;
2361*4882a593Smuzhiyun 
2362*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2363*4882a593Smuzhiyun 		   "0x%x\n", xid);
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 	/* Initialize task context for this IO request */
2366*4882a593Smuzhiyun 	task = qedf_get_task_mem(&qedf->tasks, xid);
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun 	init_completion(&io_req->tm_done);
2369*4882a593Smuzhiyun 
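	/* Post the TMF WQE on the send queue under the rport lock and ring
	 * the doorbell.
	 */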
2370*4882a593Smuzhiyun 	spin_lock_irqsave(&fcport->rport_lock, flags);
2371*4882a593Smuzhiyun 
2372*4882a593Smuzhiyun 	sqe_idx = qedf_get_sqe_idx(fcport);
2373*4882a593Smuzhiyun 	sqe = &fcport->sq[sqe_idx];
2374*4882a593Smuzhiyun 	memset(sqe, 0, sizeof(struct fcoe_wqe));
2375*4882a593Smuzhiyun 
2376*4882a593Smuzhiyun 	qedf_init_task(fcport, lport, io_req, task, sqe);
2377*4882a593Smuzhiyun 	qedf_ring_doorbell(fcport);
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
2380*4882a593Smuzhiyun 
2381*4882a593Smuzhiyun 	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
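	/* qedf_process_tmf_compl() parses the FCP_RSP and signals tm_done
	 * when the firmware completes the TMF.
	 */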
2382*4882a593Smuzhiyun 	tmo = wait_for_completion_timeout(&io_req->tm_done,
2383*4882a593Smuzhiyun 	    QEDF_TM_TIMEOUT * HZ);
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 	if (!tmo) {
2386*4882a593Smuzhiyun 		rc = FAILED;
2387*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2388*4882a593Smuzhiyun 		/* Clear outstanding bit since command timed out */
2389*4882a593Smuzhiyun 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2390*4882a593Smuzhiyun 		io_req->sc_cmd = NULL;
2391*4882a593Smuzhiyun 	} else {
2392*4882a593Smuzhiyun 		/* Check TMF response code */
2393*4882a593Smuzhiyun 		if (io_req->fcp_rsp_code == 0)
2394*4882a593Smuzhiyun 			rc = SUCCESS;
2395*4882a593Smuzhiyun 		else
2396*4882a593Smuzhiyun 			rc = FAILED;
2397*4882a593Smuzhiyun 	}
2398*4882a593Smuzhiyun 	/*
2399*4882a593Smuzhiyun 	 * Double check that fcport has not gone into an uploading state before
2400*4882a593Smuzhiyun 	 * executing the command flush for the LUN/target.
2401*4882a593Smuzhiyun 	 */
2402*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2403*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx,
2404*4882a593Smuzhiyun 			 "fcport is uploading, not executing flush.\n");
2405*4882a593Smuzhiyun 		goto no_flush;
2406*4882a593Smuzhiyun 	}
2407*4882a593Smuzhiyun 	/* We do not need this io_req any more */
2408*4882a593Smuzhiyun 	kref_put(&io_req->refcount, qedf_release_cmd);
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	if (tm_flags == FCP_TMF_LUN_RESET)
2412*4882a593Smuzhiyun 		qedf_flush_active_ios(fcport, lun);
2413*4882a593Smuzhiyun 	else
2414*4882a593Smuzhiyun 		qedf_flush_active_ios(fcport, -1);
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun no_flush:
2417*4882a593Smuzhiyun 	if (rc != SUCCESS) {
2418*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2419*4882a593Smuzhiyun 		rc = FAILED;
2420*4882a593Smuzhiyun 	} else {
2421*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2422*4882a593Smuzhiyun 		rc = SUCCESS;
2423*4882a593Smuzhiyun 	}
2424*4882a593Smuzhiyun 	return rc;
2425*4882a593Smuzhiyun }
2426*4882a593Smuzhiyun 
qedf_initiate_tmf(struct scsi_cmnd * sc_cmd,u8 tm_flags)2427*4882a593Smuzhiyun int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2428*4882a593Smuzhiyun {
2429*4882a593Smuzhiyun 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2430*4882a593Smuzhiyun 	struct fc_rport_libfc_priv *rp = rport->dd_data;
2431*4882a593Smuzhiyun 	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2432*4882a593Smuzhiyun 	struct qedf_ctx *qedf;
2433*4882a593Smuzhiyun 	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
2434*4882a593Smuzhiyun 	int rc = SUCCESS;
2435*4882a593Smuzhiyun 	int rval;
2436*4882a593Smuzhiyun 	struct qedf_ioreq *io_req = NULL;
2437*4882a593Smuzhiyun 	int ref_cnt = 0;
2438*4882a593Smuzhiyun 	struct fc_rport_priv *rdata = fcport->rdata;
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 	QEDF_ERR(NULL,
2441*4882a593Smuzhiyun 		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
2442*4882a593Smuzhiyun 		 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
2443*4882a593Smuzhiyun 		 rport->scsi_target_id, (int)sc_cmd->device->lun);
2444*4882a593Smuzhiyun 
2445*4882a593Smuzhiyun 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
2446*4882a593Smuzhiyun 		QEDF_ERR(NULL, "stale rport\n");
2447*4882a593Smuzhiyun 		return FAILED;
2448*4882a593Smuzhiyun 	}
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 	QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
2451*4882a593Smuzhiyun 		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
2452*4882a593Smuzhiyun 		 "LUN RESET");
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun 	if (sc_cmd->SCp.ptr) {
2455*4882a593Smuzhiyun 		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
2456*4882a593Smuzhiyun 		ref_cnt = kref_read(&io_req->refcount);
2457*4882a593Smuzhiyun 		QEDF_ERR(NULL,
2458*4882a593Smuzhiyun 			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2459*4882a593Smuzhiyun 			 io_req, io_req->xid, ref_cnt);
2460*4882a593Smuzhiyun 	}
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun 	rval = fc_remote_port_chkready(rport);
2463*4882a593Smuzhiyun 	if (rval) {
2464*4882a593Smuzhiyun 		QEDF_ERR(NULL, "device_reset rport not ready\n");
2465*4882a593Smuzhiyun 		rc = FAILED;
2466*4882a593Smuzhiyun 		goto tmf_err;
2467*4882a593Smuzhiyun 	}
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun 	rc = fc_block_scsi_eh(sc_cmd);
2470*4882a593Smuzhiyun 	if (rc)
2471*4882a593Smuzhiyun 		goto tmf_err;
2472*4882a593Smuzhiyun 
2473*4882a593Smuzhiyun 	if (!fcport) {
2474*4882a593Smuzhiyun 		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2475*4882a593Smuzhiyun 		rc = FAILED;
2476*4882a593Smuzhiyun 		goto tmf_err;
2477*4882a593Smuzhiyun 	}
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 	qedf = fcport->qedf;
2480*4882a593Smuzhiyun 
2481*4882a593Smuzhiyun 	if (!qedf) {
2482*4882a593Smuzhiyun 		QEDF_ERR(NULL, "qedf is NULL.\n");
2483*4882a593Smuzhiyun 		rc = FAILED;
2484*4882a593Smuzhiyun 		goto tmf_err;
2485*4882a593Smuzhiyun 	}
2486*4882a593Smuzhiyun 
2487*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2488*4882a593Smuzhiyun 		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
2489*4882a593Smuzhiyun 		rc = SUCCESS;
2490*4882a593Smuzhiyun 		goto tmf_err;
2491*4882a593Smuzhiyun 	}
2492*4882a593Smuzhiyun 
2493*4882a593Smuzhiyun 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2494*4882a593Smuzhiyun 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2495*4882a593Smuzhiyun 		rc = SUCCESS;
2496*4882a593Smuzhiyun 		goto tmf_err;
2497*4882a593Smuzhiyun 	}
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2500*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2501*4882a593Smuzhiyun 		rc = FAILED;
2502*4882a593Smuzhiyun 		goto tmf_err;
2503*4882a593Smuzhiyun 	}
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2506*4882a593Smuzhiyun 		if (!fcport->rdata)
2507*4882a593Smuzhiyun 			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2508*4882a593Smuzhiyun 				 fcport);
2509*4882a593Smuzhiyun 		else
2510*4882a593Smuzhiyun 			QEDF_ERR(&qedf->dbg_ctx,
2511*4882a593Smuzhiyun 				 "fcport %p port_id=%06x is uploading.\n",
2512*4882a593Smuzhiyun 				 fcport, fcport->rdata->ids.port_id);
2513*4882a593Smuzhiyun 		rc = FAILED;
2514*4882a593Smuzhiyun 		goto tmf_err;
2515*4882a593Smuzhiyun 	}
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun tmf_err:
2520*4882a593Smuzhiyun 	kref_put(&rdata->kref, fc_rport_destroy);
2521*4882a593Smuzhiyun 	return rc;
2522*4882a593Smuzhiyun }
2523*4882a593Smuzhiyun 
qedf_process_tmf_compl(struct qedf_ctx * qedf,struct fcoe_cqe * cqe,struct qedf_ioreq * io_req)2524*4882a593Smuzhiyun void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2525*4882a593Smuzhiyun 	struct qedf_ioreq *io_req)
2526*4882a593Smuzhiyun {
2527*4882a593Smuzhiyun 	struct fcoe_cqe_rsp_info *fcp_rsp;
2528*4882a593Smuzhiyun 
2529*4882a593Smuzhiyun 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2530*4882a593Smuzhiyun 
2531*4882a593Smuzhiyun 	fcp_rsp = &cqe->cqe_info.rsp_info;
2532*4882a593Smuzhiyun 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
2533*4882a593Smuzhiyun 
2534*4882a593Smuzhiyun 	io_req->sc_cmd = NULL;
2535*4882a593Smuzhiyun 	complete(&io_req->tm_done);
2536*4882a593Smuzhiyun }
2537*4882a593Smuzhiyun 
qedf_process_unsol_compl(struct qedf_ctx * qedf,uint16_t que_idx,struct fcoe_cqe * cqe)2538*4882a593Smuzhiyun void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2539*4882a593Smuzhiyun 	struct fcoe_cqe *cqe)
2540*4882a593Smuzhiyun {
2541*4882a593Smuzhiyun 	unsigned long flags;
2542*4882a593Smuzhiyun 	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2543*4882a593Smuzhiyun 	u32 payload_len, crc;
2544*4882a593Smuzhiyun 	struct fc_frame_header *fh;
2545*4882a593Smuzhiyun 	struct fc_frame *fp;
2546*4882a593Smuzhiyun 	struct qedf_io_work *io_work;
2547*4882a593Smuzhiyun 	u32 bdq_idx;
2548*4882a593Smuzhiyun 	void *bdq_addr;
2549*4882a593Smuzhiyun 	struct scsi_bd *p_bd_info;
2550*4882a593Smuzhiyun 
2551*4882a593Smuzhiyun 	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2552*4882a593Smuzhiyun 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2553*4882a593Smuzhiyun 		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2554*4882a593Smuzhiyun 		  le32_to_cpu(p_bd_info->address.hi),
2555*4882a593Smuzhiyun 		  le32_to_cpu(p_bd_info->address.lo),
2556*4882a593Smuzhiyun 		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2557*4882a593Smuzhiyun 		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2558*4882a593Smuzhiyun 		  qedf->bdq_prod_idx, pktlen);
2559*4882a593Smuzhiyun 
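	/* The opaque data of the BD carries the BDQ buffer index; use it to
	 * locate the receive buffer that holds this unsolicited frame.
	 */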
2560*4882a593Smuzhiyun 	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2561*4882a593Smuzhiyun 	if (bdq_idx >= QEDF_BDQ_SIZE) {
2562*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2563*4882a593Smuzhiyun 		    bdq_idx);
2564*4882a593Smuzhiyun 		goto increment_prod;
2565*4882a593Smuzhiyun 	}
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun 	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2568*4882a593Smuzhiyun 	if (!bdq_addr) {
2569*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2570*4882a593Smuzhiyun 		    "unsolicited packet.\n");
2571*4882a593Smuzhiyun 		goto increment_prod;
2572*4882a593Smuzhiyun 	}
2573*4882a593Smuzhiyun 
2574*4882a593Smuzhiyun 	if (qedf_dump_frames) {
2575*4882a593Smuzhiyun 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2576*4882a593Smuzhiyun 		    "BDQ frame is at addr=%p.\n", bdq_addr);
2577*4882a593Smuzhiyun 		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2578*4882a593Smuzhiyun 		    (void *)bdq_addr, pktlen, false);
2579*4882a593Smuzhiyun 	}
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	/* Allocate frame */
2582*4882a593Smuzhiyun 	payload_len = pktlen - sizeof(struct fc_frame_header);
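	/* fc_frame_alloc() is expected to reserve room for the FC header in
	 * addition to payload_len, so the full header + payload copy below
	 * fits in the frame.
	 */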
2583*4882a593Smuzhiyun 	fp = fc_frame_alloc(qedf->lport, payload_len);
2584*4882a593Smuzhiyun 	if (!fp) {
2585*4882a593Smuzhiyun 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2586*4882a593Smuzhiyun 		goto increment_prod;
2587*4882a593Smuzhiyun 	}
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun 	/* Copy data from BDQ buffer into fc_frame struct */
2590*4882a593Smuzhiyun 	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2591*4882a593Smuzhiyun 	memcpy(fh, (void *)bdq_addr, pktlen);
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun 	QEDF_WARN(&qedf->dbg_ctx,
2594*4882a593Smuzhiyun 		  "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
2595*4882a593Smuzhiyun 		  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2596*4882a593Smuzhiyun 		  fh->fh_type, fc_frame_payload_op(fp));
2597*4882a593Smuzhiyun 
2598*4882a593Smuzhiyun 	/* Initialize the frame so libfc sees it as a valid frame */
2599*4882a593Smuzhiyun 	crc = fcoe_fc_crc(fp);
2600*4882a593Smuzhiyun 	fc_frame_init(fp);
2601*4882a593Smuzhiyun 	fr_dev(fp) = qedf->lport;
2602*4882a593Smuzhiyun 	fr_sof(fp) = FC_SOF_I3;
2603*4882a593Smuzhiyun 	fr_eof(fp) = FC_EOF_T;
2604*4882a593Smuzhiyun 	fr_crc(fp) = cpu_to_le32(~crc);
2605*4882a593Smuzhiyun 
2606*4882a593Smuzhiyun 	/*
2607*4882a593Smuzhiyun 	 * We need to return the frame back up to libfc in a non-atomic
2608*4882a593Smuzhiyun 	 * context
2609*4882a593Smuzhiyun 	 */
2610*4882a593Smuzhiyun 	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2611*4882a593Smuzhiyun 	if (!io_work) {
2612*4882a593Smuzhiyun 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2613*4882a593Smuzhiyun 			   "work for I/O completion.\n");
2614*4882a593Smuzhiyun 		fc_frame_free(fp);
2615*4882a593Smuzhiyun 		goto increment_prod;
2616*4882a593Smuzhiyun 	}
2617*4882a593Smuzhiyun 	memset(io_work, 0, sizeof(struct qedf_io_work));
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 	INIT_WORK(&io_work->work, qedf_fp_io_handler);
2620*4882a593Smuzhiyun 
2621*4882a593Smuzhiyun 	/* Copy contents of CQE for deferred processing */
2622*4882a593Smuzhiyun 	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun 	io_work->qedf = qedf;
2625*4882a593Smuzhiyun 	io_work->fp = fp;
2626*4882a593Smuzhiyun 
2627*4882a593Smuzhiyun 	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2628*4882a593Smuzhiyun increment_prod:
2629*4882a593Smuzhiyun 	spin_lock_irqsave(&qedf->hba_lock, flags);
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 	/* Increment producer to let f/w know we've handled the frame */
2632*4882a593Smuzhiyun 	qedf->bdq_prod_idx++;
2633*4882a593Smuzhiyun 
2634*4882a593Smuzhiyun 	/* Producer index wraps at uint16_t boundary */
2635*4882a593Smuzhiyun 	if (qedf->bdq_prod_idx == 0xffff)
2636*4882a593Smuzhiyun 		qedf->bdq_prod_idx = 0;
2637*4882a593Smuzhiyun 
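	/* Update both BDQ producer doorbells; the readw() calls presumably
	 * flush the posted writes before releasing the lock.
	 */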
2638*4882a593Smuzhiyun 	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2639*4882a593Smuzhiyun 	readw(qedf->bdq_primary_prod);
2640*4882a593Smuzhiyun 	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2641*4882a593Smuzhiyun 	readw(qedf->bdq_secondary_prod);
2642*4882a593Smuzhiyun 
2643*4882a593Smuzhiyun 	spin_unlock_irqrestore(&qedf->hba_lock, flags);
2644*4882a593Smuzhiyun }
2645