// SPDX-License-Identifier: GPL-2.0-only
/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 */
#include "qedf.h"

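/**
 * qedf_initiate_els() - Build and post a middle-path ELS request.
 * @fcport: offloaded remote port to send the ELS to
 * @op: ELS opcode (e.g. ELS_RRQ, ELS_ADISC, ELS_SRR, ELS_REC)
 * @data: ELS payload, copied into the midpath request buffer
 * @data_len: length of @data in bytes
 * @cb_func: completion callback, invoked from qedf_process_els_compl()
 * @cb_arg: callback context; its io_req and op fields are filled in here
 * @timer_msec: ELS timeout in milliseconds, or 0 to skip the timer
 *
 * It's assumed that the lock is held when calling this function.
 *
 * Return: 0 on success, negative errno on failure. On failure the caller
 * retains ownership of @cb_arg and must free it (as the senders below do).
 */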
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	} else {
		rc = 0;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on els request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "req\n");
	qedf_ring_doorbell(fcport);
	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}
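
/*
 * Illustrative sketch only, not part of the driver: every ELS sender in
 * this file follows the same pattern, which a hypothetical helper like the
 * one below would capture. qedf_example_send_els() does not exist in qedf;
 * it is shown purely to document the qedf_initiate_els() calling
 * convention: allocate a cb_arg with GFP_NOIO, hand it to
 * qedf_initiate_els(), and free it yourself if the call fails.
 */
#if 0	/* documentation sketch, not compiled */
static int qedf_example_send_els(struct qedf_rport *fcport, unsigned int op,
	void *payload, uint32_t payload_len,
	void (*done)(struct qedf_els_cb_arg *cb_arg))
{
	struct qedf_els_cb_arg *cb_arg;
	int rc;

	cb_arg = kzalloc(sizeof(*cb_arg), GFP_NOIO);
	if (!cb_arg)
		return -ENOMEM;

	rc = qedf_initiate_els(fcport, op, payload, payload_len, done,
	    cb_arg, fcport->qedf->lport->r_a_tov);
	if (rc)
		kfree(cb_arg);	/* on failure the caller owns cb_arg */
	return rc;
}
#endif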

void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_cqe_midpath_info *mp_info;
	struct qedf_rport *fcport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		|| (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
		|| (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			"ELS completion xid=0x%x after flush event=0x%x",
			els_req->xid, els_req->event);
		return;
	}

	fcport = els_req->fcport;

	/* When flush is active,
	 * let the cmds be completed from the cleanup context
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
		test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			"Dropping ELS completion xid=0x%x as fcport is flushing",
			els_req->xid);
		return;
	}

	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
		goto out_free;
	}

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);
	/*
	 * This should return the aborted io_req to the command pool. Note that
	 * we need to check the refcount in case the original request was
	 * flushed but we get a completion on this xid.
	 */
	if (orig_io_req && refcount > 0)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}

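/*
 * RRQ (Reinstate Recovery Qualifier) is sent after a successful ABTS to let
 * the target release the recovery qualifier, so that the aborted exchange's
 * OX_ID/RX_ID pair can safely be reused after R_A_TOV.
 */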
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;
	int refcount;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "aborted_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	if (!fcport) {
		refcount = kref_read(&aborted_io_req->refcount);
		QEDF_ERR(NULL,
			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
			 aborted_io_req->xid, refcount);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;

	/*
	 * Sanity check that we can send the RRQ: the reference held for the
	 * RRQ must be the only one left on the aborted io_req (refcount == 1).
	 */
	refcount = kref_read(&aborted_io_req->refcount);
	if (refcount != 1) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
			  aborted_io_req->xid, aborted_io_req, refcount);
		return -EINVAL;
	}

	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}

static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* 0x98 = exchange responder, last sequence, end of sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out, we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;
	unsigned long flags;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return;
	}

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
		    fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	rdata = fcport->rdata;
	if (rdata && !kref_get_unless_zero(&rdata->kref)) {
		fcport->rdata = NULL;
		rdata = NULL;
	}

	if (rdata && rdata->rp_state == RPORT_ST_READY) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		kref_put(&rdata->kref, fc_rport_destroy);
		mutex_lock(&lport->disc.disc_mutex);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		mutex_unlock(&lport->disc.disc_mutex);
		if (rdata)
			fc_rport_login(rdata);
		fcport->rdata = rdata;
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}

static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
		QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
			 els_req->xid);
		goto free_arg;
	}

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return the
	 * command. Instead, do any internal cleanup and let libfc time out
	 * the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded.  Force libfc to logout the session which
		 * will upload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		   "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		return;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}

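/*
 * Send an ADISC (Address Discovery) built by libfc over the offloaded
 * connection. The original OX_ID is saved in cb_arg so the response can be
 * handed back to libfc via qedf_l2_els_compl()/qedf_process_l2_frame_compl().
 */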
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}

static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		goto out_free;
	}

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, srr_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If an SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "ELS timeout srr_xid=0x%x.\n", srr_req->xid);
		goto out_put;
	}

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

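/*
 * SRR (Sequence Retransmission Request) is an FCP link service that asks
 * the target to retransmit part of an exchange: @r_ctl selects what is
 * requested again (data, XFER_RDY or the FCP_RSP) and @offset gives the
 * relative offset to restart from. It is the second half of REC-based
 * error recovery.
 */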
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	return rc;
}

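/*
 * Post a firmware sequence-recovery (cleanup) task for a partially
 * transferred exchange. Once the firmware completes the cleanup,
 * qedf_process_seq_cleanup_compl() sends the SRR that actually asks the
 * target to retransmit from @offset.
 */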
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "cqe is NULL or timeout event (0x%x)", io_req->event);
		goto free;
	}

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
		    "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
		    "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}

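/*
 * REC completion handler. Summary of the recovery decisions made below:
 *
 * - LS_RJT with reason "logical error"/"unable" and explanation
 *   "OX_ID/RX_ID unknown": the command never reached the target, so it is
 *   reissued on a new exchange (qedf_requeue_io_req()).
 * - LS_ACC with ESB_ST_SEQ_INIT set: the target still holds the sequence
 *   initiative, so there is nothing to recover yet.
 * - LS_ACC, all data transferred: the FCP_RSP was lost; send an SRR for
 *   the response.
 * - LS_ACC, data (or XFER_RDY) lost mid-transfer: run a sequence cleanup
 *   first, whose completion then sends the SRR.
 */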
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		goto out_free;
	}

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
			 orig_io_req, orig_io_req->xid);
		goto out_put;
	}

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if (!rjt) {
			QEDF_ERR(&qedf->dbg_ctx, "payload get failed");
			goto out_free_frame;
		}

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange.  We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

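/*
 * REC (Read Exchange Concise) queries the target's view of an exchange that
 * appears stuck, typically after a frame was dropped. The reply tells us how
 * far the exchange progressed so qedf_rec_compl() can pick the right
 * recovery action.
 */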
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
	   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
	   orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}