/*
 * bnx2fc_els.c: QLogic Linux FCoE offload driver.
 * This file contains helper routines that handle ELS requests
 * and responses.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);

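/*
 * bnx2fc_rrq_compl - completion handler for an RRQ ELS request.
 *
 * Drops the reference held on the original (aborted) IO request. If the
 * RRQ itself timed out, remove it from the active command queue and clean
 * it up with the firmware, dropping the completion.
 */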
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	BUG_ON(!cb_arg);
	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BUG_ON(!orig_io_req);
	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		   orig_io_req->xid, rrq_req->xid);

	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. Remove from active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			   rrq_req->xid);

		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}
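/*
 * bnx2fc_send_rrq - send an RRQ (Reinstate Recovery Qualifier) ELS for an
 * aborted exchange. Retries for up to 10 seconds when bnx2fc_initiate_els()
 * returns -ENOMEM; if the request cannot be sent, the reference held on the
 * aborted IO request is dropped here instead of in the completion handler.
 */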
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{

	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = NULL;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = 0;
	u32 r_a_tov = 0;
	unsigned long start = jiffies;
	int rc;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
		return -EINVAL;

	lport = tgt->rdata->local_port;
	sid = tgt->sid;
	r_a_tov = lport->r_a_tov;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}

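/*
 * bnx2fc_l2_els_compl - completion handler for ELS requests originated by
 * libfc (ADISC, LOGO, RLS). If the request timed out it is cleaned up with
 * the firmware and the completion is dropped; otherwise the response FC
 * header and payload are copied into a flat buffer and handed back to libfc
 * via bnx2fc_process_l2_frame_compl() using the saved l2_oxid.
 */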
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. libfc will handle the els timeout
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}

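/*
 * bnx2fc_send_adisc - send a libfc-built ADISC as a firmware-offloaded ELS
 * request; the response is returned to libfc through bnx2fc_l2_els_compl().
 */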
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	/* adisc is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

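/*
 * bnx2fc_send_logo - send a libfc-built LOGO as a firmware-offloaded ELS
 * request; the response is returned to libfc through bnx2fc_l2_els_compl().
 */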
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_logo *logo;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	logo = fc_frame_payload_get(fp, sizeof(*logo));
	/* logo is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

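/*
 * bnx2fc_send_rls - send a libfc-built RLS as a firmware-offloaded ELS
 * request; the response is returned to libfc through bnx2fc_l2_els_compl().
 */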
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_rls *rls;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	rls = fc_frame_payload_get(fp, sizeof(*rls));
	/* rls is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

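/*
 * bnx2fc_srr_compl - completion handler for an SRR request.
 *
 * On timeout, the SRR exchange is aborted and the SRR is retried up to
 * SRR_RETRY_COUNT times before falling back to aborting the original IO.
 * An LS_RJT response also leads to aborting the original IO; LS_ACC means
 * the target accepted the retransmission request.
 */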
static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timedout */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}

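/*
 * bnx2fc_rec_compl - completion handler for a REC (Read Exchange Concise)
 * ELS request used during sequence level error recovery.
 *
 * On timeout the REC is aborted and retried up to REC_RETRY_COUNT times.
 * An LS_RJT with an OX_ID/RX_ID explanation means the command was lost, so
 * the IO is reposted on a fresh exchange. An LS_ACC reports who holds the
 * sequence initiative and how much data was transferred, from which this
 * handler chooses between sequence cleanup, sending an SRR, or aborting
 * the original IO.
 */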
static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timedout. send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}

	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					" IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}

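/*
 * bnx2fc_send_rec - send a REC ELS to query the state of the exchange
 * backing orig_io_req. A reference is taken on the original IO here and
 * dropped in bnx2fc_rec_compl(), or dropped here if the send fails.
 */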
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
rec_err:
	return rc;
}

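/*
 * bnx2fc_send_srr - send an SRR (Sequence Retransmission Request) FC-4 ELS
 * asking the target to retransmit from the given offset with the given
 * r_ctl. A reference is taken on the original IO here and dropped in
 * bnx2fc_srr_compl(), or dropped here if the send fails.
 */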
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

srr_err:
	return rc;
}

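/*
 * bnx2fc_initiate_els - build and post an ELS request to the firmware.
 *
 * Allocates an ELS command and its middle-path (MP) request buffers, copies
 * the ELS payload, fills the FC header (SRR goes out as an FC-4 ELS with
 * FC_TYPE_FCP), initializes the task context, arms the optional timeout,
 * queues the request on the session SQ and rings the doorbell. cb_func is
 * invoked once the exchange completes or times out.
 */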
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}

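/*
 * bnx2fc_process_els_compl - process the completion of a firmware-offloaded
 * ELS request. Copies the response FC header and payload length out of the
 * task context, cancels the pending timeout, and invokes the per-request
 * callback. If the timer context has already marked the request done, only
 * the reference is dropped.
 */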
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x "
			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			   "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}

#define BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC	1
#define BNX2FC_FCOE_MAC_METHOD_FCF_MAP		2
#define BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC	3
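/*
 * bnx2fc_flogi_resp - FLOGI/FDISC response handler. Chooses the FCoE source
 * MAC (see the comment in the body for the three methods), programs it
 * through fip->update_mac(), and then passes the response on to
 * fc_lport_flogi_resp().
 */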
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;

	struct fc_frame_header *fh;
	u8 *granted_mac;
	u8 fcoe_mac[6];
	u8 fc_map[3];
	int method;

	if (IS_ERR(fp))
		goto done;

	fh = fc_frame_header_get(fp);
	granted_mac = fr_cb(fp)->granted_mac;

	/*
	 * We set the source MAC for FCoE traffic based on the Granted MAC
	 * address from the switch.
	 *
	 * If granted_mac is non-zero, we use that.
	 * If the granted_mac is zeroed out, create the FCoE MAC based on
	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
	 * If sel_fcf->fc_map is 0, then we use the default FCF-MAC plus the
	 * d_id of the FLOGI frame.
	 */
	if (!is_zero_ether_addr(granted_mac)) {
		ether_addr_copy(fcoe_mac, granted_mac);
		method = BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC;
	} else if (fip->sel_fcf && fip->sel_fcf->fc_map != 0) {
		hton24(fc_map, fip->sel_fcf->fc_map);
		fcoe_mac[0] = fc_map[0];
		fcoe_mac[1] = fc_map[1];
		fcoe_mac[2] = fc_map[2];
		fcoe_mac[3] = fh->fh_d_id[0];
		fcoe_mac[4] = fh->fh_d_id[1];
		fcoe_mac[5] = fh->fh_d_id[2];
		method = BNX2FC_FCOE_MAC_METHOD_FCF_MAP;
	} else {
		fc_fcoe_set_mac(fcoe_mac, fh->fh_d_id);
		method = BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC;
	}

	BNX2FC_HBA_DBG(lport, "fcoe_mac=%pM method=%d\n", fcoe_mac, method);
	fip->update_mac(lport, fcoe_mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}

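/*
 * bnx2fc_logo_resp - fabric LOGO response handler. Clears the FCoE source
 * MAC before handing the response to fc_lport_logo_resp().
 */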
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	static u8 zero_mac[ETH_ALEN] = { 0 };

	if (!IS_ERR(fp))
		fip->update_mac(lport, zero_mac);
	fc_lport_logo_resp(seq, fp, lport);
}

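/*
 * bnx2fc_elsct_send - ELS/CT transmit hook for the local port. Intercepts
 * FLOGI/FDISC and fabric LOGO so the FCoE source MAC can be tracked through
 * the response handlers above; all other requests go straight to
 * fc_elsct_send() with the caller's response handler.
 */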
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
				 struct fc_frame *fp, unsigned int op,
				 void (*resp)(struct fc_seq *,
					      struct fc_frame *,
					      void *),
				 void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (op) {
	case ELS_FLOGI:
	case ELS_FDISC:
		return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
				     fip, timeout);
	case ELS_LOGO:
		/* only hook onto fabric logouts, not port logouts */
		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
			break;
		return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
				     fip, timeout);
	}
	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}