xref: /OK3568_Linux_fs/kernel/drivers/scsi/bnx2fc/bnx2fc_hwi.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
2*4882a593Smuzhiyun  * This file contains the code for the low level functions that interact
3*4882a593Smuzhiyun  * with 57712 FCoE firmware.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2008-2013 Broadcom Corporation
6*4882a593Smuzhiyun  * Copyright (c) 2014-2016 QLogic Corporation
7*4882a593Smuzhiyun  * Copyright (c) 2016-2017 Cavium Inc.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify
10*4882a593Smuzhiyun  * it under the terms of the GNU General Public License as published by
11*4882a593Smuzhiyun  * the Free Software Foundation.
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
14*4882a593Smuzhiyun  */
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include "bnx2fc.h"
17*4882a593Smuzhiyun 
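/* Per-CPU fast path work lists and I/O threads; defined elsewhere in the driver. */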
18*4882a593Smuzhiyun DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
21*4882a593Smuzhiyun 					struct fcoe_kcqe *new_cqe_kcqe);
22*4882a593Smuzhiyun static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
23*4882a593Smuzhiyun 					struct fcoe_kcqe *ofld_kcqe);
24*4882a593Smuzhiyun static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
25*4882a593Smuzhiyun 						struct fcoe_kcqe *ofld_kcqe);
26*4882a593Smuzhiyun static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
27*4882a593Smuzhiyun static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
28*4882a593Smuzhiyun 					struct fcoe_kcqe *destroy_kcqe);
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
31*4882a593Smuzhiyun {
32*4882a593Smuzhiyun 	struct fcoe_kwqe_stat stat_req;
33*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[2];
34*4882a593Smuzhiyun 	int num_kwqes = 1;
35*4882a593Smuzhiyun 	int rc = 0;
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
38*4882a593Smuzhiyun 	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
39*4882a593Smuzhiyun 	stat_req.hdr.flags =
40*4882a593Smuzhiyun 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
41*4882a593Smuzhiyun 
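	/*
	 * 64-bit DMA addresses are passed to the firmware as two 32-bit
	 * halves; this lo/hi split is used throughout this file.
	 */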
42*4882a593Smuzhiyun 	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
43*4882a593Smuzhiyun 	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) &stat_req;
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
48*4882a593Smuzhiyun 		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	return rc;
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun /**
54*4882a593Smuzhiyun  * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
55*4882a593Smuzhiyun  *
56*4882a593Smuzhiyun  * @hba:	adapter structure pointer
57*4882a593Smuzhiyun  *
58*4882a593Smuzhiyun  * Send down FCoE firmware init KWQEs which initiate the initial handshake
59*4882a593Smuzhiyun  *	with the f/w.
60*4882a593Smuzhiyun  *
61*4882a593Smuzhiyun  */
62*4882a593Smuzhiyun int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun 	struct fcoe_kwqe_init1 fcoe_init1;
65*4882a593Smuzhiyun 	struct fcoe_kwqe_init2 fcoe_init2;
66*4882a593Smuzhiyun 	struct fcoe_kwqe_init3 fcoe_init3;
67*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[3];
68*4882a593Smuzhiyun 	int num_kwqes = 3;
69*4882a593Smuzhiyun 	int rc = 0;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	if (!hba->cnic) {
72*4882a593Smuzhiyun 		printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
73*4882a593Smuzhiyun 		return -ENODEV;
74*4882a593Smuzhiyun 	}
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 	/* fill init1 KWQE */
77*4882a593Smuzhiyun 	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
78*4882a593Smuzhiyun 	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
79*4882a593Smuzhiyun 	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
80*4882a593Smuzhiyun 					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	fcoe_init1.num_tasks = hba->max_tasks;
83*4882a593Smuzhiyun 	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
84*4882a593Smuzhiyun 	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
85*4882a593Smuzhiyun 	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
86*4882a593Smuzhiyun 	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
87*4882a593Smuzhiyun 	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
88*4882a593Smuzhiyun 	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
89*4882a593Smuzhiyun 	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
90*4882a593Smuzhiyun 	fcoe_init1.task_list_pbl_addr_hi =
91*4882a593Smuzhiyun 				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
92*4882a593Smuzhiyun 	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun 	fcoe_init1.flags = (PAGE_SHIFT <<
95*4882a593Smuzhiyun 				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	/* fill init2 KWQE */
100*4882a593Smuzhiyun 	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
101*4882a593Smuzhiyun 	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
102*4882a593Smuzhiyun 	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
103*4882a593Smuzhiyun 					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
106*4882a593Smuzhiyun 	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun 	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
110*4882a593Smuzhiyun 	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
111*4882a593Smuzhiyun 					   ((u64) hba->hash_tbl_pbl_dma >> 32);
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
114*4882a593Smuzhiyun 	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
115*4882a593Smuzhiyun 					  ((u64) hba->t2_hash_tbl_dma >> 32);
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
118*4882a593Smuzhiyun 	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
119*4882a593Smuzhiyun 					((u64) hba->t2_hash_tbl_ptr_dma >> 32);
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	/* fill init3 KWQE */
124*4882a593Smuzhiyun 	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
125*4882a593Smuzhiyun 	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
126*4882a593Smuzhiyun 	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
127*4882a593Smuzhiyun 					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
128*4882a593Smuzhiyun 	fcoe_init3.error_bit_map_lo = 0xffffffff;
129*4882a593Smuzhiyun 	fcoe_init3.error_bit_map_hi = 0xffffffff;
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 	/*
132*4882a593Smuzhiyun 	 * enable both cached connection and cached tasks
133*4882a593Smuzhiyun 	 * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
134*4882a593Smuzhiyun 	 */
135*4882a593Smuzhiyun 	fcoe_init3.perf_config = 3;
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
138*4882a593Smuzhiyun 	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
139*4882a593Smuzhiyun 	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
142*4882a593Smuzhiyun 		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	return rc;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun 	struct fcoe_kwqe_destroy fcoe_destroy;
149*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[2];
150*4882a593Smuzhiyun 	int num_kwqes = 1;
151*4882a593Smuzhiyun 	int rc = -1;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	/* fill destroy KWQE */
154*4882a593Smuzhiyun 	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
155*4882a593Smuzhiyun 	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
156*4882a593Smuzhiyun 	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
157*4882a593Smuzhiyun 					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
158*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
161*4882a593Smuzhiyun 		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
162*4882a593Smuzhiyun 	return rc;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun /**
166*4882a593Smuzhiyun  * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
167*4882a593Smuzhiyun  *
168*4882a593Smuzhiyun  * @port:		port structure pointer
169*4882a593Smuzhiyun  * @tgt:		bnx2fc_rport structure pointer
170*4882a593Smuzhiyun  */
171*4882a593Smuzhiyun int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
172*4882a593Smuzhiyun 					struct bnx2fc_rport *tgt)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	struct fc_lport *lport = port->lport;
175*4882a593Smuzhiyun 	struct bnx2fc_interface *interface = port->priv;
176*4882a593Smuzhiyun 	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
177*4882a593Smuzhiyun 	struct bnx2fc_hba *hba = interface->hba;
178*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[4];
179*4882a593Smuzhiyun 	struct fcoe_kwqe_conn_offload1 ofld_req1;
180*4882a593Smuzhiyun 	struct fcoe_kwqe_conn_offload2 ofld_req2;
181*4882a593Smuzhiyun 	struct fcoe_kwqe_conn_offload3 ofld_req3;
182*4882a593Smuzhiyun 	struct fcoe_kwqe_conn_offload4 ofld_req4;
183*4882a593Smuzhiyun 	struct fc_rport_priv *rdata = tgt->rdata;
184*4882a593Smuzhiyun 	struct fc_rport *rport = tgt->rport;
185*4882a593Smuzhiyun 	int num_kwqes = 4;
186*4882a593Smuzhiyun 	u32 port_id;
187*4882a593Smuzhiyun 	int rc = 0;
188*4882a593Smuzhiyun 	u16 conn_id;
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	/* Initialize offload request 1 structure */
191*4882a593Smuzhiyun 	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
194*4882a593Smuzhiyun 	ofld_req1.hdr.flags =
195*4882a593Smuzhiyun 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	conn_id = (u16)tgt->fcoe_conn_id;
199*4882a593Smuzhiyun 	ofld_req1.fcoe_conn_id = conn_id;
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
203*4882a593Smuzhiyun 	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
206*4882a593Smuzhiyun 	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
209*4882a593Smuzhiyun 	ofld_req1.rq_first_pbe_addr_hi =
210*4882a593Smuzhiyun 				(u32)((u64) tgt->rq_dma >> 32);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	ofld_req1.rq_prod = 0x8000;
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	/* Initialize offload request 2 structure */
215*4882a593Smuzhiyun 	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
218*4882a593Smuzhiyun 	ofld_req2.hdr.flags =
219*4882a593Smuzhiyun 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
224*4882a593Smuzhiyun 	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
227*4882a593Smuzhiyun 	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
230*4882a593Smuzhiyun 	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	/* Initialize offload request 3 structure */
233*4882a593Smuzhiyun 	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
236*4882a593Smuzhiyun 	ofld_req3.hdr.flags =
237*4882a593Smuzhiyun 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun 	ofld_req3.vlan_tag = interface->vlan_id <<
240*4882a593Smuzhiyun 				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
241*4882a593Smuzhiyun 	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	port_id = fc_host_port_id(lport->host);
244*4882a593Smuzhiyun 	if (port_id == 0) {
245*4882a593Smuzhiyun 		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
246*4882a593Smuzhiyun 		return -EINVAL;
247*4882a593Smuzhiyun 	}
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	/*
250*4882a593Smuzhiyun 	 * Store the initiator's s_id for later reference. It will be
251*4882a593Smuzhiyun 	 * needed during disable/destroy in linkdown processing, because
252*4882a593Smuzhiyun 	 * the port_id is reset to 0 when the lport is reset.
253*4882a593Smuzhiyun 	 */
254*4882a593Smuzhiyun 	tgt->sid = port_id;
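	/*
	 * FC port IDs are 24 bits wide; pack them LSB first into the
	 * 3-byte s_id/d_id fields.
	 */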
255*4882a593Smuzhiyun 	ofld_req3.s_id[0] = (port_id & 0x000000FF);
256*4882a593Smuzhiyun 	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
257*4882a593Smuzhiyun 	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	port_id = rport->port_id;
260*4882a593Smuzhiyun 	ofld_req3.d_id[0] = (port_id & 0x000000FF);
261*4882a593Smuzhiyun 	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
262*4882a593Smuzhiyun 	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	ofld_req3.tx_total_conc_seqs = rdata->max_seq;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
267*4882a593Smuzhiyun 	ofld_req3.rx_max_fc_pay_len  = lport->mfs;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
270*4882a593Smuzhiyun 	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
271*4882a593Smuzhiyun 	ofld_req3.rx_open_seqs_exch_c3 = 1;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
274*4882a593Smuzhiyun 	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	/* set mul_n_port_ids supported flag to 0, until it is supported */
277*4882a593Smuzhiyun 	ofld_req3.flags = 0;
278*4882a593Smuzhiyun 	/*
279*4882a593Smuzhiyun 	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
280*4882a593Smuzhiyun 			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
281*4882a593Smuzhiyun 	*/
282*4882a593Smuzhiyun 	/* Info from PLOGI response */
283*4882a593Smuzhiyun 	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
284*4882a593Smuzhiyun 			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
287*4882a593Smuzhiyun 			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	/*
290*4882a593Smuzhiyun 	 * Info from PRLI response, this info is used for sequence level error
291*4882a593Smuzhiyun 	 * recovery support
292*4882a593Smuzhiyun 	 */
293*4882a593Smuzhiyun 	if (tgt->dev_type == TYPE_TAPE) {
294*4882a593Smuzhiyun 		ofld_req3.flags |= 1 <<
295*4882a593Smuzhiyun 				    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
296*4882a593Smuzhiyun 		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
297*4882a593Smuzhiyun 				    ? 1 : 0) <<
298*4882a593Smuzhiyun 				    FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
299*4882a593Smuzhiyun 	}
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	/* vlan flag */
302*4882a593Smuzhiyun 	ofld_req3.flags |= (interface->vlan_enabled <<
303*4882a593Smuzhiyun 			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	/* C2_VALID and ACK flags are not set as they are not supported */
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	/* Initialize offload request 4 structure */
309*4882a593Smuzhiyun 	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
310*4882a593Smuzhiyun 	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
311*4882a593Smuzhiyun 	ofld_req4.hdr.flags =
312*4882a593Smuzhiyun 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
313*4882a593Smuzhiyun 
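	/*
	 * lport->e_d_tov is in milliseconds; the firmware timer value
	 * appears to be expressed in 20 ms units.
	 */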
314*4882a593Smuzhiyun 	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 
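	/*
	 * MAC addresses are programmed byte-reversed: the lo/mid/hi pairs
	 * hold octets 5..0 of the local and FCF MAC addresses.
	 */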
317*4882a593Smuzhiyun 	ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
318*4882a593Smuzhiyun 							/* local mac */
319*4882a593Smuzhiyun 	ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
320*4882a593Smuzhiyun 	ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
321*4882a593Smuzhiyun 	ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
322*4882a593Smuzhiyun 	ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
323*4882a593Smuzhiyun 	ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
324*4882a593Smuzhiyun 	ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
325*4882a593Smuzhiyun 							/* fcf mac */
326*4882a593Smuzhiyun 	ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
327*4882a593Smuzhiyun 	ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
328*4882a593Smuzhiyun 	ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
329*4882a593Smuzhiyun 	ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
330*4882a593Smuzhiyun 	ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
333*4882a593Smuzhiyun 	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
336*4882a593Smuzhiyun 	ofld_req4.confq_pbl_base_addr_hi =
337*4882a593Smuzhiyun 					(u32)((u64) tgt->confq_pbl_dma >> 32);
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
340*4882a593Smuzhiyun 	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
341*4882a593Smuzhiyun 	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
342*4882a593Smuzhiyun 	kwqe_arr[3] = (struct kwqe *) &ofld_req4;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
345*4882a593Smuzhiyun 		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	return rc;
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun /**
351*4882a593Smuzhiyun  * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
352*4882a593Smuzhiyun  *
353*4882a593Smuzhiyun  * @port:		port structure pointer
354*4882a593Smuzhiyun  * @tgt:		bnx2fc_rport structure pointer
355*4882a593Smuzhiyun  */
356*4882a593Smuzhiyun int bnx2fc_send_session_enable_req(struct fcoe_port *port,
357*4882a593Smuzhiyun 					struct bnx2fc_rport *tgt)
358*4882a593Smuzhiyun {
359*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[2];
360*4882a593Smuzhiyun 	struct bnx2fc_interface *interface = port->priv;
361*4882a593Smuzhiyun 	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
362*4882a593Smuzhiyun 	struct bnx2fc_hba *hba = interface->hba;
363*4882a593Smuzhiyun 	struct fcoe_kwqe_conn_enable_disable enbl_req;
364*4882a593Smuzhiyun 	struct fc_lport *lport = port->lport;
365*4882a593Smuzhiyun 	struct fc_rport *rport = tgt->rport;
366*4882a593Smuzhiyun 	int num_kwqes = 1;
367*4882a593Smuzhiyun 	int rc = 0;
368*4882a593Smuzhiyun 	u32 port_id;
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 	memset(&enbl_req, 0x00,
371*4882a593Smuzhiyun 	       sizeof(struct fcoe_kwqe_conn_enable_disable));
372*4882a593Smuzhiyun 	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
373*4882a593Smuzhiyun 	enbl_req.hdr.flags =
374*4882a593Smuzhiyun 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
377*4882a593Smuzhiyun 							/* local mac */
378*4882a593Smuzhiyun 	enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
379*4882a593Smuzhiyun 	enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
380*4882a593Smuzhiyun 	enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
381*4882a593Smuzhiyun 	enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
382*4882a593Smuzhiyun 	enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
383*4882a593Smuzhiyun 	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
386*4882a593Smuzhiyun 	enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
387*4882a593Smuzhiyun 	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
388*4882a593Smuzhiyun 	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
389*4882a593Smuzhiyun 	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
390*4882a593Smuzhiyun 	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	port_id = fc_host_port_id(lport->host);
393*4882a593Smuzhiyun 	if (port_id != tgt->sid) {
394*4882a593Smuzhiyun 		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
395*4882a593Smuzhiyun 				"sid = 0x%x\n", port_id, tgt->sid);
396*4882a593Smuzhiyun 		port_id = tgt->sid;
397*4882a593Smuzhiyun 	}
398*4882a593Smuzhiyun 	enbl_req.s_id[0] = (port_id & 0x000000FF);
399*4882a593Smuzhiyun 	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
400*4882a593Smuzhiyun 	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	port_id = rport->port_id;
403*4882a593Smuzhiyun 	enbl_req.d_id[0] = (port_id & 0x000000FF);
404*4882a593Smuzhiyun 	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
405*4882a593Smuzhiyun 	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
406*4882a593Smuzhiyun 	enbl_req.vlan_tag = interface->vlan_id <<
407*4882a593Smuzhiyun 				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
408*4882a593Smuzhiyun 	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
409*4882a593Smuzhiyun 	enbl_req.vlan_flag = interface->vlan_enabled;
410*4882a593Smuzhiyun 	enbl_req.context_id = tgt->context_id;
411*4882a593Smuzhiyun 	enbl_req.conn_id = tgt->fcoe_conn_id;
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) &enbl_req;
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
416*4882a593Smuzhiyun 		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
417*4882a593Smuzhiyun 	return rc;
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun /**
421*4882a593Smuzhiyun  * bnx2fc_send_session_disable_req - initiates FCoE Session disable
422*4882a593Smuzhiyun  *
423*4882a593Smuzhiyun  * @port:		port structure pointer
424*4882a593Smuzhiyun  * @tgt:		bnx2fc_rport structure pointer
425*4882a593Smuzhiyun  */
426*4882a593Smuzhiyun int bnx2fc_send_session_disable_req(struct fcoe_port *port,
427*4882a593Smuzhiyun 				    struct bnx2fc_rport *tgt)
428*4882a593Smuzhiyun {
429*4882a593Smuzhiyun 	struct bnx2fc_interface *interface = port->priv;
430*4882a593Smuzhiyun 	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
431*4882a593Smuzhiyun 	struct bnx2fc_hba *hba = interface->hba;
432*4882a593Smuzhiyun 	struct fcoe_kwqe_conn_enable_disable disable_req;
433*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[2];
434*4882a593Smuzhiyun 	struct fc_rport *rport = tgt->rport;
435*4882a593Smuzhiyun 	int num_kwqes = 1;
436*4882a593Smuzhiyun 	int rc = 0;
437*4882a593Smuzhiyun 	u32 port_id;
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	memset(&disable_req, 0x00,
440*4882a593Smuzhiyun 	       sizeof(struct fcoe_kwqe_conn_enable_disable));
441*4882a593Smuzhiyun 	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
442*4882a593Smuzhiyun 	disable_req.hdr.flags =
443*4882a593Smuzhiyun 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
446*4882a593Smuzhiyun 	disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
447*4882a593Smuzhiyun 	disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
448*4882a593Smuzhiyun 	disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
449*4882a593Smuzhiyun 	disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
450*4882a593Smuzhiyun 	disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
453*4882a593Smuzhiyun 	disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
454*4882a593Smuzhiyun 	disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
455*4882a593Smuzhiyun 	disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
456*4882a593Smuzhiyun 	disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
457*4882a593Smuzhiyun 	disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 	port_id = tgt->sid;
460*4882a593Smuzhiyun 	disable_req.s_id[0] = (port_id & 0x000000FF);
461*4882a593Smuzhiyun 	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
462*4882a593Smuzhiyun 	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun 	port_id = rport->port_id;
466*4882a593Smuzhiyun 	disable_req.d_id[0] = (port_id & 0x000000FF);
467*4882a593Smuzhiyun 	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
468*4882a593Smuzhiyun 	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
469*4882a593Smuzhiyun 	disable_req.context_id = tgt->context_id;
470*4882a593Smuzhiyun 	disable_req.conn_id = tgt->fcoe_conn_id;
471*4882a593Smuzhiyun 	disable_req.vlan_tag = interface->vlan_id <<
472*4882a593Smuzhiyun 				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
473*4882a593Smuzhiyun 	disable_req.vlan_tag |=
474*4882a593Smuzhiyun 			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
475*4882a593Smuzhiyun 	disable_req.vlan_flag = interface->vlan_enabled;
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) &disable_req;
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
480*4882a593Smuzhiyun 		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	return rc;
483*4882a593Smuzhiyun }
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun /**
486*4882a593Smuzhiyun  * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
487*4882a593Smuzhiyun  *
488*4882a593Smuzhiyun  * @hba:		adapter structure pointer
489*4882a593Smuzhiyun  * @tgt:		bnx2fc_rport structure pointer
490*4882a593Smuzhiyun  */
491*4882a593Smuzhiyun int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
492*4882a593Smuzhiyun 					struct bnx2fc_rport *tgt)
493*4882a593Smuzhiyun {
494*4882a593Smuzhiyun 	struct fcoe_kwqe_conn_destroy destroy_req;
495*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[2];
496*4882a593Smuzhiyun 	int num_kwqes = 1;
497*4882a593Smuzhiyun 	int rc = 0;
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
500*4882a593Smuzhiyun 	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
501*4882a593Smuzhiyun 	destroy_req.hdr.flags =
502*4882a593Smuzhiyun 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 	destroy_req.context_id = tgt->context_id;
505*4882a593Smuzhiyun 	destroy_req.conn_id = tgt->fcoe_conn_id;
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) &destroy_req;
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
510*4882a593Smuzhiyun 		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 	return rc;
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun 
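/* is_valid_lport - check whether @lport is still on the hba's vport list */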
515*4882a593Smuzhiyun static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
516*4882a593Smuzhiyun {
517*4882a593Smuzhiyun 	struct bnx2fc_lport *blport;
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	spin_lock_bh(&hba->hba_lock);
520*4882a593Smuzhiyun 	list_for_each_entry(blport, &hba->vports, list) {
521*4882a593Smuzhiyun 		if (blport->lport == lport) {
522*4882a593Smuzhiyun 			spin_unlock_bh(&hba->hba_lock);
523*4882a593Smuzhiyun 			return true;
524*4882a593Smuzhiyun 		}
525*4882a593Smuzhiyun 	}
526*4882a593Smuzhiyun 	spin_unlock_bh(&hba->hba_lock);
527*4882a593Smuzhiyun 	return false;
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun }
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun static void bnx2fc_unsol_els_work(struct work_struct *work)
533*4882a593Smuzhiyun {
534*4882a593Smuzhiyun 	struct bnx2fc_unsol_els *unsol_els;
535*4882a593Smuzhiyun 	struct fc_lport *lport;
536*4882a593Smuzhiyun 	struct bnx2fc_hba *hba;
537*4882a593Smuzhiyun 	struct fc_frame *fp;
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
540*4882a593Smuzhiyun 	lport = unsol_els->lport;
541*4882a593Smuzhiyun 	fp = unsol_els->fp;
542*4882a593Smuzhiyun 	hba = unsol_els->hba;
543*4882a593Smuzhiyun 	if (is_valid_lport(hba, lport))
544*4882a593Smuzhiyun 		fc_exch_recv(lport, fp);
545*4882a593Smuzhiyun 	kfree(unsol_els);
546*4882a593Smuzhiyun }
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
549*4882a593Smuzhiyun 				   unsigned char *buf,
550*4882a593Smuzhiyun 				   u32 frame_len, u16 l2_oxid)
551*4882a593Smuzhiyun {
552*4882a593Smuzhiyun 	struct fcoe_port *port = tgt->port;
553*4882a593Smuzhiyun 	struct fc_lport *lport = port->lport;
554*4882a593Smuzhiyun 	struct bnx2fc_interface *interface = port->priv;
555*4882a593Smuzhiyun 	struct bnx2fc_unsol_els *unsol_els;
556*4882a593Smuzhiyun 	struct fc_frame_header *fh;
557*4882a593Smuzhiyun 	struct fc_frame *fp;
558*4882a593Smuzhiyun 	struct sk_buff *skb;
559*4882a593Smuzhiyun 	u32 payload_len;
560*4882a593Smuzhiyun 	u32 crc;
561*4882a593Smuzhiyun 	u8 op;
562*4882a593Smuzhiyun 
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
565*4882a593Smuzhiyun 	if (!unsol_els) {
566*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
567*4882a593Smuzhiyun 		return;
568*4882a593Smuzhiyun 	}
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
571*4882a593Smuzhiyun 		l2_oxid, frame_len);
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	payload_len = frame_len - sizeof(struct fc_frame_header);
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	fp = fc_frame_alloc(lport, payload_len);
576*4882a593Smuzhiyun 	if (!fp) {
577*4882a593Smuzhiyun 		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
578*4882a593Smuzhiyun 		kfree(unsol_els);
579*4882a593Smuzhiyun 		return;
580*4882a593Smuzhiyun 	}
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
583*4882a593Smuzhiyun 	/* Copy FC Frame header and payload into the frame */
584*4882a593Smuzhiyun 	memcpy(fh, buf, frame_len);
585*4882a593Smuzhiyun 
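	/*
	 * Restore the original exchange OX_ID (if known) so libfc can
	 * match this frame to its exchange.
	 */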
586*4882a593Smuzhiyun 	if (l2_oxid != FC_XID_UNKNOWN)
587*4882a593Smuzhiyun 		fh->fh_ox_id = htons(l2_oxid);
588*4882a593Smuzhiyun 
589*4882a593Smuzhiyun 	skb = fp_skb(fp);
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
592*4882a593Smuzhiyun 	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 		if (fh->fh_type == FC_TYPE_ELS) {
595*4882a593Smuzhiyun 			op = fc_frame_payload_op(fp);
596*4882a593Smuzhiyun 			if ((op == ELS_TEST) ||	(op == ELS_ESTC) ||
597*4882a593Smuzhiyun 			    (op == ELS_FAN) || (op == ELS_CSU)) {
598*4882a593Smuzhiyun 				/*
599*4882a593Smuzhiyun 				 * No need to reply for these
600*4882a593Smuzhiyun 				 * ELS requests
601*4882a593Smuzhiyun 				 */
602*4882a593Smuzhiyun 				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
603*4882a593Smuzhiyun 				kfree_skb(skb);
604*4882a593Smuzhiyun 				kfree(unsol_els);
605*4882a593Smuzhiyun 				return;
606*4882a593Smuzhiyun 			}
607*4882a593Smuzhiyun 		}
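		/*
		 * Rebuild the libfc frame metadata (CRC, SOF/EOF, owning
		 * lport) and defer fc_exch_recv() to a worker thread.
		 */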
608*4882a593Smuzhiyun 		crc = fcoe_fc_crc(fp);
609*4882a593Smuzhiyun 		fc_frame_init(fp);
610*4882a593Smuzhiyun 		fr_dev(fp) = lport;
611*4882a593Smuzhiyun 		fr_sof(fp) = FC_SOF_I3;
612*4882a593Smuzhiyun 		fr_eof(fp) = FC_EOF_T;
613*4882a593Smuzhiyun 		fr_crc(fp) = cpu_to_le32(~crc);
614*4882a593Smuzhiyun 		unsol_els->lport = lport;
615*4882a593Smuzhiyun 		unsol_els->hba = interface->hba;
616*4882a593Smuzhiyun 		unsol_els->fp = fp;
617*4882a593Smuzhiyun 		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
618*4882a593Smuzhiyun 		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
619*4882a593Smuzhiyun 	} else {
620*4882a593Smuzhiyun 		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
621*4882a593Smuzhiyun 		kfree_skb(skb);
622*4882a593Smuzhiyun 		kfree(unsol_els);
623*4882a593Smuzhiyun 	}
624*4882a593Smuzhiyun }
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
627*4882a593Smuzhiyun {
628*4882a593Smuzhiyun 	u8 num_rq;
629*4882a593Smuzhiyun 	struct fcoe_err_report_entry *err_entry;
630*4882a593Smuzhiyun 	unsigned char *rq_data;
631*4882a593Smuzhiyun 	unsigned char *buf = NULL, *buf1;
632*4882a593Smuzhiyun 	int i;
633*4882a593Smuzhiyun 	u16 xid;
634*4882a593Smuzhiyun 	u32 frame_len, len;
635*4882a593Smuzhiyun 	struct bnx2fc_cmd *io_req = NULL;
636*4882a593Smuzhiyun 	struct bnx2fc_interface *interface = tgt->port->priv;
637*4882a593Smuzhiyun 	struct bnx2fc_hba *hba = interface->hba;
638*4882a593Smuzhiyun 	int rc = 0;
639*4882a593Smuzhiyun 	u64 err_warn_bit_map;
640*4882a593Smuzhiyun 	u8 err_warn = 0xff;
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
644*4882a593Smuzhiyun 	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
645*4882a593Smuzhiyun 	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
646*4882a593Smuzhiyun 		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
647*4882a593Smuzhiyun 			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 		spin_lock_bh(&tgt->tgt_lock);
652*4882a593Smuzhiyun 		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
653*4882a593Smuzhiyun 		spin_unlock_bh(&tgt->tgt_lock);
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 		if (rq_data) {
656*4882a593Smuzhiyun 			buf = rq_data;
657*4882a593Smuzhiyun 		} else {
658*4882a593Smuzhiyun 			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
659*4882a593Smuzhiyun 					      GFP_ATOMIC);
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun 			if (!buf1) {
662*4882a593Smuzhiyun 				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
663*4882a593Smuzhiyun 				break;
664*4882a593Smuzhiyun 			}
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 			for (i = 0; i < num_rq; i++) {
667*4882a593Smuzhiyun 				spin_lock_bh(&tgt->tgt_lock);
668*4882a593Smuzhiyun 				rq_data = (unsigned char *)
669*4882a593Smuzhiyun 					   bnx2fc_get_next_rqe(tgt, 1);
670*4882a593Smuzhiyun 				spin_unlock_bh(&tgt->tgt_lock);
671*4882a593Smuzhiyun 				len = BNX2FC_RQ_BUF_SZ;
672*4882a593Smuzhiyun 				memcpy(buf1, rq_data, len);
673*4882a593Smuzhiyun 				buf1 += len;
674*4882a593Smuzhiyun 			}
675*4882a593Smuzhiyun 		}
676*4882a593Smuzhiyun 		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
677*4882a593Smuzhiyun 					      FC_XID_UNKNOWN);
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 		if (buf != rq_data)
680*4882a593Smuzhiyun 			kfree(buf);
681*4882a593Smuzhiyun 		spin_lock_bh(&tgt->tgt_lock);
682*4882a593Smuzhiyun 		bnx2fc_return_rqe(tgt, num_rq);
683*4882a593Smuzhiyun 		spin_unlock_bh(&tgt->tgt_lock);
684*4882a593Smuzhiyun 		break;
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	case FCOE_ERROR_DETECTION_CQE_TYPE:
687*4882a593Smuzhiyun 		/*
688*4882a593Smuzhiyun 		 * In case of error reporting CQE a single RQ entry
689*4882a593Smuzhiyun 		 * is consumed.
690*4882a593Smuzhiyun 		 */
691*4882a593Smuzhiyun 		spin_lock_bh(&tgt->tgt_lock);
692*4882a593Smuzhiyun 		num_rq = 1;
693*4882a593Smuzhiyun 		err_entry = (struct fcoe_err_report_entry *)
694*4882a593Smuzhiyun 			     bnx2fc_get_next_rqe(tgt, 1);
695*4882a593Smuzhiyun 		xid = err_entry->fc_hdr.ox_id;
696*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
697*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
698*4882a593Smuzhiyun 			err_entry->data.err_warn_bitmap_hi,
699*4882a593Smuzhiyun 			err_entry->data.err_warn_bitmap_lo);
700*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
701*4882a593Smuzhiyun 			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 		if (xid > hba->max_xid) {
704*4882a593Smuzhiyun 			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
705*4882a593Smuzhiyun 				   xid);
706*4882a593Smuzhiyun 			goto ret_err_rqe;
707*4882a593Smuzhiyun 		}
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
711*4882a593Smuzhiyun 		if (!io_req)
712*4882a593Smuzhiyun 			goto ret_err_rqe;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
715*4882a593Smuzhiyun 			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
716*4882a593Smuzhiyun 			goto ret_err_rqe;
717*4882a593Smuzhiyun 		}
718*4882a593Smuzhiyun 
719*4882a593Smuzhiyun 		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
720*4882a593Smuzhiyun 				       &io_req->req_flags)) {
721*4882a593Smuzhiyun 			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
722*4882a593Smuzhiyun 					    "progress.. ignore unsol err\n");
723*4882a593Smuzhiyun 			goto ret_err_rqe;
724*4882a593Smuzhiyun 		}
725*4882a593Smuzhiyun 
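		/* report the lowest numbered error set in the 64-bit err_warn bitmap */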
726*4882a593Smuzhiyun 		err_warn_bit_map = (u64)
727*4882a593Smuzhiyun 			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
728*4882a593Smuzhiyun 			(u64)err_entry->data.err_warn_bitmap_lo;
729*4882a593Smuzhiyun 		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
730*4882a593Smuzhiyun 			if (err_warn_bit_map & (u64)((u64)1 << i)) {
731*4882a593Smuzhiyun 				err_warn = i;
732*4882a593Smuzhiyun 				break;
733*4882a593Smuzhiyun 			}
734*4882a593Smuzhiyun 		}
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun 		/*
737*4882a593Smuzhiyun 		 * If ABTS is already in progress, and FW error is
738*4882a593Smuzhiyun 		 * received after that, do not cancel the timeout_work
739*4882a593Smuzhiyun 		 * and let the error recovery continue by explicitly
740*4882a593Smuzhiyun 		 * logging out the target, when the ABTS eventually
741*4882a593Smuzhiyun 		 * times out.
742*4882a593Smuzhiyun 		 */
743*4882a593Smuzhiyun 		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
744*4882a593Smuzhiyun 			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
745*4882a593Smuzhiyun 					    "in ABTS processing\n", xid);
746*4882a593Smuzhiyun 			goto ret_err_rqe;
747*4882a593Smuzhiyun 		}
748*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
749*4882a593Smuzhiyun 		if (tgt->dev_type != TYPE_TAPE)
750*4882a593Smuzhiyun 			goto skip_rec;
751*4882a593Smuzhiyun 		switch (err_warn) {
752*4882a593Smuzhiyun 		case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
753*4882a593Smuzhiyun 		case FCOE_ERROR_CODE_DATA_OOO_RO:
754*4882a593Smuzhiyun 		case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
755*4882a593Smuzhiyun 		case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
756*4882a593Smuzhiyun 		case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
757*4882a593Smuzhiyun 		case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
758*4882a593Smuzhiyun 			BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
759*4882a593Smuzhiyun 				   xid);
760*4882a593Smuzhiyun 			memcpy(&io_req->err_entry, err_entry,
761*4882a593Smuzhiyun 			       sizeof(struct fcoe_err_report_entry));
762*4882a593Smuzhiyun 			if (!test_bit(BNX2FC_FLAG_SRR_SENT,
763*4882a593Smuzhiyun 				      &io_req->req_flags)) {
764*4882a593Smuzhiyun 				spin_unlock_bh(&tgt->tgt_lock);
765*4882a593Smuzhiyun 				rc = bnx2fc_send_rec(io_req);
766*4882a593Smuzhiyun 				spin_lock_bh(&tgt->tgt_lock);
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 				if (rc)
769*4882a593Smuzhiyun 					goto skip_rec;
770*4882a593Smuzhiyun 			} else
771*4882a593Smuzhiyun 				printk(KERN_ERR PFX "SRR in progress\n");
772*4882a593Smuzhiyun 			goto ret_err_rqe;
773*4882a593Smuzhiyun 			break;
774*4882a593Smuzhiyun 		default:
775*4882a593Smuzhiyun 			break;
776*4882a593Smuzhiyun 		}
777*4882a593Smuzhiyun 
778*4882a593Smuzhiyun skip_rec:
779*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
780*4882a593Smuzhiyun 		/*
781*4882a593Smuzhiyun 		 * Cancel the timeout_work, as we received IO
782*4882a593Smuzhiyun 		 * completion with FW error.
783*4882a593Smuzhiyun 		 */
784*4882a593Smuzhiyun 		if (cancel_delayed_work(&io_req->timeout_work))
785*4882a593Smuzhiyun 			kref_put(&io_req->refcount, bnx2fc_cmd_release);
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 		rc = bnx2fc_initiate_abts(io_req);
788*4882a593Smuzhiyun 		if (rc != SUCCESS) {
789*4882a593Smuzhiyun 			printk(KERN_ERR PFX "err_warn: initiate_abts "
790*4882a593Smuzhiyun 				"failed xid = 0x%x. issue cleanup\n",
791*4882a593Smuzhiyun 				io_req->xid);
792*4882a593Smuzhiyun 			bnx2fc_initiate_cleanup(io_req);
793*4882a593Smuzhiyun 		}
794*4882a593Smuzhiyun ret_err_rqe:
795*4882a593Smuzhiyun 		bnx2fc_return_rqe(tgt, 1);
796*4882a593Smuzhiyun 		spin_unlock_bh(&tgt->tgt_lock);
797*4882a593Smuzhiyun 		break;
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 	case FCOE_WARNING_DETECTION_CQE_TYPE:
800*4882a593Smuzhiyun 		/*
801*4882a593Smuzhiyun 		 *In case of warning reporting CQE a single RQ entry
802*4882a593Smuzhiyun 		 * is consumes.
803*4882a593Smuzhiyun 		 */
804*4882a593Smuzhiyun 		spin_lock_bh(&tgt->tgt_lock);
805*4882a593Smuzhiyun 		num_rq = 1;
806*4882a593Smuzhiyun 		err_entry = (struct fcoe_err_report_entry *)
807*4882a593Smuzhiyun 			     bnx2fc_get_next_rqe(tgt, 1);
808*4882a593Smuzhiyun 		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
809*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
810*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
811*4882a593Smuzhiyun 			err_entry->data.err_warn_bitmap_hi,
812*4882a593Smuzhiyun 			err_entry->data.err_warn_bitmap_lo);
813*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
814*4882a593Smuzhiyun 			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 		if (xid > hba->max_xid) {
817*4882a593Smuzhiyun 			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
818*4882a593Smuzhiyun 			goto ret_warn_rqe;
819*4882a593Smuzhiyun 		}
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 		err_warn_bit_map = (u64)
822*4882a593Smuzhiyun 			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
823*4882a593Smuzhiyun 			(u64)err_entry->data.err_warn_bitmap_lo;
824*4882a593Smuzhiyun 		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
825*4882a593Smuzhiyun 			if (err_warn_bit_map & ((u64)1 << i)) {
826*4882a593Smuzhiyun 				err_warn = i;
827*4882a593Smuzhiyun 				break;
828*4882a593Smuzhiyun 			}
829*4882a593Smuzhiyun 		}
830*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
833*4882a593Smuzhiyun 		if (!io_req)
834*4882a593Smuzhiyun 			goto ret_warn_rqe;
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
837*4882a593Smuzhiyun 			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
838*4882a593Smuzhiyun 			goto ret_warn_rqe;
839*4882a593Smuzhiyun 		}
840*4882a593Smuzhiyun 
841*4882a593Smuzhiyun 		memcpy(&io_req->err_entry, err_entry,
842*4882a593Smuzhiyun 		       sizeof(struct fcoe_err_report_entry));
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 		if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
845*4882a593Smuzhiyun 			/* REC_TOV is not a warning code */
846*4882a593Smuzhiyun 			BUG_ON(1);
847*4882a593Smuzhiyun 		else
848*4882a593Smuzhiyun 			BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
849*4882a593Smuzhiyun ret_warn_rqe:
850*4882a593Smuzhiyun 		bnx2fc_return_rqe(tgt, 1);
851*4882a593Smuzhiyun 		spin_unlock_bh(&tgt->tgt_lock);
852*4882a593Smuzhiyun 		break;
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun 	default:
855*4882a593Smuzhiyun 		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
856*4882a593Smuzhiyun 		break;
857*4882a593Smuzhiyun 	}
858*4882a593Smuzhiyun }
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
861*4882a593Smuzhiyun 			     unsigned char *rq_data, u8 num_rq,
862*4882a593Smuzhiyun 			     struct fcoe_task_ctx_entry *task)
863*4882a593Smuzhiyun {
864*4882a593Smuzhiyun 	struct fcoe_port *port = tgt->port;
865*4882a593Smuzhiyun 	struct bnx2fc_interface *interface = port->priv;
866*4882a593Smuzhiyun 	struct bnx2fc_hba *hba = interface->hba;
867*4882a593Smuzhiyun 	struct bnx2fc_cmd *io_req;
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	u16 xid;
870*4882a593Smuzhiyun 	u8  cmd_type;
871*4882a593Smuzhiyun 	u8 rx_state = 0;
872*4882a593Smuzhiyun 
873*4882a593Smuzhiyun 	spin_lock_bh(&tgt->tgt_lock);
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
876*4882a593Smuzhiyun 	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun 	if (io_req == NULL) {
879*4882a593Smuzhiyun 		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
880*4882a593Smuzhiyun 		spin_unlock_bh(&tgt->tgt_lock);
881*4882a593Smuzhiyun 		return;
882*4882a593Smuzhiyun 	}
883*4882a593Smuzhiyun 
884*4882a593Smuzhiyun 	/* Timestamp IO completion time */
885*4882a593Smuzhiyun 	cmd_type = io_req->cmd_type;
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
888*4882a593Smuzhiyun 		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
889*4882a593Smuzhiyun 		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	/* Process other IO completion types */
892*4882a593Smuzhiyun 	switch (cmd_type) {
893*4882a593Smuzhiyun 	case BNX2FC_SCSI_CMD:
894*4882a593Smuzhiyun 		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
895*4882a593Smuzhiyun 			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
896*4882a593Smuzhiyun 						      rq_data);
897*4882a593Smuzhiyun 			spin_unlock_bh(&tgt->tgt_lock);
898*4882a593Smuzhiyun 			return;
899*4882a593Smuzhiyun 		}
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
902*4882a593Smuzhiyun 			bnx2fc_process_abts_compl(io_req, task, num_rq);
903*4882a593Smuzhiyun 		else if (rx_state ==
904*4882a593Smuzhiyun 			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
905*4882a593Smuzhiyun 			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
906*4882a593Smuzhiyun 		else
907*4882a593Smuzhiyun 			printk(KERN_ERR PFX "Invalid rx state - %d\n",
908*4882a593Smuzhiyun 				rx_state);
909*4882a593Smuzhiyun 		break;
910*4882a593Smuzhiyun 
911*4882a593Smuzhiyun 	case BNX2FC_TASK_MGMT_CMD:
912*4882a593Smuzhiyun 		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
913*4882a593Smuzhiyun 		bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
914*4882a593Smuzhiyun 		break;
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	case BNX2FC_ABTS:
917*4882a593Smuzhiyun 		/*
918*4882a593Smuzhiyun 		 * ABTS request received by firmware. ABTS response
919*4882a593Smuzhiyun 		 * will be delivered to the task belonging to the IO
920*4882a593Smuzhiyun 		 * that was aborted
921*4882a593Smuzhiyun 		 */
922*4882a593Smuzhiyun 		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
923*4882a593Smuzhiyun 		kref_put(&io_req->refcount, bnx2fc_cmd_release);
924*4882a593Smuzhiyun 		break;
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	case BNX2FC_ELS:
927*4882a593Smuzhiyun 		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
928*4882a593Smuzhiyun 			bnx2fc_process_els_compl(io_req, task, num_rq);
929*4882a593Smuzhiyun 		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
930*4882a593Smuzhiyun 			bnx2fc_process_abts_compl(io_req, task, num_rq);
931*4882a593Smuzhiyun 		else if (rx_state ==
932*4882a593Smuzhiyun 			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
933*4882a593Smuzhiyun 			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
934*4882a593Smuzhiyun 		else
935*4882a593Smuzhiyun 			printk(KERN_ERR PFX "Invalid rx state =  %d\n",
936*4882a593Smuzhiyun 				rx_state);
937*4882a593Smuzhiyun 		break;
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 	case BNX2FC_CLEANUP:
940*4882a593Smuzhiyun 		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
941*4882a593Smuzhiyun 		kref_put(&io_req->refcount, bnx2fc_cmd_release);
942*4882a593Smuzhiyun 		break;
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 	case BNX2FC_SEQ_CLEANUP:
945*4882a593Smuzhiyun 		BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
946*4882a593Smuzhiyun 			      io_req->xid);
947*4882a593Smuzhiyun 		bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
948*4882a593Smuzhiyun 		kref_put(&io_req->refcount, bnx2fc_cmd_release);
949*4882a593Smuzhiyun 		break;
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	default:
952*4882a593Smuzhiyun 		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
953*4882a593Smuzhiyun 		break;
954*4882a593Smuzhiyun 	}
955*4882a593Smuzhiyun 	spin_unlock_bh(&tgt->tgt_lock);
956*4882a593Smuzhiyun }
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
959*4882a593Smuzhiyun {
960*4882a593Smuzhiyun 	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
961*4882a593Smuzhiyun 	u32 msg;
962*4882a593Smuzhiyun 
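	/* ensure all CQ processing writes are visible before ringing the doorbell */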
963*4882a593Smuzhiyun 	wmb();
964*4882a593Smuzhiyun 	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
965*4882a593Smuzhiyun 			FCOE_CQE_TOGGLE_BIT_SHIFT);
966*4882a593Smuzhiyun 	msg = *((u32 *)rx_db);
967*4882a593Smuzhiyun 	writel(cpu_to_le32(msg), tgt->ctx_base);
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun }
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
972*4882a593Smuzhiyun 					     unsigned char *rq_data, u8 num_rq,
973*4882a593Smuzhiyun 					     struct fcoe_task_ctx_entry *task)
974*4882a593Smuzhiyun {
975*4882a593Smuzhiyun 	struct bnx2fc_work *work;
976*4882a593Smuzhiyun 	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
977*4882a593Smuzhiyun 	if (!work)
978*4882a593Smuzhiyun 		return NULL;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	INIT_LIST_HEAD(&work->list);
981*4882a593Smuzhiyun 	work->tgt = tgt;
982*4882a593Smuzhiyun 	work->wqe = wqe;
983*4882a593Smuzhiyun 	work->num_rq = num_rq;
984*4882a593Smuzhiyun 	work->task = task;
985*4882a593Smuzhiyun 	if (rq_data)
986*4882a593Smuzhiyun 		memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	return work;
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun /* Pending work request completion */
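/*
 * Hand the CQE off to the per-CPU I/O thread when one is available;
 * otherwise process the completion inline.
 */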
992*4882a593Smuzhiyun static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun 	unsigned int cpu = wqe % num_possible_cpus();
995*4882a593Smuzhiyun 	struct bnx2fc_percpu_s *fps;
996*4882a593Smuzhiyun 	struct bnx2fc_work *work;
997*4882a593Smuzhiyun 	struct fcoe_task_ctx_entry *task;
998*4882a593Smuzhiyun 	struct fcoe_task_ctx_entry *task_page;
999*4882a593Smuzhiyun 	struct fcoe_port *port = tgt->port;
1000*4882a593Smuzhiyun 	struct bnx2fc_interface *interface = port->priv;
1001*4882a593Smuzhiyun 	struct bnx2fc_hba *hba = interface->hba;
1002*4882a593Smuzhiyun 	unsigned char *rq_data = NULL;
1003*4882a593Smuzhiyun 	unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
1004*4882a593Smuzhiyun 	int task_idx, index;
1005*4882a593Smuzhiyun 	u16 xid;
1006*4882a593Smuzhiyun 	u8 num_rq;
1007*4882a593Smuzhiyun 	int i;
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun 	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
1010*4882a593Smuzhiyun 	if (xid >= hba->max_tasks) {
1011*4882a593Smuzhiyun 		pr_err(PFX "ERROR:xid out of range\n");
1012*4882a593Smuzhiyun 		return false;
1013*4882a593Smuzhiyun 	}
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
1016*4882a593Smuzhiyun 	index = xid % BNX2FC_TASKS_PER_PAGE;
1017*4882a593Smuzhiyun 	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
1018*4882a593Smuzhiyun 	task = &task_page[index];
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun 	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
1021*4882a593Smuzhiyun 		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
1022*4882a593Smuzhiyun 		  FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun 	memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	if (!num_rq)
1027*4882a593Smuzhiyun 		goto num_rq_zero;
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun 	rq_data = bnx2fc_get_next_rqe(tgt, 1);
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 	if (num_rq > 1) {
1032*4882a593Smuzhiyun 		/* We do not need extra sense data */
1033*4882a593Smuzhiyun 		for (i = 1; i < num_rq; i++)
1034*4882a593Smuzhiyun 			bnx2fc_get_next_rqe(tgt, 1);
1035*4882a593Smuzhiyun 	}
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	if (rq_data)
1038*4882a593Smuzhiyun 		memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	/* return RQ entries */
1041*4882a593Smuzhiyun 	for (i = 0; i < num_rq; i++)
1042*4882a593Smuzhiyun 		bnx2fc_return_rqe(tgt, 1);
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun num_rq_zero:
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	fps = &per_cpu(bnx2fc_percpu, cpu);
1047*4882a593Smuzhiyun 	spin_lock_bh(&fps->fp_work_lock);
1048*4882a593Smuzhiyun 	if (fps->iothread) {
1049*4882a593Smuzhiyun 		work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
1050*4882a593Smuzhiyun 					 num_rq, task);
1051*4882a593Smuzhiyun 		if (work) {
1052*4882a593Smuzhiyun 			list_add_tail(&work->list, &fps->work_list);
1053*4882a593Smuzhiyun 			wake_up_process(fps->iothread);
1054*4882a593Smuzhiyun 			spin_unlock_bh(&fps->fp_work_lock);
1055*4882a593Smuzhiyun 			return true;
1056*4882a593Smuzhiyun 		}
1057*4882a593Smuzhiyun 	}
1058*4882a593Smuzhiyun 	spin_unlock_bh(&fps->fp_work_lock);
1059*4882a593Smuzhiyun 	bnx2fc_process_cq_compl(tgt, wqe,
1060*4882a593Smuzhiyun 				rq_data_buff, num_rq, task);
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 	return true;
1063*4882a593Smuzhiyun }
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
1066*4882a593Smuzhiyun {
1067*4882a593Smuzhiyun 	struct fcoe_cqe *cq;
1068*4882a593Smuzhiyun 	u32 cq_cons;
1069*4882a593Smuzhiyun 	struct fcoe_cqe *cqe;
1070*4882a593Smuzhiyun 	u32 num_free_sqes = 0;
1071*4882a593Smuzhiyun 	u32 num_cqes = 0;
1072*4882a593Smuzhiyun 	u16 wqe;
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	/*
1075*4882a593Smuzhiyun 	 * cq_lock is a low contention lock used to protect
1076*4882a593Smuzhiyun 	 * the CQ data structure from being freed up during
1077*4882a593Smuzhiyun 	 * the upload operation
1078*4882a593Smuzhiyun 	 */
1079*4882a593Smuzhiyun 	spin_lock_bh(&tgt->cq_lock);
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 	if (!tgt->cq) {
1082*4882a593Smuzhiyun 		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
1083*4882a593Smuzhiyun 		spin_unlock_bh(&tgt->cq_lock);
1084*4882a593Smuzhiyun 		return 0;
1085*4882a593Smuzhiyun 	}
1086*4882a593Smuzhiyun 	cq = tgt->cq;
1087*4882a593Smuzhiyun 	cq_cons = tgt->cq_cons_idx;
1088*4882a593Smuzhiyun 	cqe = &cq[cq_cons];
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
1091*4882a593Smuzhiyun 	       (tgt->cq_curr_toggle_bit <<
1092*4882a593Smuzhiyun 	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 		/* new entry on the cq */
1095*4882a593Smuzhiyun 		if (wqe & FCOE_CQE_CQE_TYPE) {
1096*4882a593Smuzhiyun 			/* Unsolicited event notification */
1097*4882a593Smuzhiyun 			bnx2fc_process_unsol_compl(tgt, wqe);
1098*4882a593Smuzhiyun 		} else {
1099*4882a593Smuzhiyun 			if (bnx2fc_pending_work(tgt, wqe))
1100*4882a593Smuzhiyun 				num_free_sqes++;
1101*4882a593Smuzhiyun 		}
1102*4882a593Smuzhiyun 		cqe++;
1103*4882a593Smuzhiyun 		tgt->cq_cons_idx++;
1104*4882a593Smuzhiyun 		num_cqes++;
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
1107*4882a593Smuzhiyun 			tgt->cq_cons_idx = 0;
1108*4882a593Smuzhiyun 			cqe = cq;
1109*4882a593Smuzhiyun 			tgt->cq_curr_toggle_bit =
1110*4882a593Smuzhiyun 				1 - tgt->cq_curr_toggle_bit;
1111*4882a593Smuzhiyun 		}
1112*4882a593Smuzhiyun 	}
1113*4882a593Smuzhiyun 	if (num_cqes) {
1114*4882a593Smuzhiyun 		/* Arm CQ only if doorbell is mapped */
1115*4882a593Smuzhiyun 		if (tgt->ctx_base)
1116*4882a593Smuzhiyun 			bnx2fc_arm_cq(tgt);
1117*4882a593Smuzhiyun 		atomic_add(num_free_sqes, &tgt->free_sqes);
1118*4882a593Smuzhiyun 	}
1119*4882a593Smuzhiyun 	spin_unlock_bh(&tgt->cq_lock);
1120*4882a593Smuzhiyun 	return 0;
1121*4882a593Smuzhiyun }
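
/*
 * Illustrative sketch (assumed helper, not part of the driver or the firmware
 * interface) of the toggle-bit test used by the loop in
 * bnx2fc_process_new_cqes(): a CQE is "new" only while its toggle bit matches
 * the toggle bit expected for the current pass over the ring.  When
 * cq_cons_idx wraps at BNX2FC_CQ_WQES_MAX the expected bit is flipped, so
 * entries left over from the previous pass stop matching.
 */
static inline bool bnx2fc_cqe_is_new(struct bnx2fc_rport *tgt,
				     struct fcoe_cqe *cqe)
{
	u16 wqe = cqe->wqe;

	return (wqe & FCOE_CQE_TOGGLE_BIT) ==
	       (tgt->cq_curr_toggle_bit << FCOE_CQE_TOGGLE_BIT_SHIFT);
}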
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun /**
1124*4882a593Smuzhiyun  * bnx2fc_fastpath_notification - process global event queue (KCQ)
1125*4882a593Smuzhiyun  *
1126*4882a593Smuzhiyun  * @hba:		adapter structure pointer
1127*4882a593Smuzhiyun  * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
1128*4882a593Smuzhiyun  *
1129*4882a593Smuzhiyun  * Fast path event notification handler
1130*4882a593Smuzhiyun  */
1131*4882a593Smuzhiyun static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
1132*4882a593Smuzhiyun 					struct fcoe_kcqe *new_cqe_kcqe)
1133*4882a593Smuzhiyun {
1134*4882a593Smuzhiyun 	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
1135*4882a593Smuzhiyun 	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	if (!tgt) {
1138*4882a593Smuzhiyun 		printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
1139*4882a593Smuzhiyun 		return;
1140*4882a593Smuzhiyun 	}
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	bnx2fc_process_new_cqes(tgt);
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun /**
1146*4882a593Smuzhiyun  * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
1147*4882a593Smuzhiyun  *
1148*4882a593Smuzhiyun  * @hba:	adapter structure pointer
1149*4882a593Smuzhiyun  * @ofld_kcqe:	connection offload kcqe pointer
1150*4882a593Smuzhiyun  *
1151*4882a593Smuzhiyun  * handle session offload completion, enable the session if offload is
1152*4882a593Smuzhiyun  * successful.
1153*4882a593Smuzhiyun  */
1154*4882a593Smuzhiyun static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1155*4882a593Smuzhiyun 					struct fcoe_kcqe *ofld_kcqe)
1156*4882a593Smuzhiyun {
1157*4882a593Smuzhiyun 	struct bnx2fc_rport		*tgt;
1158*4882a593Smuzhiyun 	struct bnx2fc_interface		*interface;
1159*4882a593Smuzhiyun 	u32				conn_id;
1160*4882a593Smuzhiyun 	u32				context_id;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	conn_id = ofld_kcqe->fcoe_conn_id;
1163*4882a593Smuzhiyun 	context_id = ofld_kcqe->fcoe_conn_context_id;
1164*4882a593Smuzhiyun 	tgt = hba->tgt_ofld_list[conn_id];
1165*4882a593Smuzhiyun 	if (!tgt) {
1166*4882a593Smuzhiyun 		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
1167*4882a593Smuzhiyun 		return;
1168*4882a593Smuzhiyun 	}
1169*4882a593Smuzhiyun 	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1170*4882a593Smuzhiyun 		ofld_kcqe->fcoe_conn_context_id);
1171*4882a593Smuzhiyun 	interface = tgt->port->priv;
1172*4882a593Smuzhiyun 	if (hba != interface->hba) {
1173*4882a593Smuzhiyun 		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1174*4882a593Smuzhiyun 		goto ofld_cmpl_err;
1175*4882a593Smuzhiyun 	}
1176*4882a593Smuzhiyun 	/*
1177*4882a593Smuzhiyun 	 * cnic has allocated a context_id for this session; use this
1178*4882a593Smuzhiyun 	 * while enabling the session.
1179*4882a593Smuzhiyun 	 */
1180*4882a593Smuzhiyun 	tgt->context_id = context_id;
1181*4882a593Smuzhiyun 	if (ofld_kcqe->completion_status) {
1182*4882a593Smuzhiyun 		if (ofld_kcqe->completion_status ==
1183*4882a593Smuzhiyun 				FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
1184*4882a593Smuzhiyun 			printk(KERN_ERR PFX "unable to allocate FCoE context "
1185*4882a593Smuzhiyun 				"resources\n");
1186*4882a593Smuzhiyun 			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1187*4882a593Smuzhiyun 		}
1188*4882a593Smuzhiyun 	} else {
1189*4882a593Smuzhiyun 		/* FW offload request successfully completed */
1190*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1191*4882a593Smuzhiyun 	}
1192*4882a593Smuzhiyun ofld_cmpl_err:
1193*4882a593Smuzhiyun 	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1194*4882a593Smuzhiyun 	wake_up_interruptible(&tgt->ofld_wait);
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun /**
1198*4882a593Smuzhiyun  * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1199*4882a593Smuzhiyun  *
1200*4882a593Smuzhiyun  * @hba:	adapter structure pointer
1201*4882a593Smuzhiyun  * @ofld_kcqe:	connection enable kcqe pointer
1202*4882a593Smuzhiyun  *
1203*4882a593Smuzhiyun  * handle session enable completion, mark the rport as ready
1204*4882a593Smuzhiyun  */
1206*4882a593Smuzhiyun static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1207*4882a593Smuzhiyun 						struct fcoe_kcqe *ofld_kcqe)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun 	struct bnx2fc_rport		*tgt;
1210*4882a593Smuzhiyun 	struct bnx2fc_interface		*interface;
1211*4882a593Smuzhiyun 	u32				conn_id;
1212*4882a593Smuzhiyun 	u32				context_id;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	context_id = ofld_kcqe->fcoe_conn_context_id;
1215*4882a593Smuzhiyun 	conn_id = ofld_kcqe->fcoe_conn_id;
1216*4882a593Smuzhiyun 	tgt = hba->tgt_ofld_list[conn_id];
1217*4882a593Smuzhiyun 	if (!tgt) {
1218*4882a593Smuzhiyun 		printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1219*4882a593Smuzhiyun 		return;
1220*4882a593Smuzhiyun 	}
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1223*4882a593Smuzhiyun 		ofld_kcqe->fcoe_conn_context_id);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	/*
1226*4882a593Smuzhiyun 	 * context_id should be the same for this target during offload
1227*4882a593Smuzhiyun 	 * and enable
1228*4882a593Smuzhiyun 	 */
1229*4882a593Smuzhiyun 	if (tgt->context_id != context_id) {
1230*4882a593Smuzhiyun 		printk(KERN_ERR PFX "context id mis-match\n");
1231*4882a593Smuzhiyun 		return;
1232*4882a593Smuzhiyun 	}
1233*4882a593Smuzhiyun 	interface = tgt->port->priv;
1234*4882a593Smuzhiyun 	if (hba != interface->hba) {
1235*4882a593Smuzhiyun 		printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1236*4882a593Smuzhiyun 		goto enbl_cmpl_err;
1237*4882a593Smuzhiyun 	}
1238*4882a593Smuzhiyun 	if (!ofld_kcqe->completion_status)
1239*4882a593Smuzhiyun 		/* enable successful - rport ready for issuing IOs */
1240*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun enbl_cmpl_err:
1243*4882a593Smuzhiyun 	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1244*4882a593Smuzhiyun 	wake_up_interruptible(&tgt->ofld_wait);
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1248*4882a593Smuzhiyun 					struct fcoe_kcqe *disable_kcqe)
1249*4882a593Smuzhiyun {
1250*4882a593Smuzhiyun 
1251*4882a593Smuzhiyun 	struct bnx2fc_rport		*tgt;
1252*4882a593Smuzhiyun 	u32				conn_id;
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	conn_id = disable_kcqe->fcoe_conn_id;
1255*4882a593Smuzhiyun 	tgt = hba->tgt_ofld_list[conn_id];
1256*4882a593Smuzhiyun 	if (!tgt) {
1257*4882a593Smuzhiyun 		printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1258*4882a593Smuzhiyun 		return;
1259*4882a593Smuzhiyun 	}
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	if (disable_kcqe->completion_status) {
1264*4882a593Smuzhiyun 		printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1265*4882a593Smuzhiyun 			disable_kcqe->completion_status);
1266*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
1267*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1268*4882a593Smuzhiyun 		wake_up_interruptible(&tgt->upld_wait);
1269*4882a593Smuzhiyun 	} else {
1270*4882a593Smuzhiyun 		/* disable successful */
1271*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "disable successful\n");
1272*4882a593Smuzhiyun 		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1273*4882a593Smuzhiyun 		clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1274*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1275*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1276*4882a593Smuzhiyun 		wake_up_interruptible(&tgt->upld_wait);
1277*4882a593Smuzhiyun 	}
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1281*4882a593Smuzhiyun 					struct fcoe_kcqe *destroy_kcqe)
1282*4882a593Smuzhiyun {
1283*4882a593Smuzhiyun 	struct bnx2fc_rport		*tgt;
1284*4882a593Smuzhiyun 	u32				conn_id;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	conn_id = destroy_kcqe->fcoe_conn_id;
1287*4882a593Smuzhiyun 	tgt = hba->tgt_ofld_list[conn_id];
1288*4882a593Smuzhiyun 	if (!tgt) {
1289*4882a593Smuzhiyun 		printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1290*4882a593Smuzhiyun 		return;
1291*4882a593Smuzhiyun 	}
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	if (destroy_kcqe->completion_status) {
1296*4882a593Smuzhiyun 		printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1297*4882a593Smuzhiyun 			destroy_kcqe->completion_status);
1298*4882a593Smuzhiyun 		return;
1299*4882a593Smuzhiyun 	} else {
1300*4882a593Smuzhiyun 		/* destroy successful */
1301*4882a593Smuzhiyun 		BNX2FC_TGT_DBG(tgt, "upload successful\n");
1302*4882a593Smuzhiyun 		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1303*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1304*4882a593Smuzhiyun 		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1305*4882a593Smuzhiyun 		wake_up_interruptible(&tgt->upld_wait);
1306*4882a593Smuzhiyun 	}
1307*4882a593Smuzhiyun }
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1310*4882a593Smuzhiyun {
1311*4882a593Smuzhiyun 	switch (err_code) {
1312*4882a593Smuzhiyun 	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1313*4882a593Smuzhiyun 		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1314*4882a593Smuzhiyun 		break;
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1317*4882a593Smuzhiyun 		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1318*4882a593Smuzhiyun 		break;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1321*4882a593Smuzhiyun 		printk(KERN_ERR PFX "init_failure due to NIC error\n");
1322*4882a593Smuzhiyun 		break;
1323*4882a593Smuzhiyun 	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
1324*4882a593Smuzhiyun 		printk(KERN_ERR PFX "init failure due to compl status err\n");
1325*4882a593Smuzhiyun 		break;
1326*4882a593Smuzhiyun 	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1327*4882a593Smuzhiyun 		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1328*4882a593Smuzhiyun 		break;
1329*4882a593Smuzhiyun 	default:
1330*4882a593Smuzhiyun 		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1331*4882a593Smuzhiyun 	}
1332*4882a593Smuzhiyun }
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun /**
1335*4882a593Smuzhiyun  * bnx2fc_indicate_kcqe - process KCQE
1336*4882a593Smuzhiyun  *
1337*4882a593Smuzhiyun  * @context:	adapter structure pointer
1338*4882a593Smuzhiyun  * @kcq:	array of kcqe pointers
1339*4882a593Smuzhiyun  * @num_cqe:	Number of completion queue elements
1340*4882a593Smuzhiyun  *
1341*4882a593Smuzhiyun  * Generic KCQ event handler
1342*4882a593Smuzhiyun  */
1343*4882a593Smuzhiyun void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1344*4882a593Smuzhiyun 					u32 num_cqe)
1345*4882a593Smuzhiyun {
1346*4882a593Smuzhiyun 	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1347*4882a593Smuzhiyun 	int i = 0;
1348*4882a593Smuzhiyun 	struct fcoe_kcqe *kcqe = NULL;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	while (i < num_cqe) {
1351*4882a593Smuzhiyun 		kcqe = (struct fcoe_kcqe *) kcq[i++];
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 		switch (kcqe->op_code) {
1354*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1355*4882a593Smuzhiyun 			bnx2fc_fastpath_notification(hba, kcqe);
1356*4882a593Smuzhiyun 			break;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1359*4882a593Smuzhiyun 			bnx2fc_process_ofld_cmpl(hba, kcqe);
1360*4882a593Smuzhiyun 			break;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_ENABLE_CONN:
1363*4882a593Smuzhiyun 			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1364*4882a593Smuzhiyun 			break;
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_INIT_FUNC:
1367*4882a593Smuzhiyun 			if (kcqe->completion_status !=
1368*4882a593Smuzhiyun 					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1369*4882a593Smuzhiyun 				bnx2fc_init_failure(hba,
1370*4882a593Smuzhiyun 						kcqe->completion_status);
1371*4882a593Smuzhiyun 			} else {
1372*4882a593Smuzhiyun 				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1373*4882a593Smuzhiyun 				bnx2fc_get_link_state(hba);
1374*4882a593Smuzhiyun 				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1375*4882a593Smuzhiyun 					(u8)hba->pcidev->bus->number);
1376*4882a593Smuzhiyun 			}
1377*4882a593Smuzhiyun 			break;
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1380*4882a593Smuzhiyun 			if (kcqe->completion_status !=
1381*4882a593Smuzhiyun 					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 				printk(KERN_ERR PFX "DESTROY failed\n");
1384*4882a593Smuzhiyun 			} else {
1385*4882a593Smuzhiyun 				printk(KERN_ERR PFX "DESTROY success\n");
1386*4882a593Smuzhiyun 			}
1387*4882a593Smuzhiyun 			set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1388*4882a593Smuzhiyun 			wake_up_interruptible(&hba->destroy_wait);
1389*4882a593Smuzhiyun 			break;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_DISABLE_CONN:
1392*4882a593Smuzhiyun 			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1393*4882a593Smuzhiyun 			break;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_DESTROY_CONN:
1396*4882a593Smuzhiyun 			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1397*4882a593Smuzhiyun 			break;
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_STAT_FUNC:
1400*4882a593Smuzhiyun 			if (kcqe->completion_status !=
1401*4882a593Smuzhiyun 			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1402*4882a593Smuzhiyun 				printk(KERN_ERR PFX "STAT failed\n");
1403*4882a593Smuzhiyun 			complete(&hba->stat_req_done);
1404*4882a593Smuzhiyun 			break;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 		case FCOE_KCQE_OPCODE_FCOE_ERROR:
1407*4882a593Smuzhiyun 		default:
1408*4882a593Smuzhiyun 			printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1409*4882a593Smuzhiyun 								kcqe->op_code);
1410*4882a593Smuzhiyun 		}
1411*4882a593Smuzhiyun 	}
1412*4882a593Smuzhiyun }
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1415*4882a593Smuzhiyun {
1416*4882a593Smuzhiyun 	struct fcoe_sqe *sqe;
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	sqe = &tgt->sq[tgt->sq_prod_idx];
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	/* Fill SQ WQE */
1421*4882a593Smuzhiyun 	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1422*4882a593Smuzhiyun 	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	/* Advance SQ Prod Idx */
1425*4882a593Smuzhiyun 	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1426*4882a593Smuzhiyun 		tgt->sq_prod_idx = 0;
1427*4882a593Smuzhiyun 		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1428*4882a593Smuzhiyun 	}
1429*4882a593Smuzhiyun }
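
/*
 * A submission typically pairs bnx2fc_add_2_sq() with bnx2fc_ring_doorbell(),
 * e.g. (sketch of the usual calling pattern; the caller is assumed to already
 * own a free SQE and to serialise access to this tgt's SQ):
 *
 *	bnx2fc_add_2_sq(tgt, xid);	// fill the next SQ WQE for this task
 *	bnx2fc_ring_doorbell(tgt);	// publish sq_prod_idx to the chip
 */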
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun 	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1434*4882a593Smuzhiyun 	u32 msg;
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	wmb();
1437*4882a593Smuzhiyun 	sq_db->prod = tgt->sq_prod_idx |
1438*4882a593Smuzhiyun 				(tgt->sq_curr_toggle_bit << 15);
1439*4882a593Smuzhiyun 	msg = *((u32 *)sq_db);
1440*4882a593Smuzhiyun 	writel(cpu_to_le32(msg), tgt->ctx_base);
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1445*4882a593Smuzhiyun {
1446*4882a593Smuzhiyun 	u32 context_id = tgt->context_id;
1447*4882a593Smuzhiyun 	struct fcoe_port *port = tgt->port;
1448*4882a593Smuzhiyun 	u32 reg_off;
1449*4882a593Smuzhiyun 	resource_size_t reg_base;
1450*4882a593Smuzhiyun 	struct bnx2fc_interface *interface = port->priv;
1451*4882a593Smuzhiyun 	struct bnx2fc_hba *hba = interface->hba;
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 	reg_base = pci_resource_start(hba->pcidev,
1454*4882a593Smuzhiyun 					BNX2X_DOORBELL_PCI_BAR);
1455*4882a593Smuzhiyun 	reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
1456*4882a593Smuzhiyun 	tgt->ctx_base = ioremap(reg_base + reg_off, 4);
1457*4882a593Smuzhiyun 	if (!tgt->ctx_base)
1458*4882a593Smuzhiyun 		return -ENOMEM;
1459*4882a593Smuzhiyun 	return 0;
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1463*4882a593Smuzhiyun {
1464*4882a593Smuzhiyun 	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1467*4882a593Smuzhiyun 		return NULL;
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 	tgt->rq_cons_idx += num_items;
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1472*4882a593Smuzhiyun 		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	return buf;
1475*4882a593Smuzhiyun }
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1478*4882a593Smuzhiyun {
1479*4882a593Smuzhiyun 	/* return the rq buffer */
1480*4882a593Smuzhiyun 	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1481*4882a593Smuzhiyun 	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1482*4882a593Smuzhiyun 		/* Wrap around RQ */
1483*4882a593Smuzhiyun 		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1484*4882a593Smuzhiyun 	}
1485*4882a593Smuzhiyun 	tgt->rq_prod_idx = next_prod_idx;
1486*4882a593Smuzhiyun 	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1487*4882a593Smuzhiyun }
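
/*
 * Sketch of the consumer-side pairing used by the completion path above
 * (e.g. bnx2fc_pending_work(), which runs under tgt->cq_lock): every RQ
 * buffer taken with bnx2fc_get_next_rqe() is eventually handed back to the
 * hardware with bnx2fc_return_rqe().  Here local_buf stands for whatever
 * staging buffer the caller uses (rq_data_buff in the code above):
 *
 *	rq_data = bnx2fc_get_next_rqe(tgt, 1);		// consume one RQ buffer
 *	if (rq_data)
 *		memcpy(local_buf, rq_data, BNX2FC_RQ_BUF_SZ);
 *	bnx2fc_return_rqe(tgt, 1);			// give the buffer back
 */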
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1490*4882a593Smuzhiyun 				  struct fcoe_task_ctx_entry *task,
1491*4882a593Smuzhiyun 				  struct bnx2fc_cmd *orig_io_req,
1492*4882a593Smuzhiyun 				  u32 offset)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1495*4882a593Smuzhiyun 	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1496*4882a593Smuzhiyun 	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1497*4882a593Smuzhiyun 	struct fcoe_ext_mul_sges_ctx *sgl;
1498*4882a593Smuzhiyun 	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1499*4882a593Smuzhiyun 	u8 orig_task_type;
1500*4882a593Smuzhiyun 	u16 orig_xid = orig_io_req->xid;
1501*4882a593Smuzhiyun 	u32 context_id = tgt->context_id;
1502*4882a593Smuzhiyun 	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1503*4882a593Smuzhiyun 	u32 orig_offset = offset;
1504*4882a593Smuzhiyun 	int bd_count;
1505*4882a593Smuzhiyun 	int i;
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1510*4882a593Smuzhiyun 		orig_task_type = FCOE_TASK_TYPE_WRITE;
1511*4882a593Smuzhiyun 	else
1512*4882a593Smuzhiyun 		orig_task_type = FCOE_TASK_TYPE_READ;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	/* Tx flags */
1515*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.tx_flags =
1516*4882a593Smuzhiyun 				FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1517*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1518*4882a593Smuzhiyun 	/* init flags */
1519*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.init_flags = task_type <<
1520*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1521*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1522*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1523*4882a593Smuzhiyun 	task->rxwr_txrd.const_ctx.init_flags = context_id <<
1524*4882a593Smuzhiyun 				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1531*4882a593Smuzhiyun 	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	bd_count = orig_io_req->bd_tbl->bd_valid;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	/* obtain the appropriate bd entry from relative offset */
1536*4882a593Smuzhiyun 	for (i = 0; i < bd_count; i++) {
1537*4882a593Smuzhiyun 		if (offset < bd[i].buf_len)
1538*4882a593Smuzhiyun 			break;
1539*4882a593Smuzhiyun 		offset -= bd[i].buf_len;
1540*4882a593Smuzhiyun 	}
1541*4882a593Smuzhiyun 	phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1544*4882a593Smuzhiyun 		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1545*4882a593Smuzhiyun 				(u32)phys_addr;
1546*4882a593Smuzhiyun 		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1547*4882a593Smuzhiyun 				(u32)((u64)phys_addr >> 32);
1548*4882a593Smuzhiyun 		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1549*4882a593Smuzhiyun 				bd_count;
1550*4882a593Smuzhiyun 		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1551*4882a593Smuzhiyun 				offset; /* adjusted offset */
1552*4882a593Smuzhiyun 		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1553*4882a593Smuzhiyun 	} else {
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 		/* Multiple SGEs were used for this IO */
1556*4882a593Smuzhiyun 		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1557*4882a593Smuzhiyun 		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1558*4882a593Smuzhiyun 		sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1559*4882a593Smuzhiyun 		sgl->mul_sgl.sgl_size = bd_count;
1560*4882a593Smuzhiyun 		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
1561*4882a593Smuzhiyun 		sgl->mul_sgl.cur_sge_idx = i;
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 		memset(&task->rxwr_only.rx_seq_ctx, 0,
1564*4882a593Smuzhiyun 		       sizeof(struct fcoe_rx_seq_ctx));
1565*4882a593Smuzhiyun 		task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1566*4882a593Smuzhiyun 		task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1567*4882a593Smuzhiyun 	}
1568*4882a593Smuzhiyun }
1569*4882a593Smuzhiyun void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1570*4882a593Smuzhiyun 			      struct fcoe_task_ctx_entry *task,
1571*4882a593Smuzhiyun 			      u16 orig_xid)
1572*4882a593Smuzhiyun {
1573*4882a593Smuzhiyun 	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1574*4882a593Smuzhiyun 	struct bnx2fc_rport *tgt = io_req->tgt;
1575*4882a593Smuzhiyun 	u32 context_id = tgt->context_id;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	/* Tx Write Rx Read */
1580*4882a593Smuzhiyun 	/* init flags */
1581*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.init_flags = task_type <<
1582*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1583*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1584*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1585*4882a593Smuzhiyun 	if (tgt->dev_type == TYPE_TAPE)
1586*4882a593Smuzhiyun 		task->txwr_rxrd.const_ctx.init_flags |=
1587*4882a593Smuzhiyun 				FCOE_TASK_DEV_TYPE_TAPE <<
1588*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1589*4882a593Smuzhiyun 	else
1590*4882a593Smuzhiyun 		task->txwr_rxrd.const_ctx.init_flags |=
1591*4882a593Smuzhiyun 				FCOE_TASK_DEV_TYPE_DISK <<
1592*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1593*4882a593Smuzhiyun 	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	/* Tx flags */
1596*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.tx_flags =
1597*4882a593Smuzhiyun 				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1598*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	/* Rx Read Tx Write */
1601*4882a593Smuzhiyun 	task->rxwr_txrd.const_ctx.init_flags = context_id <<
1602*4882a593Smuzhiyun 				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1603*4882a593Smuzhiyun 	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1604*4882a593Smuzhiyun 				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1605*4882a593Smuzhiyun }
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1608*4882a593Smuzhiyun 				struct fcoe_task_ctx_entry *task)
1609*4882a593Smuzhiyun {
1610*4882a593Smuzhiyun 	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1611*4882a593Smuzhiyun 	struct bnx2fc_rport *tgt = io_req->tgt;
1612*4882a593Smuzhiyun 	struct fc_frame_header *fc_hdr;
1613*4882a593Smuzhiyun 	struct fcoe_ext_mul_sges_ctx *sgl;
1614*4882a593Smuzhiyun 	u8 task_type = 0;
1615*4882a593Smuzhiyun 	u64 *hdr;
1616*4882a593Smuzhiyun 	u64 temp_hdr[3];
1617*4882a593Smuzhiyun 	u32 context_id;
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	/* Obtain task_type */
1621*4882a593Smuzhiyun 	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1622*4882a593Smuzhiyun 	    (io_req->cmd_type == BNX2FC_ELS)) {
1623*4882a593Smuzhiyun 		task_type = FCOE_TASK_TYPE_MIDPATH;
1624*4882a593Smuzhiyun 	} else if (io_req->cmd_type == BNX2FC_ABTS) {
1625*4882a593Smuzhiyun 		task_type = FCOE_TASK_TYPE_ABTS;
1626*4882a593Smuzhiyun 	}
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	/* Setup the task from io_req for easy reference */
1631*4882a593Smuzhiyun 	io_req->task = task;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1634*4882a593Smuzhiyun 		io_req->cmd_type, task_type);
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 	/* Tx only */
1637*4882a593Smuzhiyun 	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1638*4882a593Smuzhiyun 	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1639*4882a593Smuzhiyun 		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1640*4882a593Smuzhiyun 				(u32)mp_req->mp_req_bd_dma;
1641*4882a593Smuzhiyun 		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1642*4882a593Smuzhiyun 				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
1643*4882a593Smuzhiyun 		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1644*4882a593Smuzhiyun 	}
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	/* Tx Write Rx Read */
1647*4882a593Smuzhiyun 	/* init flags */
1648*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.init_flags = task_type <<
1649*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1650*4882a593Smuzhiyun 	if (tgt->dev_type == TYPE_TAPE)
1651*4882a593Smuzhiyun 		task->txwr_rxrd.const_ctx.init_flags |=
1652*4882a593Smuzhiyun 				FCOE_TASK_DEV_TYPE_TAPE <<
1653*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1654*4882a593Smuzhiyun 	else
1655*4882a593Smuzhiyun 		task->txwr_rxrd.const_ctx.init_flags |=
1656*4882a593Smuzhiyun 				FCOE_TASK_DEV_TYPE_DISK <<
1657*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1658*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1659*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	/* tx flags */
1662*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1663*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 	/* Rx Write Tx Read */
1666*4882a593Smuzhiyun 	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	/* rx flags */
1669*4882a593Smuzhiyun 	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1670*4882a593Smuzhiyun 				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1671*4882a593Smuzhiyun 
1672*4882a593Smuzhiyun 	context_id = tgt->context_id;
1673*4882a593Smuzhiyun 	task->rxwr_txrd.const_ctx.init_flags = context_id <<
1674*4882a593Smuzhiyun 				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	fc_hdr = &(mp_req->req_fc_hdr);
1677*4882a593Smuzhiyun 	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1678*4882a593Smuzhiyun 		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1679*4882a593Smuzhiyun 		fc_hdr->fh_rx_id = htons(0xffff);
1680*4882a593Smuzhiyun 		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1681*4882a593Smuzhiyun 	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1682*4882a593Smuzhiyun 		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1683*4882a593Smuzhiyun 	}
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	/* Fill FC Header into middle path buffer */
1686*4882a593Smuzhiyun 	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1687*4882a593Smuzhiyun 	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1688*4882a593Smuzhiyun 	hdr[0] = cpu_to_be64(temp_hdr[0]);
1689*4882a593Smuzhiyun 	hdr[1] = cpu_to_be64(temp_hdr[1]);
1690*4882a593Smuzhiyun 	hdr[2] = cpu_to_be64(temp_hdr[2]);
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 	/* Rx Only */
1693*4882a593Smuzhiyun 	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1694*4882a593Smuzhiyun 		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1697*4882a593Smuzhiyun 		sgl->mul_sgl.cur_sge_addr.hi =
1698*4882a593Smuzhiyun 				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1699*4882a593Smuzhiyun 		sgl->mul_sgl.sgl_size = 1;
1700*4882a593Smuzhiyun 	}
1701*4882a593Smuzhiyun }
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1704*4882a593Smuzhiyun 			     struct fcoe_task_ctx_entry *task)
1705*4882a593Smuzhiyun {
1706*4882a593Smuzhiyun 	u8 task_type;
1707*4882a593Smuzhiyun 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1708*4882a593Smuzhiyun 	struct io_bdt *bd_tbl = io_req->bd_tbl;
1709*4882a593Smuzhiyun 	struct bnx2fc_rport *tgt = io_req->tgt;
1710*4882a593Smuzhiyun 	struct fcoe_cached_sge_ctx *cached_sge;
1711*4882a593Smuzhiyun 	struct fcoe_ext_mul_sges_ctx *sgl;
1712*4882a593Smuzhiyun 	int dev_type = tgt->dev_type;
1713*4882a593Smuzhiyun 	u64 *fcp_cmnd;
1714*4882a593Smuzhiyun 	u64 tmp_fcp_cmnd[4];
1715*4882a593Smuzhiyun 	u32 context_id;
1716*4882a593Smuzhiyun 	int cnt, i;
1717*4882a593Smuzhiyun 	int bd_count;
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	/* Setup the task from io_req for easy reference */
1722*4882a593Smuzhiyun 	io_req->task = task;
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1725*4882a593Smuzhiyun 		task_type = FCOE_TASK_TYPE_WRITE;
1726*4882a593Smuzhiyun 	else
1727*4882a593Smuzhiyun 		task_type = FCOE_TASK_TYPE_READ;
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	/* Tx only */
1730*4882a593Smuzhiyun 	bd_count = bd_tbl->bd_valid;
1731*4882a593Smuzhiyun 	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1732*4882a593Smuzhiyun 	if (task_type == FCOE_TASK_TYPE_WRITE) {
1733*4882a593Smuzhiyun 		if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1734*4882a593Smuzhiyun 			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1737*4882a593Smuzhiyun 			cached_sge->cur_buf_addr.lo =
1738*4882a593Smuzhiyun 					fcoe_bd_tbl->buf_addr_lo;
1739*4882a593Smuzhiyun 			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1740*4882a593Smuzhiyun 			cached_sge->cur_buf_addr.hi =
1741*4882a593Smuzhiyun 					fcoe_bd_tbl->buf_addr_hi;
1742*4882a593Smuzhiyun 			task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1743*4882a593Smuzhiyun 			cached_sge->cur_buf_rem =
1744*4882a593Smuzhiyun 					fcoe_bd_tbl->buf_len;
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1747*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1748*4882a593Smuzhiyun 		} else {
1749*4882a593Smuzhiyun 			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1750*4882a593Smuzhiyun 					(u32)bd_tbl->bd_tbl_dma;
1751*4882a593Smuzhiyun 			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1752*4882a593Smuzhiyun 					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1753*4882a593Smuzhiyun 			task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1754*4882a593Smuzhiyun 					bd_tbl->bd_valid;
1755*4882a593Smuzhiyun 		}
1756*4882a593Smuzhiyun 	}
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun 	/* Tx Write Rx Read */
1759*4882a593Smuzhiyun 	/* Init state to NORMAL */
1760*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1761*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1762*4882a593Smuzhiyun 	if (dev_type == TYPE_TAPE) {
1763*4882a593Smuzhiyun 		task->txwr_rxrd.const_ctx.init_flags |=
1764*4882a593Smuzhiyun 				FCOE_TASK_DEV_TYPE_TAPE <<
1765*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1766*4882a593Smuzhiyun 		io_req->rec_retry = 0;
1768*4882a593Smuzhiyun 	} else
1769*4882a593Smuzhiyun 		task->txwr_rxrd.const_ctx.init_flags |=
1770*4882a593Smuzhiyun 				FCOE_TASK_DEV_TYPE_DISK <<
1771*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1772*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1773*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1774*4882a593Smuzhiyun 	/* tx flags */
1775*4882a593Smuzhiyun 	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1776*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 	/* Set initial seq counter */
1779*4882a593Smuzhiyun 	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 	/* Fill FCP_CMND IU */
1782*4882a593Smuzhiyun 	fcp_cmnd = (u64 *)
1783*4882a593Smuzhiyun 		    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1784*4882a593Smuzhiyun 	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	/* swap fcp_cmnd */
1787*4882a593Smuzhiyun 	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	for (i = 0; i < cnt; i++) {
1790*4882a593Smuzhiyun 		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1791*4882a593Smuzhiyun 		fcp_cmnd++;
1792*4882a593Smuzhiyun 	}
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 	/* Rx Write Tx Read */
1795*4882a593Smuzhiyun 	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	context_id = tgt->context_id;
1798*4882a593Smuzhiyun 	task->rxwr_txrd.const_ctx.init_flags = context_id <<
1799*4882a593Smuzhiyun 				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	/* rx flags */
1802*4882a593Smuzhiyun 	/* Set state to "waiting for the first packet" */
1803*4882a593Smuzhiyun 	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1804*4882a593Smuzhiyun 				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	/* Rx Only */
1809*4882a593Smuzhiyun 	if (task_type != FCOE_TASK_TYPE_READ)
1810*4882a593Smuzhiyun 		return;
1811*4882a593Smuzhiyun 
1812*4882a593Smuzhiyun 	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1813*4882a593Smuzhiyun 	bd_count = bd_tbl->bd_valid;
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	if (dev_type == TYPE_DISK) {
1816*4882a593Smuzhiyun 		if (bd_count == 1) {
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1821*4882a593Smuzhiyun 			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1822*4882a593Smuzhiyun 			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1823*4882a593Smuzhiyun 			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1824*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1825*4882a593Smuzhiyun 		} else if (bd_count == 2) {
1826*4882a593Smuzhiyun 			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1829*4882a593Smuzhiyun 			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1830*4882a593Smuzhiyun 			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 			fcoe_bd_tbl++;
1833*4882a593Smuzhiyun 			cached_sge->second_buf_addr.lo =
1834*4882a593Smuzhiyun 						 fcoe_bd_tbl->buf_addr_lo;
1835*4882a593Smuzhiyun 			cached_sge->second_buf_addr.hi =
1836*4882a593Smuzhiyun 						fcoe_bd_tbl->buf_addr_hi;
1837*4882a593Smuzhiyun 			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1838*4882a593Smuzhiyun 			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1839*4882a593Smuzhiyun 				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1840*4882a593Smuzhiyun 		} else {
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun 			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1843*4882a593Smuzhiyun 			sgl->mul_sgl.cur_sge_addr.hi =
1844*4882a593Smuzhiyun 					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1845*4882a593Smuzhiyun 			sgl->mul_sgl.sgl_size = bd_count;
1846*4882a593Smuzhiyun 		}
1847*4882a593Smuzhiyun 	} else {
1848*4882a593Smuzhiyun 		sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1849*4882a593Smuzhiyun 		sgl->mul_sgl.cur_sge_addr.hi =
1850*4882a593Smuzhiyun 				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1851*4882a593Smuzhiyun 		sgl->mul_sgl.sgl_size = bd_count;
1852*4882a593Smuzhiyun 	}
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun /**
1856*4882a593Smuzhiyun  * bnx2fc_setup_task_ctx - allocate and map task context
1857*4882a593Smuzhiyun  *
1858*4882a593Smuzhiyun  * @hba:	pointer to adapter structure
1859*4882a593Smuzhiyun  *
1860*4882a593Smuzhiyun  * allocate memory for task context, and associated BD table to be used
1861*4882a593Smuzhiyun  * by firmware
1862*4882a593Smuzhiyun  *
1863*4882a593Smuzhiyun  */
1864*4882a593Smuzhiyun int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1865*4882a593Smuzhiyun {
1866*4882a593Smuzhiyun 	int rc = 0;
1867*4882a593Smuzhiyun 	struct regpair *task_ctx_bdt;
1868*4882a593Smuzhiyun 	dma_addr_t addr;
1869*4882a593Smuzhiyun 	int task_ctx_arr_sz;
1870*4882a593Smuzhiyun 	int i;
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	/*
1873*4882a593Smuzhiyun 	 * Allocate the task context BD table. One page of BD table
1874*4882a593Smuzhiyun 	 * entries can map 256 buffers, and each buffer holds 32 task
1875*4882a593Smuzhiyun 	 * context entries, so a single page supports up to 8192 task
1876*4882a593Smuzhiyun 	 * context entries.
1877*4882a593Smuzhiyun 	 */
1878*4882a593Smuzhiyun 	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1879*4882a593Smuzhiyun 						  PAGE_SIZE,
1880*4882a593Smuzhiyun 						  &hba->task_ctx_bd_dma,
1881*4882a593Smuzhiyun 						  GFP_KERNEL);
1882*4882a593Smuzhiyun 	if (!hba->task_ctx_bd_tbl) {
1883*4882a593Smuzhiyun 		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1884*4882a593Smuzhiyun 		rc = -1;
1885*4882a593Smuzhiyun 		goto out;
1886*4882a593Smuzhiyun 	}
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 	/*
1889*4882a593Smuzhiyun 	 * Allocate task_ctx which is an array of pointers pointing to
1890*4882a593Smuzhiyun 	 * Allocate task_ctx, an array of pointers, each pointing to a
1891*4882a593Smuzhiyun 	 * page that holds 32 task contexts
1892*4882a593Smuzhiyun 	task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1893*4882a593Smuzhiyun 	hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1894*4882a593Smuzhiyun 				 GFP_KERNEL);
1895*4882a593Smuzhiyun 	if (!hba->task_ctx) {
1896*4882a593Smuzhiyun 		printk(KERN_ERR PFX "unable to allocate task context array\n");
1897*4882a593Smuzhiyun 		rc = -1;
1898*4882a593Smuzhiyun 		goto out1;
1899*4882a593Smuzhiyun 	}
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 	/*
1902*4882a593Smuzhiyun 	 * Allocate task_ctx_dma which is an array of dma addresses
1903*4882a593Smuzhiyun 	 */
1904*4882a593Smuzhiyun 	hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1905*4882a593Smuzhiyun 					sizeof(dma_addr_t)), GFP_KERNEL);
1906*4882a593Smuzhiyun 	if (!hba->task_ctx_dma) {
1907*4882a593Smuzhiyun 		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1908*4882a593Smuzhiyun 		rc = -1;
1909*4882a593Smuzhiyun 		goto out2;
1910*4882a593Smuzhiyun 	}
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1913*4882a593Smuzhiyun 	for (i = 0; i < task_ctx_arr_sz; i++) {
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1916*4882a593Smuzhiyun 						      PAGE_SIZE,
1917*4882a593Smuzhiyun 						      &hba->task_ctx_dma[i],
1918*4882a593Smuzhiyun 						      GFP_KERNEL);
1919*4882a593Smuzhiyun 		if (!hba->task_ctx[i]) {
1920*4882a593Smuzhiyun 			printk(KERN_ERR PFX "unable to alloc task context\n");
1921*4882a593Smuzhiyun 			rc = -1;
1922*4882a593Smuzhiyun 			goto out3;
1923*4882a593Smuzhiyun 		}
1924*4882a593Smuzhiyun 		addr = (u64)hba->task_ctx_dma[i];
1925*4882a593Smuzhiyun 		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1926*4882a593Smuzhiyun 		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1927*4882a593Smuzhiyun 		task_ctx_bdt++;
1928*4882a593Smuzhiyun 	}
1929*4882a593Smuzhiyun 	return 0;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun out3:
1932*4882a593Smuzhiyun 	for (i = 0; i < task_ctx_arr_sz; i++) {
1933*4882a593Smuzhiyun 		if (hba->task_ctx[i]) {
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1936*4882a593Smuzhiyun 				hba->task_ctx[i], hba->task_ctx_dma[i]);
1937*4882a593Smuzhiyun 			hba->task_ctx[i] = NULL;
1938*4882a593Smuzhiyun 		}
1939*4882a593Smuzhiyun 	}
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun 	kfree(hba->task_ctx_dma);
1942*4882a593Smuzhiyun 	hba->task_ctx_dma = NULL;
1943*4882a593Smuzhiyun out2:
1944*4882a593Smuzhiyun 	kfree(hba->task_ctx);
1945*4882a593Smuzhiyun 	hba->task_ctx = NULL;
1946*4882a593Smuzhiyun out1:
1947*4882a593Smuzhiyun 	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1948*4882a593Smuzhiyun 			hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1949*4882a593Smuzhiyun 	hba->task_ctx_bd_tbl = NULL;
1950*4882a593Smuzhiyun out:
1951*4882a593Smuzhiyun 	return rc;
1952*4882a593Smuzhiyun }
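
/*
 * The resulting layout is a small two-level page table: task_ctx_bd_tbl is a
 * page of struct regpair entries, and entry i carries the DMA address of the
 * page behind hba->task_ctx[i], split into 32-bit halves.  The fill loop
 * above is equivalent to this sketch using the generic kernel helpers:
 *
 *	task_ctx_bdt[i].hi = cpu_to_le32(upper_32_bits(hba->task_ctx_dma[i]));
 *	task_ctx_bdt[i].lo = cpu_to_le32(lower_32_bits(hba->task_ctx_dma[i]));
 */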
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1955*4882a593Smuzhiyun {
1956*4882a593Smuzhiyun 	int task_ctx_arr_sz;
1957*4882a593Smuzhiyun 	int i;
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun 	if (hba->task_ctx_bd_tbl) {
1960*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1961*4882a593Smuzhiyun 				    hba->task_ctx_bd_tbl,
1962*4882a593Smuzhiyun 				    hba->task_ctx_bd_dma);
1963*4882a593Smuzhiyun 		hba->task_ctx_bd_tbl = NULL;
1964*4882a593Smuzhiyun 	}
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1967*4882a593Smuzhiyun 	if (hba->task_ctx) {
1968*4882a593Smuzhiyun 		for (i = 0; i < task_ctx_arr_sz; i++) {
1969*4882a593Smuzhiyun 			if (hba->task_ctx[i]) {
1970*4882a593Smuzhiyun 				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1971*4882a593Smuzhiyun 						    hba->task_ctx[i],
1972*4882a593Smuzhiyun 						    hba->task_ctx_dma[i]);
1973*4882a593Smuzhiyun 				hba->task_ctx[i] = NULL;
1974*4882a593Smuzhiyun 			}
1975*4882a593Smuzhiyun 		}
1976*4882a593Smuzhiyun 		kfree(hba->task_ctx);
1977*4882a593Smuzhiyun 		hba->task_ctx = NULL;
1978*4882a593Smuzhiyun 	}
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 	kfree(hba->task_ctx_dma);
1981*4882a593Smuzhiyun 	hba->task_ctx_dma = NULL;
1982*4882a593Smuzhiyun }
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1985*4882a593Smuzhiyun {
1986*4882a593Smuzhiyun 	int i;
1987*4882a593Smuzhiyun 	int segment_count;
1988*4882a593Smuzhiyun 	u32 *pbl;
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 	if (hba->hash_tbl_segments) {
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 		pbl = hba->hash_tbl_pbl;
1993*4882a593Smuzhiyun 		if (pbl) {
1994*4882a593Smuzhiyun 			segment_count = hba->hash_tbl_segment_count;
1995*4882a593Smuzhiyun 			for (i = 0; i < segment_count; ++i) {
1996*4882a593Smuzhiyun 				dma_addr_t dma_address;
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 				dma_address = le32_to_cpu(*pbl);
1999*4882a593Smuzhiyun 				++pbl;
2000*4882a593Smuzhiyun 				dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
2001*4882a593Smuzhiyun 				++pbl;
2002*4882a593Smuzhiyun 				dma_free_coherent(&hba->pcidev->dev,
2003*4882a593Smuzhiyun 						  BNX2FC_HASH_TBL_CHUNK_SIZE,
2004*4882a593Smuzhiyun 						  hba->hash_tbl_segments[i],
2005*4882a593Smuzhiyun 						  dma_address);
2006*4882a593Smuzhiyun 			}
2007*4882a593Smuzhiyun 		}
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 		kfree(hba->hash_tbl_segments);
2010*4882a593Smuzhiyun 		hba->hash_tbl_segments = NULL;
2011*4882a593Smuzhiyun 	}
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	if (hba->hash_tbl_pbl) {
2014*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2015*4882a593Smuzhiyun 				    hba->hash_tbl_pbl,
2016*4882a593Smuzhiyun 				    hba->hash_tbl_pbl_dma);
2017*4882a593Smuzhiyun 		hba->hash_tbl_pbl = NULL;
2018*4882a593Smuzhiyun 	}
2019*4882a593Smuzhiyun }
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2022*4882a593Smuzhiyun {
2023*4882a593Smuzhiyun 	int i;
2024*4882a593Smuzhiyun 	int hash_table_size;
2025*4882a593Smuzhiyun 	int segment_count;
2026*4882a593Smuzhiyun 	int segment_array_size;
2027*4882a593Smuzhiyun 	int dma_segment_array_size;
2028*4882a593Smuzhiyun 	dma_addr_t *dma_segment_array;
2029*4882a593Smuzhiyun 	u32 *pbl;
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun 	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
2032*4882a593Smuzhiyun 		sizeof(struct fcoe_hash_table_entry);
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
2035*4882a593Smuzhiyun 	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
2036*4882a593Smuzhiyun 	hba->hash_tbl_segment_count = segment_count;
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
2039*4882a593Smuzhiyun 	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
2040*4882a593Smuzhiyun 	if (!hba->hash_tbl_segments) {
2041*4882a593Smuzhiyun 		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
2042*4882a593Smuzhiyun 		return -ENOMEM;
2043*4882a593Smuzhiyun 	}
2044*4882a593Smuzhiyun 	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
2045*4882a593Smuzhiyun 	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
2046*4882a593Smuzhiyun 	if (!dma_segment_array) {
2047*4882a593Smuzhiyun 		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
2048*4882a593Smuzhiyun 		goto cleanup_ht;
2049*4882a593Smuzhiyun 	}
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 	for (i = 0; i < segment_count; ++i) {
2052*4882a593Smuzhiyun 		hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
2053*4882a593Smuzhiyun 							       BNX2FC_HASH_TBL_CHUNK_SIZE,
2054*4882a593Smuzhiyun 							       &dma_segment_array[i],
2055*4882a593Smuzhiyun 							       GFP_KERNEL);
2056*4882a593Smuzhiyun 		if (!hba->hash_tbl_segments[i]) {
2057*4882a593Smuzhiyun 			printk(KERN_ERR PFX "hash segment alloc failed\n");
2058*4882a593Smuzhiyun 			goto cleanup_dma;
2059*4882a593Smuzhiyun 		}
2060*4882a593Smuzhiyun 	}
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2063*4882a593Smuzhiyun 					       &hba->hash_tbl_pbl_dma,
2064*4882a593Smuzhiyun 					       GFP_KERNEL);
2065*4882a593Smuzhiyun 	if (!hba->hash_tbl_pbl) {
2066*4882a593Smuzhiyun 		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2067*4882a593Smuzhiyun 		goto cleanup_dma;
2068*4882a593Smuzhiyun 	}
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 	pbl = hba->hash_tbl_pbl;
2071*4882a593Smuzhiyun 	for (i = 0; i < segment_count; ++i) {
2072*4882a593Smuzhiyun 		u64 paddr = dma_segment_array[i];
2073*4882a593Smuzhiyun 		*pbl = cpu_to_le32((u32) paddr);
2074*4882a593Smuzhiyun 		++pbl;
2075*4882a593Smuzhiyun 		*pbl = cpu_to_le32((u32) (paddr >> 32));
2076*4882a593Smuzhiyun 		++pbl;
2077*4882a593Smuzhiyun 	}
2078*4882a593Smuzhiyun 	pbl = hba->hash_tbl_pbl;
2079*4882a593Smuzhiyun 	i = 0;
2080*4882a593Smuzhiyun 	while (*pbl && *(pbl + 1)) {
2081*4882a593Smuzhiyun 		++pbl;
2082*4882a593Smuzhiyun 		++pbl;
2083*4882a593Smuzhiyun 		++i;
2084*4882a593Smuzhiyun 	}
2085*4882a593Smuzhiyun 	kfree(dma_segment_array);
2086*4882a593Smuzhiyun 	return 0;
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun cleanup_dma:
2089*4882a593Smuzhiyun 	for (i = 0; i < segment_count; ++i) {
2090*4882a593Smuzhiyun 		if (hba->hash_tbl_segments[i])
2091*4882a593Smuzhiyun 			dma_free_coherent(&hba->pcidev->dev,
2092*4882a593Smuzhiyun 					    BNX2FC_HASH_TBL_CHUNK_SIZE,
2093*4882a593Smuzhiyun 					    hba->hash_tbl_segments[i],
2094*4882a593Smuzhiyun 					    dma_segment_array[i]);
2095*4882a593Smuzhiyun 	}
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	kfree(dma_segment_array);
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun cleanup_ht:
2100*4882a593Smuzhiyun 	kfree(hba->hash_tbl_segments);
2101*4882a593Smuzhiyun 	hba->hash_tbl_segments = NULL;
2102*4882a593Smuzhiyun 	return -ENOMEM;
2103*4882a593Smuzhiyun }
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun /**
2106*4882a593Smuzhiyun  * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
2107*4882a593Smuzhiyun  *
2108*4882a593Smuzhiyun  * @hba:	Pointer to adapter structure
2109*4882a593Smuzhiyun  *
2110*4882a593Smuzhiyun  */
2111*4882a593Smuzhiyun int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2112*4882a593Smuzhiyun {
2113*4882a593Smuzhiyun 	u64 addr;
2114*4882a593Smuzhiyun 	u32 mem_size;
2115*4882a593Smuzhiyun 	int i;
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 	if (bnx2fc_allocate_hash_table(hba))
2118*4882a593Smuzhiyun 		return -ENOMEM;
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2121*4882a593Smuzhiyun 	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2122*4882a593Smuzhiyun 						  &hba->t2_hash_tbl_ptr_dma,
2123*4882a593Smuzhiyun 						  GFP_KERNEL);
2124*4882a593Smuzhiyun 	if (!hba->t2_hash_tbl_ptr) {
2125*4882a593Smuzhiyun 		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2126*4882a593Smuzhiyun 		bnx2fc_free_fw_resc(hba);
2127*4882a593Smuzhiyun 		return -ENOMEM;
2128*4882a593Smuzhiyun 	}
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	mem_size = BNX2FC_NUM_MAX_SESS *
2131*4882a593Smuzhiyun 				sizeof(struct fcoe_t2_hash_table_entry);
2132*4882a593Smuzhiyun 	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2133*4882a593Smuzhiyun 					      &hba->t2_hash_tbl_dma,
2134*4882a593Smuzhiyun 					      GFP_KERNEL);
2135*4882a593Smuzhiyun 	if (!hba->t2_hash_tbl) {
2136*4882a593Smuzhiyun 		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2137*4882a593Smuzhiyun 		bnx2fc_free_fw_resc(hba);
2138*4882a593Smuzhiyun 		return -ENOMEM;
2139*4882a593Smuzhiyun 	}
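	/*
	 * Chain the t2 hash entries together by bus address: the loop below
	 * points entry i's "next" regpair at the DMA address of entry i + 1,
	 * so the table reaches the firmware already linked rather than as a
	 * set of independent entries.
	 */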
2140*4882a593Smuzhiyun 	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
2141*4882a593Smuzhiyun 		addr = (unsigned long) hba->t2_hash_tbl_dma +
2142*4882a593Smuzhiyun 			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
2143*4882a593Smuzhiyun 		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
2144*4882a593Smuzhiyun 		hba->t2_hash_tbl[i].next.hi = addr >> 32;
2145*4882a593Smuzhiyun 	}
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2148*4882a593Smuzhiyun 					       PAGE_SIZE, &hba->dummy_buf_dma,
2149*4882a593Smuzhiyun 					       GFP_KERNEL);
2150*4882a593Smuzhiyun 	if (!hba->dummy_buffer) {
2151*4882a593Smuzhiyun 		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
2152*4882a593Smuzhiyun 		bnx2fc_free_fw_resc(hba);
2153*4882a593Smuzhiyun 		return -ENOMEM;
2154*4882a593Smuzhiyun 	}
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2157*4882a593Smuzhiyun 					       &hba->stats_buf_dma,
2158*4882a593Smuzhiyun 					       GFP_KERNEL);
2159*4882a593Smuzhiyun 	if (!hba->stats_buffer) {
2160*4882a593Smuzhiyun 		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2161*4882a593Smuzhiyun 		bnx2fc_free_fw_resc(hba);
2162*4882a593Smuzhiyun 		return -ENOMEM;
2163*4882a593Smuzhiyun 	}
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 	return 0;
2166*4882a593Smuzhiyun }
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
2169*4882a593Smuzhiyun {
2170*4882a593Smuzhiyun 	u32 mem_size;
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun 	if (hba->stats_buffer) {
2173*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2174*4882a593Smuzhiyun 				  hba->stats_buffer, hba->stats_buf_dma);
2175*4882a593Smuzhiyun 		hba->stats_buffer = NULL;
2176*4882a593Smuzhiyun 	}
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 	if (hba->dummy_buffer) {
2179*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2180*4882a593Smuzhiyun 				  hba->dummy_buffer, hba->dummy_buf_dma);
2181*4882a593Smuzhiyun 		hba->dummy_buffer = NULL;
2182*4882a593Smuzhiyun 	}
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun 	if (hba->t2_hash_tbl_ptr) {
2185*4882a593Smuzhiyun 		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2186*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, mem_size,
2187*4882a593Smuzhiyun 				    hba->t2_hash_tbl_ptr,
2188*4882a593Smuzhiyun 				    hba->t2_hash_tbl_ptr_dma);
2189*4882a593Smuzhiyun 		hba->t2_hash_tbl_ptr = NULL;
2190*4882a593Smuzhiyun 	}
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	if (hba->t2_hash_tbl) {
2193*4882a593Smuzhiyun 		mem_size = BNX2FC_NUM_MAX_SESS *
2194*4882a593Smuzhiyun 			    sizeof(struct fcoe_t2_hash_table_entry);
2195*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, mem_size,
2196*4882a593Smuzhiyun 				    hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2197*4882a593Smuzhiyun 		hba->t2_hash_tbl = NULL;
2198*4882a593Smuzhiyun 	}
2199*4882a593Smuzhiyun 	bnx2fc_free_hash_table(hba);
2200*4882a593Smuzhiyun }
2201