xref: /OK3568_Linux_fs/kernel/drivers/scsi/bnx2i/bnx2i_hwi.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* bnx2i_hwi.c: QLogic NetXtreme II iSCSI driver.
2*4882a593Smuzhiyun  *
3*4882a593Smuzhiyun  * Copyright (c) 2006 - 2013 Broadcom Corporation
4*4882a593Smuzhiyun  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
5*4882a593Smuzhiyun  * Copyright (c) 2007, 2008 Mike Christie
6*4882a593Smuzhiyun  * Copyright (c) 2014, QLogic Corporation
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify
9*4882a593Smuzhiyun  * it under the terms of the GNU General Public License as published by
10*4882a593Smuzhiyun  * the Free Software Foundation.
11*4882a593Smuzhiyun  *
12*4882a593Smuzhiyun  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
13*4882a593Smuzhiyun  * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
14*4882a593Smuzhiyun  * Maintained by: QLogic-Storage-Upstream@qlogic.com
15*4882a593Smuzhiyun  */
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include <linux/gfp.h>
18*4882a593Smuzhiyun #include <scsi/scsi_tcq.h>
19*4882a593Smuzhiyun #include <scsi/libiscsi.h>
20*4882a593Smuzhiyun #include "bnx2i.h"
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /**
25*4882a593Smuzhiyun  * bnx2i_get_cid_num - get cid from ep
26*4882a593Smuzhiyun  * @ep: 	endpoint pointer
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * Only applicable to 57710 family of devices
29*4882a593Smuzhiyun  */
bnx2i_get_cid_num(struct bnx2i_endpoint * ep)30*4882a593Smuzhiyun static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
31*4882a593Smuzhiyun {
32*4882a593Smuzhiyun 	u32 cid;
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
35*4882a593Smuzhiyun 		cid = ep->ep_cid;
36*4882a593Smuzhiyun 	else
37*4882a593Smuzhiyun 		cid = GET_CID_NUM(ep->ep_cid);
38*4882a593Smuzhiyun 	return cid;
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun /**
43*4882a593Smuzhiyun  * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type
44*4882a593Smuzhiyun  * @hba: 		Adapter for which adjustments is to be made
45*4882a593Smuzhiyun  *
46*4882a593Smuzhiyun  * Only applicable to 57710 family of devices
47*4882a593Smuzhiyun  */
bnx2i_adjust_qp_size(struct bnx2i_hba * hba)48*4882a593Smuzhiyun static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun 	u32 num_elements_per_pg;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 	if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
53*4882a593Smuzhiyun 	    test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
54*4882a593Smuzhiyun 	    test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
55*4882a593Smuzhiyun 		if (!is_power_of_2(hba->max_sqes))
56*4882a593Smuzhiyun 			hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 		if (!is_power_of_2(hba->max_rqes))
59*4882a593Smuzhiyun 			hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
60*4882a593Smuzhiyun 	}
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 	/* Adjust each queue size if the user selection does not
63*4882a593Smuzhiyun 	 * yield integral num of page buffers
64*4882a593Smuzhiyun 	 */
65*4882a593Smuzhiyun 	/* adjust SQ */
66*4882a593Smuzhiyun 	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
67*4882a593Smuzhiyun 	if (hba->max_sqes < num_elements_per_pg)
68*4882a593Smuzhiyun 		hba->max_sqes = num_elements_per_pg;
69*4882a593Smuzhiyun 	else if (hba->max_sqes % num_elements_per_pg)
70*4882a593Smuzhiyun 		hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
71*4882a593Smuzhiyun 				 ~(num_elements_per_pg - 1);
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	/* adjust CQ */
74*4882a593Smuzhiyun 	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
75*4882a593Smuzhiyun 	if (hba->max_cqes < num_elements_per_pg)
76*4882a593Smuzhiyun 		hba->max_cqes = num_elements_per_pg;
77*4882a593Smuzhiyun 	else if (hba->max_cqes % num_elements_per_pg)
78*4882a593Smuzhiyun 		hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
79*4882a593Smuzhiyun 				 ~(num_elements_per_pg - 1);
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	/* adjust RQ */
82*4882a593Smuzhiyun 	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
83*4882a593Smuzhiyun 	if (hba->max_rqes < num_elements_per_pg)
84*4882a593Smuzhiyun 		hba->max_rqes = num_elements_per_pg;
85*4882a593Smuzhiyun 	else if (hba->max_rqes % num_elements_per_pg)
86*4882a593Smuzhiyun 		hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
87*4882a593Smuzhiyun 				 ~(num_elements_per_pg - 1);
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun /**
92*4882a593Smuzhiyun  * bnx2i_get_link_state - get network interface link state
93*4882a593Smuzhiyun  * @hba:	adapter instance pointer
94*4882a593Smuzhiyun  *
95*4882a593Smuzhiyun  * updates adapter structure flag based on netdev state
96*4882a593Smuzhiyun  */
bnx2i_get_link_state(struct bnx2i_hba * hba)97*4882a593Smuzhiyun static void bnx2i_get_link_state(struct bnx2i_hba *hba)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun 	if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
100*4882a593Smuzhiyun 		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
101*4882a593Smuzhiyun 	else
102*4882a593Smuzhiyun 		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun /**
107*4882a593Smuzhiyun  * bnx2i_iscsi_license_error - displays iscsi license related error message
108*4882a593Smuzhiyun  * @hba:		adapter instance pointer
109*4882a593Smuzhiyun  * @error_code:		error classification
110*4882a593Smuzhiyun  *
111*4882a593Smuzhiyun  * Puts out an error log when driver is unable to offload iscsi connection
112*4882a593Smuzhiyun  *	due to license restrictions
113*4882a593Smuzhiyun  */
bnx2i_iscsi_license_error(struct bnx2i_hba * hba,u32 error_code)114*4882a593Smuzhiyun static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun 	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
117*4882a593Smuzhiyun 		/* iSCSI offload not supported on this device */
118*4882a593Smuzhiyun 		printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
119*4882a593Smuzhiyun 				hba->netdev->name);
120*4882a593Smuzhiyun 	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
121*4882a593Smuzhiyun 		/* iSCSI offload not supported on this LOM device */
122*4882a593Smuzhiyun 		printk(KERN_ERR "bnx2i: LOM is not enable to "
123*4882a593Smuzhiyun 				"offload iSCSI connections, dev=%s\n",
124*4882a593Smuzhiyun 				hba->netdev->name);
125*4882a593Smuzhiyun 	set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 
/**
 * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
 * @ep:		endpoint (transport identifier) structure
 * @action:	action, ARM or DISARM. For now only ARM_CQE is used
 *
 * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt
 *	the driver. EQ event is generated CQ index is hit or at least 1 CQ is
 *	outstanding and on chip timer expires
 *
 * Returns the computed coalescing index delta (next_index), or 0 when the
 *	device does not coalesce (non-57710) or the doorbell was left alone.
 */
int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
{
	struct bnx2i_5771x_cq_db *cq_db;
	u16 cq_index;
	u16 next_index = 0;
	u32 num_active_cmds;

	/* Coalesce CQ entries only on 10G devices */
	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		return 0;

	/* Do not update CQ DB multiple times before firmware writes
	 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
	 * interrupts and other unwanted results
	 */
	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;

	/* fast-path arming skips the "firmware ack" guard below */
	if (action != CNIC_ARM_CQE_FP)
		if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
			return 0;

	if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
		/* scale the interrupt threshold with the number of
		 * outstanding commands: at least event_coal_min commands
		 * must remain uncovered, and at least 1 CQE is armed */
		num_active_cmds = atomic_read(&ep->num_active_cmds);
		if (num_active_cmds <= event_coal_min)
			next_index = 1;
		else {
			next_index = num_active_cmds >> ep->ec_shift;
			if (next_index > num_active_cmds - event_coal_min)
				next_index = num_active_cmds - event_coal_min;
		}
		if (!next_index)
			next_index = 1;
		/* translate the delta into an absolute CQE sequence number;
		 * the sequence space appears to span 1..2*cqe_size with 0
		 * avoided -- NOTE(review): confirm against firmware spec */
		cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
		if (cq_index > ep->qp.cqe_size * 2)
			cq_index -= ep->qp.cqe_size * 2;
		if (!cq_index)
			cq_index = 1;

		/* publish the new arming index for the firmware to pick up */
		cq_db->sqn[0] = cq_index;
	}
	return next_index;
}
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun /**
183*4882a593Smuzhiyun  * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
184*4882a593Smuzhiyun  * @bnx2i_conn:		iscsi connection on which RQ event occurred
185*4882a593Smuzhiyun  * @ptr:		driver buffer to which RQ buffer contents is to
186*4882a593Smuzhiyun  *			be copied
187*4882a593Smuzhiyun  * @len:		length of valid data inside RQ buf
188*4882a593Smuzhiyun  *
189*4882a593Smuzhiyun  * Copies RQ buffer contents from shared (DMA'able) memory region to
190*4882a593Smuzhiyun  *	driver buffer. RQ is used to DMA unsolicitated iscsi pdu's and
191*4882a593Smuzhiyun  *	scsi sense info
192*4882a593Smuzhiyun  */
bnx2i_get_rq_buf(struct bnx2i_conn * bnx2i_conn,char * ptr,int len)193*4882a593Smuzhiyun void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun 	if (!bnx2i_conn->ep->qp.rqe_left)
196*4882a593Smuzhiyun 		return;
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	bnx2i_conn->ep->qp.rqe_left--;
199*4882a593Smuzhiyun 	memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
200*4882a593Smuzhiyun 	if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
201*4882a593Smuzhiyun 		bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
202*4882a593Smuzhiyun 		bnx2i_conn->ep->qp.rq_cons_idx = 0;
203*4882a593Smuzhiyun 	} else {
204*4882a593Smuzhiyun 		bnx2i_conn->ep->qp.rq_cons_qe++;
205*4882a593Smuzhiyun 		bnx2i_conn->ep->qp.rq_cons_idx++;
206*4882a593Smuzhiyun 	}
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 
/*
 * bnx2i_ring_577xx_doorbell - write a TX doorbell message for a 5771x chip.
 *
 * Builds a zeroed doorbell message carrying only the iSCSI connection type
 * in its header and writes it to the connection's mapped doorbell register.
 */
static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
{
	struct bnx2i_5771x_dbell dbell;
	u32 msg;

	memset(&dbell, 0, sizeof(dbell));
	dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
			      B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
	/* reinterpret the 4-byte doorbell struct as a raw 32-bit message */
	msg = *((u32 *)&dbell);
	/* TODO : get doorbell register mapping */
	/* NOTE(review): cpu_to_le32() combined with writel() (which itself
	 * performs a little-endian store) looks like a double byte-swap on
	 * big-endian hosts -- confirm the intended byte order with the
	 * hardware spec before changing. */
	writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
}
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 
/**
 * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
 * @bnx2i_conn:	iscsi connection on which event to post
 * @count:	number of RQ buffer being posted to chip
 *
 * No need to ring hardware doorbell for 57710 family of devices
 */
void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_5771x_sq_rq_db *rq_db;
	/* bit 15 of the producer index acts as a rollover flag; remember
	 * its current state before doing 15-bit arithmetic below */
	u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;

	ep->qp.rqe_left += count;
	/* advance the producer index within the low 15 bits only */
	ep->qp.rq_prod_idx &= 0x7FFF;
	ep->qp.rq_prod_idx += count;

	if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
		/* wrapped past the ring end: reduce modulo ring size and
		 * toggle the rollover flag */
		ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
		if (!hi_bit)
			ep->qp.rq_prod_idx |= 0x8000;
	} else
		/* no wrap: restore the previous rollover flag */
		ep->qp.rq_prod_idx |= hi_bit;

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		/* 57710: producer index is published via the shared-memory
		 * doorbell structure instead of an MMIO register */
		rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
		rq_db->prod_idx = ep->qp.rq_prod_idx;
		/* no need to ring hardware doorbell for 57710 */
	} else {
		writew(ep->qp.rq_prod_idx,
		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
	}
}
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 
/**
 * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
 * @bnx2i_conn:		iscsi connection to which new SQ entries belong
 * @count: 		number of SQ WQEs to post
 *
 * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
 *	of devices. For 5706/5708/5709 new SQ WQE count is written into the
 *	doorbell register
 */
static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_5771x_sq_rq_db *sq_db;
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;

	atomic_inc(&ep->num_active_cmds);
	/* the barrier is required: WQE contents must be globally visible
	 * before the chip is told to fetch them */
	wmb();	/* flush SQ WQE memory before the doorbell is rung */
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		/* 57710: publish the producer index in host memory, then
		 * ring the MMIO doorbell to notify the chip */
		sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
		sq_db->prod_idx = ep->qp.sq_prod_idx;
		bnx2i_ring_577xx_doorbell(bnx2i_conn);
	} else
		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
}
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 
/**
 * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
 * @bnx2i_conn:	iscsi connection to which new SQ entries belong
 * @count:	number of SQ WQEs to post
 *
 * this routine will update SQ driver parameters and ring the doorbell
 */
static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
					      int count)
{
	int tmp_cnt;

	if (count == 1) {
		/* common case: advance the producer by one, wrapping from
		 * the last WQE back to the first */
		if (bnx2i_conn->ep->qp.sq_prod_qe ==
		    bnx2i_conn->ep->qp.sq_last_qe)
			bnx2i_conn->ep->qp.sq_prod_qe =
						bnx2i_conn->ep->qp.sq_first_qe;
		else
			bnx2i_conn->ep->qp.sq_prod_qe++;
	} else {
		if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
		    bnx2i_conn->ep->qp.sq_last_qe)
			bnx2i_conn->ep->qp.sq_prod_qe += count;
		else {
			/* advancing by 'count' runs past the ring end:
			 * tmp_cnt is the number of slots up to the last QE;
			 * the remainder of the advance continues from the
			 * first QE */
			tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
				bnx2i_conn->ep->qp.sq_prod_qe;
			bnx2i_conn->ep->qp.sq_prod_qe =
				&bnx2i_conn->ep->qp.sq_first_qe[count -
								(tmp_cnt + 1)];
		}
	}
	bnx2i_conn->ep->qp.sq_prod_idx += count;
	/* Ring the doorbell; note the doorbell value is the updated
	 * producer index, not the WQE count */
	bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
}
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 
/**
 * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
 * @bnx2i_conn:	iscsi connection
 * @task: transport layer's command structure pointer which is requesting
 *	  a WQE to sent to chip for further processing
 *
 * prepare and post an iSCSI Login request WQE to CNIC firmware
 *
 * Always returns 0.
 */
int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
			   struct iscsi_task *task)
{
	struct bnx2i_login_request *login_wqe;
	struct iscsi_login_req *login_hdr;
	u32 dword;

	login_hdr = (struct iscsi_login_req *)task->hdr;
	/* next free slot in the shared-memory send queue */
	login_wqe = (struct bnx2i_login_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;

	/* copy the login PDU header fields into the WQE */
	login_wqe->op_code = login_hdr->opcode;
	login_wqe->op_attr = login_hdr->flags;
	login_wqe->version_max = login_hdr->max_version;
	login_wqe->version_min = login_hdr->min_version;
	login_wqe->data_length = ntoh24(login_hdr->dlength);
	/* the 6-byte ISID is split: bytes 0-3 go into isid_lo,
	 * bytes 4-5 into isid_hi ((u16 *)isid + 2 == byte offset 4) */
	login_wqe->isid_lo = *((u32 *) login_hdr->isid);
	login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
	login_wqe->tsih = login_hdr->tsih;
	/* tag the ITT with ISCSI_TASK_TYPE_MPATH in the type bits */
	login_wqe->itt = task->itt |
		(ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
	login_wqe->cid = login_hdr->cid;

	login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
	login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
	login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN;

	/* response buffer described by a single BD; 64-bit DMA addresses
	 * are split into lo/hi 32-bit halves */
	login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
	login_wqe->resp_bd_list_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);

	/* response descriptor: one BD + response buffer length */
	dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
		 (bnx2i_conn->gen_pdu.resp_buf_size <<
		  ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
	login_wqe->resp_buffer = dword;
	login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
	login_wqe->bd_list_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
	login_wqe->num_bds = 1;
	login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	/* advance the SQ producer and ring the doorbell */
	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
373*4882a593Smuzhiyun 
/**
 * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
 * @bnx2i_conn:	iscsi connection
 * @mtask:	driver command structure which is requesting
 *		a WQE to sent to chip for further processing
 *
 * prepare and post an iSCSI task management request WQE to CNIC firmware
 *
 * Always returns 0; a TMF referencing an already-completed command is
 *	silently dropped.
 */
int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
			 struct iscsi_task *mtask)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_tm *tmfabort_hdr;
	struct scsi_cmnd *ref_sc;
	struct iscsi_task *ctask;
	struct bnx2i_tmf_request *tmfabort_wqe;
	u32 dword;
	u32 scsi_lun[2];

	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
	/* next free slot in the shared-memory send queue */
	tmfabort_wqe = (struct bnx2i_tmf_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;

	tmfabort_wqe->op_code = tmfabort_hdr->opcode;
	tmfabort_wqe->op_attr = tmfabort_hdr->flags;

	tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
	tmfabort_wqe->reserved2 = 0;
	tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);

	/* ABORT_TASK/TASK_REASSIGN reference a specific outstanding
	 * command; all other TMF functions carry no referenced task */
	switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
	case ISCSI_TM_FUNC_ABORT_TASK:
	case ISCSI_TM_FUNC_TASK_REASSIGN:
		ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
		if (!ctask || !ctask->sc)
			/*
			 * the iscsi layer must have completed the cmd while
			 * was starting up.
			 *
			 * Note: In the case of a SCSI cmd timeout, the task's
			 *       sc is still active; hence ctask->sc != 0
			 *       In this case, the task must be aborted
			 */
			return 0;

		ref_sc = ctask->sc;
		/* encode the referenced task's data direction into the
		 * type bits of ref_itt */
		if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
			dword = (ISCSI_TASK_TYPE_WRITE <<
				 ISCSI_CMD_REQUEST_TYPE_SHIFT);
		else
			dword = (ISCSI_TASK_TYPE_READ <<
				 ISCSI_CMD_REQUEST_TYPE_SHIFT);
		tmfabort_wqe->ref_itt = (dword |
					(tmfabort_hdr->rtt & ISCSI_ITT_MASK));
		break;
	default:
		tmfabort_wqe->ref_itt = RESERVED_ITT;
	}
	/* LUN is copied as two 32-bit words and byte-swapped into the
	 * firmware's expected layout */
	memcpy(scsi_lun, &tmfabort_hdr->lun, sizeof(struct scsi_lun));
	tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
	tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);

	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);

	/* single BD pointing at the adapter's middle-path buffer */
	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
	tmfabort_wqe->bd_list_addr_hi = (u32)
				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
	tmfabort_wqe->num_bds = 1;
	tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	/* advance the SQ producer and ring the doorbell */
	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun /**
449*4882a593Smuzhiyun  * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
450*4882a593Smuzhiyun  * @bnx2i_conn:	iscsi connection
451*4882a593Smuzhiyun  * @mtask:	driver command structure which is requesting
452*4882a593Smuzhiyun  *		a WQE to sent to chip for further processing
453*4882a593Smuzhiyun  *
454*4882a593Smuzhiyun  * prepare and post an iSCSI Text request WQE to CNIC firmware
455*4882a593Smuzhiyun  */
bnx2i_send_iscsi_text(struct bnx2i_conn * bnx2i_conn,struct iscsi_task * mtask)456*4882a593Smuzhiyun int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
457*4882a593Smuzhiyun 			  struct iscsi_task *mtask)
458*4882a593Smuzhiyun {
459*4882a593Smuzhiyun 	struct bnx2i_text_request *text_wqe;
460*4882a593Smuzhiyun 	struct iscsi_text *text_hdr;
461*4882a593Smuzhiyun 	u32 dword;
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	text_hdr = (struct iscsi_text *)mtask->hdr;
464*4882a593Smuzhiyun 	text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	memset(text_wqe, 0, sizeof(struct bnx2i_text_request));
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun 	text_wqe->op_code = text_hdr->opcode;
469*4882a593Smuzhiyun 	text_wqe->op_attr = text_hdr->flags;
470*4882a593Smuzhiyun 	text_wqe->data_length = ntoh24(text_hdr->dlength);
471*4882a593Smuzhiyun 	text_wqe->itt = mtask->itt |
472*4882a593Smuzhiyun 		(ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT);
473*4882a593Smuzhiyun 	text_wqe->ttt = be32_to_cpu(text_hdr->ttt);
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 	text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun 	text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
478*4882a593Smuzhiyun 	text_wqe->resp_bd_list_addr_hi =
479*4882a593Smuzhiyun 			(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) |
482*4882a593Smuzhiyun 		 (bnx2i_conn->gen_pdu.resp_buf_size <<
483*4882a593Smuzhiyun 		  ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
484*4882a593Smuzhiyun 	text_wqe->resp_buffer = dword;
485*4882a593Smuzhiyun 	text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
486*4882a593Smuzhiyun 	text_wqe->bd_list_addr_hi =
487*4882a593Smuzhiyun 			(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
488*4882a593Smuzhiyun 	text_wqe->num_bds = 1;
489*4882a593Smuzhiyun 	text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
492*4882a593Smuzhiyun 	return 0;
493*4882a593Smuzhiyun }
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun /**
497*4882a593Smuzhiyun  * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
498*4882a593Smuzhiyun  * @bnx2i_conn:	iscsi connection
499*4882a593Smuzhiyun  * @cmd:	driver command structure which is requesting
500*4882a593Smuzhiyun  *		a WQE to sent to chip for further processing
501*4882a593Smuzhiyun  *
502*4882a593Smuzhiyun  * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
503*4882a593Smuzhiyun  */
bnx2i_send_iscsi_scsicmd(struct bnx2i_conn * bnx2i_conn,struct bnx2i_cmd * cmd)504*4882a593Smuzhiyun int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
505*4882a593Smuzhiyun 			     struct bnx2i_cmd *cmd)
506*4882a593Smuzhiyun {
507*4882a593Smuzhiyun 	struct bnx2i_cmd_request *scsi_cmd_wqe;
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun 	scsi_cmd_wqe = (struct bnx2i_cmd_request *)
510*4882a593Smuzhiyun 						bnx2i_conn->ep->qp.sq_prod_qe;
511*4882a593Smuzhiyun 	memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
512*4882a593Smuzhiyun 	scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
515*4882a593Smuzhiyun 	return 0;
516*4882a593Smuzhiyun }
517*4882a593Smuzhiyun 
/**
 * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
 * @bnx2i_conn:		iscsi connection
 * @task:		transport layer's command structure pointer which is
 *                      requesting a WQE to sent to chip for further processing
 * @datap:		payload buffer pointer
 * @data_len:		payload data length
 * @unsol:		indicated whether nopout pdu is unsolicited pdu or
 *			in response to target's NOPIN w/ TTT != FFFFFFFF
 *
 * prepare and post a nopout request WQE to CNIC firmware
 *
 * Always returns 0. Payload data (@datap/@data_len != 0) is not
 *	supported: a warning is logged and no BD is set up for it.
 */
int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
			    struct iscsi_task *task,
			    char *datap, int data_len, int unsol)
{
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
	struct bnx2i_nop_out_request *nopout_wqe;
	struct iscsi_nopout *nopout_hdr;

	nopout_hdr = (struct iscsi_nopout *)task->hdr;
	/* next free slot in the shared-memory send queue */
	nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;

	memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request));

	nopout_wqe->op_code = nopout_hdr->opcode;
	nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
	memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);

	/* 57710 requires LUN field to be swapped */
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		swap(nopout_wqe->lun[0], nopout_wqe->lun[1]);

	nopout_wqe->itt = ((u16)task->itt |
			   (ISCSI_TASK_TYPE_MPATH <<
			    ISCSI_TMF_REQUEST_TYPE_SHIFT));
	nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);
	nopout_wqe->flags = 0;
	/* request local completion when this is a solicited NOPOUT reply,
	 * or an unsolicited one carrying the reserved ITT (no NOPIN
	 * response is expected back for it) */
	if (!unsol)
		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
	else if (nopout_hdr->itt == RESERVED_ITT)
		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;

	nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
	nopout_wqe->data_length = data_len;
	if (data_len) {
		/* handle payload data, not required in first release */
		printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
	} else {
		/* no payload: point a single BD at the middle-path buffer */
		nopout_wqe->bd_list_addr_lo = (u32)
					bnx2i_conn->hba->mp_bd_dma;
		nopout_wqe->bd_list_addr_hi =
			(u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
		nopout_wqe->num_bds = 1;
	}
	nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	/* advance the SQ producer and ring the doorbell */
	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun 
580*4882a593Smuzhiyun /**
581*4882a593Smuzhiyun  * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
582*4882a593Smuzhiyun  * @bnx2i_conn:	iscsi connection
583*4882a593Smuzhiyun  * @task:	transport layer's command structure pointer which is
584*4882a593Smuzhiyun  *		requesting a WQE to sent to chip for further processing
585*4882a593Smuzhiyun  *
586*4882a593Smuzhiyun  * prepare and post logout request WQE to CNIC firmware
587*4882a593Smuzhiyun  */
bnx2i_send_iscsi_logout(struct bnx2i_conn * bnx2i_conn,struct iscsi_task * task)588*4882a593Smuzhiyun int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
589*4882a593Smuzhiyun 			    struct iscsi_task *task)
590*4882a593Smuzhiyun {
591*4882a593Smuzhiyun 	struct bnx2i_logout_request *logout_wqe;
592*4882a593Smuzhiyun 	struct iscsi_logout *logout_hdr;
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 	logout_hdr = (struct iscsi_logout *)task->hdr;
595*4882a593Smuzhiyun 
596*4882a593Smuzhiyun 	logout_wqe = (struct bnx2i_logout_request *)
597*4882a593Smuzhiyun 						bnx2i_conn->ep->qp.sq_prod_qe;
598*4882a593Smuzhiyun 	memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
599*4882a593Smuzhiyun 
600*4882a593Smuzhiyun 	logout_wqe->op_code = logout_hdr->opcode;
601*4882a593Smuzhiyun 	logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
602*4882a593Smuzhiyun 	logout_wqe->op_attr =
603*4882a593Smuzhiyun 			logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
604*4882a593Smuzhiyun 	logout_wqe->itt = ((u16)task->itt |
605*4882a593Smuzhiyun 			   (ISCSI_TASK_TYPE_MPATH <<
606*4882a593Smuzhiyun 			    ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
607*4882a593Smuzhiyun 	logout_wqe->data_length = 0;
608*4882a593Smuzhiyun 	logout_wqe->cid = 0;
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun 	logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
611*4882a593Smuzhiyun 	logout_wqe->bd_list_addr_hi = (u32)
612*4882a593Smuzhiyun 				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
613*4882a593Smuzhiyun 	logout_wqe->num_bds = 1;
614*4882a593Smuzhiyun 	logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 	bnx2i_conn->ep->state = EP_STATE_LOGOUT_SENT;
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun 	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
619*4882a593Smuzhiyun 	return 0;
620*4882a593Smuzhiyun }
621*4882a593Smuzhiyun 
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun /**
624*4882a593Smuzhiyun  * bnx2i_update_iscsi_conn - post iSCSI logout request WQE to hardware
625*4882a593Smuzhiyun  * @conn:	iscsi connection which requires iscsi parameter update
626*4882a593Smuzhiyun  *
627*4882a593Smuzhiyun  * sends down iSCSI Conn Update request to move iSCSI conn to FFP
628*4882a593Smuzhiyun  */
bnx2i_update_iscsi_conn(struct iscsi_conn * conn)629*4882a593Smuzhiyun void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
630*4882a593Smuzhiyun {
631*4882a593Smuzhiyun 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
632*4882a593Smuzhiyun 	struct bnx2i_hba *hba = bnx2i_conn->hba;
633*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[2];
634*4882a593Smuzhiyun 	struct iscsi_kwqe_conn_update *update_wqe;
635*4882a593Smuzhiyun 	struct iscsi_kwqe_conn_update conn_update_kwqe;
636*4882a593Smuzhiyun 
637*4882a593Smuzhiyun 	update_wqe = &conn_update_kwqe;
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 	update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
640*4882a593Smuzhiyun 	update_wqe->hdr.flags =
641*4882a593Smuzhiyun 		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 	/* 5771x requires conn context id to be passed as is */
644*4882a593Smuzhiyun 	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
645*4882a593Smuzhiyun 		update_wqe->context_id = bnx2i_conn->ep->ep_cid;
646*4882a593Smuzhiyun 	else
647*4882a593Smuzhiyun 		update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
648*4882a593Smuzhiyun 	update_wqe->conn_flags = 0;
649*4882a593Smuzhiyun 	if (conn->hdrdgst_en)
650*4882a593Smuzhiyun 		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
651*4882a593Smuzhiyun 	if (conn->datadgst_en)
652*4882a593Smuzhiyun 		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
653*4882a593Smuzhiyun 	if (conn->session->initial_r2t_en)
654*4882a593Smuzhiyun 		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
655*4882a593Smuzhiyun 	if (conn->session->imm_data_en)
656*4882a593Smuzhiyun 		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
657*4882a593Smuzhiyun 
658*4882a593Smuzhiyun 	update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
659*4882a593Smuzhiyun 	update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
660*4882a593Smuzhiyun 	update_wqe->first_burst_length = conn->session->first_burst;
661*4882a593Smuzhiyun 	update_wqe->max_burst_length = conn->session->max_burst;
662*4882a593Smuzhiyun 	update_wqe->exp_stat_sn = conn->exp_statsn;
663*4882a593Smuzhiyun 	update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
664*4882a593Smuzhiyun 	update_wqe->session_error_recovery_level = conn->session->erl;
665*4882a593Smuzhiyun 	iscsi_conn_printk(KERN_ALERT, conn,
666*4882a593Smuzhiyun 			  "bnx2i: conn update - MBL 0x%x FBL 0x%x"
667*4882a593Smuzhiyun 			  "MRDSL_I 0x%x MRDSL_T 0x%x \n",
668*4882a593Smuzhiyun 			  update_wqe->max_burst_length,
669*4882a593Smuzhiyun 			  update_wqe->first_burst_length,
670*4882a593Smuzhiyun 			  update_wqe->max_recv_pdu_length,
671*4882a593Smuzhiyun 			  update_wqe->max_send_pdu_length);
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) update_wqe;
674*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
675*4882a593Smuzhiyun 		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
676*4882a593Smuzhiyun }
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun /**
680*4882a593Smuzhiyun  * bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware
681*4882a593Smuzhiyun  * @t:	timer context used to fetch the endpoint (transport
682*4882a593Smuzhiyun  *	handle) structure pointer
683*4882a593Smuzhiyun  *
684*4882a593Smuzhiyun  * routine to handle connection offload/destroy request timeout
685*4882a593Smuzhiyun  */
bnx2i_ep_ofld_timer(struct timer_list * t)686*4882a593Smuzhiyun void bnx2i_ep_ofld_timer(struct timer_list *t)
687*4882a593Smuzhiyun {
688*4882a593Smuzhiyun 	struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer);
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	if (ep->state == EP_STATE_OFLD_START) {
691*4882a593Smuzhiyun 		printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
692*4882a593Smuzhiyun 		ep->state = EP_STATE_OFLD_FAILED;
693*4882a593Smuzhiyun 	} else if (ep->state == EP_STATE_DISCONN_START) {
694*4882a593Smuzhiyun 		printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
695*4882a593Smuzhiyun 		ep->state = EP_STATE_DISCONN_TIMEDOUT;
696*4882a593Smuzhiyun 	} else if (ep->state == EP_STATE_CLEANUP_START) {
697*4882a593Smuzhiyun 		printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
698*4882a593Smuzhiyun 		ep->state = EP_STATE_CLEANUP_FAILED;
699*4882a593Smuzhiyun 	}
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 	wake_up_interruptible(&ep->ofld_wait);
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 
/*
 * bnx2i_power_of2 - return log2(val) when val is a power of two, else 0
 *
 * Mirrors the original semantics exactly: values with more than one bit
 * set yield 0 (callers treat 0 as "not a power of two").
 */
static int bnx2i_power_of2(u32 val)
{
	u32 exp;

	/* more than one bit set -> not a power of two */
	if (val & (val - 1))
		return 0;

	for (exp = 0, val--; val; val >>= 1)
		exp++;

	return exp;
}
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 
/**
 * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
 * @hba:	adapter structure pointer
 * @cmd:	driver command structure which is requesting
 *		a WQE to sent to chip for further processing
 *
 * prepares and posts an ISCSI_OPCODE_CLEANUP_REQUEST SQ WQE so the chip
 * releases the iscsi task context identified by @cmd->req.itt
 */
void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct bnx2i_cleanup_request *cmd_cleanup;

	/* grab the next free slot on the connection's send queue */
	cmd_cleanup =
		(struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
	memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));

	cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
	cmd_cleanup->itt = cmd->req.itt;
	cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */

	/* advance the SQ producer and ring the doorbell for one new WQE */
	bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
}
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun /**
744*4882a593Smuzhiyun  * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
745*4882a593Smuzhiyun  * @hba:	adapter structure pointer
746*4882a593Smuzhiyun  * @ep:		endpoint (transport identifier) structure
747*4882a593Smuzhiyun  *
748*4882a593Smuzhiyun  * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
749*4882a593Smuzhiyun  * 	iscsi connection context clean-up process
750*4882a593Smuzhiyun  */
bnx2i_send_conn_destroy(struct bnx2i_hba * hba,struct bnx2i_endpoint * ep)751*4882a593Smuzhiyun int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
752*4882a593Smuzhiyun {
753*4882a593Smuzhiyun 	struct kwqe *kwqe_arr[2];
754*4882a593Smuzhiyun 	struct iscsi_kwqe_conn_destroy conn_cleanup;
755*4882a593Smuzhiyun 	int rc = -EINVAL;
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
760*4882a593Smuzhiyun 	conn_cleanup.hdr.flags =
761*4882a593Smuzhiyun 		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
762*4882a593Smuzhiyun 	/* 5771x requires conn context id to be passed as is */
763*4882a593Smuzhiyun 	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
764*4882a593Smuzhiyun 		conn_cleanup.context_id = ep->ep_cid;
765*4882a593Smuzhiyun 	else
766*4882a593Smuzhiyun 		conn_cleanup.context_id = (ep->ep_cid >> 7);
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
771*4882a593Smuzhiyun 	if (hba->cnic && hba->cnic->submit_kwqes)
772*4882a593Smuzhiyun 		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun 	return rc;
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 
778*4882a593Smuzhiyun /**
/**
 * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
 * @hba: 		adapter structure pointer
 * @ep: 		endpoint (transport identifier) structure
 *
 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
 */
static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
					 struct bnx2i_endpoint *ep)
{
	struct kwqe *kwqe_arr[2];
	struct iscsi_kwqe_conn_offload1 ofld_req1;
	struct iscsi_kwqe_conn_offload2 ofld_req2;
	dma_addr_t dma_addr;
	int num_kwqes = 2;
	u32 *ptbl;
	int rc = -EINVAL;	/* returned unchanged if no submit_kwqes hook */

	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;

	/* OFFLOAD_CONN1 carries the SQ/CQ page table DMA addresses */
	dma_addr = ep->qp.sq_pgtbl_phys;
	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	dma_addr = ep->qp.cq_pgtbl_phys;
	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	dma_addr = ep->qp.rq_pgtbl_phys;
	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	/* seed OFFLOAD_CONN2 with the first PTE of the SQ/CQ page tables;
	 * on these devices PTEs are stored high word first (see
	 * setup_qp_page_tables), hence .hi is read before .lo */
	ptbl = (u32 *) ep->qp.sq_pgtbl_virt;

	ofld_req2.sq_first_pte.hi = *ptbl++;
	ofld_req2.sq_first_pte.lo = *ptbl;

	ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
	ofld_req2.cq_first_pte.hi = *ptbl++;
	ofld_req2.cq_first_pte.lo = *ptbl;

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	ofld_req2.num_additional_wqes = 0;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 
/**
 * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
 * @hba: 		adapter structure pointer
 * @ep: 		endpoint (transport identifier) structure
 *
 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
 */
static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
					  struct bnx2i_endpoint *ep)
{
	struct kwqe *kwqe_arr[5];
	struct iscsi_kwqe_conn_offload1 ofld_req1;
	struct iscsi_kwqe_conn_offload2 ofld_req2;
	struct iscsi_kwqe_conn_offload3 ofld_req3[1];
	dma_addr_t dma_addr;
	int num_kwqes = 2;
	u32 *ptbl;
	int rc = -EINVAL;	/* returned unchanged if no submit_kwqes hook */

	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;

	/* on 5771x the first ISCSI_*_DB_SIZE bytes of each page table hold
	 * the doorbell area, so the chip is pointed past that header */
	dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	/* seed the request with the first PTE of each queue's page table,
	 * skipping the doorbell header */
	ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
	ofld_req2.sq_first_pte.hi = *ptbl++;
	ofld_req2.sq_first_pte.lo = *ptbl;

	ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
	ofld_req2.cq_first_pte.hi = *ptbl++;
	ofld_req2.cq_first_pte.lo = *ptbl;

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;

	/* 57710 takes a third offload KWQE carrying the RQ first PTE */
	ofld_req2.num_additional_wqes = 1;
	memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
	ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
	ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
	ofld_req3[0].qp_first_pte[0].lo = *ptbl;

	kwqe_arr[2] = (struct kwqe *) ofld_req3;
	/* need if we decide to go with multiple KCQE's per conn */
	num_kwqes += 1;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun /**
907*4882a593Smuzhiyun  * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
908*4882a593Smuzhiyun  *
909*4882a593Smuzhiyun  * @hba: 		adapter structure pointer
910*4882a593Smuzhiyun  * @ep: 		endpoint (transport identifier) structure
911*4882a593Smuzhiyun  *
912*4882a593Smuzhiyun  * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
913*4882a593Smuzhiyun  */
bnx2i_send_conn_ofld_req(struct bnx2i_hba * hba,struct bnx2i_endpoint * ep)914*4882a593Smuzhiyun int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
915*4882a593Smuzhiyun {
916*4882a593Smuzhiyun 	int rc;
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
919*4882a593Smuzhiyun 		rc = bnx2i_5771x_send_conn_ofld_req(hba, ep);
920*4882a593Smuzhiyun 	else
921*4882a593Smuzhiyun 		rc = bnx2i_570x_send_conn_ofld_req(hba, ep);
922*4882a593Smuzhiyun 
923*4882a593Smuzhiyun 	return rc;
924*4882a593Smuzhiyun }
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun /**
928*4882a593Smuzhiyun  * setup_qp_page_tables - iscsi QP page table setup function
929*4882a593Smuzhiyun  * @ep:		endpoint (transport identifier) structure
930*4882a593Smuzhiyun  *
931*4882a593Smuzhiyun  * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires
932*4882a593Smuzhiyun  * 	64-bit address in big endian format. Whereas 10G/sec (57710) requires
933*4882a593Smuzhiyun  * 	PT in little endian format
934*4882a593Smuzhiyun  */
setup_qp_page_tables(struct bnx2i_endpoint * ep)935*4882a593Smuzhiyun static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
936*4882a593Smuzhiyun {
937*4882a593Smuzhiyun 	int num_pages;
938*4882a593Smuzhiyun 	u32 *ptbl;
939*4882a593Smuzhiyun 	dma_addr_t page;
940*4882a593Smuzhiyun 	int cnic_dev_10g;
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
943*4882a593Smuzhiyun 		cnic_dev_10g = 1;
944*4882a593Smuzhiyun 	else
945*4882a593Smuzhiyun 		cnic_dev_10g = 0;
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 	/* SQ page table */
948*4882a593Smuzhiyun 	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
949*4882a593Smuzhiyun 	num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
950*4882a593Smuzhiyun 	page = ep->qp.sq_phys;
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	if (cnic_dev_10g)
953*4882a593Smuzhiyun 		ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
954*4882a593Smuzhiyun 	else
955*4882a593Smuzhiyun 		ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
956*4882a593Smuzhiyun 	while (num_pages--) {
957*4882a593Smuzhiyun 		if (cnic_dev_10g) {
958*4882a593Smuzhiyun 			/* PTE is written in little endian format for 57710 */
959*4882a593Smuzhiyun 			*ptbl = (u32) page;
960*4882a593Smuzhiyun 			ptbl++;
961*4882a593Smuzhiyun 			*ptbl = (u32) ((u64) page >> 32);
962*4882a593Smuzhiyun 			ptbl++;
963*4882a593Smuzhiyun 			page += CNIC_PAGE_SIZE;
964*4882a593Smuzhiyun 		} else {
965*4882a593Smuzhiyun 			/* PTE is written in big endian format for
966*4882a593Smuzhiyun 			 * 5706/5708/5709 devices */
967*4882a593Smuzhiyun 			*ptbl = (u32) ((u64) page >> 32);
968*4882a593Smuzhiyun 			ptbl++;
969*4882a593Smuzhiyun 			*ptbl = (u32) page;
970*4882a593Smuzhiyun 			ptbl++;
971*4882a593Smuzhiyun 			page += CNIC_PAGE_SIZE;
972*4882a593Smuzhiyun 		}
973*4882a593Smuzhiyun 	}
974*4882a593Smuzhiyun 
975*4882a593Smuzhiyun 	/* RQ page table */
976*4882a593Smuzhiyun 	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
977*4882a593Smuzhiyun 	num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
978*4882a593Smuzhiyun 	page = ep->qp.rq_phys;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	if (cnic_dev_10g)
981*4882a593Smuzhiyun 		ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
982*4882a593Smuzhiyun 	else
983*4882a593Smuzhiyun 		ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
984*4882a593Smuzhiyun 	while (num_pages--) {
985*4882a593Smuzhiyun 		if (cnic_dev_10g) {
986*4882a593Smuzhiyun 			/* PTE is written in little endian format for 57710 */
987*4882a593Smuzhiyun 			*ptbl = (u32) page;
988*4882a593Smuzhiyun 			ptbl++;
989*4882a593Smuzhiyun 			*ptbl = (u32) ((u64) page >> 32);
990*4882a593Smuzhiyun 			ptbl++;
991*4882a593Smuzhiyun 			page += CNIC_PAGE_SIZE;
992*4882a593Smuzhiyun 		} else {
993*4882a593Smuzhiyun 			/* PTE is written in big endian format for
994*4882a593Smuzhiyun 			 * 5706/5708/5709 devices */
995*4882a593Smuzhiyun 			*ptbl = (u32) ((u64) page >> 32);
996*4882a593Smuzhiyun 			ptbl++;
997*4882a593Smuzhiyun 			*ptbl = (u32) page;
998*4882a593Smuzhiyun 			ptbl++;
999*4882a593Smuzhiyun 			page += CNIC_PAGE_SIZE;
1000*4882a593Smuzhiyun 		}
1001*4882a593Smuzhiyun 	}
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	/* CQ page table */
1004*4882a593Smuzhiyun 	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
1005*4882a593Smuzhiyun 	num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
1006*4882a593Smuzhiyun 	page = ep->qp.cq_phys;
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 	if (cnic_dev_10g)
1009*4882a593Smuzhiyun 		ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
1010*4882a593Smuzhiyun 	else
1011*4882a593Smuzhiyun 		ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
1012*4882a593Smuzhiyun 	while (num_pages--) {
1013*4882a593Smuzhiyun 		if (cnic_dev_10g) {
1014*4882a593Smuzhiyun 			/* PTE is written in little endian format for 57710 */
1015*4882a593Smuzhiyun 			*ptbl = (u32) page;
1016*4882a593Smuzhiyun 			ptbl++;
1017*4882a593Smuzhiyun 			*ptbl = (u32) ((u64) page >> 32);
1018*4882a593Smuzhiyun 			ptbl++;
1019*4882a593Smuzhiyun 			page += CNIC_PAGE_SIZE;
1020*4882a593Smuzhiyun 		} else {
1021*4882a593Smuzhiyun 			/* PTE is written in big endian format for
1022*4882a593Smuzhiyun 			 * 5706/5708/5709 devices */
1023*4882a593Smuzhiyun 			*ptbl = (u32) ((u64) page >> 32);
1024*4882a593Smuzhiyun 			ptbl++;
1025*4882a593Smuzhiyun 			*ptbl = (u32) page;
1026*4882a593Smuzhiyun 			ptbl++;
1027*4882a593Smuzhiyun 			page += CNIC_PAGE_SIZE;
1028*4882a593Smuzhiyun 		}
1029*4882a593Smuzhiyun 	}
1030*4882a593Smuzhiyun }
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 
/**
 * bnx2i_alloc_qp_resc - allocates required resources for QP.
 * @hba:	adapter structure pointer
 * @ep:		endpoint (transport identifier) structure
 *
 * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
 *	memory for SQ/RQ/CQ and page tables. EP structure elements such
 *	as producer/consumer indexes/pointers, queue sizes and page table
 *	contents are setup
 *
 * Returns 0 on success, -ENOMEM if any coherent DMA allocation fails;
 * on failure all partially acquired QP resources are released via
 * bnx2i_free_qp_resc() before returning.
 */
int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	struct bnx2i_5771x_cq_db *cq_db;

	ep->hba = hba;
	ep->conn = NULL;
	ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;

	/* Allocate page table memory for SQ which is page aligned */
	/* queue and page table sizes are rounded up to CNIC_PAGE_SIZE */
	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
	ep->qp.sq_mem_size =
		(ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
	ep->qp.sq_pgtbl_size =
		(ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	ep->qp.sq_pgtbl_size =
		(ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;

	ep->qp.sq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
				   &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.sq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
				  ep->qp.sq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual SQ element */
	ep->qp.sq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
				   &ep->qp.sq_phys, GFP_KERNEL);
	if (!ep->qp.sq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
				  ep->qp.sq_mem_size);
		goto mem_alloc_err;
	}

	/* SQ producer/consumer tracking starts at the first WQE */
	ep->qp.sq_first_qe = ep->qp.sq_virt;
	ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
	ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
	ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
	ep->qp.sq_prod_idx = 0;
	ep->qp.sq_cons_idx = 0;
	ep->qp.sqe_left = hba->max_sqes;

	/* Allocate page table memory for CQ which is page aligned */
	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
	ep->qp.cq_mem_size =
		(ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
	ep->qp.cq_pgtbl_size =
		(ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	ep->qp.cq_pgtbl_size =
		(ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;

	ep->qp.cq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
				   &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.cq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
				  ep->qp.cq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual CQ element */
	ep->qp.cq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
				   &ep->qp.cq_phys, GFP_KERNEL);
	if (!ep->qp.cq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
				  ep->qp.cq_mem_size);
		goto mem_alloc_err;
	}

	/* CQ producer/consumer tracking starts at the first CQE */
	ep->qp.cq_first_qe = ep->qp.cq_virt;
	ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
	ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
	ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
	ep->qp.cq_prod_idx = 0;
	ep->qp.cq_cons_idx = 0;
	ep->qp.cqe_left = hba->max_cqes;
	ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
	ep->qp.cqe_size = hba->max_cqes;

	/* Invalidate all EQ CQE index, req only for 57710 */
	/* the CQ doorbell lives at the start of the CQ page table area */
	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
	memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);

	/* Allocate page table memory for RQ which is page aligned */
	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
	ep->qp.rq_mem_size =
		(ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
	ep->qp.rq_pgtbl_size =
		(ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	ep->qp.rq_pgtbl_size =
		(ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;

	ep->qp.rq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
				   &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.rq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
				  ep->qp.rq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual RQ element */
	ep->qp.rq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
				   &ep->qp.rq_phys, GFP_KERNEL);
	if (!ep->qp.rq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
				  ep->qp.rq_mem_size);
		goto mem_alloc_err;
	}

	ep->qp.rq_first_qe = ep->qp.rq_virt;
	ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
	ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
	ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
	/* NOTE(review): RQ producer index seeds at 0x8000 while consumer is
	 * 0 - presumably a wrap/valid bit expected by firmware; confirm
	 * against the RQ doorbell handling before changing */
	ep->qp.rq_prod_idx = 0x8000;
	ep->qp.rq_cons_idx = 0;
	ep->qp.rqe_left = hba->max_rqes;

	/* write SQ/RQ/CQ PTEs in the device-specific endian layout */
	setup_qp_page_tables(ep);

	return 0;

mem_alloc_err:
	/* releases whatever subset of SQ/CQ/RQ memory was allocated */
	bnx2i_free_qp_resc(hba, ep);
	return -ENOMEM;
}
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun /**
1177*4882a593Smuzhiyun  * bnx2i_free_qp_resc - free memory resources held by QP
1178*4882a593Smuzhiyun  * @hba:	adapter structure pointer
1179*4882a593Smuzhiyun  * @ep:	endpoint (transport identifier) structure
1180*4882a593Smuzhiyun  *
1181*4882a593Smuzhiyun  * Free QP resources - SQ/RQ/CQ memory and page tables.
1182*4882a593Smuzhiyun  */
bnx2i_free_qp_resc(struct bnx2i_hba * hba,struct bnx2i_endpoint * ep)1183*4882a593Smuzhiyun void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1184*4882a593Smuzhiyun {
1185*4882a593Smuzhiyun 	if (ep->qp.ctx_base) {
1186*4882a593Smuzhiyun 		iounmap(ep->qp.ctx_base);
1187*4882a593Smuzhiyun 		ep->qp.ctx_base = NULL;
1188*4882a593Smuzhiyun 	}
1189*4882a593Smuzhiyun 	/* Free SQ mem */
1190*4882a593Smuzhiyun 	if (ep->qp.sq_pgtbl_virt) {
1191*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
1192*4882a593Smuzhiyun 				  ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
1193*4882a593Smuzhiyun 		ep->qp.sq_pgtbl_virt = NULL;
1194*4882a593Smuzhiyun 		ep->qp.sq_pgtbl_phys = 0;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 	if (ep->qp.sq_virt) {
1197*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1198*4882a593Smuzhiyun 				  ep->qp.sq_virt, ep->qp.sq_phys);
1199*4882a593Smuzhiyun 		ep->qp.sq_virt = NULL;
1200*4882a593Smuzhiyun 		ep->qp.sq_phys = 0;
1201*4882a593Smuzhiyun 	}
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	/* Free RQ mem */
1204*4882a593Smuzhiyun 	if (ep->qp.rq_pgtbl_virt) {
1205*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1206*4882a593Smuzhiyun 				  ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
1207*4882a593Smuzhiyun 		ep->qp.rq_pgtbl_virt = NULL;
1208*4882a593Smuzhiyun 		ep->qp.rq_pgtbl_phys = 0;
1209*4882a593Smuzhiyun 	}
1210*4882a593Smuzhiyun 	if (ep->qp.rq_virt) {
1211*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1212*4882a593Smuzhiyun 				  ep->qp.rq_virt, ep->qp.rq_phys);
1213*4882a593Smuzhiyun 		ep->qp.rq_virt = NULL;
1214*4882a593Smuzhiyun 		ep->qp.rq_phys = 0;
1215*4882a593Smuzhiyun 	}
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	/* Free CQ mem */
1218*4882a593Smuzhiyun 	if (ep->qp.cq_pgtbl_virt) {
1219*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1220*4882a593Smuzhiyun 				  ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
1221*4882a593Smuzhiyun 		ep->qp.cq_pgtbl_virt = NULL;
1222*4882a593Smuzhiyun 		ep->qp.cq_pgtbl_phys = 0;
1223*4882a593Smuzhiyun 	}
1224*4882a593Smuzhiyun 	if (ep->qp.cq_virt) {
1225*4882a593Smuzhiyun 		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1226*4882a593Smuzhiyun 				  ep->qp.cq_virt, ep->qp.cq_phys);
1227*4882a593Smuzhiyun 		ep->qp.cq_virt = NULL;
1228*4882a593Smuzhiyun 		ep->qp.cq_phys = 0;
1229*4882a593Smuzhiyun 	}
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 
/**
 * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
 * @hba:	adapter structure pointer
 *
 * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w
 * 	This results in iSCSi support validation and on-chip context manager
 * 	initialization.  Firmware completes this handshake with a CQE carrying
 * 	the result of iscsi support validation. Parameter carried by
 * 	iscsi init request determines the number of offloaded connection and
 * 	tolerance level for iscsi protocol violation this hba/chip can support
 *
 * Returns the status of cnic->submit_kwqes(), or 0 when the cnic device
 * or its submit_kwqes handler is unavailable.
 */
int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
{
	struct kwqe *kwqe_arr[3];
	struct iscsi_kwqe_init1 iscsi_init;
	struct iscsi_kwqe_init2 iscsi_init2;
	int rc = 0;
	u64 mask64;

	memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1));
	memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2));

	/* recompute SQ/CQ/RQ sizing for this device before telling f/w */
	bnx2i_adjust_qp_size(hba);

	/* page size is encoded for f/w as log2(page size) - 8 */
	iscsi_init.flags =
		(CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
	if (en_tcp_dack)
		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
	iscsi_init.reserved0 = 0;
	iscsi_init.num_cqs = 1;
	iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
	iscsi_init.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	/* split the 64-bit dummy buffer DMA address into lo/hi words */
	iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	iscsi_init.dummy_buffer_addr_hi =
		(u32) ((u64) hba->dummy_buf_dma >> 32);

	/* one command cell per two SQ entries; pack ccell count (low 16)
	 * and task count (high 16) into ctx_ccell_tasks
	 */
	hba->num_ccell = hba->max_sqes >> 1;
	hba->ctx_ccell_tasks =
			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
	iscsi_init.num_ccells_per_conn = hba->num_ccell;
	iscsi_init.num_tasks_per_conn = hba->max_sqes;
	iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
	iscsi_init.sq_num_wqes = hba->max_sqes;
	iscsi_init.cq_log_wqes_per_page =
		(u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
	iscsi_init.cq_num_wqes = hba->max_cqes;
	/* round page counts up to cover a partial last page */
	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
	iscsi_init.rq_num_wqes = hba->max_rqes;


	iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
	iscsi_init2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
	/* default error bit map: mask protocol-violation completions known
	 * to be triggered by specific targets (noted per vendor below)
	 */
	mask64 = 0x0ULL;
	mask64 |= (
		/* CISCO MDS */
		(1UL <<
		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
		/* HP MSA1510i */
		(1UL <<
		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
		/* EMC */
		(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
	/* module params error_mask1/error_mask2 override the low/high
	 * 32 bits of the default mask, respectively
	 */
	if (error_mask1) {
		iscsi_init2.error_bit_map[0] = error_mask1;
		mask64 ^= (u32)(mask64);	/* clear low 32 bits */
		mask64 |= error_mask1;
	} else
		iscsi_init2.error_bit_map[0] = (u32) mask64;

	if (error_mask2) {
		iscsi_init2.error_bit_map[1] = error_mask2;
		mask64 &= 0xffffffff;		/* clear high 32 bits */
		mask64 |= ((u64)error_mask2 << 32);
	} else
		iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);

	/* remember the effective mask for later CQE error filtering */
	iscsi_error_mask = mask64;

	/* submit both INIT1 and INIT2 KWQEs in one call */
	kwqe_arr[0] = (struct kwqe *) &iscsi_init;
	kwqe_arr[1] = (struct kwqe *) &iscsi_init2;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
	return rc;
}
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 
/**
 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
 * @session:	iscsi session
 * @bnx2i_conn:	bnx2i connection
 * @cqe:	pointer to newly DMA'ed CQE entry for processing
 *
 * process SCSI CMD Response CQE & complete the request to SCSI-ML
 *
 * Runs with session->back_lock held (bh-disabled) for the task lookup
 * through PDU completion. Always returns 0.
 */
int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
				struct bnx2i_conn *bnx2i_conn,
				struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;
	struct bnx2i_cmd_response *resp_cqe;
	struct bnx2i_cmd *bnx2i_cmd;
	struct iscsi_task *task;
	struct iscsi_scsi_rsp *hdr;
	u32 datalen = 0;

	resp_cqe = (struct bnx2i_cmd_response *)cqe;
	spin_lock_bh(&session->back_lock);
	task = iscsi_itt_to_task(conn,
				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
	if (!task)
		goto fail;

	bnx2i_cmd = task->dd_data;

	/* update per-connection and per-adapter statistics based on the
	 * completed command's data direction
	 */
	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
		conn->datain_pdus_cnt +=
			resp_cqe->task_stat.read_stat.num_data_ins;
		conn->rxdata_octets +=
			bnx2i_cmd->req.total_data_transfer_length;
		ADD_STATS_64(hba, rx_pdus,
			     resp_cqe->task_stat.read_stat.num_data_ins);
		ADD_STATS_64(hba, rx_bytes,
			     bnx2i_cmd->req.total_data_transfer_length);
	} else {
		conn->dataout_pdus_cnt +=
			resp_cqe->task_stat.write_stat.num_data_outs;
		conn->r2t_pdus_cnt +=
			resp_cqe->task_stat.write_stat.num_r2ts;
		conn->txdata_octets +=
			bnx2i_cmd->req.total_data_transfer_length;
		ADD_STATS_64(hba, tx_pdus,
			     resp_cqe->task_stat.write_stat.num_data_outs);
		ADD_STATS_64(hba, tx_bytes,
			     bnx2i_cmd->req.total_data_transfer_length);
		/* R2Ts received from the target count as rx PDUs */
		ADD_STATS_64(hba, rx_pdus,
			     resp_cqe->task_stat.write_stat.num_r2ts);
	}
	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);

	/* rebuild a standard iSCSI SCSI response header from the CQE so
	 * libiscsi can consume it as a regular PDU
	 */
	hdr = (struct iscsi_scsi_rsp *)task->hdr;
	resp_cqe = (struct bnx2i_cmd_response *)cqe;
	hdr->opcode = resp_cqe->op_code;
	hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
	hdr->response = resp_cqe->response;
	hdr->cmd_status = resp_cqe->status;
	hdr->flags = resp_cqe->response_flags;
	hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);

	/* DATA_IN completions carry no sense payload - complete directly */
	if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
		goto done;

	if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
		datalen = resp_cqe->data_length;
		/* sense data shorter than its 2-byte length prefix is
		 * useless - complete with no payload
		 */
		if (datalen < 2)
			goto done;

		/* clamp sense length to the RQ buffer size and to what the
		 * connection agreed to receive
		 */
		if (datalen > BNX2I_RQ_WQE_SIZE) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "sense data len %d > RQ sz\n",
					  datalen);
			datalen = BNX2I_RQ_WQE_SIZE;
		} else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "sense data len %d > conn data\n",
					  datalen);
			datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
		}

		/* copy sense data out of the RQ, then return the RQ slot */
		bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
		bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
	}

done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
			     conn->data, datalen);
fail:
	spin_unlock_bh(&session->back_lock);
	return 0;
}
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun /**
1426*4882a593Smuzhiyun  * bnx2i_process_login_resp - this function handles iscsi login response
1427*4882a593Smuzhiyun  * @session:		iscsi session pointer
1428*4882a593Smuzhiyun  * @bnx2i_conn:		iscsi connection pointer
1429*4882a593Smuzhiyun  * @cqe:		pointer to newly DMA'ed CQE entry for processing
1430*4882a593Smuzhiyun  *
1431*4882a593Smuzhiyun  * process Login Response CQE & complete it to open-iscsi user daemon
1432*4882a593Smuzhiyun  */
bnx2i_process_login_resp(struct iscsi_session * session,struct bnx2i_conn * bnx2i_conn,struct cqe * cqe)1433*4882a593Smuzhiyun static int bnx2i_process_login_resp(struct iscsi_session *session,
1434*4882a593Smuzhiyun 				    struct bnx2i_conn *bnx2i_conn,
1435*4882a593Smuzhiyun 				    struct cqe *cqe)
1436*4882a593Smuzhiyun {
1437*4882a593Smuzhiyun 	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1438*4882a593Smuzhiyun 	struct iscsi_task *task;
1439*4882a593Smuzhiyun 	struct bnx2i_login_response *login;
1440*4882a593Smuzhiyun 	struct iscsi_login_rsp *resp_hdr;
1441*4882a593Smuzhiyun 	int pld_len;
1442*4882a593Smuzhiyun 	int pad_len;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	login = (struct bnx2i_login_response *) cqe;
1445*4882a593Smuzhiyun 	spin_lock(&session->back_lock);
1446*4882a593Smuzhiyun 	task = iscsi_itt_to_task(conn,
1447*4882a593Smuzhiyun 				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
1448*4882a593Smuzhiyun 	if (!task)
1449*4882a593Smuzhiyun 		goto done;
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1452*4882a593Smuzhiyun 	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1453*4882a593Smuzhiyun 	resp_hdr->opcode = login->op_code;
1454*4882a593Smuzhiyun 	resp_hdr->flags = login->response_flags;
1455*4882a593Smuzhiyun 	resp_hdr->max_version = login->version_max;
1456*4882a593Smuzhiyun 	resp_hdr->active_version = login->version_active;
1457*4882a593Smuzhiyun 	resp_hdr->hlength = 0;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	hton24(resp_hdr->dlength, login->data_length);
1460*4882a593Smuzhiyun 	memcpy(resp_hdr->isid, &login->isid_lo, 6);
1461*4882a593Smuzhiyun 	resp_hdr->tsih = cpu_to_be16(login->tsih);
1462*4882a593Smuzhiyun 	resp_hdr->itt = task->hdr->itt;
1463*4882a593Smuzhiyun 	resp_hdr->statsn = cpu_to_be32(login->stat_sn);
1464*4882a593Smuzhiyun 	resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
1465*4882a593Smuzhiyun 	resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
1466*4882a593Smuzhiyun 	resp_hdr->status_class = login->status_class;
1467*4882a593Smuzhiyun 	resp_hdr->status_detail = login->status_detail;
1468*4882a593Smuzhiyun 	pld_len = login->data_length;
1469*4882a593Smuzhiyun 	bnx2i_conn->gen_pdu.resp_wr_ptr =
1470*4882a593Smuzhiyun 					bnx2i_conn->gen_pdu.resp_buf + pld_len;
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	pad_len = 0;
1473*4882a593Smuzhiyun 	if (pld_len & 0x3)
1474*4882a593Smuzhiyun 		pad_len = 4 - (pld_len % 4);
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	if (pad_len) {
1477*4882a593Smuzhiyun 		int i = 0;
1478*4882a593Smuzhiyun 		for (i = 0; i < pad_len; i++) {
1479*4882a593Smuzhiyun 			bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
1480*4882a593Smuzhiyun 			bnx2i_conn->gen_pdu.resp_wr_ptr++;
1481*4882a593Smuzhiyun 		}
1482*4882a593Smuzhiyun 	}
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
1485*4882a593Smuzhiyun 		bnx2i_conn->gen_pdu.resp_buf,
1486*4882a593Smuzhiyun 		bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
1487*4882a593Smuzhiyun done:
1488*4882a593Smuzhiyun 	spin_unlock(&session->back_lock);
1489*4882a593Smuzhiyun 	return 0;
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun /**
1494*4882a593Smuzhiyun  * bnx2i_process_text_resp - this function handles iscsi text response
1495*4882a593Smuzhiyun  * @session:	iscsi session pointer
1496*4882a593Smuzhiyun  * @bnx2i_conn:	iscsi connection pointer
1497*4882a593Smuzhiyun  * @cqe:	pointer to newly DMA'ed CQE entry for processing
1498*4882a593Smuzhiyun  *
1499*4882a593Smuzhiyun  * process iSCSI Text Response CQE&  complete it to open-iscsi user daemon
1500*4882a593Smuzhiyun  */
bnx2i_process_text_resp(struct iscsi_session * session,struct bnx2i_conn * bnx2i_conn,struct cqe * cqe)1501*4882a593Smuzhiyun static int bnx2i_process_text_resp(struct iscsi_session *session,
1502*4882a593Smuzhiyun 				   struct bnx2i_conn *bnx2i_conn,
1503*4882a593Smuzhiyun 				   struct cqe *cqe)
1504*4882a593Smuzhiyun {
1505*4882a593Smuzhiyun 	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1506*4882a593Smuzhiyun 	struct iscsi_task *task;
1507*4882a593Smuzhiyun 	struct bnx2i_text_response *text;
1508*4882a593Smuzhiyun 	struct iscsi_text_rsp *resp_hdr;
1509*4882a593Smuzhiyun 	int pld_len;
1510*4882a593Smuzhiyun 	int pad_len;
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	text = (struct bnx2i_text_response *) cqe;
1513*4882a593Smuzhiyun 	spin_lock(&session->back_lock);
1514*4882a593Smuzhiyun 	task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
1515*4882a593Smuzhiyun 	if (!task)
1516*4882a593Smuzhiyun 		goto done;
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr;
1519*4882a593Smuzhiyun 	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1520*4882a593Smuzhiyun 	resp_hdr->opcode = text->op_code;
1521*4882a593Smuzhiyun 	resp_hdr->flags = text->response_flags;
1522*4882a593Smuzhiyun 	resp_hdr->hlength = 0;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	hton24(resp_hdr->dlength, text->data_length);
1525*4882a593Smuzhiyun 	resp_hdr->itt = task->hdr->itt;
1526*4882a593Smuzhiyun 	resp_hdr->ttt = cpu_to_be32(text->ttt);
1527*4882a593Smuzhiyun 	resp_hdr->statsn = task->hdr->exp_statsn;
1528*4882a593Smuzhiyun 	resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn);
1529*4882a593Smuzhiyun 	resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn);
1530*4882a593Smuzhiyun 	pld_len = text->data_length;
1531*4882a593Smuzhiyun 	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf +
1532*4882a593Smuzhiyun 					  pld_len;
1533*4882a593Smuzhiyun 	pad_len = 0;
1534*4882a593Smuzhiyun 	if (pld_len & 0x3)
1535*4882a593Smuzhiyun 		pad_len = 4 - (pld_len % 4);
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	if (pad_len) {
1538*4882a593Smuzhiyun 		int i = 0;
1539*4882a593Smuzhiyun 		for (i = 0; i < pad_len; i++) {
1540*4882a593Smuzhiyun 			bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
1541*4882a593Smuzhiyun 			bnx2i_conn->gen_pdu.resp_wr_ptr++;
1542*4882a593Smuzhiyun 		}
1543*4882a593Smuzhiyun 	}
1544*4882a593Smuzhiyun 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
1545*4882a593Smuzhiyun 			     bnx2i_conn->gen_pdu.resp_buf,
1546*4882a593Smuzhiyun 			     bnx2i_conn->gen_pdu.resp_wr_ptr -
1547*4882a593Smuzhiyun 			     bnx2i_conn->gen_pdu.resp_buf);
1548*4882a593Smuzhiyun done:
1549*4882a593Smuzhiyun 	spin_unlock(&session->back_lock);
1550*4882a593Smuzhiyun 	return 0;
1551*4882a593Smuzhiyun }
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun /**
1555*4882a593Smuzhiyun  * bnx2i_process_tmf_resp - this function handles iscsi TMF response
1556*4882a593Smuzhiyun  * @session:		iscsi session pointer
1557*4882a593Smuzhiyun  * @bnx2i_conn:		iscsi connection pointer
1558*4882a593Smuzhiyun  * @cqe:		pointer to newly DMA'ed CQE entry for processing
1559*4882a593Smuzhiyun  *
1560*4882a593Smuzhiyun  * process iSCSI TMF Response CQE and wake up the driver eh thread.
1561*4882a593Smuzhiyun  */
bnx2i_process_tmf_resp(struct iscsi_session * session,struct bnx2i_conn * bnx2i_conn,struct cqe * cqe)1562*4882a593Smuzhiyun static int bnx2i_process_tmf_resp(struct iscsi_session *session,
1563*4882a593Smuzhiyun 				  struct bnx2i_conn *bnx2i_conn,
1564*4882a593Smuzhiyun 				  struct cqe *cqe)
1565*4882a593Smuzhiyun {
1566*4882a593Smuzhiyun 	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1567*4882a593Smuzhiyun 	struct iscsi_task *task;
1568*4882a593Smuzhiyun 	struct bnx2i_tmf_response *tmf_cqe;
1569*4882a593Smuzhiyun 	struct iscsi_tm_rsp *resp_hdr;
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
1572*4882a593Smuzhiyun 	spin_lock(&session->back_lock);
1573*4882a593Smuzhiyun 	task = iscsi_itt_to_task(conn,
1574*4882a593Smuzhiyun 				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
1575*4882a593Smuzhiyun 	if (!task)
1576*4882a593Smuzhiyun 		goto done;
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1579*4882a593Smuzhiyun 	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1580*4882a593Smuzhiyun 	resp_hdr->opcode = tmf_cqe->op_code;
1581*4882a593Smuzhiyun 	resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
1582*4882a593Smuzhiyun 	resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
1583*4882a593Smuzhiyun 	resp_hdr->itt = task->hdr->itt;
1584*4882a593Smuzhiyun 	resp_hdr->response = tmf_cqe->response;
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1587*4882a593Smuzhiyun done:
1588*4882a593Smuzhiyun 	spin_unlock(&session->back_lock);
1589*4882a593Smuzhiyun 	return 0;
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun /**
1593*4882a593Smuzhiyun  * bnx2i_process_logout_resp - this function handles iscsi logout response
1594*4882a593Smuzhiyun  * @session:		iscsi session pointer
1595*4882a593Smuzhiyun  * @bnx2i_conn:		iscsi connection pointer
1596*4882a593Smuzhiyun  * @cqe:		pointer to newly DMA'ed CQE entry for processing
1597*4882a593Smuzhiyun  *
1598*4882a593Smuzhiyun  * process iSCSI Logout Response CQE & make function call to
1599*4882a593Smuzhiyun  * notify the user daemon.
1600*4882a593Smuzhiyun  */
bnx2i_process_logout_resp(struct iscsi_session * session,struct bnx2i_conn * bnx2i_conn,struct cqe * cqe)1601*4882a593Smuzhiyun static int bnx2i_process_logout_resp(struct iscsi_session *session,
1602*4882a593Smuzhiyun 				     struct bnx2i_conn *bnx2i_conn,
1603*4882a593Smuzhiyun 				     struct cqe *cqe)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun 	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1606*4882a593Smuzhiyun 	struct iscsi_task *task;
1607*4882a593Smuzhiyun 	struct bnx2i_logout_response *logout;
1608*4882a593Smuzhiyun 	struct iscsi_logout_rsp *resp_hdr;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	logout = (struct bnx2i_logout_response *) cqe;
1611*4882a593Smuzhiyun 	spin_lock(&session->back_lock);
1612*4882a593Smuzhiyun 	task = iscsi_itt_to_task(conn,
1613*4882a593Smuzhiyun 				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
1614*4882a593Smuzhiyun 	if (!task)
1615*4882a593Smuzhiyun 		goto done;
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1618*4882a593Smuzhiyun 	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1619*4882a593Smuzhiyun 	resp_hdr->opcode = logout->op_code;
1620*4882a593Smuzhiyun 	resp_hdr->flags = logout->response;
1621*4882a593Smuzhiyun 	resp_hdr->hlength = 0;
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	resp_hdr->itt = task->hdr->itt;
1624*4882a593Smuzhiyun 	resp_hdr->statsn = task->hdr->exp_statsn;
1625*4882a593Smuzhiyun 	resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
1626*4882a593Smuzhiyun 	resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
1629*4882a593Smuzhiyun 	resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
1634*4882a593Smuzhiyun done:
1635*4882a593Smuzhiyun 	spin_unlock(&session->back_lock);
1636*4882a593Smuzhiyun 	return 0;
1637*4882a593Smuzhiyun }
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun /**
1640*4882a593Smuzhiyun  * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
1641*4882a593Smuzhiyun  * @session:		iscsi session pointer
1642*4882a593Smuzhiyun  * @bnx2i_conn:		iscsi connection pointer
1643*4882a593Smuzhiyun  * @cqe:		pointer to newly DMA'ed CQE entry for processing
1644*4882a593Smuzhiyun  *
1645*4882a593Smuzhiyun  * process iSCSI NOPIN local completion CQE, frees IIT and command structures
1646*4882a593Smuzhiyun  */
bnx2i_process_nopin_local_cmpl(struct iscsi_session * session,struct bnx2i_conn * bnx2i_conn,struct cqe * cqe)1647*4882a593Smuzhiyun static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
1648*4882a593Smuzhiyun 					   struct bnx2i_conn *bnx2i_conn,
1649*4882a593Smuzhiyun 					   struct cqe *cqe)
1650*4882a593Smuzhiyun {
1651*4882a593Smuzhiyun 	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1652*4882a593Smuzhiyun 	struct bnx2i_nop_in_msg *nop_in;
1653*4882a593Smuzhiyun 	struct iscsi_task *task;
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
1656*4882a593Smuzhiyun 	spin_lock(&session->back_lock);
1657*4882a593Smuzhiyun 	task = iscsi_itt_to_task(conn,
1658*4882a593Smuzhiyun 				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
1659*4882a593Smuzhiyun 	if (task)
1660*4882a593Smuzhiyun 		__iscsi_put_task(task);
1661*4882a593Smuzhiyun 	spin_unlock(&session->back_lock);
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun 
/**
 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
 * @bnx2i_conn:	iscsi connection
 *
 * Firmware advances the RQ producer index for every unsolicited PDU even
 * when the payload length is zero; consume and return one RQ slot so the
 * driver's consumer index stays in step with the firmware.
 */
static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
{
	char throwaway[2];

	/* read one byte into a scratch buffer purely to advance the RQ */
	bnx2i_get_rq_buf(bnx2i_conn, throwaway, 1);
	bnx2i_put_rq_buf(bnx2i_conn, 1);
}
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 
/**
 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
 * @session:		iscsi session pointer
 * @bnx2i_conn:		iscsi connection pointer
 * @cqe:		pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI target's proactive iSCSI NOPIN request
 *
 * Returns 1 when the NOPIN was unsolicited (target-initiated, reserved
 * ITT), 0 when it answers one of the driver's own nop-outs.
 */
static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_nop_in_msg *nop_in;
	struct iscsi_nopin *hdr;
	int tgt_async_nop = 0;

	nop_in = (struct bnx2i_nop_in_msg *)cqe;

	spin_lock(&session->back_lock);
	/* rebuild a standard NOPIN header from the CQE fields */
	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = nop_in->op_code;
	hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
	hdr->ttt = cpu_to_be32(nop_in->ttt);

	/* a reserved ITT marks a target-initiated NOPIN; it consumed an RQ
	 * slot even with no payload, so adjust the RQ before completing
	 */
	if (nop_in->itt == (u16) RESERVED_ITT) {
		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
		hdr->itt = RESERVED_ITT;
		tgt_async_nop = 1;
		goto done;
	}

	/* this is a response to one of our nop-outs */
	task = iscsi_itt_to_task(conn,
			 (itt_t) (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX));
	if (task) {
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
		hdr->itt = task->hdr->itt;
		hdr->ttt = cpu_to_be32(nop_in->ttt);	/* re-stated; same value as set above */
		memcpy(&hdr->lun, nop_in->lun, 8);
	}
done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
	spin_unlock(&session->back_lock);

	return tgt_async_nop;
}
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 
/**
 * bnx2i_process_async_mesg - this function handles iscsi async message
 * @session:		iscsi session pointer
 * @bnx2i_conn:		iscsi connection pointer
 * @cqe:		pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI ASYNC Message: rebuilds an iSCSI async-event BHS from the
 * firmware CQE fields and hands it to libiscsi for completion.
 */
static void bnx2i_process_async_mesg(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct cqe *cqe)
{
	struct bnx2i_async_msg *async_cqe;
	struct iscsi_async *resp_hdr;
	u8 async_event;

	/* Async PDUs are unsolicited; release the RQ slot the FW consumed. */
	bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);

	async_cqe = (struct bnx2i_async_msg *)cqe;
	async_event = async_cqe->async_event;

	if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "async: scsi events not supported\n");
		return;
	}

	/* Serialize against other PDU completions on this session while the
	 * shared gen_pdu.resp_hdr scratch header is rebuilt and completed. */
	spin_lock(&session->back_lock);
	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = async_cqe->op_code;
	resp_hdr->flags = 0x80;		/* final (F) bit, always set here */

	memcpy(&resp_hdr->lun, async_cqe->lun, 8);
	/* CQE carries host-order SNs; BHS fields are big-endian on the wire */
	resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);

	resp_hdr->async_event = async_cqe->async_event;
	resp_hdr->async_vcode = async_cqe->async_vcode;

	resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
	resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
	resp_hdr->param3 = cpu_to_be16(async_cqe->param3);

	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
	spin_unlock(&session->back_lock);
}
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 
/**
 * bnx2i_process_reject_mesg - process iscsi reject pdu
 * @session:		iscsi session pointer
 * @bnx2i_conn:		iscsi connection pointer
 * @cqe:		pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI REJECT message: copies any attached data (the rejected PDU
 * header) out of the RQ, rebuilds the reject BHS and completes it upward.
 */
static void bnx2i_process_reject_mesg(struct iscsi_session *session,
				      struct bnx2i_conn *bnx2i_conn,
				      struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_reject_msg *reject;
	struct iscsi_reject *hdr;

	reject = (struct bnx2i_reject_msg *) cqe;
	if (reject->data_length) {
		/* REJECT data segment (offending PDU header) sits in the RQ;
		 * copy it into conn->data, then return the RQ slot to FW. */
		bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
		bnx2i_put_rq_buf(bnx2i_conn, 1);
	} else
		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);

	/* back_lock protects the shared gen_pdu.resp_hdr scratch header */
	spin_lock(&session->back_lock);
	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = reject->op_code;
	hdr->reason = reject->reason;
	hton24(hdr->dlength, reject->data_length);
	hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
			     reject->data_length);
	spin_unlock(&session->back_lock);
}
1818*4882a593Smuzhiyun 
/**
 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
 * @session:		iscsi session pointer
 * @bnx2i_conn:		iscsi connection pointer
 * @cqe:		pointer to newly DMA'ed CQE entry for processing
 *
 * process command cleanup response CQE during conn shutdown or error recovery.
 * Validates the ITT, then signals the waiter blocked on cmd_cleanup_cmpl.
 */
static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
					   struct bnx2i_conn *bnx2i_conn,
					   struct cqe *cqe)
{
	struct bnx2i_cleanup_response *cmd_clean_rsp;
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;

	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
	/* back_lock guards the itt -> task lookup table */
	spin_lock(&session->back_lock);
	task = iscsi_itt_to_task(conn,
			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
	if (!task)
		/* diagnostic only; the cleanup waiter is completed regardless */
		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
	spin_unlock(&session->back_lock);
	complete(&bnx2i_conn->cmd_cleanup_cmpl);
}
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 
/**
 * bnx2i_percpu_io_thread - thread per cpu for ios
 *
 * @arg:	ptr to bnx2i_percpu_info structure
 *
 * Drains the per-cpu work list and completes queued SCSI command response
 * CQEs in process context; sleeps when the list is empty until woken by
 * bnx2i_queue_scsi_cmd_resp().
 */
int bnx2i_percpu_io_thread(void *arg)
{
	struct bnx2i_percpu_s *p = arg;
	struct bnx2i_work *work, *tmp;
	LIST_HEAD(work_list);

	set_user_nice(current, MIN_NICE);

	while (!kthread_should_stop()) {
		spin_lock_bh(&p->p_work_lock);
		while (!list_empty(&p->work_list)) {
			/* grab the whole batch, then drop the lock so the
			 * bh producer isn't blocked while we process */
			list_splice_init(&p->work_list, &work_list);
			spin_unlock_bh(&p->p_work_lock);

			list_for_each_entry_safe(work, tmp, &work_list, list) {
				list_del_init(&work->list);
				/* work allocated in the bh, freed here */
				bnx2i_process_scsi_cmd_resp(work->session,
							    work->bnx2i_conn,
							    &work->cqe);
				atomic_dec(&work->bnx2i_conn->work_cnt);
				kfree(work);
			}
			spin_lock_bh(&p->p_work_lock);
		}
		/* set state *before* releasing the lock so a wake_up_process
		 * racing with us cannot be lost (lost-wakeup avoidance) */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&p->p_work_lock);
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 
/**
 * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread
 * @session:		iscsi session
 * @bnx2i_conn:		bnx2i connection
 * @cqe:		pointer to newly DMA'ed CQE entry for processing
 *
 * this function is called by generic KCQ handler to queue all pending cmd
 * completion CQEs
 *
 * The implementation is to queue the cmd response based on the
 * last recorded command for the given connection.  The
 * cpu_id gets recorded upon task_xmit.  No out-of-order completion!
 *
 * Return: 0 when queued to the per-cpu thread; negative errno when the CQE
 * had to be completed inline in this bh (or the ITT was invalid).
 */
static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct bnx2i_nop_in_msg *cqe)
{
	struct bnx2i_work *bnx2i_work = NULL;
	struct bnx2i_percpu_s *p = NULL;
	struct iscsi_task *task;
	struct scsi_cmnd *sc;
	int rc = 0;

	/* back_lock guards the itt -> task table for the lookup */
	spin_lock(&session->back_lock);
	task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
				 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
	if (!task || !task->sc) {
		spin_unlock(&session->back_lock);
		return -EINVAL;
	}
	sc = task->sc;

	spin_unlock(&session->back_lock);

	/* route the completion to the cpu the request was submitted on */
	p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
	spin_lock(&p->p_work_lock);
	if (unlikely(!p->iothread)) {
		/* per-cpu thread gone (cpu offline); complete inline below */
		rc = -EINVAL;
		goto err;
	}
	/* Alloc and copy to the cqe */
	bnx2i_work = kzalloc(sizeof(struct bnx2i_work), GFP_ATOMIC);
	if (bnx2i_work) {
		INIT_LIST_HEAD(&bnx2i_work->list);
		bnx2i_work->session = session;
		bnx2i_work->bnx2i_conn = bnx2i_conn;
		memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe));
		list_add_tail(&bnx2i_work->list, &p->work_list);
		atomic_inc(&bnx2i_conn->work_cnt);
		wake_up_process(p->iothread);
		spin_unlock(&p->p_work_lock);
		goto done;
	} else
		rc = -ENOMEM;
err:
	spin_unlock(&p->p_work_lock);
	/* fallback: process the response right here in the bh */
	bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe);
done:
	return rc;
}
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 
/**
 * bnx2i_process_new_cqes - process newly DMA'ed CQE's
 * @bnx2i_conn:		bnx2i connection
 *
 * this function is called by generic KCQ handler to process all pending CQE's.
 * Walks the completion-queue ring, dispatching each CQE by iSCSI opcode,
 * until the next entry's sequence number shows firmware has not produced it.
 *
 * Return: number of CQEs consumed.
 */
static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct bnx2i_hba *hba = bnx2i_conn->hba;
	struct qp_info *qp;
	struct bnx2i_nop_in_msg *nopin;
	int tgt_async_msg;
	int cqe_cnt = 0;

	if (bnx2i_conn->ep == NULL)
		return 0;

	qp = &bnx2i_conn->ep->qp;

	if (!qp->cq_virt) {
		printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
		       hba->netdev->name);
		goto out;
	}
	while (1) {
		nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
		/* ring entry is valid only when its seq number matches the
		 * expected one; otherwise FW hasn't written it yet */
		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
			break;

		/* rx suspended (conn teardown): still consume unsolicited
		 * NOP-Ins so their RQ slot is returned; drop everything else */
		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
			if (nopin->op_code == ISCSI_OP_NOOP_IN &&
			    nopin->itt == (u16) RESERVED_ITT) {
				printk(KERN_ALERT "bnx2i: Unsolicited "
				       "NOP-In detected for suspended "
				       "connection dev=%s!\n",
				       hba->netdev->name);
				bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
				goto cqe_out;
			}
			break;
		}
		tgt_async_msg = 0;

		switch (nopin->op_code) {
		case ISCSI_OP_SCSI_CMD_RSP:
		case ISCSI_OP_SCSI_DATA_IN:
			/* Run the kthread engine only for data cmds
			   All other cmds will be completed in this bh! */
			bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
			goto done;
		case ISCSI_OP_LOGIN_RSP:
			bnx2i_process_login_resp(session, bnx2i_conn,
						 qp->cq_cons_qe);
			break;
		case ISCSI_OP_SCSI_TMFUNC_RSP:
			bnx2i_process_tmf_resp(session, bnx2i_conn,
					       qp->cq_cons_qe);
			break;
		case ISCSI_OP_TEXT_RSP:
			bnx2i_process_text_resp(session, bnx2i_conn,
						qp->cq_cons_qe);
			break;
		case ISCSI_OP_LOGOUT_RSP:
			bnx2i_process_logout_resp(session, bnx2i_conn,
						  qp->cq_cons_qe);
			break;
		case ISCSI_OP_NOOP_IN:
			if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
						     qp->cq_cons_qe))
				tgt_async_msg = 1;
			break;
		case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
			bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
						       qp->cq_cons_qe);
			break;
		case ISCSI_OP_ASYNC_EVENT:
			bnx2i_process_async_mesg(session, bnx2i_conn,
						 qp->cq_cons_qe);
			tgt_async_msg = 1;
			break;
		case ISCSI_OP_REJECT:
			bnx2i_process_reject_mesg(session, bnx2i_conn,
						  qp->cq_cons_qe);
			break;
		case ISCSI_OPCODE_CLEANUP_RESPONSE:
			bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
						       qp->cq_cons_qe);
			break;
		default:
			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
					  nopin->op_code);
		}

		ADD_STATS_64(hba, rx_pdus, 1);
		ADD_STATS_64(hba, rx_bytes, nopin->data_length);
done:
		/* target-initiated (async) PDUs don't retire an active cmd */
		if (!tgt_async_msg) {
			if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
				printk(KERN_ALERT "bnx2i (%s): no active cmd! "
				       "op 0x%x\n",
				       hba->netdev->name,
				       nopin->op_code);
			else
				atomic_dec(&bnx2i_conn->ep->num_active_cmds);
		}
cqe_out:
		/* clear out in production version only, till beta keep opcode
		 * field intact, will be helpful in debugging (context dump)
		 * nopin->op_code = 0;
		 */
		cqe_cnt++;
		/* advance expected seq number; it wraps at 2 * ring size */
		qp->cqe_exp_seq_sn++;
		if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
			qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;

		/* advance consumer pointer, wrapping at the ring end */
		if (qp->cq_cons_qe == qp->cq_last_qe) {
			qp->cq_cons_qe = qp->cq_first_qe;
			qp->cq_cons_idx = 0;
		} else {
			qp->cq_cons_qe++;
			qp->cq_cons_idx++;
		}
	}
out:
	return cqe_cnt;
}
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun /**
2079*4882a593Smuzhiyun  * bnx2i_fastpath_notification - process global event queue (KCQ)
2080*4882a593Smuzhiyun  * @hba:		adapter structure pointer
2081*4882a593Smuzhiyun  * @new_cqe_kcqe:	pointer to newly DMA'ed KCQE entry
2082*4882a593Smuzhiyun  *
2083*4882a593Smuzhiyun  * Fast path event notification handler, KCQ entry carries context id
2084*4882a593Smuzhiyun  *	of the connection that has 1 or more pending CQ entries
2085*4882a593Smuzhiyun  */
bnx2i_fastpath_notification(struct bnx2i_hba * hba,struct iscsi_kcqe * new_cqe_kcqe)2086*4882a593Smuzhiyun static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
2087*4882a593Smuzhiyun 					struct iscsi_kcqe *new_cqe_kcqe)
2088*4882a593Smuzhiyun {
2089*4882a593Smuzhiyun 	struct bnx2i_conn *bnx2i_conn;
2090*4882a593Smuzhiyun 	u32 iscsi_cid;
2091*4882a593Smuzhiyun 	int nxt_idx;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
2094*4882a593Smuzhiyun 	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	if (!bnx2i_conn) {
2097*4882a593Smuzhiyun 		printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
2098*4882a593Smuzhiyun 		return;
2099*4882a593Smuzhiyun 	}
2100*4882a593Smuzhiyun 	if (!bnx2i_conn->ep) {
2101*4882a593Smuzhiyun 		printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
2102*4882a593Smuzhiyun 		return;
2103*4882a593Smuzhiyun 	}
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	bnx2i_process_new_cqes(bnx2i_conn);
2106*4882a593Smuzhiyun 	nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep,
2107*4882a593Smuzhiyun 						CNIC_ARM_CQE_FP);
2108*4882a593Smuzhiyun 	if (nxt_idx && nxt_idx == bnx2i_process_new_cqes(bnx2i_conn))
2109*4882a593Smuzhiyun 		bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
2110*4882a593Smuzhiyun }
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun /**
2114*4882a593Smuzhiyun  * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
2115*4882a593Smuzhiyun  * @hba:		adapter structure pointer
2116*4882a593Smuzhiyun  * @update_kcqe:	kcqe pointer
2117*4882a593Smuzhiyun  *
2118*4882a593Smuzhiyun  * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
2119*4882a593Smuzhiyun  */
bnx2i_process_update_conn_cmpl(struct bnx2i_hba * hba,struct iscsi_kcqe * update_kcqe)2120*4882a593Smuzhiyun static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
2121*4882a593Smuzhiyun 					   struct iscsi_kcqe *update_kcqe)
2122*4882a593Smuzhiyun {
2123*4882a593Smuzhiyun 	struct bnx2i_conn *conn;
2124*4882a593Smuzhiyun 	u32 iscsi_cid;
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	iscsi_cid = update_kcqe->iscsi_conn_id;
2127*4882a593Smuzhiyun 	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	if (!conn) {
2130*4882a593Smuzhiyun 		printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
2131*4882a593Smuzhiyun 		return;
2132*4882a593Smuzhiyun 	}
2133*4882a593Smuzhiyun 	if (!conn->ep) {
2134*4882a593Smuzhiyun 		printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
2135*4882a593Smuzhiyun 		return;
2136*4882a593Smuzhiyun 	}
2137*4882a593Smuzhiyun 
2138*4882a593Smuzhiyun 	if (update_kcqe->completion_status) {
2139*4882a593Smuzhiyun 		printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
2140*4882a593Smuzhiyun 		conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
2141*4882a593Smuzhiyun 	} else
2142*4882a593Smuzhiyun 		conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	wake_up_interruptible(&conn->ep->ofld_wait);
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 
/**
 * bnx2i_recovery_que_add_conn - add connection to recovery queue
 * @hba:		adapter structure pointer
 * @bnx2i_conn:		iscsi connection
 *
 * Add connection to recovery queue and schedule adapter eh worker.
 * Delegates to libiscsi, which marks the connection failed and kicks
 * off session recovery.
 */
static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
					struct bnx2i_conn *bnx2i_conn)
{
	iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
			   ISCSI_ERR_CONN_FAILED);
}
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun /**
2164*4882a593Smuzhiyun  * bnx2i_process_tcp_error - process error notification on a given connection
2165*4882a593Smuzhiyun  *
2166*4882a593Smuzhiyun  * @hba: 		adapter structure pointer
2167*4882a593Smuzhiyun  * @tcp_err: 		tcp error kcqe pointer
2168*4882a593Smuzhiyun  *
2169*4882a593Smuzhiyun  * handles tcp level error notifications from FW.
2170*4882a593Smuzhiyun  */
bnx2i_process_tcp_error(struct bnx2i_hba * hba,struct iscsi_kcqe * tcp_err)2171*4882a593Smuzhiyun static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
2172*4882a593Smuzhiyun 				    struct iscsi_kcqe *tcp_err)
2173*4882a593Smuzhiyun {
2174*4882a593Smuzhiyun 	struct bnx2i_conn *bnx2i_conn;
2175*4882a593Smuzhiyun 	u32 iscsi_cid;
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	iscsi_cid = tcp_err->iscsi_conn_id;
2178*4882a593Smuzhiyun 	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 	if (!bnx2i_conn) {
2181*4882a593Smuzhiyun 		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
2182*4882a593Smuzhiyun 		return;
2183*4882a593Smuzhiyun 	}
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun 	printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
2186*4882a593Smuzhiyun 			  iscsi_cid, tcp_err->completion_status);
2187*4882a593Smuzhiyun 	bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
2188*4882a593Smuzhiyun }
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun /**
2192*4882a593Smuzhiyun  * bnx2i_process_iscsi_error - process error notification on a given connection
2193*4882a593Smuzhiyun  * @hba:		adapter structure pointer
2194*4882a593Smuzhiyun  * @iscsi_err:		iscsi error kcqe pointer
2195*4882a593Smuzhiyun  *
2196*4882a593Smuzhiyun  * handles iscsi error notifications from the FW. Firmware based in initial
2197*4882a593Smuzhiyun  *	handshake classifies iscsi protocol / TCP rfc violation into either
2198*4882a593Smuzhiyun  *	warning or error indications. If indication is of "Error" type, driver
2199*4882a593Smuzhiyun  *	will initiate session recovery for that connection/session. For
2200*4882a593Smuzhiyun  *	"Warning" type indication, driver will put out a system log message
2201*4882a593Smuzhiyun  *	(there will be only one message for each type for the life of the
2202*4882a593Smuzhiyun  *	session, this is to avoid un-necessarily overloading the system)
2203*4882a593Smuzhiyun  */
bnx2i_process_iscsi_error(struct bnx2i_hba * hba,struct iscsi_kcqe * iscsi_err)2204*4882a593Smuzhiyun static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
2205*4882a593Smuzhiyun 				      struct iscsi_kcqe *iscsi_err)
2206*4882a593Smuzhiyun {
2207*4882a593Smuzhiyun 	struct bnx2i_conn *bnx2i_conn;
2208*4882a593Smuzhiyun 	u32 iscsi_cid;
2209*4882a593Smuzhiyun 	char warn_notice[] = "iscsi_warning";
2210*4882a593Smuzhiyun 	char error_notice[] = "iscsi_error";
2211*4882a593Smuzhiyun 	char additional_notice[64];
2212*4882a593Smuzhiyun 	char *message;
2213*4882a593Smuzhiyun 	int need_recovery;
2214*4882a593Smuzhiyun 	u64 err_mask64;
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	iscsi_cid = iscsi_err->iscsi_conn_id;
2217*4882a593Smuzhiyun 	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
2218*4882a593Smuzhiyun 	if (!bnx2i_conn) {
2219*4882a593Smuzhiyun 		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
2220*4882a593Smuzhiyun 		return;
2221*4882a593Smuzhiyun 	}
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 	err_mask64 = (0x1ULL << iscsi_err->completion_status);
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	if (err_mask64 & iscsi_error_mask) {
2226*4882a593Smuzhiyun 		need_recovery = 0;
2227*4882a593Smuzhiyun 		message = warn_notice;
2228*4882a593Smuzhiyun 	} else {
2229*4882a593Smuzhiyun 		need_recovery = 1;
2230*4882a593Smuzhiyun 		message = error_notice;
2231*4882a593Smuzhiyun 	}
2232*4882a593Smuzhiyun 
2233*4882a593Smuzhiyun 	switch (iscsi_err->completion_status) {
2234*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
2235*4882a593Smuzhiyun 		strcpy(additional_notice, "hdr digest err");
2236*4882a593Smuzhiyun 		break;
2237*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
2238*4882a593Smuzhiyun 		strcpy(additional_notice, "data digest err");
2239*4882a593Smuzhiyun 		break;
2240*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
2241*4882a593Smuzhiyun 		strcpy(additional_notice, "wrong opcode rcvd");
2242*4882a593Smuzhiyun 		break;
2243*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
2244*4882a593Smuzhiyun 		strcpy(additional_notice, "AHS len > 0 rcvd");
2245*4882a593Smuzhiyun 		break;
2246*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
2247*4882a593Smuzhiyun 		strcpy(additional_notice, "invalid ITT rcvd");
2248*4882a593Smuzhiyun 		break;
2249*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
2250*4882a593Smuzhiyun 		strcpy(additional_notice, "wrong StatSN rcvd");
2251*4882a593Smuzhiyun 		break;
2252*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
2253*4882a593Smuzhiyun 		strcpy(additional_notice, "wrong DataSN rcvd");
2254*4882a593Smuzhiyun 		break;
2255*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
2256*4882a593Smuzhiyun 		strcpy(additional_notice, "pend R2T violation");
2257*4882a593Smuzhiyun 		break;
2258*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
2259*4882a593Smuzhiyun 		strcpy(additional_notice, "ERL0, UO");
2260*4882a593Smuzhiyun 		break;
2261*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
2262*4882a593Smuzhiyun 		strcpy(additional_notice, "ERL0, U1");
2263*4882a593Smuzhiyun 		break;
2264*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
2265*4882a593Smuzhiyun 		strcpy(additional_notice, "ERL0, U2");
2266*4882a593Smuzhiyun 		break;
2267*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
2268*4882a593Smuzhiyun 		strcpy(additional_notice, "ERL0, U3");
2269*4882a593Smuzhiyun 		break;
2270*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
2271*4882a593Smuzhiyun 		strcpy(additional_notice, "ERL0, U4");
2272*4882a593Smuzhiyun 		break;
2273*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
2274*4882a593Smuzhiyun 		strcpy(additional_notice, "ERL0, U5");
2275*4882a593Smuzhiyun 		break;
2276*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
2277*4882a593Smuzhiyun 		strcpy(additional_notice, "ERL0, U6");
2278*4882a593Smuzhiyun 		break;
2279*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
2280*4882a593Smuzhiyun 		strcpy(additional_notice, "invalid resi len");
2281*4882a593Smuzhiyun 		break;
2282*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
2283*4882a593Smuzhiyun 		strcpy(additional_notice, "MRDSL violation");
2284*4882a593Smuzhiyun 		break;
2285*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
2286*4882a593Smuzhiyun 		strcpy(additional_notice, "F-bit not set");
2287*4882a593Smuzhiyun 		break;
2288*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
2289*4882a593Smuzhiyun 		strcpy(additional_notice, "invalid TTT");
2290*4882a593Smuzhiyun 		break;
2291*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
2292*4882a593Smuzhiyun 		strcpy(additional_notice, "invalid DataSN");
2293*4882a593Smuzhiyun 		break;
2294*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
2295*4882a593Smuzhiyun 		strcpy(additional_notice, "burst len violation");
2296*4882a593Smuzhiyun 		break;
2297*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
2298*4882a593Smuzhiyun 		strcpy(additional_notice, "buf offset violation");
2299*4882a593Smuzhiyun 		break;
2300*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
2301*4882a593Smuzhiyun 		strcpy(additional_notice, "invalid LUN field");
2302*4882a593Smuzhiyun 		break;
2303*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
2304*4882a593Smuzhiyun 		strcpy(additional_notice, "invalid R2TSN field");
2305*4882a593Smuzhiyun 		break;
2306*4882a593Smuzhiyun #define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 	\
2307*4882a593Smuzhiyun 	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
2308*4882a593Smuzhiyun 	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
2309*4882a593Smuzhiyun 		strcpy(additional_notice, "invalid cmd len1");
2310*4882a593Smuzhiyun 		break;
2311*4882a593Smuzhiyun #define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 	\
2312*4882a593Smuzhiyun 	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
2313*4882a593Smuzhiyun 	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
2314*4882a593Smuzhiyun 		strcpy(additional_notice, "invalid cmd len2");
2315*4882a593Smuzhiyun 		break;
2316*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
2317*4882a593Smuzhiyun 		strcpy(additional_notice,
2318*4882a593Smuzhiyun 		       "pend r2t exceeds MaxOutstandingR2T value");
2319*4882a593Smuzhiyun 		break;
2320*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
2321*4882a593Smuzhiyun 		strcpy(additional_notice, "TTT is rsvd");
2322*4882a593Smuzhiyun 		break;
2323*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
2324*4882a593Smuzhiyun 		strcpy(additional_notice, "MBL violation");
2325*4882a593Smuzhiyun 		break;
2326*4882a593Smuzhiyun #define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO 	\
2327*4882a593Smuzhiyun 	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
2328*4882a593Smuzhiyun 	case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
2329*4882a593Smuzhiyun 		strcpy(additional_notice, "data seg len != 0");
2330*4882a593Smuzhiyun 		break;
2331*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
2332*4882a593Smuzhiyun 		strcpy(additional_notice, "reject pdu len error");
2333*4882a593Smuzhiyun 		break;
2334*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
2335*4882a593Smuzhiyun 		strcpy(additional_notice, "async pdu len error");
2336*4882a593Smuzhiyun 		break;
2337*4882a593Smuzhiyun 	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
2338*4882a593Smuzhiyun 		strcpy(additional_notice, "nopin pdu len error");
2339*4882a593Smuzhiyun 		break;
2340*4882a593Smuzhiyun #define BNX2_ERR_PEND_R2T_IN_CLEANUP			\
2341*4882a593Smuzhiyun 	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
2342*4882a593Smuzhiyun 	case BNX2_ERR_PEND_R2T_IN_CLEANUP:
2343*4882a593Smuzhiyun 		strcpy(additional_notice, "pend r2t in cleanup");
2344*4882a593Smuzhiyun 		break;
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
2347*4882a593Smuzhiyun 		strcpy(additional_notice, "IP fragments rcvd");
2348*4882a593Smuzhiyun 		break;
2349*4882a593Smuzhiyun 	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
2350*4882a593Smuzhiyun 		strcpy(additional_notice, "IP options error");
2351*4882a593Smuzhiyun 		break;
2352*4882a593Smuzhiyun 	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
2353*4882a593Smuzhiyun 		strcpy(additional_notice, "urgent flag error");
2354*4882a593Smuzhiyun 		break;
2355*4882a593Smuzhiyun 	default:
2356*4882a593Smuzhiyun 		printk(KERN_ALERT "iscsi_err - unknown err %x\n",
2357*4882a593Smuzhiyun 				  iscsi_err->completion_status);
2358*4882a593Smuzhiyun 	}
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun 	if (need_recovery) {
2361*4882a593Smuzhiyun 		iscsi_conn_printk(KERN_ALERT,
2362*4882a593Smuzhiyun 				  bnx2i_conn->cls_conn->dd_data,
2363*4882a593Smuzhiyun 				  "bnx2i: %s - %s\n",
2364*4882a593Smuzhiyun 				  message, additional_notice);
2365*4882a593Smuzhiyun 
2366*4882a593Smuzhiyun 		iscsi_conn_printk(KERN_ALERT,
2367*4882a593Smuzhiyun 				  bnx2i_conn->cls_conn->dd_data,
2368*4882a593Smuzhiyun 				  "conn_err - hostno %d conn %p, "
2369*4882a593Smuzhiyun 				  "iscsi_cid %x cid %x\n",
2370*4882a593Smuzhiyun 				  bnx2i_conn->hba->shost->host_no,
2371*4882a593Smuzhiyun 				  bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
2372*4882a593Smuzhiyun 				  bnx2i_conn->ep->ep_cid);
2373*4882a593Smuzhiyun 		bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
2374*4882a593Smuzhiyun 	} else
2375*4882a593Smuzhiyun 		if (!test_and_set_bit(iscsi_err->completion_status,
2376*4882a593Smuzhiyun 				      (void *) &bnx2i_conn->violation_notified))
2377*4882a593Smuzhiyun 			iscsi_conn_printk(KERN_ALERT,
2378*4882a593Smuzhiyun 					  bnx2i_conn->cls_conn->dd_data,
2379*4882a593Smuzhiyun 					  "bnx2i: %s - %s\n",
2380*4882a593Smuzhiyun 					  message, additional_notice);
2381*4882a593Smuzhiyun }
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun /**
2385*4882a593Smuzhiyun  * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
2386*4882a593Smuzhiyun  * @hba:		adapter structure pointer
2387*4882a593Smuzhiyun  * @conn_destroy:	conn destroy kcqe pointer
2388*4882a593Smuzhiyun  *
2389*4882a593Smuzhiyun  * handles connection destroy completion request.
2390*4882a593Smuzhiyun  */
bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba * hba,struct iscsi_kcqe * conn_destroy)2391*4882a593Smuzhiyun static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
2392*4882a593Smuzhiyun 					    struct iscsi_kcqe *conn_destroy)
2393*4882a593Smuzhiyun {
2394*4882a593Smuzhiyun 	struct bnx2i_endpoint *ep;
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun 	ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
2397*4882a593Smuzhiyun 	if (!ep) {
2398*4882a593Smuzhiyun 		printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
2399*4882a593Smuzhiyun 				  "offload request, unexpected completion\n");
2400*4882a593Smuzhiyun 		return;
2401*4882a593Smuzhiyun 	}
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 	if (hba != ep->hba) {
2404*4882a593Smuzhiyun 		printk(KERN_ALERT "conn destroy- error hba mis-match\n");
2405*4882a593Smuzhiyun 		return;
2406*4882a593Smuzhiyun 	}
2407*4882a593Smuzhiyun 
2408*4882a593Smuzhiyun 	if (conn_destroy->completion_status) {
2409*4882a593Smuzhiyun 		printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
2410*4882a593Smuzhiyun 		ep->state = EP_STATE_CLEANUP_FAILED;
2411*4882a593Smuzhiyun 	} else
2412*4882a593Smuzhiyun 		ep->state = EP_STATE_CLEANUP_CMPL;
2413*4882a593Smuzhiyun 	wake_up_interruptible(&ep->ofld_wait);
2414*4882a593Smuzhiyun }
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun /**
2418*4882a593Smuzhiyun  * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
2419*4882a593Smuzhiyun  * @hba:		adapter structure pointer
2420*4882a593Smuzhiyun  * @ofld_kcqe:		conn offload kcqe pointer
2421*4882a593Smuzhiyun  *
2422*4882a593Smuzhiyun  * handles initial connection offload completion, ep_connect() thread is
2423*4882a593Smuzhiyun  *	woken-up to continue with LLP connect process
2424*4882a593Smuzhiyun  */
bnx2i_process_ofld_cmpl(struct bnx2i_hba * hba,struct iscsi_kcqe * ofld_kcqe)2425*4882a593Smuzhiyun static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
2426*4882a593Smuzhiyun 				    struct iscsi_kcqe *ofld_kcqe)
2427*4882a593Smuzhiyun {
2428*4882a593Smuzhiyun 	u32 cid_addr;
2429*4882a593Smuzhiyun 	struct bnx2i_endpoint *ep;
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
2432*4882a593Smuzhiyun 	if (!ep) {
2433*4882a593Smuzhiyun 		printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
2434*4882a593Smuzhiyun 		return;
2435*4882a593Smuzhiyun 	}
2436*4882a593Smuzhiyun 
2437*4882a593Smuzhiyun 	if (hba != ep->hba) {
2438*4882a593Smuzhiyun 		printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
2439*4882a593Smuzhiyun 		return;
2440*4882a593Smuzhiyun 	}
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 	if (ofld_kcqe->completion_status) {
2443*4882a593Smuzhiyun 		ep->state = EP_STATE_OFLD_FAILED;
2444*4882a593Smuzhiyun 		if (ofld_kcqe->completion_status ==
2445*4882a593Smuzhiyun 		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
2446*4882a593Smuzhiyun 			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - unable "
2447*4882a593Smuzhiyun 				"to allocate iSCSI context resources\n",
2448*4882a593Smuzhiyun 				hba->netdev->name);
2449*4882a593Smuzhiyun 		else if (ofld_kcqe->completion_status ==
2450*4882a593Smuzhiyun 			 ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE)
2451*4882a593Smuzhiyun 			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
2452*4882a593Smuzhiyun 				"opcode\n", hba->netdev->name);
2453*4882a593Smuzhiyun 		else if (ofld_kcqe->completion_status ==
2454*4882a593Smuzhiyun 			 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
2455*4882a593Smuzhiyun 			/* error status code valid only for 5771x chipset */
2456*4882a593Smuzhiyun 			ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
2457*4882a593Smuzhiyun 		else
2458*4882a593Smuzhiyun 			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
2459*4882a593Smuzhiyun 				"error code %d\n", hba->netdev->name,
2460*4882a593Smuzhiyun 				ofld_kcqe->completion_status);
2461*4882a593Smuzhiyun 	} else {
2462*4882a593Smuzhiyun 		ep->state = EP_STATE_OFLD_COMPL;
2463*4882a593Smuzhiyun 		cid_addr = ofld_kcqe->iscsi_conn_context_id;
2464*4882a593Smuzhiyun 		ep->ep_cid = cid_addr;
2465*4882a593Smuzhiyun 		ep->qp.ctx_base = NULL;
2466*4882a593Smuzhiyun 	}
2467*4882a593Smuzhiyun 	wake_up_interruptible(&ep->ofld_wait);
2468*4882a593Smuzhiyun }
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun /**
2471*4882a593Smuzhiyun  * bnx2i_indicate_kcqe - process iscsi conn update completion KCQE
2472*4882a593Smuzhiyun  * @context:		adapter structure pointer
2473*4882a593Smuzhiyun  * @kcqe:		kcqe pointer
2474*4882a593Smuzhiyun  * @num_cqe:		number of kcqes to process
2475*4882a593Smuzhiyun  *
2476*4882a593Smuzhiyun  * Generic KCQ event handler/dispatcher
2477*4882a593Smuzhiyun  */
bnx2i_indicate_kcqe(void * context,struct kcqe * kcqe[],u32 num_cqe)2478*4882a593Smuzhiyun static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
2479*4882a593Smuzhiyun 				u32 num_cqe)
2480*4882a593Smuzhiyun {
2481*4882a593Smuzhiyun 	struct bnx2i_hba *hba = context;
2482*4882a593Smuzhiyun 	int i = 0;
2483*4882a593Smuzhiyun 	struct iscsi_kcqe *ikcqe = NULL;
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun 	while (i < num_cqe) {
2486*4882a593Smuzhiyun 		ikcqe = (struct iscsi_kcqe *) kcqe[i++];
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun 		if (ikcqe->op_code ==
2489*4882a593Smuzhiyun 		    ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
2490*4882a593Smuzhiyun 			bnx2i_fastpath_notification(hba, ikcqe);
2491*4882a593Smuzhiyun 		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
2492*4882a593Smuzhiyun 			bnx2i_process_ofld_cmpl(hba, ikcqe);
2493*4882a593Smuzhiyun 		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
2494*4882a593Smuzhiyun 			bnx2i_process_update_conn_cmpl(hba, ikcqe);
2495*4882a593Smuzhiyun 		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
2496*4882a593Smuzhiyun 			if (ikcqe->completion_status !=
2497*4882a593Smuzhiyun 			    ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
2498*4882a593Smuzhiyun 				bnx2i_iscsi_license_error(hba, ikcqe->\
2499*4882a593Smuzhiyun 							  completion_status);
2500*4882a593Smuzhiyun 			else {
2501*4882a593Smuzhiyun 				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2502*4882a593Smuzhiyun 				bnx2i_get_link_state(hba);
2503*4882a593Smuzhiyun 				printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
2504*4882a593Smuzhiyun 						 "ISCSI_INIT passed\n",
2505*4882a593Smuzhiyun 						 (u8)hba->pcidev->bus->number,
2506*4882a593Smuzhiyun 						 hba->pci_devno,
2507*4882a593Smuzhiyun 						 (u8)hba->pci_func);
2508*4882a593Smuzhiyun 
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 			}
2511*4882a593Smuzhiyun 		} else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
2512*4882a593Smuzhiyun 			bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
2513*4882a593Smuzhiyun 		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
2514*4882a593Smuzhiyun 			bnx2i_process_iscsi_error(hba, ikcqe);
2515*4882a593Smuzhiyun 		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
2516*4882a593Smuzhiyun 			bnx2i_process_tcp_error(hba, ikcqe);
2517*4882a593Smuzhiyun 		else
2518*4882a593Smuzhiyun 			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2519*4882a593Smuzhiyun 					  ikcqe->op_code);
2520*4882a593Smuzhiyun 	}
2521*4882a593Smuzhiyun }
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 
2524*4882a593Smuzhiyun /**
2525*4882a593Smuzhiyun  * bnx2i_indicate_netevent - Generic netdev event handler
2526*4882a593Smuzhiyun  * @context:	adapter structure pointer
2527*4882a593Smuzhiyun  * @event:	event type
2528*4882a593Smuzhiyun  * @vlan_id:	vlans id - associated vlan id with this event
2529*4882a593Smuzhiyun  *
2530*4882a593Smuzhiyun  * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
2531*4882a593Smuzhiyun  *	NETDEV_GOING_DOWN and NETDEV_CHANGE
2532*4882a593Smuzhiyun  */
bnx2i_indicate_netevent(void * context,unsigned long event,u16 vlan_id)2533*4882a593Smuzhiyun static void bnx2i_indicate_netevent(void *context, unsigned long event,
2534*4882a593Smuzhiyun 				    u16 vlan_id)
2535*4882a593Smuzhiyun {
2536*4882a593Smuzhiyun 	struct bnx2i_hba *hba = context;
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	/* Ignore all netevent coming from vlans */
2539*4882a593Smuzhiyun 	if (vlan_id != 0)
2540*4882a593Smuzhiyun 		return;
2541*4882a593Smuzhiyun 
2542*4882a593Smuzhiyun 	switch (event) {
2543*4882a593Smuzhiyun 	case NETDEV_UP:
2544*4882a593Smuzhiyun 		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
2545*4882a593Smuzhiyun 			bnx2i_send_fw_iscsi_init_msg(hba);
2546*4882a593Smuzhiyun 		break;
2547*4882a593Smuzhiyun 	case NETDEV_DOWN:
2548*4882a593Smuzhiyun 		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2549*4882a593Smuzhiyun 		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2550*4882a593Smuzhiyun 		break;
2551*4882a593Smuzhiyun 	case NETDEV_GOING_DOWN:
2552*4882a593Smuzhiyun 		set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2553*4882a593Smuzhiyun 		iscsi_host_for_each_session(hba->shost,
2554*4882a593Smuzhiyun 					    bnx2i_drop_session);
2555*4882a593Smuzhiyun 		break;
2556*4882a593Smuzhiyun 	case NETDEV_CHANGE:
2557*4882a593Smuzhiyun 		bnx2i_get_link_state(hba);
2558*4882a593Smuzhiyun 		break;
2559*4882a593Smuzhiyun 	default:
2560*4882a593Smuzhiyun 		;
2561*4882a593Smuzhiyun 	}
2562*4882a593Smuzhiyun }
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 
2565*4882a593Smuzhiyun /**
2566*4882a593Smuzhiyun  * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
2567*4882a593Smuzhiyun  * @cm_sk: 		cnic sock structure pointer
2568*4882a593Smuzhiyun  *
2569*4882a593Smuzhiyun  * function callback exported via bnx2i - cnic driver interface to
2570*4882a593Smuzhiyun  *	indicate completion of option-2 TCP connect request.
2571*4882a593Smuzhiyun  */
bnx2i_cm_connect_cmpl(struct cnic_sock * cm_sk)2572*4882a593Smuzhiyun static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
2573*4882a593Smuzhiyun {
2574*4882a593Smuzhiyun 	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun 	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
2577*4882a593Smuzhiyun 		ep->state = EP_STATE_CONNECT_FAILED;
2578*4882a593Smuzhiyun 	else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
2579*4882a593Smuzhiyun 		ep->state = EP_STATE_CONNECT_COMPL;
2580*4882a593Smuzhiyun 	else
2581*4882a593Smuzhiyun 		ep->state = EP_STATE_CONNECT_FAILED;
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun 	wake_up_interruptible(&ep->ofld_wait);
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun /**
2588*4882a593Smuzhiyun  * bnx2i_cm_close_cmpl - process tcp conn close completion
2589*4882a593Smuzhiyun  * @cm_sk:	cnic sock structure pointer
2590*4882a593Smuzhiyun  *
2591*4882a593Smuzhiyun  * function callback exported via bnx2i - cnic driver interface to
2592*4882a593Smuzhiyun  *	indicate completion of option-2 graceful TCP connect shutdown
2593*4882a593Smuzhiyun  */
bnx2i_cm_close_cmpl(struct cnic_sock * cm_sk)2594*4882a593Smuzhiyun static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
2595*4882a593Smuzhiyun {
2596*4882a593Smuzhiyun 	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2597*4882a593Smuzhiyun 
2598*4882a593Smuzhiyun 	ep->state = EP_STATE_DISCONN_COMPL;
2599*4882a593Smuzhiyun 	wake_up_interruptible(&ep->ofld_wait);
2600*4882a593Smuzhiyun }
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun 
2603*4882a593Smuzhiyun /**
2604*4882a593Smuzhiyun  * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
2605*4882a593Smuzhiyun  * @cm_sk:	cnic sock structure pointer
2606*4882a593Smuzhiyun  *
2607*4882a593Smuzhiyun  * function callback exported via bnx2i - cnic driver interface to
2608*4882a593Smuzhiyun  *	indicate completion of option-2 abortive TCP connect termination
2609*4882a593Smuzhiyun  */
bnx2i_cm_abort_cmpl(struct cnic_sock * cm_sk)2610*4882a593Smuzhiyun static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
2611*4882a593Smuzhiyun {
2612*4882a593Smuzhiyun 	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	ep->state = EP_STATE_DISCONN_COMPL;
2615*4882a593Smuzhiyun 	wake_up_interruptible(&ep->ofld_wait);
2616*4882a593Smuzhiyun }
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun /**
2620*4882a593Smuzhiyun  * bnx2i_cm_remote_close - process received TCP FIN
2621*4882a593Smuzhiyun  * @cm_sk:	cnic sock structure pointer
2622*4882a593Smuzhiyun  *
2623*4882a593Smuzhiyun  * function callback exported via bnx2i - cnic driver interface to indicate
2624*4882a593Smuzhiyun  *	async TCP events such as FIN
2625*4882a593Smuzhiyun  */
bnx2i_cm_remote_close(struct cnic_sock * cm_sk)2626*4882a593Smuzhiyun static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
2627*4882a593Smuzhiyun {
2628*4882a593Smuzhiyun 	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	ep->state = EP_STATE_TCP_FIN_RCVD;
2631*4882a593Smuzhiyun 	if (ep->conn)
2632*4882a593Smuzhiyun 		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2633*4882a593Smuzhiyun }
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun /**
2636*4882a593Smuzhiyun  * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
2637*4882a593Smuzhiyun  * @cm_sk:	cnic sock structure pointer
2638*4882a593Smuzhiyun  *
2639*4882a593Smuzhiyun  * function callback exported via bnx2i - cnic driver interface to
2640*4882a593Smuzhiyun  *	indicate async TCP events (RST) sent by the peer.
2641*4882a593Smuzhiyun  */
bnx2i_cm_remote_abort(struct cnic_sock * cm_sk)2642*4882a593Smuzhiyun static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
2643*4882a593Smuzhiyun {
2644*4882a593Smuzhiyun 	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2645*4882a593Smuzhiyun 	u32 old_state = ep->state;
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun 	ep->state = EP_STATE_TCP_RST_RCVD;
2648*4882a593Smuzhiyun 	if (old_state == EP_STATE_DISCONN_START)
2649*4882a593Smuzhiyun 		wake_up_interruptible(&ep->ofld_wait);
2650*4882a593Smuzhiyun 	else
2651*4882a593Smuzhiyun 		if (ep->conn)
2652*4882a593Smuzhiyun 			bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2653*4882a593Smuzhiyun }
2654*4882a593Smuzhiyun 
2655*4882a593Smuzhiyun 
/**
 * bnx2i_send_nl_mesg - send a private netlink message via iscsi transport
 * @context:	adapter structure pointer
 * @msg_type:	iscsi netlink message type
 * @buf:	message payload
 * @buflen:	payload length in bytes
 *
 * Forwards a cnic-originated message to user space through
 *	iscsi_offload_mesg(). Returns -ENODEV when no adapter context is
 *	bound, otherwise the iscsi_offload_mesg() result.
 */
static int bnx2i_send_nl_mesg(void *context, u32 msg_type,
			      char *buf, u16 buflen)
{
	struct bnx2i_hba *hba = context;
	int rc = -ENODEV;

	if (hba) {
		rc = iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
					msg_type, buf, buflen);
		if (rc)
			printk(KERN_ALERT "bnx2i: private nl message send error\n");
	}

	return rc;
}
2672*4882a593Smuzhiyun 
2673*4882a593Smuzhiyun 
/*
 * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
 *			carrying callback function pointers
 */
struct cnic_ulp_ops bnx2i_cnic_cb = {
	/* ULP lifecycle: device registration and start/stop of iSCSI ops */
	.cnic_init = bnx2i_ulp_init,
	.cnic_exit = bnx2i_ulp_exit,
	.cnic_start = bnx2i_start,
	.cnic_stop = bnx2i_stop,
	/* slow-path KCQ completions and netdev events dispatched by cnic */
	.indicate_kcqes = bnx2i_indicate_kcqe,
	.indicate_netevent = bnx2i_indicate_netevent,
	/* option-2 TCP connection state-change callbacks */
	.cm_connect_complete = bnx2i_cm_connect_cmpl,
	.cm_close_complete = bnx2i_cm_close_cmpl,
	.cm_abort_complete = bnx2i_cm_abort_cmpl,
	.cm_remote_close = bnx2i_cm_remote_close,
	.cm_remote_abort = bnx2i_cm_remote_abort,
	/* user-space messaging and statistics hooks */
	.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
	.cnic_get_stats = bnx2i_get_stats,
	.owner = THIS_MODULE
};
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun 
/**
 * bnx2i_map_ep_dbell_regs - map connection doorbell registers
 * @ep: bnx2i endpoint
 *
 * maps connection's SQ and RQ doorbell registers, 5706/5708/5709 hosts these
 *	register in BAR #0. Whereas in 57710 these register are accessed by
 *	mapping BAR #1
 *
 * Returns 0 on success, -ENOMEM if the ioremap() fails. On success
 *	ep->qp.ctx_base holds the mapped doorbell address and CQ event
 *	coalescing is armed for the connection.
 */
int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
{
	u32 cid_num;
	u32 reg_off;
	u32 first_l4l5;
	u32 ctx_sz;
	u32 config2;
	resource_size_t reg_base;

	cid_num = bnx2i_get_cid_num(ep);

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		/* 57710: per-CID doorbell cell inside the dedicated
		 * doorbell PCI BAR; only a 4-byte window is needed.
		 */
		reg_base = pci_resource_start(ep->hba->pcidev,
					      BNX2X_DOORBELL_PCI_BAR);
		reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
		ep->qp.ctx_base = ioremap(reg_base + reg_off, 4);
		if (!ep->qp.ctx_base)
			return -ENOMEM;
		goto arm_cq;
	}

	if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
	    (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
		/* 5709 in mail-queue "bin" mode: offset depends on the
		 * first L4/L5 context and the context-size field read
		 * from BNX2_MQ_CONFIG2.
		 */
		config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
		first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
		ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
		if (ctx_sz)
			/* NOTE(review): bin-mode offset formula (the +256
			 * page bias and /ctx_sz grouping) follows the chip
			 * mail-queue layout - confirm against 5709 docs
			 * before touching.
			 */
			reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
				  + BNX2I_570X_PAGE_SIZE_DEFAULT *
				  (((cid_num - first_l4l5) / ctx_sz) + 256);
		else
			reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
	} else
		/* 5709 device in normal mode and 5706/5708 devices:
		 * fixed-size kernel mailbox context per CID in BAR #0.
		 */
		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);

	ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off,
					  MB_KERNEL_CTX_SIZE);
	if (!ep->qp.ctx_base)
		return -ENOMEM;

arm_cq:
	bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
	return 0;
}
2749