xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/qlogic/qed/qed_iwarp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2*4882a593Smuzhiyun /* QLogic qed NIC Driver
3*4882a593Smuzhiyun  * Copyright (c) 2015-2017  QLogic Corporation
4*4882a593Smuzhiyun  * Copyright (c) 2019-2020 Marvell International Ltd.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/if_ether.h>
8*4882a593Smuzhiyun #include <linux/if_vlan.h>
9*4882a593Smuzhiyun #include <linux/ip.h>
10*4882a593Smuzhiyun #include <linux/ipv6.h>
11*4882a593Smuzhiyun #include <linux/spinlock.h>
12*4882a593Smuzhiyun #include <linux/tcp.h>
13*4882a593Smuzhiyun #include "qed_cxt.h"
14*4882a593Smuzhiyun #include "qed_hw.h"
15*4882a593Smuzhiyun #include "qed_ll2.h"
16*4882a593Smuzhiyun #include "qed_rdma.h"
17*4882a593Smuzhiyun #include "qed_reg_addr.h"
18*4882a593Smuzhiyun #include "qed_sp.h"
19*4882a593Smuzhiyun #include "qed_ooo.h"
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #define QED_IWARP_ORD_DEFAULT		32
22*4882a593Smuzhiyun #define QED_IWARP_IRD_DEFAULT		32
23*4882a593Smuzhiyun #define QED_IWARP_MAX_FW_MSS		4120
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #define QED_EP_SIG 0xecabcdef
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun struct mpa_v2_hdr {
28*4882a593Smuzhiyun 	__be16 ird;
29*4882a593Smuzhiyun 	__be16 ord;
30*4882a593Smuzhiyun };
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #define MPA_V2_PEER2PEER_MODEL  0x8000
33*4882a593Smuzhiyun #define MPA_V2_SEND_RTR         0x4000	/* on ird */
34*4882a593Smuzhiyun #define MPA_V2_READ_RTR         0x4000	/* on ord */
35*4882a593Smuzhiyun #define MPA_V2_WRITE_RTR        0x8000
36*4882a593Smuzhiyun #define MPA_V2_IRD_ORD_MASK     0x3FFF
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define QED_IWARP_INVALID_TCP_CID	0xffffffff
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
43*4882a593Smuzhiyun #define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
44*4882a593Smuzhiyun #define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
45*4882a593Smuzhiyun #define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun #define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
48*4882a593Smuzhiyun #define TIMESTAMP_HEADER_SIZE		(12)
49*4882a593Smuzhiyun #define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun #define QED_IWARP_TS_EN			BIT(0)
52*4882a593Smuzhiyun #define QED_IWARP_DA_EN			BIT(1)
53*4882a593Smuzhiyun #define QED_IWARP_PARAM_CRC_NEEDED	(1)
54*4882a593Smuzhiyun #define QED_IWARP_PARAM_P2P		(1)
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun #define QED_IWARP_DEF_MAX_RT_TIME	(0)
57*4882a593Smuzhiyun #define QED_IWARP_DEF_CWND_FACTOR	(4)
58*4882a593Smuzhiyun #define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
59*4882a593Smuzhiyun #define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
60*4882a593Smuzhiyun #define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
63*4882a593Smuzhiyun 				 __le16 echo, union event_ring_data *data,
64*4882a593Smuzhiyun 				 u8 fw_return_code);
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun /* Override devinfo with iWARP specific values */
qed_iwarp_init_devinfo(struct qed_hwfn * p_hwfn)67*4882a593Smuzhiyun void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
68*4882a593Smuzhiyun {
69*4882a593Smuzhiyun 	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
72*4882a593Smuzhiyun 	dev->max_qp = min_t(u32,
73*4882a593Smuzhiyun 			    IWARP_MAX_QPS,
74*4882a593Smuzhiyun 			    p_hwfn->p_rdma_info->num_qps) -
75*4882a593Smuzhiyun 		      QED_IWARP_PREALLOC_CNT;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	dev->max_cq = dev->max_qp;
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
80*4882a593Smuzhiyun 	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun 
/* Enable TCP searching in the parser (PRS) for iWARP traffic. The
 * register offset is cached on the hwfn for later accesses elsewhere.
 */
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
	p_hwfn->b_rdma_enabled_in_prs = true;
}
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun /* We have two cid maps, one for tcp which should be used only from passive
91*4882a593Smuzhiyun  * syn processing and replacing a pre-allocated ep in the list. The second
92*4882a593Smuzhiyun  * for active tcp and for QPs.
93*4882a593Smuzhiyun  */
qed_iwarp_cid_cleaned(struct qed_hwfn * p_hwfn,u32 cid)94*4882a593Smuzhiyun static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun 	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	if (cid < QED_IWARP_PREALLOC_CNT)
101*4882a593Smuzhiyun 		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
102*4882a593Smuzhiyun 				    cid);
103*4882a593Smuzhiyun 	else
104*4882a593Smuzhiyun 		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun void
qed_iwarp_init_fw_ramrod(struct qed_hwfn * p_hwfn,struct iwarp_init_func_ramrod_data * p_ramrod)110*4882a593Smuzhiyun qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
111*4882a593Smuzhiyun 			 struct iwarp_init_func_ramrod_data *p_ramrod)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun 	p_ramrod->iwarp.ll2_ooo_q_index =
114*4882a593Smuzhiyun 	    RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
115*4882a593Smuzhiyun 	    p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	return;
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun 
/* Allocate a cid from the regular (active tcp / QP) map and back it with
 * ILT memory; the allocation is rolled back if the ILT step fails.
 */
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	int rc;

	spin_lock_bh(&p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_rdma_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
		return rc;
	}

	/* Make the cid absolute by adding the protocol base. */
	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
	if (rc)
		qed_iwarp_cid_cleaned(p_hwfn, *cid);

	return rc;
}
141*4882a593Smuzhiyun 
/* Mark a tcp cid as in-use in the preallocated tcp cid map. */
static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	/* The bitmap works on protocol-relative indices. */
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);

	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_rdma_info->tcp_cid_map, cid);
	spin_unlock_bh(&p_rdma_info->lock);
}
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun /* This function allocates a cid for passive tcp (called from syn receive)
152*4882a593Smuzhiyun  * the reason it's separate from the regular cid allocation is because it
153*4882a593Smuzhiyun  * is assured that these cids already have ilt allocated. They are preallocated
154*4882a593Smuzhiyun  * to ensure that we won't need to allocate memory during syn processing
155*4882a593Smuzhiyun  */
qed_iwarp_alloc_tcp_cid(struct qed_hwfn * p_hwfn,u32 * cid)156*4882a593Smuzhiyun static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	int rc;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	rc = qed_rdma_bmap_alloc_id(p_hwfn,
163*4882a593Smuzhiyun 				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	if (rc) {
168*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
169*4882a593Smuzhiyun 			   "can't allocate iwarp tcp cid max-count=%d\n",
170*4882a593Smuzhiyun 			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 		*cid = QED_IWARP_INVALID_TCP_CID;
173*4882a593Smuzhiyun 		return rc;
174*4882a593Smuzhiyun 	}
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun 	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
177*4882a593Smuzhiyun 					    p_hwfn->p_rdma_info->proto);
178*4882a593Smuzhiyun 	return 0;
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun 
/* Create a FW iWARP QP: allocate the shared queue DMA page and a cid,
 * then post the CREATE_QP ramrod. The SQ/RQ PBL addresses returned to
 * the caller are fixed offsets inside the single shared queue page.
 * Returns 0 on success or a negative errno; on failure all resources
 * allocated here are released.
 */
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	/* One DMA page shared with FW holds both the SQ and RQ PBLs. */
	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

	/* Expose the PBL regions (virt + phys) carved out of the page. */
	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	/* Absolute cid backed by dynamically allocated ILT memory. */
	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;

	qp->icid = (u16)cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	/* Capability flags taken from the qp configuration. */
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);

	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;

	/* CQ cids are encoded as (opaque_fid << 16) | cq_id. */
	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	/* Offload PQ for requester traffic, ACK PQ for responses. */
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
285*4882a593Smuzhiyun 
/* Post a MODIFY_QP ramrod moving the FW QP to CLOSING or ERROR,
 * chosen from the sw iwarp_state already recorded on the qp.
 */
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 flags, trans_to_state;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;

	/* Only a state transition is requested by this ramrod. */
	flags = le16_to_cpu(p_ramrod->flags);
	SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1);
	p_ramrod->flags = cpu_to_le16(flags);

	/* Any sw state other than CLOSING is pushed to FW as ERROR. */
	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
		trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
	else
		trans_to_state = IWARP_MODIFY_QP_STATE_ERROR;

	p_ramrod->transition_to_state = cpu_to_le16(trans_to_state);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);

	return rc;
}
325*4882a593Smuzhiyun 
qed_roce2iwarp_state(enum qed_roce_qp_state state)326*4882a593Smuzhiyun enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun 	switch (state) {
329*4882a593Smuzhiyun 	case QED_ROCE_QP_STATE_RESET:
330*4882a593Smuzhiyun 	case QED_ROCE_QP_STATE_INIT:
331*4882a593Smuzhiyun 	case QED_ROCE_QP_STATE_RTR:
332*4882a593Smuzhiyun 		return QED_IWARP_QP_STATE_IDLE;
333*4882a593Smuzhiyun 	case QED_ROCE_QP_STATE_RTS:
334*4882a593Smuzhiyun 		return QED_IWARP_QP_STATE_RTS;
335*4882a593Smuzhiyun 	case QED_ROCE_QP_STATE_SQD:
336*4882a593Smuzhiyun 		return QED_IWARP_QP_STATE_CLOSING;
337*4882a593Smuzhiyun 	case QED_ROCE_QP_STATE_ERR:
338*4882a593Smuzhiyun 		return QED_IWARP_QP_STATE_ERROR;
339*4882a593Smuzhiyun 	case QED_ROCE_QP_STATE_SQE:
340*4882a593Smuzhiyun 		return QED_IWARP_QP_STATE_TERMINATE;
341*4882a593Smuzhiyun 	default:
342*4882a593Smuzhiyun 		return QED_IWARP_QP_STATE_ERROR;
343*4882a593Smuzhiyun 	}
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)347*4882a593Smuzhiyun qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun 	switch (state) {
350*4882a593Smuzhiyun 	case QED_IWARP_QP_STATE_IDLE:
351*4882a593Smuzhiyun 		return QED_ROCE_QP_STATE_INIT;
352*4882a593Smuzhiyun 	case QED_IWARP_QP_STATE_RTS:
353*4882a593Smuzhiyun 		return QED_ROCE_QP_STATE_RTS;
354*4882a593Smuzhiyun 	case QED_IWARP_QP_STATE_TERMINATE:
355*4882a593Smuzhiyun 		return QED_ROCE_QP_STATE_SQE;
356*4882a593Smuzhiyun 	case QED_IWARP_QP_STATE_CLOSING:
357*4882a593Smuzhiyun 		return QED_ROCE_QP_STATE_SQD;
358*4882a593Smuzhiyun 	case QED_IWARP_QP_STATE_ERROR:
359*4882a593Smuzhiyun 		return QED_ROCE_QP_STATE_ERR;
360*4882a593Smuzhiyun 	default:
361*4882a593Smuzhiyun 		return QED_ROCE_QP_STATE_ERR;
362*4882a593Smuzhiyun 	}
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun 
/* Human-readable names indexed by qed_iwarp_qp_state values (used by the
 * DP_VERBOSE in qed_iwarp_modify_qp).
 * NOTE(review): order must stay in sync with enum qed_iwarp_qp_state —
 * verify against the enum definition when either changes.
 */
static const char * const iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};
372*4882a593Smuzhiyun 
/* Drive the sw iWARP QP state machine under qp_lock. When the transition
 * must also be reflected in FW (!internal and the transition requires it),
 * a MODIFY_QP ramrod is sent after the lock is dropped.
 * Returns 0 on success, -EINVAL on an illegal transition out of ERROR,
 * or the ramrod status from qed_iwarp_modify_fw().
 */
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* modify QP can be called from upper-layer or as a result of async
	 * RST/FIN... therefore need to protect
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	/* No-op transition: nothing to do, nothing to log. */
	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			if (!internal)
				modify_fw = true;
			break;
		default:
			/* Other transitions from IDLE are silently ignored. */
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;

			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:

			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* could happen due to race... do nothing.... */
			break;
		default:
			/* ERROR only leaves via IDLE; all else is invalid. */
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		/* These states accept any requested transition as-is. */
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? "internal" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	/* FW ramrod is sent outside the lock; it blocks on completion. */
	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}
459*4882a593Smuzhiyun 
qed_iwarp_fw_destroy(struct qed_hwfn * p_hwfn,struct qed_rdma_qp * qp)460*4882a593Smuzhiyun int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
461*4882a593Smuzhiyun {
462*4882a593Smuzhiyun 	struct qed_sp_init_data init_data;
463*4882a593Smuzhiyun 	struct qed_spq_entry *p_ent;
464*4882a593Smuzhiyun 	int rc;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	/* Get SPQ entry */
467*4882a593Smuzhiyun 	memset(&init_data, 0, sizeof(init_data));
468*4882a593Smuzhiyun 	init_data.cid = qp->icid;
469*4882a593Smuzhiyun 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
470*4882a593Smuzhiyun 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun 	rc = qed_sp_init_request(p_hwfn, &p_ent,
473*4882a593Smuzhiyun 				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
474*4882a593Smuzhiyun 				 p_hwfn->p_rdma_info->proto, &init_data);
475*4882a593Smuzhiyun 	if (rc)
476*4882a593Smuzhiyun 		return rc;
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	return rc;
483*4882a593Smuzhiyun }
484*4882a593Smuzhiyun 
/* Free an endpoint: release its FW-shared DMA buffer, optionally unlink
 * it from the active-ep list, detach it from its QP and free the struct.
 */
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
				 struct qed_iwarp_ep *ep,
				 bool remove_from_active_list)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(*ep->ep_buffer_virt),
			  ep->ep_buffer_virt, ep->ep_buffer_phys);

	if (remove_from_active_list) {
		spin_lock_bh(&iwarp_info->iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&iwarp_info->iw_lock);
	}

	/* Break the back-pointer so the QP does not reference freed memory. */
	if (ep->qp)
		ep->qp->ep = NULL;

	kfree(ep);
}
504*4882a593Smuzhiyun 
qed_iwarp_destroy_qp(struct qed_hwfn * p_hwfn,struct qed_rdma_qp * qp)505*4882a593Smuzhiyun int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
506*4882a593Smuzhiyun {
507*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep = qp->ep;
508*4882a593Smuzhiyun 	int wait_count = 0;
509*4882a593Smuzhiyun 	int rc = 0;
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
512*4882a593Smuzhiyun 		rc = qed_iwarp_modify_qp(p_hwfn, qp,
513*4882a593Smuzhiyun 					 QED_IWARP_QP_STATE_ERROR, false);
514*4882a593Smuzhiyun 		if (rc)
515*4882a593Smuzhiyun 			return rc;
516*4882a593Smuzhiyun 	}
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun 	/* Make sure ep is closed before returning and freeing memory. */
519*4882a593Smuzhiyun 	if (ep) {
520*4882a593Smuzhiyun 		while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
521*4882a593Smuzhiyun 		       wait_count++ < 200)
522*4882a593Smuzhiyun 			msleep(100);
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 		if (ep->state != QED_IWARP_EP_CLOSED)
525*4882a593Smuzhiyun 			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
526*4882a593Smuzhiyun 				  ep->state);
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 		qed_iwarp_destroy_ep(p_hwfn, ep, false);
529*4882a593Smuzhiyun 	}
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun 	rc = qed_iwarp_fw_destroy(p_hwfn, qp);
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	if (qp->shared_queue)
534*4882a593Smuzhiyun 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
535*4882a593Smuzhiyun 				  IWARP_SHARED_QUEUE_PAGE_SIZE,
536*4882a593Smuzhiyun 				  qp->shared_queue, qp->shared_queue_phys_addr);
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun 	return rc;
539*4882a593Smuzhiyun }
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun static int
qed_iwarp_create_ep(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep ** ep_out)542*4882a593Smuzhiyun qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
543*4882a593Smuzhiyun {
544*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep;
545*4882a593Smuzhiyun 	int rc;
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun 	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
548*4882a593Smuzhiyun 	if (!ep)
549*4882a593Smuzhiyun 		return -ENOMEM;
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 	ep->state = QED_IWARP_EP_INIT;
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
554*4882a593Smuzhiyun 						sizeof(*ep->ep_buffer_virt),
555*4882a593Smuzhiyun 						&ep->ep_buffer_phys,
556*4882a593Smuzhiyun 						GFP_KERNEL);
557*4882a593Smuzhiyun 	if (!ep->ep_buffer_virt) {
558*4882a593Smuzhiyun 		rc = -ENOMEM;
559*4882a593Smuzhiyun 		goto err;
560*4882a593Smuzhiyun 	}
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	ep->sig = QED_EP_SIG;
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	*ep_out = ep;
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun 	return 0;
567*4882a593Smuzhiyun 
568*4882a593Smuzhiyun err:
569*4882a593Smuzhiyun 	kfree(ep);
570*4882a593Smuzhiyun 	return rc;
571*4882a593Smuzhiyun }
572*4882a593Smuzhiyun 
/* Debug-dump the contents of a TCP offload ramrod: MAC addresses,
 * IPv4/IPv6 tuple, and the remaining TCP parameters. Output only
 * appears with QED_MSG_RDMA verbosity enabled.
 */
static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
		   p_tcp_ramrod->tcp.local_mac_addr_lo,
		   p_tcp_ramrod->tcp.local_mac_addr_mid,
		   p_tcp_ramrod->tcp.local_mac_addr_hi,
		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
		   p_tcp_ramrod->tcp.remote_mac_addr_hi);

	/* The address print format differs between IPv4 and IPv6. */
	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
		   p_tcp_ramrod->tcp.flow_label,
		   p_tcp_ramrod->tcp.ttl,
		   p_tcp_ramrod->tcp.tos_or_tc,
		   p_tcp_ramrod->tcp.mss,
		   p_tcp_ramrod->tcp.rcv_wnd_scale,
		   p_tcp_ramrod->tcp.connect_mode,
		   p_tcp_ramrod->tcp.flags);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
		   p_tcp_ramrod->tcp.syn_ip_payload_length,
		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun static int
qed_iwarp_tcp_offload(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep)620*4882a593Smuzhiyun qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
621*4882a593Smuzhiyun {
622*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
623*4882a593Smuzhiyun 	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
624*4882a593Smuzhiyun 	struct tcp_offload_params_opt2 *tcp;
625*4882a593Smuzhiyun 	struct qed_sp_init_data init_data;
626*4882a593Smuzhiyun 	struct qed_spq_entry *p_ent;
627*4882a593Smuzhiyun 	dma_addr_t async_output_phys;
628*4882a593Smuzhiyun 	dma_addr_t in_pdata_phys;
629*4882a593Smuzhiyun 	u16 physical_q;
630*4882a593Smuzhiyun 	u16 flags = 0;
631*4882a593Smuzhiyun 	u8 tcp_flags;
632*4882a593Smuzhiyun 	int rc;
633*4882a593Smuzhiyun 	int i;
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 	memset(&init_data, 0, sizeof(init_data));
636*4882a593Smuzhiyun 	init_data.cid = ep->tcp_cid;
637*4882a593Smuzhiyun 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
638*4882a593Smuzhiyun 	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
639*4882a593Smuzhiyun 		init_data.comp_mode = QED_SPQ_MODE_CB;
640*4882a593Smuzhiyun 	else
641*4882a593Smuzhiyun 		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 	rc = qed_sp_init_request(p_hwfn, &p_ent,
644*4882a593Smuzhiyun 				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
645*4882a593Smuzhiyun 				 PROTOCOLID_IWARP, &init_data);
646*4882a593Smuzhiyun 	if (rc)
647*4882a593Smuzhiyun 		return rc;
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 	in_pdata_phys = ep->ep_buffer_phys +
652*4882a593Smuzhiyun 			offsetof(struct qed_iwarp_ep_memory, in_pdata);
653*4882a593Smuzhiyun 	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
654*4882a593Smuzhiyun 		       in_pdata_phys);
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
657*4882a593Smuzhiyun 	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	async_output_phys = ep->ep_buffer_phys +
660*4882a593Smuzhiyun 			    offsetof(struct qed_iwarp_ep_memory, async_output);
661*4882a593Smuzhiyun 	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
662*4882a593Smuzhiyun 		       async_output_phys);
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
665*4882a593Smuzhiyun 	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
668*4882a593Smuzhiyun 	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
669*4882a593Smuzhiyun 	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
670*4882a593Smuzhiyun 	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
671*4882a593Smuzhiyun 	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	tcp = &p_tcp_ramrod->tcp;
674*4882a593Smuzhiyun 	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
675*4882a593Smuzhiyun 			    &tcp->remote_mac_addr_mid,
676*4882a593Smuzhiyun 			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
677*4882a593Smuzhiyun 	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
678*4882a593Smuzhiyun 			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
685*4882a593Smuzhiyun 		  !!(tcp_flags & QED_IWARP_TS_EN));
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
688*4882a593Smuzhiyun 		  !!(tcp_flags & QED_IWARP_DA_EN));
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	tcp->flags = cpu_to_le16(flags);
691*4882a593Smuzhiyun 	tcp->ip_version = ep->cm_info.ip_version;
692*4882a593Smuzhiyun 
693*4882a593Smuzhiyun 	for (i = 0; i < 4; i++) {
694*4882a593Smuzhiyun 		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
695*4882a593Smuzhiyun 		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
696*4882a593Smuzhiyun 	}
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
699*4882a593Smuzhiyun 	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
700*4882a593Smuzhiyun 	tcp->mss = cpu_to_le16(ep->mss);
701*4882a593Smuzhiyun 	tcp->flow_label = 0;
702*4882a593Smuzhiyun 	tcp->ttl = 0x40;
703*4882a593Smuzhiyun 	tcp->tos_or_tc = 0;
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
706*4882a593Smuzhiyun 	tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
707*4882a593Smuzhiyun 	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
708*4882a593Smuzhiyun 	tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
709*4882a593Smuzhiyun 	tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);
710*4882a593Smuzhiyun 
711*4882a593Smuzhiyun 	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
712*4882a593Smuzhiyun 	tcp->connect_mode = ep->connect_mode;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
715*4882a593Smuzhiyun 		tcp->syn_ip_payload_length =
716*4882a593Smuzhiyun 			cpu_to_le16(ep->syn_ip_payload_length);
717*4882a593Smuzhiyun 		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
718*4882a593Smuzhiyun 		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
719*4882a593Smuzhiyun 	}
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
726*4882a593Smuzhiyun 		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	return rc;
729*4882a593Smuzhiyun }
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun static void
qed_iwarp_mpa_received(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep)732*4882a593Smuzhiyun qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
733*4882a593Smuzhiyun {
734*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
735*4882a593Smuzhiyun 	struct qed_iwarp_cm_event_params params;
736*4882a593Smuzhiyun 	struct mpa_v2_hdr *mpa_v2;
737*4882a593Smuzhiyun 	union async_output *async_data;
738*4882a593Smuzhiyun 	u16 mpa_ord, mpa_ird;
739*4882a593Smuzhiyun 	u8 mpa_hdr_size = 0;
740*4882a593Smuzhiyun 	u16 ulp_data_len;
741*4882a593Smuzhiyun 	u8 mpa_rev;
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	async_data = &ep->ep_buffer_virt->async_output;
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
746*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
747*4882a593Smuzhiyun 		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
748*4882a593Smuzhiyun 		   async_data->mpa_request.ulp_data_len,
749*4882a593Smuzhiyun 		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
752*4882a593Smuzhiyun 		/* Read ord/ird values from private data buffer */
753*4882a593Smuzhiyun 		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
754*4882a593Smuzhiyun 		mpa_hdr_size = sizeof(*mpa_v2);
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun 		mpa_ord = ntohs(mpa_v2->ord);
757*4882a593Smuzhiyun 		mpa_ird = ntohs(mpa_v2->ird);
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 		/* Temprary store in cm_info incoming ord/ird requested, later
760*4882a593Smuzhiyun 		 * replace with negotiated value during accept
761*4882a593Smuzhiyun 		 */
762*4882a593Smuzhiyun 		ep->cm_info.ord = (u8)min_t(u16,
763*4882a593Smuzhiyun 					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
764*4882a593Smuzhiyun 					    QED_IWARP_ORD_DEFAULT);
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 		ep->cm_info.ird = (u8)min_t(u16,
767*4882a593Smuzhiyun 					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
768*4882a593Smuzhiyun 					    QED_IWARP_IRD_DEFAULT);
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 		/* Peer2Peer negotiation */
771*4882a593Smuzhiyun 		ep->rtr_type = MPA_RTR_TYPE_NONE;
772*4882a593Smuzhiyun 		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
773*4882a593Smuzhiyun 			if (mpa_ord & MPA_V2_WRITE_RTR)
774*4882a593Smuzhiyun 				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 			if (mpa_ord & MPA_V2_READ_RTR)
777*4882a593Smuzhiyun 				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 			if (mpa_ird & MPA_V2_SEND_RTR)
780*4882a593Smuzhiyun 				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 			ep->rtr_type &= iwarp_info->rtr_type;
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 			/* if we're left with no match send our capabilities */
785*4882a593Smuzhiyun 			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
786*4882a593Smuzhiyun 				ep->rtr_type = iwarp_info->rtr_type;
787*4882a593Smuzhiyun 		}
788*4882a593Smuzhiyun 
789*4882a593Smuzhiyun 		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
790*4882a593Smuzhiyun 	} else {
791*4882a593Smuzhiyun 		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
792*4882a593Smuzhiyun 		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
793*4882a593Smuzhiyun 		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
794*4882a593Smuzhiyun 	}
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
797*4882a593Smuzhiyun 		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
798*4882a593Smuzhiyun 		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
799*4882a593Smuzhiyun 		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 	/* Strip mpa v2 hdr from private data before sending to upper layer */
802*4882a593Smuzhiyun 	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun 	ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len);
805*4882a593Smuzhiyun 	ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size;
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 	params.event = QED_IWARP_EVENT_MPA_REQUEST;
808*4882a593Smuzhiyun 	params.cm_info = &ep->cm_info;
809*4882a593Smuzhiyun 	params.ep_context = ep;
810*4882a593Smuzhiyun 	params.status = 0;
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
813*4882a593Smuzhiyun 	ep->event_cb(ep->cb_context, &params);
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun static int
qed_iwarp_mpa_offload(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep)817*4882a593Smuzhiyun qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
818*4882a593Smuzhiyun {
819*4882a593Smuzhiyun 	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
820*4882a593Smuzhiyun 	struct mpa_outgoing_params *common;
821*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info;
822*4882a593Smuzhiyun 	struct qed_sp_init_data init_data;
823*4882a593Smuzhiyun 	dma_addr_t async_output_phys;
824*4882a593Smuzhiyun 	struct qed_spq_entry *p_ent;
825*4882a593Smuzhiyun 	dma_addr_t out_pdata_phys;
826*4882a593Smuzhiyun 	dma_addr_t in_pdata_phys;
827*4882a593Smuzhiyun 	struct qed_rdma_qp *qp;
828*4882a593Smuzhiyun 	bool reject;
829*4882a593Smuzhiyun 	u32 val;
830*4882a593Smuzhiyun 	int rc;
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	if (!ep)
833*4882a593Smuzhiyun 		return -EINVAL;
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun 	qp = ep->qp;
836*4882a593Smuzhiyun 	reject = !qp;
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 	memset(&init_data, 0, sizeof(init_data));
839*4882a593Smuzhiyun 	init_data.cid = reject ? ep->tcp_cid : qp->icid;
840*4882a593Smuzhiyun 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun 	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
843*4882a593Smuzhiyun 		init_data.comp_mode = QED_SPQ_MODE_CB;
844*4882a593Smuzhiyun 	else
845*4882a593Smuzhiyun 		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	rc = qed_sp_init_request(p_hwfn, &p_ent,
848*4882a593Smuzhiyun 				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
849*4882a593Smuzhiyun 				 PROTOCOLID_IWARP, &init_data);
850*4882a593Smuzhiyun 	if (rc)
851*4882a593Smuzhiyun 		return rc;
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun 	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
854*4882a593Smuzhiyun 	common = &p_mpa_ramrod->common;
855*4882a593Smuzhiyun 
856*4882a593Smuzhiyun 	out_pdata_phys = ep->ep_buffer_phys +
857*4882a593Smuzhiyun 			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
858*4882a593Smuzhiyun 	DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys);
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 	val = ep->cm_info.private_data_len;
861*4882a593Smuzhiyun 	common->outgoing_ulp_buffer.len = cpu_to_le16(val);
862*4882a593Smuzhiyun 	common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 	common->out_rq.ord = cpu_to_le32(ep->cm_info.ord);
865*4882a593Smuzhiyun 	common->out_rq.ird = cpu_to_le32(ep->cm_info.ird);
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 	val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
868*4882a593Smuzhiyun 	p_mpa_ramrod->tcp_cid = cpu_to_le32(val);
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 	in_pdata_phys = ep->ep_buffer_phys +
871*4882a593Smuzhiyun 			offsetof(struct qed_iwarp_ep_memory, in_pdata);
872*4882a593Smuzhiyun 	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
873*4882a593Smuzhiyun 	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
874*4882a593Smuzhiyun 		       in_pdata_phys);
875*4882a593Smuzhiyun 	p_mpa_ramrod->incoming_ulp_buffer.len =
876*4882a593Smuzhiyun 	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
877*4882a593Smuzhiyun 	async_output_phys = ep->ep_buffer_phys +
878*4882a593Smuzhiyun 			    offsetof(struct qed_iwarp_ep_memory, async_output);
879*4882a593Smuzhiyun 	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
880*4882a593Smuzhiyun 		       async_output_phys);
881*4882a593Smuzhiyun 	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
882*4882a593Smuzhiyun 	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
883*4882a593Smuzhiyun 
884*4882a593Smuzhiyun 	if (!reject) {
885*4882a593Smuzhiyun 		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
886*4882a593Smuzhiyun 			       qp->shared_queue_phys_addr);
887*4882a593Smuzhiyun 		p_mpa_ramrod->stats_counter_id =
888*4882a593Smuzhiyun 		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
889*4882a593Smuzhiyun 	} else {
890*4882a593Smuzhiyun 		common->reject = 1;
891*4882a593Smuzhiyun 	}
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
894*4882a593Smuzhiyun 	p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size);
895*4882a593Smuzhiyun 	p_mpa_ramrod->mode = ep->mpa_rev;
896*4882a593Smuzhiyun 	SET_FIELD(p_mpa_ramrod->rtr_pref,
897*4882a593Smuzhiyun 		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
900*4882a593Smuzhiyun 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
901*4882a593Smuzhiyun 	if (!reject)
902*4882a593Smuzhiyun 		ep->cid = qp->icid;	/* Now they're migrated. */
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn,
905*4882a593Smuzhiyun 		   QED_MSG_RDMA,
906*4882a593Smuzhiyun 		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
907*4882a593Smuzhiyun 		   reject ? 0xffff : qp->icid,
908*4882a593Smuzhiyun 		   ep->tcp_cid,
909*4882a593Smuzhiyun 		   rc,
910*4882a593Smuzhiyun 		   ep->cm_info.ird,
911*4882a593Smuzhiyun 		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
912*4882a593Smuzhiyun 	return rc;
913*4882a593Smuzhiyun }
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun static void
qed_iwarp_return_ep(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep)916*4882a593Smuzhiyun qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
917*4882a593Smuzhiyun {
918*4882a593Smuzhiyun 	ep->state = QED_IWARP_EP_INIT;
919*4882a593Smuzhiyun 	if (ep->qp)
920*4882a593Smuzhiyun 		ep->qp->ep = NULL;
921*4882a593Smuzhiyun 	ep->qp = NULL;
922*4882a593Smuzhiyun 	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
925*4882a593Smuzhiyun 		/* We don't care about the return code, it's ok if tcp_cid
926*4882a593Smuzhiyun 		 * remains invalid...in this case we'll defer allocation
927*4882a593Smuzhiyun 		 */
928*4882a593Smuzhiyun 		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
929*4882a593Smuzhiyun 	}
930*4882a593Smuzhiyun 	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	list_move_tail(&ep->list_entry,
933*4882a593Smuzhiyun 		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);
934*4882a593Smuzhiyun 
935*4882a593Smuzhiyun 	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
936*4882a593Smuzhiyun }
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun static void
qed_iwarp_parse_private_data(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep)939*4882a593Smuzhiyun qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
940*4882a593Smuzhiyun {
941*4882a593Smuzhiyun 	struct mpa_v2_hdr *mpa_v2_params;
942*4882a593Smuzhiyun 	union async_output *async_data;
943*4882a593Smuzhiyun 	u16 mpa_ird, mpa_ord;
944*4882a593Smuzhiyun 	u8 mpa_data_size = 0;
945*4882a593Smuzhiyun 	u16 ulp_data_len;
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
948*4882a593Smuzhiyun 		mpa_v2_params =
949*4882a593Smuzhiyun 			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
950*4882a593Smuzhiyun 		mpa_data_size = sizeof(*mpa_v2_params);
951*4882a593Smuzhiyun 		mpa_ird = ntohs(mpa_v2_params->ird);
952*4882a593Smuzhiyun 		mpa_ord = ntohs(mpa_v2_params->ord);
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
955*4882a593Smuzhiyun 		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
956*4882a593Smuzhiyun 	}
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	async_data = &ep->ep_buffer_virt->async_output;
959*4882a593Smuzhiyun 	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 	ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len);
962*4882a593Smuzhiyun 	ep->cm_info.private_data_len = ulp_data_len - mpa_data_size;
963*4882a593Smuzhiyun }
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep)966*4882a593Smuzhiyun qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
967*4882a593Smuzhiyun {
968*4882a593Smuzhiyun 	struct qed_iwarp_cm_event_params params;
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
971*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn,
972*4882a593Smuzhiyun 			  "MPA reply event not expected on passive side!\n");
973*4882a593Smuzhiyun 		return;
974*4882a593Smuzhiyun 	}
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 	qed_iwarp_parse_private_data(p_hwfn, ep);
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
981*4882a593Smuzhiyun 		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
982*4882a593Smuzhiyun 		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 	params.cm_info = &ep->cm_info;
985*4882a593Smuzhiyun 	params.ep_context = ep;
986*4882a593Smuzhiyun 	params.status = 0;
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	ep->mpa_reply_processed = true;
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun 	ep->event_cb(ep->cb_context, &params);
991*4882a593Smuzhiyun }
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun #define QED_IWARP_CONNECT_MODE_STRING(ep) \
994*4882a593Smuzhiyun 	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun /* Called as a result of the event:
997*4882a593Smuzhiyun  * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
998*4882a593Smuzhiyun  */
999*4882a593Smuzhiyun static void
qed_iwarp_mpa_complete(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep,u8 fw_return_code)1000*4882a593Smuzhiyun qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
1001*4882a593Smuzhiyun 		       struct qed_iwarp_ep *ep, u8 fw_return_code)
1002*4882a593Smuzhiyun {
1003*4882a593Smuzhiyun 	struct qed_iwarp_cm_event_params params;
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1006*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
1007*4882a593Smuzhiyun 	else
1008*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
1011*4882a593Smuzhiyun 		qed_iwarp_parse_private_data(p_hwfn, ep);
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1014*4882a593Smuzhiyun 		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1015*4882a593Smuzhiyun 		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	params.cm_info = &ep->cm_info;
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 	params.ep_context = ep;
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	switch (fw_return_code) {
1022*4882a593Smuzhiyun 	case RDMA_RETURN_OK:
1023*4882a593Smuzhiyun 		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1024*4882a593Smuzhiyun 		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1025*4882a593Smuzhiyun 		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
1026*4882a593Smuzhiyun 		ep->state = QED_IWARP_EP_ESTABLISHED;
1027*4882a593Smuzhiyun 		params.status = 0;
1028*4882a593Smuzhiyun 		break;
1029*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_TIMEOUT:
1030*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
1031*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1032*4882a593Smuzhiyun 		params.status = -EBUSY;
1033*4882a593Smuzhiyun 		break;
1034*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1035*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
1036*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1037*4882a593Smuzhiyun 		params.status = -ECONNREFUSED;
1038*4882a593Smuzhiyun 		break;
1039*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_RST:
1040*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1041*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1042*4882a593Smuzhiyun 			  ep->tcp_cid);
1043*4882a593Smuzhiyun 		params.status = -ECONNRESET;
1044*4882a593Smuzhiyun 		break;
1045*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_FIN:
1046*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
1047*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1048*4882a593Smuzhiyun 		params.status = -ECONNREFUSED;
1049*4882a593Smuzhiyun 		break;
1050*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1051*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
1052*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1053*4882a593Smuzhiyun 		params.status = -ECONNREFUSED;
1054*4882a593Smuzhiyun 		break;
1055*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1056*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
1057*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1058*4882a593Smuzhiyun 		params.status = -ECONNREFUSED;
1059*4882a593Smuzhiyun 		break;
1060*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1061*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
1062*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1063*4882a593Smuzhiyun 		params.status = -ECONNREFUSED;
1064*4882a593Smuzhiyun 		break;
1065*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1066*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
1067*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1068*4882a593Smuzhiyun 		params.status = -ECONNREFUSED;
1069*4882a593Smuzhiyun 		break;
1070*4882a593Smuzhiyun 	case IWARP_CONN_ERROR_MPA_TERMINATE:
1071*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
1072*4882a593Smuzhiyun 			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1073*4882a593Smuzhiyun 		params.status = -ECONNREFUSED;
1074*4882a593Smuzhiyun 		break;
1075*4882a593Smuzhiyun 	default:
1076*4882a593Smuzhiyun 		params.status = -ECONNRESET;
1077*4882a593Smuzhiyun 		break;
1078*4882a593Smuzhiyun 	}
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	if (fw_return_code != RDMA_RETURN_OK)
1081*4882a593Smuzhiyun 		/* paired with READ_ONCE in destroy_qp */
1082*4882a593Smuzhiyun 		smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	ep->event_cb(ep->cb_context, &params);
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	/* on passive side, if there is no associated QP (REJECT) we need to
1087*4882a593Smuzhiyun 	 * return the ep to the pool, (in the regular case we add an element
1088*4882a593Smuzhiyun 	 * in accept instead of this one.
1089*4882a593Smuzhiyun 	 * In both cases we need to remove it from the ep_list.
1090*4882a593Smuzhiyun 	 */
1091*4882a593Smuzhiyun 	if (fw_return_code != RDMA_RETURN_OK) {
1092*4882a593Smuzhiyun 		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1093*4882a593Smuzhiyun 		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1094*4882a593Smuzhiyun 		    (!ep->qp)) {	/* Rejected */
1095*4882a593Smuzhiyun 			qed_iwarp_return_ep(p_hwfn, ep);
1096*4882a593Smuzhiyun 		} else {
1097*4882a593Smuzhiyun 			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1098*4882a593Smuzhiyun 			list_del(&ep->list_entry);
1099*4882a593Smuzhiyun 			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1100*4882a593Smuzhiyun 		}
1101*4882a593Smuzhiyun 	}
1102*4882a593Smuzhiyun }
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep,u8 * mpa_data_size)1105*4882a593Smuzhiyun qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
1106*4882a593Smuzhiyun 			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
1107*4882a593Smuzhiyun {
1108*4882a593Smuzhiyun 	struct mpa_v2_hdr *mpa_v2_params;
1109*4882a593Smuzhiyun 	u16 mpa_ird, mpa_ord;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	*mpa_data_size = 0;
1112*4882a593Smuzhiyun 	if (MPA_REV2(ep->mpa_rev)) {
1113*4882a593Smuzhiyun 		mpa_v2_params =
1114*4882a593Smuzhiyun 		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1115*4882a593Smuzhiyun 		*mpa_data_size = sizeof(*mpa_v2_params);
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 		mpa_ird = (u16)ep->cm_info.ird;
1118*4882a593Smuzhiyun 		mpa_ord = (u16)ep->cm_info.ord;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1121*4882a593Smuzhiyun 			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1124*4882a593Smuzhiyun 				mpa_ird |= MPA_V2_SEND_RTR;
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1127*4882a593Smuzhiyun 				mpa_ord |= MPA_V2_WRITE_RTR;
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1130*4882a593Smuzhiyun 				mpa_ord |= MPA_V2_READ_RTR;
1131*4882a593Smuzhiyun 		}
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 		mpa_v2_params->ird = htons(mpa_ird);
1134*4882a593Smuzhiyun 		mpa_v2_params->ord = htons(mpa_ord);
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn,
1137*4882a593Smuzhiyun 			   QED_MSG_RDMA,
1138*4882a593Smuzhiyun 			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1139*4882a593Smuzhiyun 			   mpa_v2_params->ird,
1140*4882a593Smuzhiyun 			   mpa_v2_params->ord,
1141*4882a593Smuzhiyun 			   *((u32 *)mpa_v2_params),
1142*4882a593Smuzhiyun 			   mpa_ord & MPA_V2_IRD_ORD_MASK,
1143*4882a593Smuzhiyun 			   mpa_ird & MPA_V2_IRD_ORD_MASK,
1144*4882a593Smuzhiyun 			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1145*4882a593Smuzhiyun 			   !!(mpa_ird & MPA_V2_SEND_RTR),
1146*4882a593Smuzhiyun 			   !!(mpa_ord & MPA_V2_WRITE_RTR),
1147*4882a593Smuzhiyun 			   !!(mpa_ord & MPA_V2_READ_RTR));
1148*4882a593Smuzhiyun 	}
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun 
qed_iwarp_connect(void * rdma_cxt,struct qed_iwarp_connect_in * iparams,struct qed_iwarp_connect_out * oparams)1151*4882a593Smuzhiyun int qed_iwarp_connect(void *rdma_cxt,
1152*4882a593Smuzhiyun 		      struct qed_iwarp_connect_in *iparams,
1153*4882a593Smuzhiyun 		      struct qed_iwarp_connect_out *oparams)
1154*4882a593Smuzhiyun {
1155*4882a593Smuzhiyun 	struct qed_hwfn *p_hwfn = rdma_cxt;
1156*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info;
1157*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep;
1158*4882a593Smuzhiyun 	u8 mpa_data_size = 0;
1159*4882a593Smuzhiyun 	u32 cid;
1160*4882a593Smuzhiyun 	int rc;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
1163*4882a593Smuzhiyun 	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
1164*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn,
1165*4882a593Smuzhiyun 			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1166*4882a593Smuzhiyun 			  iparams->qp->icid, iparams->cm_info.ord,
1167*4882a593Smuzhiyun 			  iparams->cm_info.ird);
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 		return -EINVAL;
1170*4882a593Smuzhiyun 	}
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	/* Allocate ep object */
1175*4882a593Smuzhiyun 	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1176*4882a593Smuzhiyun 	if (rc)
1177*4882a593Smuzhiyun 		return rc;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	rc = qed_iwarp_create_ep(p_hwfn, &ep);
1180*4882a593Smuzhiyun 	if (rc)
1181*4882a593Smuzhiyun 		goto err;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	ep->tcp_cid = cid;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1186*4882a593Smuzhiyun 	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
1187*4882a593Smuzhiyun 	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	ep->qp = iparams->qp;
1190*4882a593Smuzhiyun 	ep->qp->ep = ep;
1191*4882a593Smuzhiyun 	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
1192*4882a593Smuzhiyun 	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
1193*4882a593Smuzhiyun 	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	ep->cm_info.ord = iparams->cm_info.ord;
1196*4882a593Smuzhiyun 	ep->cm_info.ird = iparams->cm_info.ird;
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	ep->rtr_type = iwarp_info->rtr_type;
1199*4882a593Smuzhiyun 	if (!iwarp_info->peer2peer)
1200*4882a593Smuzhiyun 		ep->rtr_type = MPA_RTR_TYPE_NONE;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
1203*4882a593Smuzhiyun 		ep->cm_info.ord = 1;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	ep->mpa_rev = iwarp_info->mpa_rev;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1210*4882a593Smuzhiyun 	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
1211*4882a593Smuzhiyun 				       mpa_data_size;
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1214*4882a593Smuzhiyun 	       iparams->cm_info.private_data,
1215*4882a593Smuzhiyun 	       iparams->cm_info.private_data_len);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	ep->mss = iparams->mss;
1218*4882a593Smuzhiyun 	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	ep->event_cb = iparams->event_cb;
1221*4882a593Smuzhiyun 	ep->cb_context = iparams->cb_context;
1222*4882a593Smuzhiyun 	ep->connect_mode = TCP_CONNECT_ACTIVE;
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	oparams->ep_context = ep;
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1229*4882a593Smuzhiyun 		   iparams->qp->icid, ep->tcp_cid, rc);
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	if (rc) {
1232*4882a593Smuzhiyun 		qed_iwarp_destroy_ep(p_hwfn, ep, true);
1233*4882a593Smuzhiyun 		goto err;
1234*4882a593Smuzhiyun 	}
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	return rc;
1237*4882a593Smuzhiyun err:
1238*4882a593Smuzhiyun 	qed_iwarp_cid_cleaned(p_hwfn, cid);
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	return rc;
1241*4882a593Smuzhiyun }
1242*4882a593Smuzhiyun 
qed_iwarp_get_free_ep(struct qed_hwfn * p_hwfn)1243*4882a593Smuzhiyun static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep = NULL;
1246*4882a593Smuzhiyun 	int rc;
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1251*4882a593Smuzhiyun 		DP_ERR(p_hwfn, "Ep list is empty\n");
1252*4882a593Smuzhiyun 		goto out;
1253*4882a593Smuzhiyun 	}
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1256*4882a593Smuzhiyun 			      struct qed_iwarp_ep, list_entry);
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	/* in some cases we could have failed allocating a tcp cid when added
1259*4882a593Smuzhiyun 	 * from accept / failure... retry now..this is not the common case.
1260*4882a593Smuzhiyun 	 */
1261*4882a593Smuzhiyun 	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
1262*4882a593Smuzhiyun 		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 		/* if we fail we could look for another entry with a valid
1265*4882a593Smuzhiyun 		 * tcp_cid, but since we don't expect to reach this anyway
1266*4882a593Smuzhiyun 		 * it's not worth the handling
1267*4882a593Smuzhiyun 		 */
1268*4882a593Smuzhiyun 		if (rc) {
1269*4882a593Smuzhiyun 			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1270*4882a593Smuzhiyun 			ep = NULL;
1271*4882a593Smuzhiyun 			goto out;
1272*4882a593Smuzhiyun 		}
1273*4882a593Smuzhiyun 	}
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	list_del(&ep->list_entry);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun out:
1278*4882a593Smuzhiyun 	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1279*4882a593Smuzhiyun 	return ep;
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun 
/* Polling interval (ms) and the number of consecutive no-progress polls
 * tolerated while waiting for a cid bitmap to drain (see
 * qed_iwarp_wait_cid_map_cleared()).
 */
#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun /* This function waits for all the bits of a bmap to be cleared, as long as
1286*4882a593Smuzhiyun  * there is progress ( i.e. the number of bits left to be cleared decreases )
1287*4882a593Smuzhiyun  * the function continues.
1288*4882a593Smuzhiyun  */
1289*4882a593Smuzhiyun static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn * p_hwfn,struct qed_bmap * bmap)1290*4882a593Smuzhiyun qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
1291*4882a593Smuzhiyun {
1292*4882a593Smuzhiyun 	int prev_weight = 0;
1293*4882a593Smuzhiyun 	int wait_count = 0;
1294*4882a593Smuzhiyun 	int weight = 0;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1297*4882a593Smuzhiyun 	prev_weight = weight;
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	while (weight) {
1300*4882a593Smuzhiyun 		/* If the HW device is during recovery, all resources are
1301*4882a593Smuzhiyun 		 * immediately reset without receiving a per-cid indication
1302*4882a593Smuzhiyun 		 * from HW. In this case we don't expect the cid_map to be
1303*4882a593Smuzhiyun 		 * cleared.
1304*4882a593Smuzhiyun 		 */
1305*4882a593Smuzhiyun 		if (p_hwfn->cdev->recov_in_prog)
1306*4882a593Smuzhiyun 			return 0;
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 		weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 		if (prev_weight == weight) {
1313*4882a593Smuzhiyun 			wait_count++;
1314*4882a593Smuzhiyun 		} else {
1315*4882a593Smuzhiyun 			prev_weight = weight;
1316*4882a593Smuzhiyun 			wait_count = 0;
1317*4882a593Smuzhiyun 		}
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
1320*4882a593Smuzhiyun 			DP_NOTICE(p_hwfn,
1321*4882a593Smuzhiyun 				  "%s bitmap wait timed out (%d cids pending)\n",
1322*4882a593Smuzhiyun 				  bmap->name, weight);
1323*4882a593Smuzhiyun 			return -EBUSY;
1324*4882a593Smuzhiyun 		}
1325*4882a593Smuzhiyun 	}
1326*4882a593Smuzhiyun 	return 0;
1327*4882a593Smuzhiyun }
1328*4882a593Smuzhiyun 
qed_iwarp_wait_for_all_cids(struct qed_hwfn * p_hwfn)1329*4882a593Smuzhiyun static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
1330*4882a593Smuzhiyun {
1331*4882a593Smuzhiyun 	int rc;
1332*4882a593Smuzhiyun 	int i;
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
1335*4882a593Smuzhiyun 					    &p_hwfn->p_rdma_info->tcp_cid_map);
1336*4882a593Smuzhiyun 	if (rc)
1337*4882a593Smuzhiyun 		return rc;
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	/* Now free the tcp cids from the main cid map */
1340*4882a593Smuzhiyun 	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
1341*4882a593Smuzhiyun 		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	/* Now wait for all cids to be completed */
1344*4882a593Smuzhiyun 	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
1345*4882a593Smuzhiyun 					      &p_hwfn->p_rdma_info->cid_map);
1346*4882a593Smuzhiyun }
1347*4882a593Smuzhiyun 
qed_iwarp_free_prealloc_ep(struct qed_hwfn * p_hwfn)1348*4882a593Smuzhiyun static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
1349*4882a593Smuzhiyun {
1350*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1353*4882a593Smuzhiyun 		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1356*4882a593Smuzhiyun 				      struct qed_iwarp_ep, list_entry);
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 		if (!ep) {
1359*4882a593Smuzhiyun 			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1360*4882a593Smuzhiyun 			break;
1361*4882a593Smuzhiyun 		}
1362*4882a593Smuzhiyun 		list_del(&ep->list_entry);
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
1367*4882a593Smuzhiyun 			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 		qed_iwarp_destroy_ep(p_hwfn, ep, false);
1370*4882a593Smuzhiyun 	}
1371*4882a593Smuzhiyun }
1372*4882a593Smuzhiyun 
/* Pre-allocate endpoint structures so the passive-connection path never
 * needs a dynamic ilt allocation in atomic context.
 *
 * @init: true at driver init - create QED_IWARP_PREALLOC_CNT eps with
 *        cids from the main pool; false at runtime - replenish a single
 *        ep from the dedicated tcp_cid pool.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{
	struct qed_iwarp_ep *ep;
	int rc = 0;
	int count;
	/* Defensive: start from the invalid marker so ep->tcp_cid cannot
	 * pick up stack garbage if qed_iwarp_alloc_tcp_cid() fails without
	 * writing the out-parameter (presumed to set it invalid on failure
	 * - TODO confirm against its definition).
	 */
	u32 cid = QED_IWARP_INVALID_TCP_CID;
	int i;

	count = init ? QED_IWARP_PREALLOC_CNT : 1;
	for (i = 0; i < count; i++) {
		rc = qed_iwarp_create_ep(p_hwfn, &ep);
		if (rc)
			return rc;

		/* During initialization we allocate from the main pool,
		 * afterwards we allocate only from the tcp_cid.
		 */
		if (init) {
			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
			if (rc)
				goto err;
			qed_iwarp_set_tcp_cid(p_hwfn, cid);
		} else {
			/* We don't care about the return code, it's ok if
			 * tcp_cid remains invalid...in this case we'll
			 * defer allocation
			 */
			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
		}

		ep->tcp_cid = cid;

		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_add_tail(&ep->list_entry,
			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	return rc;

err:
	qed_iwarp_destroy_ep(p_hwfn, ep, false);

	return rc;
}
1418*4882a593Smuzhiyun 
qed_iwarp_alloc(struct qed_hwfn * p_hwfn)1419*4882a593Smuzhiyun int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1420*4882a593Smuzhiyun {
1421*4882a593Smuzhiyun 	int rc;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	/* Allocate bitmap for tcp cid. These are used by passive side
1424*4882a593Smuzhiyun 	 * to ensure it can allocate a tcp cid during dpc that was
1425*4882a593Smuzhiyun 	 * pre-acquired and doesn't require dynamic allocation of ilt
1426*4882a593Smuzhiyun 	 */
1427*4882a593Smuzhiyun 	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1428*4882a593Smuzhiyun 				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
1429*4882a593Smuzhiyun 	if (rc) {
1430*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1431*4882a593Smuzhiyun 			   "Failed to allocate tcp cid, rc = %d\n", rc);
1432*4882a593Smuzhiyun 		return rc;
1433*4882a593Smuzhiyun 	}
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1436*4882a593Smuzhiyun 	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun 	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1439*4882a593Smuzhiyun 	if (rc)
1440*4882a593Smuzhiyun 		return rc;
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	return qed_ooo_alloc(p_hwfn);
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun 
qed_iwarp_resc_free(struct qed_hwfn * p_hwfn)1445*4882a593Smuzhiyun void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1446*4882a593Smuzhiyun {
1447*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	qed_ooo_free(p_hwfn);
1450*4882a593Smuzhiyun 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1451*4882a593Smuzhiyun 	kfree(iwarp_info->mpa_bufs);
1452*4882a593Smuzhiyun 	kfree(iwarp_info->partial_fpdus);
1453*4882a593Smuzhiyun 	kfree(iwarp_info->mpa_intermediate_buf);
1454*4882a593Smuzhiyun }
1455*4882a593Smuzhiyun 
qed_iwarp_accept(void * rdma_cxt,struct qed_iwarp_accept_in * iparams)1456*4882a593Smuzhiyun int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1457*4882a593Smuzhiyun {
1458*4882a593Smuzhiyun 	struct qed_hwfn *p_hwfn = rdma_cxt;
1459*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep;
1460*4882a593Smuzhiyun 	u8 mpa_data_size = 0;
1461*4882a593Smuzhiyun 	int rc;
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	ep = iparams->ep_context;
1464*4882a593Smuzhiyun 	if (!ep) {
1465*4882a593Smuzhiyun 		DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n");
1466*4882a593Smuzhiyun 		return -EINVAL;
1467*4882a593Smuzhiyun 	}
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1470*4882a593Smuzhiyun 		   iparams->qp->icid, ep->tcp_cid);
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1473*4882a593Smuzhiyun 	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1474*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn,
1475*4882a593Smuzhiyun 			   QED_MSG_RDMA,
1476*4882a593Smuzhiyun 			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1477*4882a593Smuzhiyun 			   iparams->qp->icid,
1478*4882a593Smuzhiyun 			   ep->tcp_cid, iparams->ord, iparams->ord);
1479*4882a593Smuzhiyun 		return -EINVAL;
1480*4882a593Smuzhiyun 	}
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	qed_iwarp_prealloc_ep(p_hwfn, false);
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	ep->cb_context = iparams->cb_context;
1485*4882a593Smuzhiyun 	ep->qp = iparams->qp;
1486*4882a593Smuzhiyun 	ep->qp->ep = ep;
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1489*4882a593Smuzhiyun 		/* Negotiate ord/ird: if upperlayer requested ord larger than
1490*4882a593Smuzhiyun 		 * ird advertised by remote, we need to decrease our ord
1491*4882a593Smuzhiyun 		 */
1492*4882a593Smuzhiyun 		if (iparams->ord > ep->cm_info.ird)
1493*4882a593Smuzhiyun 			iparams->ord = ep->cm_info.ird;
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1496*4882a593Smuzhiyun 		    (iparams->ird == 0))
1497*4882a593Smuzhiyun 			iparams->ird = 1;
1498*4882a593Smuzhiyun 	}
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	/* Update cm_info ord/ird to be negotiated values */
1501*4882a593Smuzhiyun 	ep->cm_info.ord = iparams->ord;
1502*4882a593Smuzhiyun 	ep->cm_info.ird = iparams->ird;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1507*4882a593Smuzhiyun 	ep->cm_info.private_data_len = iparams->private_data_len +
1508*4882a593Smuzhiyun 				       mpa_data_size;
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1511*4882a593Smuzhiyun 	       iparams->private_data, iparams->private_data_len);
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1514*4882a593Smuzhiyun 	if (rc)
1515*4882a593Smuzhiyun 		qed_iwarp_modify_qp(p_hwfn,
1516*4882a593Smuzhiyun 				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	return rc;
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun 
qed_iwarp_reject(void * rdma_cxt,struct qed_iwarp_reject_in * iparams)1521*4882a593Smuzhiyun int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1522*4882a593Smuzhiyun {
1523*4882a593Smuzhiyun 	struct qed_hwfn *p_hwfn = rdma_cxt;
1524*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep;
1525*4882a593Smuzhiyun 	u8 mpa_data_size = 0;
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	ep = iparams->ep_context;
1528*4882a593Smuzhiyun 	if (!ep) {
1529*4882a593Smuzhiyun 		DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n");
1530*4882a593Smuzhiyun 		return -EINVAL;
1531*4882a593Smuzhiyun 	}
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	ep->cb_context = iparams->cb_context;
1536*4882a593Smuzhiyun 	ep->qp = NULL;
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1541*4882a593Smuzhiyun 	ep->cm_info.private_data_len = iparams->private_data_len +
1542*4882a593Smuzhiyun 				       mpa_data_size;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1545*4882a593Smuzhiyun 	       iparams->private_data, iparams->private_data_len);
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	return qed_iwarp_mpa_offload(p_hwfn, ep);
1548*4882a593Smuzhiyun }
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun static void
qed_iwarp_print_cm_info(struct qed_hwfn * p_hwfn,struct qed_iwarp_cm_info * cm_info)1551*4882a593Smuzhiyun qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1552*4882a593Smuzhiyun 			struct qed_iwarp_cm_info *cm_info)
1553*4882a593Smuzhiyun {
1554*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1555*4882a593Smuzhiyun 		   cm_info->ip_version);
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 	if (cm_info->ip_version == QED_TCP_IPV4)
1558*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1559*4882a593Smuzhiyun 			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1560*4882a593Smuzhiyun 			   cm_info->remote_ip, cm_info->remote_port,
1561*4882a593Smuzhiyun 			   cm_info->local_ip, cm_info->local_port,
1562*4882a593Smuzhiyun 			   cm_info->vlan);
1563*4882a593Smuzhiyun 	else
1564*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1565*4882a593Smuzhiyun 			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1566*4882a593Smuzhiyun 			   cm_info->remote_ip, cm_info->remote_port,
1567*4882a593Smuzhiyun 			   cm_info->local_ip, cm_info->local_port,
1568*4882a593Smuzhiyun 			   cm_info->vlan);
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1571*4882a593Smuzhiyun 		   "private_data_len = %x ord = %d, ird = %d\n",
1572*4882a593Smuzhiyun 		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun static int
qed_iwarp_ll2_post_rx(struct qed_hwfn * p_hwfn,struct qed_iwarp_ll2_buff * buf,u8 handle)1576*4882a593Smuzhiyun qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1577*4882a593Smuzhiyun 		      struct qed_iwarp_ll2_buff *buf, u8 handle)
1578*4882a593Smuzhiyun {
1579*4882a593Smuzhiyun 	int rc;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1582*4882a593Smuzhiyun 				    (u16)buf->buff_size, buf, 1);
1583*4882a593Smuzhiyun 	if (rc) {
1584*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn,
1585*4882a593Smuzhiyun 			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1586*4882a593Smuzhiyun 			  rc, handle);
1587*4882a593Smuzhiyun 		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1588*4882a593Smuzhiyun 				  buf->data, buf->data_phys_addr);
1589*4882a593Smuzhiyun 		kfree(buf);
1590*4882a593Smuzhiyun 	}
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	return rc;
1593*4882a593Smuzhiyun }
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun static bool
qed_iwarp_ep_exists(struct qed_hwfn * p_hwfn,struct qed_iwarp_cm_info * cm_info)1596*4882a593Smuzhiyun qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1597*4882a593Smuzhiyun {
1598*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep = NULL;
1599*4882a593Smuzhiyun 	bool found = false;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	list_for_each_entry(ep,
1602*4882a593Smuzhiyun 			    &p_hwfn->p_rdma_info->iwarp.ep_list,
1603*4882a593Smuzhiyun 			    list_entry) {
1604*4882a593Smuzhiyun 		if ((ep->cm_info.local_port == cm_info->local_port) &&
1605*4882a593Smuzhiyun 		    (ep->cm_info.remote_port == cm_info->remote_port) &&
1606*4882a593Smuzhiyun 		    (ep->cm_info.vlan == cm_info->vlan) &&
1607*4882a593Smuzhiyun 		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1608*4882a593Smuzhiyun 			    sizeof(cm_info->local_ip)) &&
1609*4882a593Smuzhiyun 		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1610*4882a593Smuzhiyun 			    sizeof(cm_info->remote_ip))) {
1611*4882a593Smuzhiyun 			found = true;
1612*4882a593Smuzhiyun 			break;
1613*4882a593Smuzhiyun 		}
1614*4882a593Smuzhiyun 	}
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	if (found) {
1617*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn,
1618*4882a593Smuzhiyun 			  "SYN received on active connection - dropping\n");
1619*4882a593Smuzhiyun 		qed_iwarp_print_cm_info(p_hwfn, cm_info);
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 		return true;
1622*4882a593Smuzhiyun 	}
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	return false;
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun static struct qed_iwarp_listener *
qed_iwarp_get_listener(struct qed_hwfn * p_hwfn,struct qed_iwarp_cm_info * cm_info)1628*4882a593Smuzhiyun qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1629*4882a593Smuzhiyun 		       struct qed_iwarp_cm_info *cm_info)
1630*4882a593Smuzhiyun {
1631*4882a593Smuzhiyun 	struct qed_iwarp_listener *listener = NULL;
1632*4882a593Smuzhiyun 	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1633*4882a593Smuzhiyun 	bool found = false;
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	list_for_each_entry(listener,
1638*4882a593Smuzhiyun 			    &p_hwfn->p_rdma_info->iwarp.listen_list,
1639*4882a593Smuzhiyun 			    list_entry) {
1640*4882a593Smuzhiyun 		if (listener->port == cm_info->local_port) {
1641*4882a593Smuzhiyun 			if (!memcmp(listener->ip_addr,
1642*4882a593Smuzhiyun 				    ip_zero, sizeof(ip_zero))) {
1643*4882a593Smuzhiyun 				found = true;
1644*4882a593Smuzhiyun 				break;
1645*4882a593Smuzhiyun 			}
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 			if (!memcmp(listener->ip_addr,
1648*4882a593Smuzhiyun 				    cm_info->local_ip,
1649*4882a593Smuzhiyun 				    sizeof(cm_info->local_ip)) &&
1650*4882a593Smuzhiyun 			    (listener->vlan == cm_info->vlan)) {
1651*4882a593Smuzhiyun 				found = true;
1652*4882a593Smuzhiyun 				break;
1653*4882a593Smuzhiyun 			}
1654*4882a593Smuzhiyun 		}
1655*4882a593Smuzhiyun 	}
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	if (found) {
1658*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1659*4882a593Smuzhiyun 			   listener);
1660*4882a593Smuzhiyun 		return listener;
1661*4882a593Smuzhiyun 	}
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1664*4882a593Smuzhiyun 	return NULL;
1665*4882a593Smuzhiyun }
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun static int
qed_iwarp_parse_rx_pkt(struct qed_hwfn * p_hwfn,struct qed_iwarp_cm_info * cm_info,void * buf,u8 * remote_mac_addr,u8 * local_mac_addr,int * payload_len,int * tcp_start_offset)1668*4882a593Smuzhiyun qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1669*4882a593Smuzhiyun 		       struct qed_iwarp_cm_info *cm_info,
1670*4882a593Smuzhiyun 		       void *buf,
1671*4882a593Smuzhiyun 		       u8 *remote_mac_addr,
1672*4882a593Smuzhiyun 		       u8 *local_mac_addr,
1673*4882a593Smuzhiyun 		       int *payload_len, int *tcp_start_offset)
1674*4882a593Smuzhiyun {
1675*4882a593Smuzhiyun 	struct vlan_ethhdr *vethh;
1676*4882a593Smuzhiyun 	bool vlan_valid = false;
1677*4882a593Smuzhiyun 	struct ipv6hdr *ip6h;
1678*4882a593Smuzhiyun 	struct ethhdr *ethh;
1679*4882a593Smuzhiyun 	struct tcphdr *tcph;
1680*4882a593Smuzhiyun 	struct iphdr *iph;
1681*4882a593Smuzhiyun 	int eth_hlen;
1682*4882a593Smuzhiyun 	int ip_hlen;
1683*4882a593Smuzhiyun 	int eth_type;
1684*4882a593Smuzhiyun 	int i;
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 	ethh = buf;
1687*4882a593Smuzhiyun 	eth_type = ntohs(ethh->h_proto);
1688*4882a593Smuzhiyun 	if (eth_type == ETH_P_8021Q) {
1689*4882a593Smuzhiyun 		vlan_valid = true;
1690*4882a593Smuzhiyun 		vethh = (struct vlan_ethhdr *)ethh;
1691*4882a593Smuzhiyun 		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1692*4882a593Smuzhiyun 		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1693*4882a593Smuzhiyun 	}
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	if (!ether_addr_equal(ethh->h_dest,
1698*4882a593Smuzhiyun 			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1699*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn,
1700*4882a593Smuzhiyun 			   QED_MSG_RDMA,
1701*4882a593Smuzhiyun 			   "Got unexpected mac %pM instead of %pM\n",
1702*4882a593Smuzhiyun 			   ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1703*4882a593Smuzhiyun 		return -EINVAL;
1704*4882a593Smuzhiyun 	}
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	ether_addr_copy(remote_mac_addr, ethh->h_source);
1707*4882a593Smuzhiyun 	ether_addr_copy(local_mac_addr, ethh->h_dest);
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
1710*4882a593Smuzhiyun 		   eth_type, ethh->h_source);
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1713*4882a593Smuzhiyun 		   eth_hlen, ethh->h_dest);
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 	if (eth_type == ETH_P_IP) {
1718*4882a593Smuzhiyun 		if (iph->protocol != IPPROTO_TCP) {
1719*4882a593Smuzhiyun 			DP_NOTICE(p_hwfn,
1720*4882a593Smuzhiyun 				  "Unexpected ip protocol on ll2 %x\n",
1721*4882a593Smuzhiyun 				  iph->protocol);
1722*4882a593Smuzhiyun 			return -EINVAL;
1723*4882a593Smuzhiyun 		}
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 		cm_info->local_ip[0] = ntohl(iph->daddr);
1726*4882a593Smuzhiyun 		cm_info->remote_ip[0] = ntohl(iph->saddr);
1727*4882a593Smuzhiyun 		cm_info->ip_version = QED_TCP_IPV4;
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 		ip_hlen = (iph->ihl) * sizeof(u32);
1730*4882a593Smuzhiyun 		*payload_len = ntohs(iph->tot_len) - ip_hlen;
1731*4882a593Smuzhiyun 	} else if (eth_type == ETH_P_IPV6) {
1732*4882a593Smuzhiyun 		ip6h = (struct ipv6hdr *)iph;
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 		if (ip6h->nexthdr != IPPROTO_TCP) {
1735*4882a593Smuzhiyun 			DP_NOTICE(p_hwfn,
1736*4882a593Smuzhiyun 				  "Unexpected ip protocol on ll2 %x\n",
1737*4882a593Smuzhiyun 				  iph->protocol);
1738*4882a593Smuzhiyun 			return -EINVAL;
1739*4882a593Smuzhiyun 		}
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 		for (i = 0; i < 4; i++) {
1742*4882a593Smuzhiyun 			cm_info->local_ip[i] =
1743*4882a593Smuzhiyun 			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1744*4882a593Smuzhiyun 			cm_info->remote_ip[i] =
1745*4882a593Smuzhiyun 			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1746*4882a593Smuzhiyun 		}
1747*4882a593Smuzhiyun 		cm_info->ip_version = QED_TCP_IPV6;
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 		ip_hlen = sizeof(*ip6h);
1750*4882a593Smuzhiyun 		*payload_len = ntohs(ip6h->payload_len);
1751*4882a593Smuzhiyun 	} else {
1752*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1753*4882a593Smuzhiyun 		return -EINVAL;
1754*4882a593Smuzhiyun 	}
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun 	if (!tcph->syn) {
1759*4882a593Smuzhiyun 		DP_NOTICE(p_hwfn,
1760*4882a593Smuzhiyun 			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1761*4882a593Smuzhiyun 			  iph->ihl, tcph->source, tcph->dest);
1762*4882a593Smuzhiyun 		return -EINVAL;
1763*4882a593Smuzhiyun 	}
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	cm_info->local_port = ntohs(tcph->dest);
1766*4882a593Smuzhiyun 	cm_info->remote_port = ntohs(tcph->source);
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	*tcp_start_offset = eth_hlen + ip_hlen;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	return 0;
1773*4882a593Smuzhiyun }
1774*4882a593Smuzhiyun 
/* Map an iwarp @cid to its partial-fpdu tracking slot.
 *
 * Return: the slot in iwarp_info->partial_fpdus, or NULL if the cid is
 * outside the allocated range.
 */
static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
						      u16 cid)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	u32 idx;

	/* cids are offset by the protocol's first cid */
	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
	if (idx >= iwarp_info->max_num_partial_fpdus) {
		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
		       iwarp_info->max_num_partial_fpdus);
		return NULL;
	}

	return &iwarp_info->partial_fpdus[idx];
}
1793*4882a593Smuzhiyun 
/* Classification of a received MPA packet relative to fpdu alignment
 * (see qed_iwarp_mpa_classify()).
 */
enum qed_iwarp_mpa_pkt_type {
	QED_IWARP_MPA_PKT_PACKED,	/* whole fpdu fits in this tcp payload */
	QED_IWARP_MPA_PKT_PARTIAL,	/* fpdu continues in a later packet */
	QED_IWARP_MPA_PKT_UNALIGNED	/* continuation of a pending fpdu */
};

/* Marker used in fpdu->incomplete_bytes when only the first length byte
 * of the MPA header has been seen (see qed_iwarp_init_fpdu()).
 */
#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)

/* Pad to multiple of 4 */
#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
/* Wire size of an fpdu: 2-byte MPA length + payload, padded to 4 bytes,
 * plus the trailing 4-byte CRC32 digest.
 */
#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
					 QED_IWARP_MPA_CRC32_DIGEST_SIZE)

/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
#define QED_IWARP_MAX_BDS_PER_FPDU 3

/* Debug-log names, indexed by enum qed_iwarp_mpa_pkt_type */
static const char * const pkt_type_str[] = {
	"QED_IWARP_MPA_PKT_PACKED",
	"QED_IWARP_MPA_PKT_PARTIAL",
	"QED_IWARP_MPA_PKT_UNALIGNED"
};

static int
qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_fpdu *fpdu,
		      struct qed_iwarp_ll2_buff *buf);
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun static enum qed_iwarp_mpa_pkt_type
qed_iwarp_mpa_classify(struct qed_hwfn * p_hwfn,struct qed_iwarp_fpdu * fpdu,u16 tcp_payload_len,u8 * mpa_data)1826*4882a593Smuzhiyun qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1827*4882a593Smuzhiyun 		       struct qed_iwarp_fpdu *fpdu,
1828*4882a593Smuzhiyun 		       u16 tcp_payload_len, u8 *mpa_data)
1829*4882a593Smuzhiyun {
1830*4882a593Smuzhiyun 	enum qed_iwarp_mpa_pkt_type pkt_type;
1831*4882a593Smuzhiyun 	u16 mpa_len;
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	if (fpdu->incomplete_bytes) {
1834*4882a593Smuzhiyun 		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1835*4882a593Smuzhiyun 		goto out;
1836*4882a593Smuzhiyun 	}
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 	/* special case of one byte remaining...
1839*4882a593Smuzhiyun 	 * lower byte will be read next packet
1840*4882a593Smuzhiyun 	 */
1841*4882a593Smuzhiyun 	if (tcp_payload_len == 1) {
1842*4882a593Smuzhiyun 		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1843*4882a593Smuzhiyun 		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1844*4882a593Smuzhiyun 		goto out;
1845*4882a593Smuzhiyun 	}
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 	mpa_len = ntohs(*(__force __be16 *)mpa_data);
1848*4882a593Smuzhiyun 	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	if (fpdu->fpdu_length <= tcp_payload_len)
1851*4882a593Smuzhiyun 		pkt_type = QED_IWARP_MPA_PKT_PACKED;
1852*4882a593Smuzhiyun 	else
1853*4882a593Smuzhiyun 		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun out:
1856*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1857*4882a593Smuzhiyun 		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1858*4882a593Smuzhiyun 		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	return pkt_type;
1861*4882a593Smuzhiyun }
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun static void
qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff * buf,struct qed_iwarp_fpdu * fpdu,struct unaligned_opaque_data * pkt_data,u16 tcp_payload_size,u8 placement_offset)1864*4882a593Smuzhiyun qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1865*4882a593Smuzhiyun 		    struct qed_iwarp_fpdu *fpdu,
1866*4882a593Smuzhiyun 		    struct unaligned_opaque_data *pkt_data,
1867*4882a593Smuzhiyun 		    u16 tcp_payload_size, u8 placement_offset)
1868*4882a593Smuzhiyun {
1869*4882a593Smuzhiyun 	u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	fpdu->mpa_buf = buf;
1872*4882a593Smuzhiyun 	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1873*4882a593Smuzhiyun 	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1874*4882a593Smuzhiyun 	fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset;
1875*4882a593Smuzhiyun 	fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset;
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	if (tcp_payload_size == 1)
1878*4882a593Smuzhiyun 		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1879*4882a593Smuzhiyun 	else if (tcp_payload_size < fpdu->fpdu_length)
1880*4882a593Smuzhiyun 		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1881*4882a593Smuzhiyun 	else
1882*4882a593Smuzhiyun 		fpdu->incomplete_bytes = 0;	/* complete fpdu */
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1885*4882a593Smuzhiyun }
1886*4882a593Smuzhiyun 
/* Gather a split FPDU into one contiguous buffer.
 *
 * Copies the partial FPDU accumulated so far (fpdu->mpa_frag_virt,
 * fpdu->mpa_frag_len bytes) and the newly arrived TCP payload in @buf
 * back-to-back into @buf, recycling the previously held buffer through
 * the Tx drop queue first. An intermediate buffer is used because the
 * source and destination regions inside @buf may overlap.
 *
 * Returns 0 on success; -EINVAL if @buf cannot hold the merged data, or
 * the error from qed_iwarp_recycle_pkt() - in which case @buf is left
 * untouched so the packet can be re-processed later.
 */
static int
qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
		 struct qed_iwarp_fpdu *fpdu,
		 struct unaligned_opaque_data *pkt_data,
		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
{
	u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
	int rc;

	/* need to copy the data from the partial packet stored in fpdu
	 * to the new buf, for this we also need to move the data currently
	 * placed on the buf. The assumption is that the buffer is big enough
	 * since fpdu_length <= mss, we use an intermediate buffer since
	 * we may need to copy the new data to an overlapping location
	 */
	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
		DP_ERR(p_hwfn,
		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
		       buf->buff_size, fpdu->mpa_frag_len,
		       tcp_payload_size, fpdu->incomplete_bytes);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
		   (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);

	/* Stage both pieces in the scratch buffer before touching @buf */
	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
	memcpy(tmp_buf + fpdu->mpa_frag_len,
	       (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);

	/* The old buffer must be safely returned (FW done with it) before
	 * we commit to overwriting @buf below.
	 */
	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
	if (rc)
		return rc;

	/* If we managed to post the buffer copy the data to the new buffer
	 * o/w this will occur in the next round...
	 */
	memcpy((u8 *)(buf->data), tmp_buf,
	       fpdu->mpa_frag_len + tcp_payload_size);

	fpdu->mpa_buf = buf;
	/* fpdu->pkt_hdr remains as is */
	/* fpdu->mpa_frag is overridden with new buf */
	fpdu->mpa_frag = buf->data_phys_addr;
	fpdu->mpa_frag_virt = buf->data;
	fpdu->mpa_frag_len += tcp_payload_size;

	fpdu->incomplete_bytes -= tcp_payload_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
		   fpdu->incomplete_bytes);

	return 0;
}
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun static void
qed_iwarp_update_fpdu_length(struct qed_hwfn * p_hwfn,struct qed_iwarp_fpdu * fpdu,u8 * mpa_data)1949*4882a593Smuzhiyun qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1950*4882a593Smuzhiyun 			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1951*4882a593Smuzhiyun {
1952*4882a593Smuzhiyun 	u16 mpa_len;
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	/* Update incomplete packets if needed */
1955*4882a593Smuzhiyun 	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1956*4882a593Smuzhiyun 		/* Missing lower byte is now available */
1957*4882a593Smuzhiyun 		mpa_len = fpdu->fpdu_length | *mpa_data;
1958*4882a593Smuzhiyun 		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1959*4882a593Smuzhiyun 		/* one byte of hdr */
1960*4882a593Smuzhiyun 		fpdu->mpa_frag_len = 1;
1961*4882a593Smuzhiyun 		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1962*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn,
1963*4882a593Smuzhiyun 			   QED_MSG_RDMA,
1964*4882a593Smuzhiyun 			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1965*4882a593Smuzhiyun 			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1966*4882a593Smuzhiyun 	}
1967*4882a593Smuzhiyun }
1968*4882a593Smuzhiyun 
/* Extracts the PKT_REACHED_WIN_RIGHT_EDGE flag from the packet's
 * unaligned opaque data - set when this packet reached the right edge
 * of the TCP receive window.
 */
#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
	(GET_FIELD((_curr_pkt)->flags,	   \
		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun /* This function is used to recycle a buffer using the ll2 drop option. It
1974*4882a593Smuzhiyun  * uses the mechanism to ensure that all buffers posted to tx before this one
1975*4882a593Smuzhiyun  * were completed. The buffer sent here will be sent as a cookie in the tx
1976*4882a593Smuzhiyun  * completion function and can then be reposted to rx chain when done. The flow
1977*4882a593Smuzhiyun  * that requires this is the flow where a FPDU splits over more than 3 tcp
1978*4882a593Smuzhiyun  * segments. In this case the driver needs to re-post a rx buffer instead of
1979*4882a593Smuzhiyun  * the one received, but driver can't simply repost a buffer it copied from
1980*4882a593Smuzhiyun  * as there is a case where the buffer was originally a packed FPDU, and is
1981*4882a593Smuzhiyun  * partially posted to FW. Driver needs to ensure FW is done with it.
1982*4882a593Smuzhiyun  */
1983*4882a593Smuzhiyun static int
qed_iwarp_recycle_pkt(struct qed_hwfn * p_hwfn,struct qed_iwarp_fpdu * fpdu,struct qed_iwarp_ll2_buff * buf)1984*4882a593Smuzhiyun qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1985*4882a593Smuzhiyun 		      struct qed_iwarp_fpdu *fpdu,
1986*4882a593Smuzhiyun 		      struct qed_iwarp_ll2_buff *buf)
1987*4882a593Smuzhiyun {
1988*4882a593Smuzhiyun 	struct qed_ll2_tx_pkt_info tx_pkt;
1989*4882a593Smuzhiyun 	u8 ll2_handle;
1990*4882a593Smuzhiyun 	int rc;
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 	memset(&tx_pkt, 0, sizeof(tx_pkt));
1993*4882a593Smuzhiyun 	tx_pkt.num_of_bds = 1;
1994*4882a593Smuzhiyun 	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1995*4882a593Smuzhiyun 	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1996*4882a593Smuzhiyun 	tx_pkt.first_frag = fpdu->pkt_hdr;
1997*4882a593Smuzhiyun 	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1998*4882a593Smuzhiyun 	buf->piggy_buf = NULL;
1999*4882a593Smuzhiyun 	tx_pkt.cookie = buf;
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2002*4882a593Smuzhiyun 
2003*4882a593Smuzhiyun 	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2004*4882a593Smuzhiyun 	if (rc)
2005*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2006*4882a593Smuzhiyun 			   "Can't drop packet rc=%d\n", rc);
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn,
2009*4882a593Smuzhiyun 		   QED_MSG_RDMA,
2010*4882a593Smuzhiyun 		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2011*4882a593Smuzhiyun 		   (unsigned long int)tx_pkt.first_frag,
2012*4882a593Smuzhiyun 		   tx_pkt.first_frag_len, buf, rc);
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 	return rc;
2015*4882a593Smuzhiyun }
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun static int
qed_iwarp_win_right_edge(struct qed_hwfn * p_hwfn,struct qed_iwarp_fpdu * fpdu)2018*4882a593Smuzhiyun qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2019*4882a593Smuzhiyun {
2020*4882a593Smuzhiyun 	struct qed_ll2_tx_pkt_info tx_pkt;
2021*4882a593Smuzhiyun 	u8 ll2_handle;
2022*4882a593Smuzhiyun 	int rc;
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 	memset(&tx_pkt, 0, sizeof(tx_pkt));
2025*4882a593Smuzhiyun 	tx_pkt.num_of_bds = 1;
2026*4882a593Smuzhiyun 	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2027*4882a593Smuzhiyun 	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	tx_pkt.first_frag = fpdu->pkt_hdr;
2030*4882a593Smuzhiyun 	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2031*4882a593Smuzhiyun 	tx_pkt.enable_ip_cksum = true;
2032*4882a593Smuzhiyun 	tx_pkt.enable_l4_cksum = true;
2033*4882a593Smuzhiyun 	tx_pkt.calc_ip_len = true;
2034*4882a593Smuzhiyun 	/* vlan overload with enum iwarp_ll2_tx_queues */
2035*4882a593Smuzhiyun 	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun 	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2040*4882a593Smuzhiyun 	if (rc)
2041*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2042*4882a593Smuzhiyun 			   "Can't send right edge rc=%d\n", rc);
2043*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn,
2044*4882a593Smuzhiyun 		   QED_MSG_RDMA,
2045*4882a593Smuzhiyun 		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2046*4882a593Smuzhiyun 		   tx_pkt.num_of_bds,
2047*4882a593Smuzhiyun 		   (unsigned long int)tx_pkt.first_frag,
2048*4882a593Smuzhiyun 		   tx_pkt.first_frag_len, rc);
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 	return rc;
2051*4882a593Smuzhiyun }
2052*4882a593Smuzhiyun 
/* Post an aligned FPDU back to FW via the aligned LL2 Tx queue.
 *
 * The packet is built from up to three BDs: the L2..L4 header
 * (fpdu->pkt_hdr), the first (possibly only) MPA fragment, and - for an
 * unaligned FPDU - the remainder residing in @buf. Buffer release is
 * arranged through the Tx cookie; when the new buffer is fully consumed
 * by this FPDU it is chained via piggy_buf so both are released on the
 * same completion.
 *
 * Returns 0 on success or the error from the LL2 prepare/fragment calls.
 */
static int
qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
		    struct qed_iwarp_fpdu *fpdu,
		    struct unaligned_opaque_data *curr_pkt,
		    struct qed_iwarp_ll2_buff *buf,
		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	u16 first_mpa_offset;
	u8 ll2_handle;
	int rc;

	memset(&tx_pkt, 0, sizeof(tx_pkt));

	/* An unaligned packet means it's split over two tcp segments. So the
	 * complete packet requires 3 bds, one for the header, one for the
	 * part of the fpdu of the first tcp segment, and the last fragment
	 * will point to the remainder of the fpdu. A packed pdu, requires only
	 * two bds, one for the header and one for the data.
	 */
	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */

	/* Send the mpa_buf only with the last fpdu (in case of packed) */
	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
	    tcp_payload_size <= fpdu->fpdu_length)
		tx_pkt.cookie = fpdu->mpa_buf;

	tx_pkt.first_frag = fpdu->pkt_hdr;
	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
	tx_pkt.enable_ip_cksum = true;
	tx_pkt.enable_l4_cksum = true;
	tx_pkt.calc_ip_len = true;
	/* vlan overload with enum iwarp_ll2_tx_queues */
	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;

	/* special case of unaligned packet and not packed, need to send
	 * both buffers as cookie to release.
	 */
	if (tcp_payload_size == fpdu->incomplete_bytes)
		fpdu->mpa_buf->piggy_buf = buf;

	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;

	/* Set first fragment to header */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		goto out;

	/* Set second fragment to first part of packet */
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
					       fpdu->mpa_frag,
					       fpdu->mpa_frag_len);
	if (rc)
		goto out;

	/* A fully packed FPDU needs no third fragment */
	if (!fpdu->incomplete_bytes)
		goto out;

	first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);

	/* Set third fragment to second part of the packet */
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
					       ll2_handle,
					       buf->data_phys_addr +
					       first_mpa_offset,
					       fpdu->incomplete_bytes);
out:
	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
		   tx_pkt.num_of_bds,
		   tx_pkt.first_frag_len,
		   fpdu->mpa_frag_len,
		   fpdu->incomplete_bytes, rc);

	return rc;
}
2132*4882a593Smuzhiyun 
/* Reconstruct the unaligned_opaque_data descriptor from the two 32-bit
 * opaque words of an LL2 Rx completion, then advance first_mpa_offset by
 * the TCP payload offset so it points at the first MPA byte.
 *
 * NOTE(review): the words are run through cpu_to_le32() before being
 * overlaid with the struct layout - presumably the struct fields are
 * little-endian FW format (they are read with le16_to_cpu() elsewhere);
 * confirm against the LL2 completion path before changing.
 */
static void
qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
		       struct unaligned_opaque_data *curr_pkt,
		       u32 opaque_data0, u32 opaque_data1)
{
	u64 opaque_data;

	opaque_data = HILO_64(cpu_to_le32(opaque_data1),
			      cpu_to_le32(opaque_data0));
	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);

	le16_add_cpu(&curr_pkt->first_mpa_offset,
		     curr_pkt->tcp_payload_offset);
}
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun /* This function is called when an unaligned or incomplete MPA packet arrives
2149*4882a593Smuzhiyun  * driver needs to align the packet, perhaps using previous data and send
2150*4882a593Smuzhiyun  * it down to FW once it is aligned.
2151*4882a593Smuzhiyun  */
static int
qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
{
	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
	enum qed_iwarp_mpa_pkt_type pkt_type;
	struct qed_iwarp_fpdu *fpdu;
	u16 cid, first_mpa_offset;
	int rc = -EINVAL;
	u8 *mpa_data;

	cid = le32_to_cpu(curr_pkt->cid);

	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
	if (!fpdu) { /* something corrupt with cid, post rx back */
		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
		       cid);
		goto err;
	}

	/* One TCP segment may contain several FPDUs - classify and handle
	 * them one at a time until the payload is consumed or an error
	 * requires deferring the rest.
	 */
	do {
		first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
		mpa_data = ((u8 *)(buf->data) + first_mpa_offset);

		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
						  mpa_buf->tcp_payload_len,
						  mpa_data);

		switch (pkt_type) {
		case QED_IWARP_MPA_PKT_PARTIAL:
			/* FPDU starts here but does not end in this segment;
			 * stash its state and wait for the remainder.
			 */
			qed_iwarp_init_fpdu(buf, fpdu,
					    curr_pkt,
					    mpa_buf->tcp_payload_len,
					    mpa_buf->placement_offset);

			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
				mpa_buf->tcp_payload_len = 0;
				break;
			}

			/* Window right edge reached - notify FW with a
			 * header-only loopback packet.
			 */
			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);

			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:reset rc=%d\n", rc);
				memset(fpdu, 0, sizeof(*fpdu));
				break;
			}

			mpa_buf->tcp_payload_len = 0;
			break;
		case QED_IWARP_MPA_PKT_PACKED:
			/* Whole FPDU is inside this segment - send it as-is
			 * and advance past it.
			 */
			qed_iwarp_init_fpdu(buf, fpdu,
					    curr_pkt,
					    mpa_buf->tcp_payload_len,
					    mpa_buf->placement_offset);

			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
						 mpa_buf->tcp_payload_len,
						 pkt_type);
			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:reset rc=%d\n", rc);
				memset(fpdu, 0, sizeof(*fpdu));
				break;
			}

			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
			le16_add_cpu(&curr_pkt->first_mpa_offset,
				     fpdu->fpdu_length);
			break;
		case QED_IWARP_MPA_PKT_UNALIGNED:
			/* Continuation of an FPDU begun earlier */
			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
				/* special handling of fpdu split over more
				 * than 2 segments
				 */
				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
					rc = qed_iwarp_win_right_edge(p_hwfn,
								      fpdu);
					/* packet will be re-processed later */
					if (rc)
						return rc;
				}

				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
						      buf,
						      mpa_buf->tcp_payload_len);
				if (rc) /* packet will be re-processed later */
					return rc;

				mpa_buf->tcp_payload_len = 0;
				break;
			}

			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
						 mpa_buf->tcp_payload_len,
						 pkt_type);
			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:delay rc=%d\n", rc);
				/* don't reset fpdu -> we need it for next
				 * classify
				 */
				break;
			}

			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
			le16_add_cpu(&curr_pkt->first_mpa_offset,
				     fpdu->incomplete_bytes);

			/* The framed PDU was sent - no more incomplete bytes */
			fpdu->incomplete_bytes = 0;
			break;
		}
	} while (mpa_buf->tcp_payload_len && !rc);

	return rc;

err:
	qed_iwarp_ll2_post_rx(p_hwfn,
			      buf,
			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
	return rc;
}
2278*4882a593Smuzhiyun 
qed_iwarp_process_pending_pkts(struct qed_hwfn * p_hwfn)2279*4882a593Smuzhiyun static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2280*4882a593Smuzhiyun {
2281*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2282*4882a593Smuzhiyun 	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2283*4882a593Smuzhiyun 	int rc;
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2286*4882a593Smuzhiyun 		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2287*4882a593Smuzhiyun 					   struct qed_iwarp_ll2_mpa_buf,
2288*4882a593Smuzhiyun 					   list_entry);
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 		/* busy means break and continue processing later, don't
2293*4882a593Smuzhiyun 		 * remove the buf from the pending list.
2294*4882a593Smuzhiyun 		 */
2295*4882a593Smuzhiyun 		if (rc == -EBUSY)
2296*4882a593Smuzhiyun 			break;
2297*4882a593Smuzhiyun 
2298*4882a593Smuzhiyun 		list_move_tail(&mpa_buf->list_entry,
2299*4882a593Smuzhiyun 			       &iwarp_info->mpa_buf_list);
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun 		if (rc) {	/* different error, don't continue */
2302*4882a593Smuzhiyun 			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2303*4882a593Smuzhiyun 			break;
2304*4882a593Smuzhiyun 		}
2305*4882a593Smuzhiyun 	}
2306*4882a593Smuzhiyun }
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun static void
qed_iwarp_ll2_comp_mpa_pkt(void * cxt,struct qed_ll2_comp_rx_data * data)2309*4882a593Smuzhiyun qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2310*4882a593Smuzhiyun {
2311*4882a593Smuzhiyun 	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2312*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info;
2313*4882a593Smuzhiyun 	struct qed_hwfn *p_hwfn = cxt;
2314*4882a593Smuzhiyun 	u16 first_mpa_offset;
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2317*4882a593Smuzhiyun 	mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
2318*4882a593Smuzhiyun 				   struct qed_iwarp_ll2_mpa_buf, list_entry);
2319*4882a593Smuzhiyun 	if (!mpa_buf) {
2320*4882a593Smuzhiyun 		DP_ERR(p_hwfn, "No free mpa buf\n");
2321*4882a593Smuzhiyun 		goto err;
2322*4882a593Smuzhiyun 	}
2323*4882a593Smuzhiyun 
2324*4882a593Smuzhiyun 	list_del(&mpa_buf->list_entry);
2325*4882a593Smuzhiyun 	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2326*4882a593Smuzhiyun 			       data->opaque_data_0, data->opaque_data_1);
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun 	first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset);
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn,
2331*4882a593Smuzhiyun 		   QED_MSG_RDMA,
2332*4882a593Smuzhiyun 		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2333*4882a593Smuzhiyun 		   data->length.packet_length, first_mpa_offset,
2334*4882a593Smuzhiyun 		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2335*4882a593Smuzhiyun 		   mpa_buf->data.cid);
2336*4882a593Smuzhiyun 
2337*4882a593Smuzhiyun 	mpa_buf->ll2_buf = data->cookie;
2338*4882a593Smuzhiyun 	mpa_buf->tcp_payload_len = data->length.packet_length -
2339*4882a593Smuzhiyun 				   first_mpa_offset;
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun 	first_mpa_offset += data->u.placement_offset;
2342*4882a593Smuzhiyun 	mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset);
2343*4882a593Smuzhiyun 	mpa_buf->placement_offset = data->u.placement_offset;
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun 	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2346*4882a593Smuzhiyun 
2347*4882a593Smuzhiyun 	qed_iwarp_process_pending_pkts(p_hwfn);
2348*4882a593Smuzhiyun 	return;
2349*4882a593Smuzhiyun err:
2350*4882a593Smuzhiyun 	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2351*4882a593Smuzhiyun 			      iwarp_info->ll2_mpa_handle);
2352*4882a593Smuzhiyun }
2353*4882a593Smuzhiyun 
/* LL2 Rx completion handler for the SYN queue.
 *
 * Parses the received SYN, looks up a listener for its 4-tuple (+vlan)
 * and, when one exists, allocates an ep and offloads the TCP connection
 * to FW. A SYN with no matching listener is bounced back to the chip via
 * loopback Tx; on any error the Rx buffer is reposted.
 */
static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_buff *buf = data->cookie;
	struct qed_iwarp_listener *listener;
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_iwarp_cm_info cm_info;
	struct qed_hwfn *p_hwfn = cxt;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
	struct qed_iwarp_ep *ep;
	int tcp_start_offset;
	u8 ll2_syn_handle;
	int payload_len;
	u32 hdr_size;
	int rc;

	memset(&cm_info, 0, sizeof(cm_info));
	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	/* Check if packet was received with errors... */
	if (data->err_flags) {
		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
			  data->err_flags);
		goto err;
	}

	/* Reject only if the L4 checksum was actually computed and failed */
	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	/* Extract the 4-tuple, MACs and TCP header position from the frame */
	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
	if (rc)
		goto err;

	/* Check if there is a listener for this 4-tuple+vlan */
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
	if (!listener) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
			   data->parse_flags, data->length.packet_length);

		/* No listener - loop the SYN back to the chip; the Tx
		 * completion will repost the buffer (cookie).
		 */
		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		tx_pkt.first_frag = buf->data_phys_addr +
				    data->u.placement_offset;
		tx_pkt.first_frag_len = data->length.packet_length;
		tx_pkt.cookie = buf;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);

		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Can't post SYN back to chip rc=%d\n", rc);
			goto err;
		}
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
	/* There may be an open ep on this connection if this is a syn
	 * retrasnmit... need to make sure there isn't...
	 */
	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
		goto err;

	ep = qed_iwarp_get_free_ep(p_hwfn);
	if (!ep)
		goto err;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, local_mac_addr);

	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));

	/* MSS = MTU minus fixed IPv4/IPv6 + TCP header sizes, capped by FW */
	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = listener->event_cb;
	ep->cb_context = listener->cb_context;
	ep->connect_mode = TCP_CONNECT_PASSIVE;

	/* Keep the SYN buffer alive on the ep until offload completes */
	ep->syn = buf;
	ep->syn_ip_payload_length = (u16)payload_len;
	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
			   tcp_start_offset;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
	if (rc) {
		qed_iwarp_return_ep(p_hwfn, ep);
		goto err;
	}

	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
}
2466*4882a593Smuzhiyun 
/* LL2 rx-buffer release callback: free the DMA region and its descriptor. */
static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_hwfn *hwfn = cxt;
	struct qed_iwarp_ll2_buff *buf = cookie;

	dma_free_coherent(&hwfn->cdev->pdev->dev, buf->buff_size, buf->data,
			  buf->data_phys_addr);
	kfree(buf);
}
2478*4882a593Smuzhiyun 
/* LL2 tx-completion callback.
 *
 * The transmitted buffer was originally an rx buffer, so recycle it - and
 * any piggybacked buffer attached to it - back to the rx chain instead of
 * freeing it.  For the mpa connection, a completed tx may free up room for
 * packets that were queued as pending, so process those.
 */
static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_iwarp_ll2_buff *piggy;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer)		/* can happen in packed mpa unaligned... */
		return;

	/* this was originally an rx packet, post it back */
	piggy = buffer->piggy_buf;
	if (piggy) {
		buffer->piggy_buf = NULL;
		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
	}

	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);

	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
		qed_iwarp_process_pending_pkts(p_hwfn);
	/* removed redundant 'return;' at end of void function */
}
2504*4882a593Smuzhiyun 
/* LL2 tx-buffer release callback: free the buffer and, if present, the
 * piggybacked buffer chained to it.
 */
static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buf = cookie;
	struct qed_iwarp_ll2_buff *piggy;
	struct qed_hwfn *hwfn = cxt;

	if (!buf)
		return;

	piggy = buf->piggy_buf;
	if (piggy) {
		dma_free_coherent(&hwfn->cdev->pdev->dev, piggy->buff_size,
				  piggy->data, piggy->data_phys_addr);
		kfree(piggy);
	}

	dma_free_coherent(&hwfn->cdev->pdev->dev, buf->buff_size,
			  buf->data, buf->data_phys_addr);
	kfree(buf);
}
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun /* The only slowpath for iwarp ll2 is unalign flush. When this completion
2531*4882a593Smuzhiyun  * is received, need to reset the FPDU.
2532*4882a593Smuzhiyun  */
2533*4882a593Smuzhiyun static void
qed_iwarp_ll2_slowpath(void * cxt,u8 connection_handle,u32 opaque_data_0,u32 opaque_data_1)2534*4882a593Smuzhiyun qed_iwarp_ll2_slowpath(void *cxt,
2535*4882a593Smuzhiyun 		       u8 connection_handle,
2536*4882a593Smuzhiyun 		       u32 opaque_data_0, u32 opaque_data_1)
2537*4882a593Smuzhiyun {
2538*4882a593Smuzhiyun 	struct unaligned_opaque_data unalign_data;
2539*4882a593Smuzhiyun 	struct qed_hwfn *p_hwfn = cxt;
2540*4882a593Smuzhiyun 	struct qed_iwarp_fpdu *fpdu;
2541*4882a593Smuzhiyun 	u32 cid;
2542*4882a593Smuzhiyun 
2543*4882a593Smuzhiyun 	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2544*4882a593Smuzhiyun 			       opaque_data_0, opaque_data_1);
2545*4882a593Smuzhiyun 
2546*4882a593Smuzhiyun 	cid = le32_to_cpu(unalign_data.cid);
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid);
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2551*4882a593Smuzhiyun 	if (fpdu)
2552*4882a593Smuzhiyun 		memset(fpdu, 0, sizeof(*fpdu));
2553*4882a593Smuzhiyun }
2554*4882a593Smuzhiyun 
qed_iwarp_ll2_stop(struct qed_hwfn * p_hwfn)2555*4882a593Smuzhiyun static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
2556*4882a593Smuzhiyun {
2557*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2558*4882a593Smuzhiyun 	int rc = 0;
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2561*4882a593Smuzhiyun 		rc = qed_ll2_terminate_connection(p_hwfn,
2562*4882a593Smuzhiyun 						  iwarp_info->ll2_syn_handle);
2563*4882a593Smuzhiyun 		if (rc)
2564*4882a593Smuzhiyun 			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2567*4882a593Smuzhiyun 		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2568*4882a593Smuzhiyun 	}
2569*4882a593Smuzhiyun 
2570*4882a593Smuzhiyun 	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2571*4882a593Smuzhiyun 		rc = qed_ll2_terminate_connection(p_hwfn,
2572*4882a593Smuzhiyun 						  iwarp_info->ll2_ooo_handle);
2573*4882a593Smuzhiyun 		if (rc)
2574*4882a593Smuzhiyun 			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun 		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2577*4882a593Smuzhiyun 		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2578*4882a593Smuzhiyun 	}
2579*4882a593Smuzhiyun 
2580*4882a593Smuzhiyun 	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2581*4882a593Smuzhiyun 		rc = qed_ll2_terminate_connection(p_hwfn,
2582*4882a593Smuzhiyun 						  iwarp_info->ll2_mpa_handle);
2583*4882a593Smuzhiyun 		if (rc)
2584*4882a593Smuzhiyun 			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun 		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2587*4882a593Smuzhiyun 		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2588*4882a593Smuzhiyun 	}
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun 	qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
2591*4882a593Smuzhiyun 				  p_hwfn->p_rdma_info->iwarp.mac_addr);
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun 	return rc;
2594*4882a593Smuzhiyun }
2595*4882a593Smuzhiyun 
2596*4882a593Smuzhiyun static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn * p_hwfn,int num_rx_bufs,int buff_size,u8 ll2_handle)2597*4882a593Smuzhiyun qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2598*4882a593Smuzhiyun 			    int num_rx_bufs, int buff_size, u8 ll2_handle)
2599*4882a593Smuzhiyun {
2600*4882a593Smuzhiyun 	struct qed_iwarp_ll2_buff *buffer;
2601*4882a593Smuzhiyun 	int rc = 0;
2602*4882a593Smuzhiyun 	int i;
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun 	for (i = 0; i < num_rx_bufs; i++) {
2605*4882a593Smuzhiyun 		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2606*4882a593Smuzhiyun 		if (!buffer) {
2607*4882a593Smuzhiyun 			rc = -ENOMEM;
2608*4882a593Smuzhiyun 			break;
2609*4882a593Smuzhiyun 		}
2610*4882a593Smuzhiyun 
2611*4882a593Smuzhiyun 		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2612*4882a593Smuzhiyun 						  buff_size,
2613*4882a593Smuzhiyun 						  &buffer->data_phys_addr,
2614*4882a593Smuzhiyun 						  GFP_KERNEL);
2615*4882a593Smuzhiyun 		if (!buffer->data) {
2616*4882a593Smuzhiyun 			kfree(buffer);
2617*4882a593Smuzhiyun 			rc = -ENOMEM;
2618*4882a593Smuzhiyun 			break;
2619*4882a593Smuzhiyun 		}
2620*4882a593Smuzhiyun 
2621*4882a593Smuzhiyun 		buffer->buff_size = buff_size;
2622*4882a593Smuzhiyun 		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2623*4882a593Smuzhiyun 		if (rc)
2624*4882a593Smuzhiyun 			/* buffers will be deallocated by qed_ll2 */
2625*4882a593Smuzhiyun 			break;
2626*4882a593Smuzhiyun 	}
2627*4882a593Smuzhiyun 	return rc;
2628*4882a593Smuzhiyun }
2629*4882a593Smuzhiyun 
/* Worst-case ll2 rx buffer size for a given MTU: MTU plus Ethernet header,
 * two VLAN tags, 2 bytes (presumably IP-header alignment - confirm) and one
 * cache line, rounded up to a cache-line multiple.
 */
#define QED_IWARP_MAX_BUF_SIZE(mtu)				     \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
		ETH_CACHE_LINE_SIZE)
2633*4882a593Smuzhiyun 
/* Bring up the three iwarp ll2 connections - SYN interception, OOO and
 * unaligned MPA - plus the rx buffers and fpdu bookkeeping they need.
 * The order matters: each stage is only reachable once the previous one
 * succeeded, and the 'err' path relies on qed_iwarp_ll2_stop() skipping
 * connections whose handle is still QED_IWARP_HANDLE_INVAL.
 *
 * @p_hwfn:	     HW function context.
 * @params:	     rdma start parameters (mtu, mac address).
 * @rcv_wnd_size:    TCP receive window size, used to size the OOO buffers.
 *
 * Return: 0 on success, negative errno otherwise (everything already
 * started is torn down before returning).
 */
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    u32 rcv_wnd_size)
{
	struct qed_iwarp_info *iwarp_info;
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	u32 buff_size;
	u16 n_ooo_bufs;
	int rc = 0;
	int i;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	/* Invalidate all handles up front so a partial failure can be
	 * cleaned up by qed_iwarp_ll2_stop().
	 */
	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;

	iwarp_info->max_mtu = params->max_mtu;

	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
	if (rc)
		return rc;

	/* Start SYN connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
	cbs.slowpath_cb = NULL;
	cbs.cookie = p_hwfn;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	/* SYN will use ctx based queues */
	data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
	data.input.mtu = params->max_mtu;
	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		/* No connection came up yet - only the mac filter needs
		 * undoing here.
		 */
		qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
		goto err;
	}

	buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 QED_IWARP_LL2_SYN_RX_SIZE,
					 buff_size,
					 iwarp_info->ll2_syn_handle);
	if (rc)
		goto err;

	/* Start OOO connection */
	data.input.conn_type = QED_LL2_TYPE_OOO;
	/* OOO/unaligned will use legacy ll2 queues (ram based) */
	data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
	data.input.mtu = params->max_mtu;

	/* Size the OOO buffer count by how many MTU-sized frames fit in
	 * QED_IWARP_MAX_OOO receive windows, capped at the HW maximum.
	 */
	n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
		     iwarp_info->max_mtu;
	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);

	data.input.rx_num_desc = n_ooo_bufs;
	data.input.rx_num_ooo_buffers = n_ooo_bufs;

	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc)
		goto err;

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
	if (rc)
		goto err;

	/* Start Unaligned MPA connection - reuses the syn cbs with a
	 * different rx completion and a slowpath handler for fpdu flush.
	 */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = params->max_mtu;
	/* FW requires that once a packet arrives OOO, it must have at
	 * least 2 rx buffers available on the unaligned connection
	 * for handling the case that it is a partial fpdu.
	 */
	data.input.rx_num_desc = n_ooo_bufs * 2;
	data.input.tx_num_desc = data.input.rx_num_desc;
	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
	data.input.secondary_queue = true;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc)
		goto err;

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
	if (rc)
		goto err;

	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 data.input.rx_num_desc,
					 buff_size,
					 iwarp_info->ll2_mpa_handle);
	if (rc)
		goto err;

	/* One partial-fpdu tracking slot per QP. */
	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
					    sizeof(*iwarp_info->partial_fpdus),
					    GFP_KERNEL);
	if (!iwarp_info->partial_fpdus) {
		rc = -ENOMEM;
		goto err;
	}

	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;

	/* Scratch buffer used when realigning MPA packets. */
	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
	if (!iwarp_info->mpa_intermediate_buf) {
		rc = -ENOMEM;
		goto err;
	}

	/* The mpa_bufs array serves for pending RX packets received on the
	 * mpa ll2 that don't have place on the tx ring and require later
	 * processing. We can't fail on allocation of such a struct therefore
	 * we allocate enough to take care of all rx packets
	 */
	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
				       sizeof(*iwarp_info->mpa_bufs),
				       GFP_KERNEL);
	if (!iwarp_info->mpa_bufs) {
		rc = -ENOMEM;
		goto err;
	}

	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
	for (i = 0; i < data.input.rx_num_desc; i++)
		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
			      &iwarp_info->mpa_buf_list);
	return rc;
err:
	qed_iwarp_ll2_stop(p_hwfn);

	return rc;
}
2802*4882a593Smuzhiyun 
/* Default TCP receive window sizes, indexed by chip generation (enum
 * chip_ids: BB row first, then AH/K2); the 2-port vs 4-port column is
 * selected in qed_iwarp_setup() based on qed_device_num_ports().
 */
static struct {
	u32 two_ports;
	u32 four_ports;
} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
	{QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
	{QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
};
2810*4882a593Smuzhiyun 
qed_iwarp_setup(struct qed_hwfn * p_hwfn,struct qed_rdma_start_in_params * params)2811*4882a593Smuzhiyun int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
2812*4882a593Smuzhiyun 		    struct qed_rdma_start_in_params *params)
2813*4882a593Smuzhiyun {
2814*4882a593Smuzhiyun 	struct qed_dev *cdev = p_hwfn->cdev;
2815*4882a593Smuzhiyun 	struct qed_iwarp_info *iwarp_info;
2816*4882a593Smuzhiyun 	enum chip_ids chip_id;
2817*4882a593Smuzhiyun 	u32 rcv_wnd_size;
2818*4882a593Smuzhiyun 
2819*4882a593Smuzhiyun 	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun 	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2822*4882a593Smuzhiyun 
2823*4882a593Smuzhiyun 	chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
2824*4882a593Smuzhiyun 	rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
2825*4882a593Smuzhiyun 		qed_iwarp_rcv_wnd_size[chip_id].four_ports :
2826*4882a593Smuzhiyun 		qed_iwarp_rcv_wnd_size[chip_id].two_ports;
2827*4882a593Smuzhiyun 
2828*4882a593Smuzhiyun 	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
2829*4882a593Smuzhiyun 	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2830*4882a593Smuzhiyun 	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2831*4882a593Smuzhiyun 	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
2832*4882a593Smuzhiyun 	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2833*4882a593Smuzhiyun 	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2834*4882a593Smuzhiyun 
2835*4882a593Smuzhiyun 	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2836*4882a593Smuzhiyun 
2837*4882a593Smuzhiyun 	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2838*4882a593Smuzhiyun 				MPA_RTR_TYPE_ZERO_WRITE |
2839*4882a593Smuzhiyun 				MPA_RTR_TYPE_ZERO_READ;
2840*4882a593Smuzhiyun 
2841*4882a593Smuzhiyun 	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2842*4882a593Smuzhiyun 	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2843*4882a593Smuzhiyun 	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun 	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2846*4882a593Smuzhiyun 				  qed_iwarp_async_event);
2847*4882a593Smuzhiyun 	qed_ooo_setup(p_hwfn);
2848*4882a593Smuzhiyun 
2849*4882a593Smuzhiyun 	return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
2850*4882a593Smuzhiyun }
2851*4882a593Smuzhiyun 
/* Stop iwarp: free preallocated eps, wait for all cids to drain, then take
 * down the ll2 connections.
 */
int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
{
	int rc;

	qed_iwarp_free_prealloc_ep(p_hwfn);

	rc = qed_iwarp_wait_for_all_cids(p_hwfn);

	return rc ? rc : qed_iwarp_ll2_stop(p_hwfn);
}
2863*4882a593Smuzhiyun 
/* Handle a QP-in-error async event: move the QP to ERROR, mark the ep
 * closed, unlink it from the ep list and deliver a CLOSE event upward.
 */
static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
				  struct qed_iwarp_ep *ep,
				  u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);

	params.event = QED_IWARP_EVENT_CLOSE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	/* GOOD_CLOSE reports success; any other code is surfaced as a
	 * connection reset.
	 */
	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
			 0 : -ECONNRESET;

	/* paired with READ_ONCE in destroy_qp */
	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&ep->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	/* Notify the upper layer only after the ep is unlinked. */
	ep->event_cb(ep->cb_context, &params);
}
2887*4882a593Smuzhiyun 
qed_iwarp_exception_received(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep,int fw_ret_code)2888*4882a593Smuzhiyun static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2889*4882a593Smuzhiyun 					 struct qed_iwarp_ep *ep,
2890*4882a593Smuzhiyun 					 int fw_ret_code)
2891*4882a593Smuzhiyun {
2892*4882a593Smuzhiyun 	struct qed_iwarp_cm_event_params params;
2893*4882a593Smuzhiyun 	bool event_cb = false;
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2896*4882a593Smuzhiyun 		   ep->cid, fw_ret_code);
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	switch (fw_ret_code) {
2899*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2900*4882a593Smuzhiyun 		params.status = 0;
2901*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_DISCONNECT;
2902*4882a593Smuzhiyun 		event_cb = true;
2903*4882a593Smuzhiyun 		break;
2904*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2905*4882a593Smuzhiyun 		params.status = -ECONNRESET;
2906*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_DISCONNECT;
2907*4882a593Smuzhiyun 		event_cb = true;
2908*4882a593Smuzhiyun 		break;
2909*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2910*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_RQ_EMPTY;
2911*4882a593Smuzhiyun 		event_cb = true;
2912*4882a593Smuzhiyun 		break;
2913*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2914*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_IRQ_FULL;
2915*4882a593Smuzhiyun 		event_cb = true;
2916*4882a593Smuzhiyun 		break;
2917*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2918*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2919*4882a593Smuzhiyun 		event_cb = true;
2920*4882a593Smuzhiyun 		break;
2921*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2922*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2923*4882a593Smuzhiyun 		event_cb = true;
2924*4882a593Smuzhiyun 		break;
2925*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2926*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2927*4882a593Smuzhiyun 		event_cb = true;
2928*4882a593Smuzhiyun 		break;
2929*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2930*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2931*4882a593Smuzhiyun 		event_cb = true;
2932*4882a593Smuzhiyun 		break;
2933*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2934*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2935*4882a593Smuzhiyun 		event_cb = true;
2936*4882a593Smuzhiyun 		break;
2937*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2938*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2939*4882a593Smuzhiyun 		event_cb = true;
2940*4882a593Smuzhiyun 		break;
2941*4882a593Smuzhiyun 	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2942*4882a593Smuzhiyun 		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2943*4882a593Smuzhiyun 		event_cb = true;
2944*4882a593Smuzhiyun 		break;
2945*4882a593Smuzhiyun 	default:
2946*4882a593Smuzhiyun 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2947*4882a593Smuzhiyun 			   "Unhandled exception received...fw_ret_code=%d\n",
2948*4882a593Smuzhiyun 			   fw_ret_code);
2949*4882a593Smuzhiyun 		break;
2950*4882a593Smuzhiyun 	}
2951*4882a593Smuzhiyun 
2952*4882a593Smuzhiyun 	if (event_cb) {
2953*4882a593Smuzhiyun 		params.ep_context = ep;
2954*4882a593Smuzhiyun 		params.cm_info = &ep->cm_info;
2955*4882a593Smuzhiyun 		ep->event_cb(ep->cb_context, &params);
2956*4882a593Smuzhiyun 	}
2957*4882a593Smuzhiyun }
2958*4882a593Smuzhiyun 
/* Handle a failed TCP connect for an ep.
 *
 * Maps the FW return code to an errno in params.status and marks the ep
 * closed.  Passive-side eps are simply recycled (the upper layer was never
 * told about them - see qed_iwarp_connect_complete); active-side eps get an
 * ACTIVE_COMPLETE event and are then unlinked from the ep list.
 */
static void
qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
				   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	memset(&params, 0, sizeof(params));
	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	/* paired with READ_ONCE in destroy_qp */
	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	switch (fw_return_code) {
	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP connect got invalid packet\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP Connection Reset\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	default:
		DP_ERR(p_hwfn,
		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
		       QED_IWARP_CONNECT_MODE_STRING(ep),
		       ep->tcp_cid, fw_return_code);
		params.status = -ECONNRESET;
		break;
	}

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		/* Passive side: recycle the ep without notifying anyone. */
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		qed_iwarp_return_ep(p_hwfn, ep);
	} else {
		/* Active side: report the failure, then unlink the ep. */
		ep->event_cb(ep->cb_context, &params);
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}
}
3019*4882a593Smuzhiyun 
3020*4882a593Smuzhiyun static void
qed_iwarp_connect_complete(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep,u8 fw_return_code)3021*4882a593Smuzhiyun qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
3022*4882a593Smuzhiyun 			   struct qed_iwarp_ep *ep, u8 fw_return_code)
3023*4882a593Smuzhiyun {
3024*4882a593Smuzhiyun 	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
3025*4882a593Smuzhiyun 
3026*4882a593Smuzhiyun 	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3027*4882a593Smuzhiyun 		/* Done with the SYN packet, post back to ll2 rx */
3028*4882a593Smuzhiyun 		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
3029*4882a593Smuzhiyun 
3030*4882a593Smuzhiyun 		ep->syn = NULL;
3031*4882a593Smuzhiyun 
3032*4882a593Smuzhiyun 		/* If connect failed - upper layer doesn't know about it */
3033*4882a593Smuzhiyun 		if (fw_return_code == RDMA_RETURN_OK)
3034*4882a593Smuzhiyun 			qed_iwarp_mpa_received(p_hwfn, ep);
3035*4882a593Smuzhiyun 		else
3036*4882a593Smuzhiyun 			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3037*4882a593Smuzhiyun 							   fw_return_code);
3038*4882a593Smuzhiyun 	} else {
3039*4882a593Smuzhiyun 		if (fw_return_code == RDMA_RETURN_OK)
3040*4882a593Smuzhiyun 			qed_iwarp_mpa_offload(p_hwfn, ep);
3041*4882a593Smuzhiyun 		else
3042*4882a593Smuzhiyun 			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3043*4882a593Smuzhiyun 							   fw_return_code);
3044*4882a593Smuzhiyun 	}
3045*4882a593Smuzhiyun }
3046*4882a593Smuzhiyun 
3047*4882a593Smuzhiyun static inline bool
qed_iwarp_check_ep_ok(struct qed_hwfn * p_hwfn,struct qed_iwarp_ep * ep)3048*4882a593Smuzhiyun qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
3049*4882a593Smuzhiyun {
3050*4882a593Smuzhiyun 	if (!ep || (ep->sig != QED_EP_SIG)) {
3051*4882a593Smuzhiyun 		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3052*4882a593Smuzhiyun 		return false;
3053*4882a593Smuzhiyun 	}
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 	return true;
3056*4882a593Smuzhiyun }
3057*4882a593Smuzhiyun 
/* Dispatch an async (event-ring) iWARP event from FW.
 *
 * @p_hwfn:	       HW function the event arrived on.
 * @fw_event_code:     IWARP_EVENT_TYPE_ASYNC_* identifier.
 * @echo:	       event-ring echo field (unused by iWARP events).
 * @data:	       event-ring payload; rdma_data.async_handle holds either
 *		       an ep pointer (connection events) or a cid/srq id in its
 *		       low dword (CID_CLEANED / SRQ events), per event type.
 * @fw_return_code:    FW status accompanying the event.
 *
 * Return: 0 on success, -EINVAL on a corrupt ep or an unknown event code.
 */
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				 __le16 echo, union event_ring_data *data,
				 u8 fw_return_code)
{
	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
	struct regpair *fw_handle = &data->rdma_data.async_handle;
	struct qed_iwarp_ep *ep = NULL;
	u16 srq_offset;
	u16 srq_id;
	u16 cid;

	/* For connection-level events the 64-bit handle is the ep pointer
	 * that the driver handed to FW at offload time.
	 */
	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
						       fw_handle->lo);

	switch (fw_event_code) {
	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
		/* Async completion after TCP 3-way handshake */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
			   ep->tcp_cid, fw_return_code);
		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
		/* Async completion for Close Connection ramrod */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
		/* Async event for active side only */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
		break;
	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
		/* Here the handle's low dword is a cid, not a pointer */
		cid = (u16)le32_to_cpu(fw_handle->lo);
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
		qed_iwarp_cid_cleaned(p_hwfn, cid);

		break;
	case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
		/* FW assigns value that is no greater than u16 */
		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
		events.affiliated_event(events.context,
					QED_IWARP_EVENT_SRQ_EMPTY,
					&srq_id);
		break;
	case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
		/* FW assigns value that is no greater than u16 */
		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
		events.affiliated_event(events.context,
					QED_IWARP_EVENT_SRQ_LIMIT,
					&srq_id);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");

		/* Use the local events copy, consistent with the SRQ cases
		 * above (it is a by-value snapshot of p_rdma_info->events).
		 */
		events.affiliated_event(events.context,
					QED_IWARP_EVENT_CQ_OVERFLOW,
					(void *)fw_handle);
		break;
	default:
		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
		       fw_event_code);
		return -EINVAL;
	}
	return 0;
}
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun int
qed_iwarp_create_listen(void * rdma_cxt,struct qed_iwarp_listen_in * iparams,struct qed_iwarp_listen_out * oparams)3163*4882a593Smuzhiyun qed_iwarp_create_listen(void *rdma_cxt,
3164*4882a593Smuzhiyun 			struct qed_iwarp_listen_in *iparams,
3165*4882a593Smuzhiyun 			struct qed_iwarp_listen_out *oparams)
3166*4882a593Smuzhiyun {
3167*4882a593Smuzhiyun 	struct qed_hwfn *p_hwfn = rdma_cxt;
3168*4882a593Smuzhiyun 	struct qed_iwarp_listener *listener;
3169*4882a593Smuzhiyun 
3170*4882a593Smuzhiyun 	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3171*4882a593Smuzhiyun 	if (!listener)
3172*4882a593Smuzhiyun 		return -ENOMEM;
3173*4882a593Smuzhiyun 
3174*4882a593Smuzhiyun 	listener->ip_version = iparams->ip_version;
3175*4882a593Smuzhiyun 	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3176*4882a593Smuzhiyun 	listener->port = iparams->port;
3177*4882a593Smuzhiyun 	listener->vlan = iparams->vlan;
3178*4882a593Smuzhiyun 
3179*4882a593Smuzhiyun 	listener->event_cb = iparams->event_cb;
3180*4882a593Smuzhiyun 	listener->cb_context = iparams->cb_context;
3181*4882a593Smuzhiyun 	listener->max_backlog = iparams->max_backlog;
3182*4882a593Smuzhiyun 	oparams->handle = listener;
3183*4882a593Smuzhiyun 
3184*4882a593Smuzhiyun 	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3185*4882a593Smuzhiyun 	list_add_tail(&listener->list_entry,
3186*4882a593Smuzhiyun 		      &p_hwfn->p_rdma_info->iwarp.listen_list);
3187*4882a593Smuzhiyun 	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3188*4882a593Smuzhiyun 
3189*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn,
3190*4882a593Smuzhiyun 		   QED_MSG_RDMA,
3191*4882a593Smuzhiyun 		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3192*4882a593Smuzhiyun 		   listener->event_cb,
3193*4882a593Smuzhiyun 		   listener,
3194*4882a593Smuzhiyun 		   listener->ip_addr[0],
3195*4882a593Smuzhiyun 		   listener->ip_addr[1],
3196*4882a593Smuzhiyun 		   listener->ip_addr[2],
3197*4882a593Smuzhiyun 		   listener->ip_addr[3], listener->port, listener->vlan);
3198*4882a593Smuzhiyun 
3199*4882a593Smuzhiyun 	return 0;
3200*4882a593Smuzhiyun }
3201*4882a593Smuzhiyun 
qed_iwarp_destroy_listen(void * rdma_cxt,void * handle)3202*4882a593Smuzhiyun int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3203*4882a593Smuzhiyun {
3204*4882a593Smuzhiyun 	struct qed_iwarp_listener *listener = handle;
3205*4882a593Smuzhiyun 	struct qed_hwfn *p_hwfn = rdma_cxt;
3206*4882a593Smuzhiyun 
3207*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3208*4882a593Smuzhiyun 
3209*4882a593Smuzhiyun 	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3210*4882a593Smuzhiyun 	list_del(&listener->list_entry);
3211*4882a593Smuzhiyun 	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3212*4882a593Smuzhiyun 
3213*4882a593Smuzhiyun 	kfree(listener);
3214*4882a593Smuzhiyun 
3215*4882a593Smuzhiyun 	return 0;
3216*4882a593Smuzhiyun }
3217*4882a593Smuzhiyun 
qed_iwarp_send_rtr(void * rdma_cxt,struct qed_iwarp_send_rtr_in * iparams)3218*4882a593Smuzhiyun int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3219*4882a593Smuzhiyun {
3220*4882a593Smuzhiyun 	struct qed_hwfn *p_hwfn = rdma_cxt;
3221*4882a593Smuzhiyun 	struct qed_sp_init_data init_data;
3222*4882a593Smuzhiyun 	struct qed_spq_entry *p_ent;
3223*4882a593Smuzhiyun 	struct qed_iwarp_ep *ep;
3224*4882a593Smuzhiyun 	struct qed_rdma_qp *qp;
3225*4882a593Smuzhiyun 	int rc;
3226*4882a593Smuzhiyun 
3227*4882a593Smuzhiyun 	ep = iparams->ep_context;
3228*4882a593Smuzhiyun 	if (!ep) {
3229*4882a593Smuzhiyun 		DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n");
3230*4882a593Smuzhiyun 		return -EINVAL;
3231*4882a593Smuzhiyun 	}
3232*4882a593Smuzhiyun 
3233*4882a593Smuzhiyun 	qp = ep->qp;
3234*4882a593Smuzhiyun 
3235*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3236*4882a593Smuzhiyun 		   qp->icid, ep->tcp_cid);
3237*4882a593Smuzhiyun 
3238*4882a593Smuzhiyun 	memset(&init_data, 0, sizeof(init_data));
3239*4882a593Smuzhiyun 	init_data.cid = qp->icid;
3240*4882a593Smuzhiyun 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3241*4882a593Smuzhiyun 	init_data.comp_mode = QED_SPQ_MODE_CB;
3242*4882a593Smuzhiyun 
3243*4882a593Smuzhiyun 	rc = qed_sp_init_request(p_hwfn, &p_ent,
3244*4882a593Smuzhiyun 				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3245*4882a593Smuzhiyun 				 PROTOCOLID_IWARP, &init_data);
3246*4882a593Smuzhiyun 
3247*4882a593Smuzhiyun 	if (rc)
3248*4882a593Smuzhiyun 		return rc;
3249*4882a593Smuzhiyun 
3250*4882a593Smuzhiyun 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
3251*4882a593Smuzhiyun 
3252*4882a593Smuzhiyun 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3253*4882a593Smuzhiyun 
3254*4882a593Smuzhiyun 	return rc;
3255*4882a593Smuzhiyun }
3256*4882a593Smuzhiyun 
/* Report the QP's current state to the caller, translated from the
 * internal iwarp_state into the RoCE/RDMA state enum upper layers expect.
 */
void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
		   struct qed_rdma_query_qp_out_params *out_params)
{
	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}
3263