/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"

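/*
 * Debug names for the c4iw_ep_state values; state_set() and friends
 * index this table when logging endpoint state transitions.
 */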
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

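/*
 * Reference-count helpers for the objects an endpoint pins.  Each
 * take/drop is also recorded in the endpoint's history/flags bitmaps
 * so cm_id and QP reference lifetimes can be audited from a dump of
 * ep->com.history.
 */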
static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

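/*
 * The endpoint timer holds its own reference on the ep:
 * start_ep_timer() takes it, and whoever clears the TIMEOUT flag
 * first drops it.  stop_ep_timer() returns 0 if it dropped the
 * reference here, or 1 if the timer already fired, in which case the
 * reference is expected to be dropped on the timeout path instead.
 */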
static void start_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p\n", ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p stopping\n", ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

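/*
 * Thin wrappers around the cxgb4 LLD transmit entry points.  Both
 * drop the skb and fail with -EIO if the device is in a fatal error
 * state; c4iw_l2t_send() additionally maps a NET_XMIT_DROP return
 * from the lower driver onto -ENOMEM.
 */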
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
		  struct l2t_entry *l2e)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);

	skb = get_skb(skb, len, GFP_KERNEL);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, hwtid, 0);
	c4iw_ofld_send(rdev, skb);
}

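/*
 * Derive the effective MSS for the connection: start from the MTU the
 * hardware negotiated (indexed via the TCP options word), subtract
 * the IP and TCP header sizes, and carve out the timestamp option if
 * it was negotiated.  The result is clamped to a floor of 128 bytes.
 */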
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
		 ep->emss);
}

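/*
 * Endpoint state is protected by epc->mutex.  state_read() and
 * state_set() take the lock themselves; __state_set() is for callers
 * that already hold it.
 */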
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	pr_debug("%s -> %s\n", states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

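/*
 * Pre-allocate the skbs an endpoint will need for control messages
 * (flowc, close, abort), so the paths that later dequeue from
 * ep_skb_list do not have to allocate memory at teardown time.
 */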
static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
		if (!epc->wr_waitp) {
			kfree(epc);
			epc = NULL;
			goto out;
		}
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(epc->wr_waitp);
	}
	pr_debug("alloc ep %p\n", epc);
out:
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	xa_lock_irqsave(&ep->com.dev->hwtids, flags);
	__xa_erase(&ep->com.dev->hwtids, ep->hwtid);
	if (xa_empty(&ep->com.dev->hwtids))
		wake_up(&ep->com.dev->wait);
	xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
}

static int insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;
	int err;

	xa_lock_irqsave(&ep->com.dev->hwtids, flags);
	err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL);
	xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);

	return err;
}

/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	xa_lock_irqsave(&dev->hwtids, flags);
	ep = xa_load(&dev->hwtids, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	xa_unlock_irqrestore(&dev->hwtids, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	xa_lock_irqsave(&dev->stids, flags);
	ep = xa_load(&dev->stids, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	xa_unlock_irqrestore(&dev->stids, flags);
	return ep;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
				 ep->com.local_addr.ss_family);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	c4iw_put_wr_wait(ep->com.wr_waitp);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the hwtid xarray so
	 * lookups will no longer find this endpoint.  Otherwise we have
	 * a race where one thread finds the ep ptr just before the
	 * other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

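/*
 * Map a CPL error status from the hardware onto the closest errno the
 * connection manager can report upward.
 */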
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (!skb)
			return NULL;
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

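/*
 * Fake CPL opcodes used internally to defer endpoint cleanup to the
 * workqueue.  They sit just above NUM_CPL_CMDS so they can never
 * collide with a real hardware opcode.
 */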
enum {
	NUM_FAKE_CPLS = 2,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	xa_erase_irq(&ep->com.dev->atids, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("rdev %p\n", rdev);
	req->cmd = CPL_ABORT_NO_RST;
	skb_get(skb);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	} else {
		kfree_skb(skb);
	}
}

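/*
 * Build and send the FW_FLOWC_WR that seeds the firmware's per-flow
 * state: PF/VF, tx channel, ingress queue, initial send/receive
 * sequence numbers, send buffer, MSS and receive window scale.  When
 * the connection runs over a VLAN, a tenth mnemonic carries the VLAN
 * priority as a scheduling class.
 */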
static int send_flowc(struct c4iw_ep *ep)
{
	struct fw_flowc_wr *flowc;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u16 vlan = ep->l2t->vlan;
	int nparams;
	int flowclen, flowclen16;

	if (WARN_ON(!skb))
		return -ENOMEM;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 9;
	else
		nparams = 10;

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;

	flowc = __skb_put(skb, flowclen);
	memset(flowc, 0, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
	flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
	if (nparams == 10) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[9].val = cpu_to_be32(pri);
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

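/*
 * Initiate a TCP half-close by sending CPL_CLOSE_CON_REQ on one of
 * the connection's pre-allocated skbs.
 */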
static int send_halfclose(struct c4iw_ep *ep)
{
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
			      NULL, arp_failure_discard);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static void read_tcb(struct c4iw_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_get_tcb *req;
	int wrlen = roundup(sizeof(*req), 16);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (WARN_ON(!skb))
		return;

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
	req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));

	/*
	 * keep a ref on the ep so the tcb is not unlocked before this
	 * cpl completes. The ref is released in read_tcb_rpl().
	 */
	c4iw_get_ep(&ep->com);
	if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
		c4iw_put_ep(&ep->com);
}

static int send_abort_req(struct c4iw_ep *ep)
{
	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
			  ep, abort_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep)
{
	if (!ep->com.qp || !ep->com.qp->srq) {
		send_abort_req(ep);
		return 0;
	}
	set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
	read_tcb(ep);
	return 0;
}

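/*
 * Send the chip-specific active-open request.  T4, T5 and T6 each use
 * a different CPL layout, for both IPv4 and IPv6, so the request size
 * is chosen up front and the filter tuple and option words are filled
 * in per chip revision below.
 */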
712*4882a593Smuzhiyun 
send_connect(struct c4iw_ep * ep)713*4882a593Smuzhiyun static int send_connect(struct c4iw_ep *ep)
714*4882a593Smuzhiyun {
715*4882a593Smuzhiyun 	struct cpl_act_open_req *req = NULL;
716*4882a593Smuzhiyun 	struct cpl_t5_act_open_req *t5req = NULL;
717*4882a593Smuzhiyun 	struct cpl_t6_act_open_req *t6req = NULL;
718*4882a593Smuzhiyun 	struct cpl_act_open_req6 *req6 = NULL;
719*4882a593Smuzhiyun 	struct cpl_t5_act_open_req6 *t5req6 = NULL;
720*4882a593Smuzhiyun 	struct cpl_t6_act_open_req6 *t6req6 = NULL;
721*4882a593Smuzhiyun 	struct sk_buff *skb;
722*4882a593Smuzhiyun 	u64 opt0;
723*4882a593Smuzhiyun 	u32 opt2;
724*4882a593Smuzhiyun 	unsigned int mtu_idx;
725*4882a593Smuzhiyun 	u32 wscale;
726*4882a593Smuzhiyun 	int win, sizev4, sizev6, wrlen;
727*4882a593Smuzhiyun 	struct sockaddr_in *la = (struct sockaddr_in *)
728*4882a593Smuzhiyun 				 &ep->com.local_addr;
729*4882a593Smuzhiyun 	struct sockaddr_in *ra = (struct sockaddr_in *)
730*4882a593Smuzhiyun 				 &ep->com.remote_addr;
731*4882a593Smuzhiyun 	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
732*4882a593Smuzhiyun 				   &ep->com.local_addr;
733*4882a593Smuzhiyun 	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
734*4882a593Smuzhiyun 				   &ep->com.remote_addr;
735*4882a593Smuzhiyun 	int ret;
736*4882a593Smuzhiyun 	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
737*4882a593Smuzhiyun 	u32 isn = (prandom_u32() & ~7UL) - 1;
738*4882a593Smuzhiyun 	struct net_device *netdev;
739*4882a593Smuzhiyun 	u64 params;
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun 	netdev = ep->com.dev->rdev.lldi.ports[0];
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
744*4882a593Smuzhiyun 	case CHELSIO_T4:
745*4882a593Smuzhiyun 		sizev4 = sizeof(struct cpl_act_open_req);
746*4882a593Smuzhiyun 		sizev6 = sizeof(struct cpl_act_open_req6);
747*4882a593Smuzhiyun 		break;
748*4882a593Smuzhiyun 	case CHELSIO_T5:
749*4882a593Smuzhiyun 		sizev4 = sizeof(struct cpl_t5_act_open_req);
750*4882a593Smuzhiyun 		sizev6 = sizeof(struct cpl_t5_act_open_req6);
751*4882a593Smuzhiyun 		break;
752*4882a593Smuzhiyun 	case CHELSIO_T6:
753*4882a593Smuzhiyun 		sizev4 = sizeof(struct cpl_t6_act_open_req);
754*4882a593Smuzhiyun 		sizev6 = sizeof(struct cpl_t6_act_open_req6);
755*4882a593Smuzhiyun 		break;
756*4882a593Smuzhiyun 	default:
757*4882a593Smuzhiyun 		pr_err("T%d Chip is not supported\n",
758*4882a593Smuzhiyun 		       CHELSIO_CHIP_VERSION(adapter_type));
759*4882a593Smuzhiyun 		return -EINVAL;
760*4882a593Smuzhiyun 	}
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
763*4882a593Smuzhiyun 			roundup(sizev4, 16) :
764*4882a593Smuzhiyun 			roundup(sizev6, 16);
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 	pr_debug("ep %p atid %u\n", ep, ep->atid);
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
769*4882a593Smuzhiyun 	if (!skb) {
770*4882a593Smuzhiyun 		pr_err("%s - failed to alloc skb\n", __func__);
771*4882a593Smuzhiyun 		return -ENOMEM;
772*4882a593Smuzhiyun 	}
773*4882a593Smuzhiyun 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
776*4882a593Smuzhiyun 		      enable_tcp_timestamps,
777*4882a593Smuzhiyun 		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
778*4882a593Smuzhiyun 	wscale = cxgb_compute_wscale(rcv_win);
779*4882a593Smuzhiyun 
780*4882a593Smuzhiyun 	/*
781*4882a593Smuzhiyun 	 * Specify the largest window that will fit in opt0. The
782*4882a593Smuzhiyun 	 * remainder will be specified in the rx_data_ack.
783*4882a593Smuzhiyun 	 */
784*4882a593Smuzhiyun 	win = ep->rcv_win >> 10;
785*4882a593Smuzhiyun 	if (win > RCV_BUFSIZ_M)
786*4882a593Smuzhiyun 		win = RCV_BUFSIZ_M;
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun 	opt0 = (nocong ? NO_CONG_F : 0) |
789*4882a593Smuzhiyun 	       KEEP_ALIVE_F |
790*4882a593Smuzhiyun 	       DELACK_F |
791*4882a593Smuzhiyun 	       WND_SCALE_V(wscale) |
792*4882a593Smuzhiyun 	       MSS_IDX_V(mtu_idx) |
793*4882a593Smuzhiyun 	       L2T_IDX_V(ep->l2t->idx) |
794*4882a593Smuzhiyun 	       TX_CHAN_V(ep->tx_chan) |
795*4882a593Smuzhiyun 	       SMAC_SEL_V(ep->smac_idx) |
796*4882a593Smuzhiyun 	       DSCP_V(ep->tos >> 2) |
797*4882a593Smuzhiyun 	       ULP_MODE_V(ULP_MODE_TCPDDP) |
798*4882a593Smuzhiyun 	       RCV_BUFSIZ_V(win);
799*4882a593Smuzhiyun 	opt2 = RX_CHANNEL_V(0) |
800*4882a593Smuzhiyun 	       CCTRL_ECN_V(enable_ecn) |
801*4882a593Smuzhiyun 	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
802*4882a593Smuzhiyun 	if (enable_tcp_timestamps)
803*4882a593Smuzhiyun 		opt2 |= TSTAMPS_EN_F;
804*4882a593Smuzhiyun 	if (enable_tcp_sack)
805*4882a593Smuzhiyun 		opt2 |= SACK_EN_F;
806*4882a593Smuzhiyun 	if (wscale && enable_tcp_window_scaling)
807*4882a593Smuzhiyun 		opt2 |= WND_SCALE_EN_F;
808*4882a593Smuzhiyun 	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
809*4882a593Smuzhiyun 		if (peer2peer)
810*4882a593Smuzhiyun 			isn += 4;
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 		opt2 |= T5_OPT_2_VALID_F;
813*4882a593Smuzhiyun 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
814*4882a593Smuzhiyun 		opt2 |= T5_ISS_F;
815*4882a593Smuzhiyun 	}
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 	params = cxgb4_select_ntuple(netdev, ep->l2t);
818*4882a593Smuzhiyun 
819*4882a593Smuzhiyun 	if (ep->com.remote_addr.ss_family == AF_INET6)
820*4882a593Smuzhiyun 		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
821*4882a593Smuzhiyun 			       (const u32 *)&la6->sin6_addr.s6_addr, 1);
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun 	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun 	if (ep->com.remote_addr.ss_family == AF_INET) {
826*4882a593Smuzhiyun 		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
827*4882a593Smuzhiyun 		case CHELSIO_T4:
828*4882a593Smuzhiyun 			req = skb_put(skb, wrlen);
829*4882a593Smuzhiyun 			INIT_TP_WR(req, 0);
830*4882a593Smuzhiyun 			break;
831*4882a593Smuzhiyun 		case CHELSIO_T5:
832*4882a593Smuzhiyun 			t5req = skb_put(skb, wrlen);
833*4882a593Smuzhiyun 			INIT_TP_WR(t5req, 0);
834*4882a593Smuzhiyun 			req = (struct cpl_act_open_req *)t5req;
835*4882a593Smuzhiyun 			break;
836*4882a593Smuzhiyun 		case CHELSIO_T6:
837*4882a593Smuzhiyun 			t6req = skb_put(skb, wrlen);
838*4882a593Smuzhiyun 			INIT_TP_WR(t6req, 0);
839*4882a593Smuzhiyun 			req = (struct cpl_act_open_req *)t6req;
840*4882a593Smuzhiyun 			t5req = (struct cpl_t5_act_open_req *)t6req;
841*4882a593Smuzhiyun 			break;
842*4882a593Smuzhiyun 		default:
843*4882a593Smuzhiyun 			pr_err("T%d Chip is not supported\n",
844*4882a593Smuzhiyun 			       CHELSIO_CHIP_VERSION(adapter_type));
845*4882a593Smuzhiyun 			ret = -EINVAL;
846*4882a593Smuzhiyun 			goto clip_release;
847*4882a593Smuzhiyun 		}
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
850*4882a593Smuzhiyun 					((ep->rss_qid<<14) | ep->atid)));
851*4882a593Smuzhiyun 		req->local_port = la->sin_port;
852*4882a593Smuzhiyun 		req->peer_port = ra->sin_port;
853*4882a593Smuzhiyun 		req->local_ip = la->sin_addr.s_addr;
854*4882a593Smuzhiyun 		req->peer_ip = ra->sin_addr.s_addr;
855*4882a593Smuzhiyun 		req->opt0 = cpu_to_be64(opt0);
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun 		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
858*4882a593Smuzhiyun 			req->params = cpu_to_be32(params);
859*4882a593Smuzhiyun 			req->opt2 = cpu_to_be32(opt2);
860*4882a593Smuzhiyun 		} else {
861*4882a593Smuzhiyun 			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
862*4882a593Smuzhiyun 				t5req->params =
863*4882a593Smuzhiyun 					  cpu_to_be64(FILTER_TUPLE_V(params));
864*4882a593Smuzhiyun 				t5req->rsvd = cpu_to_be32(isn);
865*4882a593Smuzhiyun 				pr_debug("snd_isn %u\n", t5req->rsvd);
866*4882a593Smuzhiyun 				t5req->opt2 = cpu_to_be32(opt2);
867*4882a593Smuzhiyun 			} else {
868*4882a593Smuzhiyun 				t6req->params =
869*4882a593Smuzhiyun 					  cpu_to_be64(FILTER_TUPLE_V(params));
870*4882a593Smuzhiyun 				t6req->rsvd = cpu_to_be32(isn);
871*4882a593Smuzhiyun 				pr_debug("snd_isn %u\n", t6req->rsvd);
872*4882a593Smuzhiyun 				t6req->opt2 = cpu_to_be32(opt2);
873*4882a593Smuzhiyun 			}
874*4882a593Smuzhiyun 		}
875*4882a593Smuzhiyun 	} else {
876*4882a593Smuzhiyun 		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
877*4882a593Smuzhiyun 		case CHELSIO_T4:
878*4882a593Smuzhiyun 			req6 = skb_put(skb, wrlen);
879*4882a593Smuzhiyun 			INIT_TP_WR(req6, 0);
880*4882a593Smuzhiyun 			break;
881*4882a593Smuzhiyun 		case CHELSIO_T5:
882*4882a593Smuzhiyun 			t5req6 = skb_put(skb, wrlen);
883*4882a593Smuzhiyun 			INIT_TP_WR(t5req6, 0);
884*4882a593Smuzhiyun 			req6 = (struct cpl_act_open_req6 *)t5req6;
885*4882a593Smuzhiyun 			break;
886*4882a593Smuzhiyun 		case CHELSIO_T6:
887*4882a593Smuzhiyun 			t6req6 = skb_put(skb, wrlen);
888*4882a593Smuzhiyun 			INIT_TP_WR(t6req6, 0);
889*4882a593Smuzhiyun 			req6 = (struct cpl_act_open_req6 *)t6req6;
890*4882a593Smuzhiyun 			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
891*4882a593Smuzhiyun 			break;
892*4882a593Smuzhiyun 		default:
893*4882a593Smuzhiyun 			pr_err("T%d Chip is not supported\n",
894*4882a593Smuzhiyun 			       CHELSIO_CHIP_VERSION(adapter_type));
895*4882a593Smuzhiyun 			ret = -EINVAL;
896*4882a593Smuzhiyun 			goto clip_release;
897*4882a593Smuzhiyun 		}
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
900*4882a593Smuzhiyun 					((ep->rss_qid<<14)|ep->atid)));
901*4882a593Smuzhiyun 		req6->local_port = la6->sin6_port;
902*4882a593Smuzhiyun 		req6->peer_port = ra6->sin6_port;
903*4882a593Smuzhiyun 		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
904*4882a593Smuzhiyun 		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
905*4882a593Smuzhiyun 		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
906*4882a593Smuzhiyun 		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
907*4882a593Smuzhiyun 		req6->opt0 = cpu_to_be64(opt0);
908*4882a593Smuzhiyun 
909*4882a593Smuzhiyun 		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
910*4882a593Smuzhiyun 			req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
911*4882a593Smuzhiyun 								      ep->l2t));
912*4882a593Smuzhiyun 			req6->opt2 = cpu_to_be32(opt2);
913*4882a593Smuzhiyun 		} else {
914*4882a593Smuzhiyun 			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
915*4882a593Smuzhiyun 				t5req6->params =
916*4882a593Smuzhiyun 					    cpu_to_be64(FILTER_TUPLE_V(params));
917*4882a593Smuzhiyun 				t5req6->rsvd = cpu_to_be32(isn);
918*4882a593Smuzhiyun 				pr_debug("snd_isn %u\n", t5req6->rsvd);
919*4882a593Smuzhiyun 				t5req6->opt2 = cpu_to_be32(opt2);
920*4882a593Smuzhiyun 			} else {
921*4882a593Smuzhiyun 				t6req6->params =
922*4882a593Smuzhiyun 					    cpu_to_be64(FILTER_TUPLE_V(params));
923*4882a593Smuzhiyun 				t6req6->rsvd = cpu_to_be32(isn);
924*4882a593Smuzhiyun 				pr_debug("snd_isn %u\n", t6req6->rsvd);
925*4882a593Smuzhiyun 				t6req6->opt2 = cpu_to_be32(opt2);
926*4882a593Smuzhiyun 			}
927*4882a593Smuzhiyun 
928*4882a593Smuzhiyun 		}
929*4882a593Smuzhiyun 	}
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun 	set_bit(ACT_OPEN_REQ, &ep->com.history);
932*4882a593Smuzhiyun 	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
933*4882a593Smuzhiyun clip_release:
934*4882a593Smuzhiyun 	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
935*4882a593Smuzhiyun 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
936*4882a593Smuzhiyun 				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
937*4882a593Smuzhiyun 	return ret;
938*4882a593Smuzhiyun }
939*4882a593Smuzhiyun 
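/*
 * Transmit the MPA start message (and, for MPA v2, the enhanced
 * connection parameters carrying IRD/ORD and the RTR type) as
 * immediate data in a FW_OFLD_TX_DATA_WR.  The mpa skb is referenced
 * so the payload stays in memory until the hardware acks the
 * transmit; fw4_ack() drops that reference.
 */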
send_mpa_req(struct c4iw_ep * ep,struct sk_buff * skb,u8 mpa_rev_to_use)940*4882a593Smuzhiyun static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
941*4882a593Smuzhiyun 			u8 mpa_rev_to_use)
942*4882a593Smuzhiyun {
943*4882a593Smuzhiyun 	int mpalen, wrlen, ret;
944*4882a593Smuzhiyun 	struct fw_ofld_tx_data_wr *req;
945*4882a593Smuzhiyun 	struct mpa_message *mpa;
946*4882a593Smuzhiyun 	struct mpa_v2_conn_params mpa_v2_params;
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	pr_debug("ep %p tid %u pd_len %d\n",
949*4882a593Smuzhiyun 		 ep, ep->hwtid, ep->plen);
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	mpalen = sizeof(*mpa) + ep->plen;
952*4882a593Smuzhiyun 	if (mpa_rev_to_use == 2)
953*4882a593Smuzhiyun 		mpalen += sizeof(struct mpa_v2_conn_params);
954*4882a593Smuzhiyun 	wrlen = roundup(mpalen + sizeof(*req), 16);
955*4882a593Smuzhiyun 	skb = get_skb(skb, wrlen, GFP_KERNEL);
956*4882a593Smuzhiyun 	if (!skb) {
957*4882a593Smuzhiyun 		connect_reply_upcall(ep, -ENOMEM);
958*4882a593Smuzhiyun 		return -ENOMEM;
959*4882a593Smuzhiyun 	}
960*4882a593Smuzhiyun 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	req = skb_put_zero(skb, wrlen);
963*4882a593Smuzhiyun 	req->op_to_immdlen = cpu_to_be32(
964*4882a593Smuzhiyun 		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
965*4882a593Smuzhiyun 		FW_WR_COMPL_F |
966*4882a593Smuzhiyun 		FW_WR_IMMDLEN_V(mpalen));
967*4882a593Smuzhiyun 	req->flowid_len16 = cpu_to_be32(
968*4882a593Smuzhiyun 		FW_WR_FLOWID_V(ep->hwtid) |
969*4882a593Smuzhiyun 		FW_WR_LEN16_V(wrlen >> 4));
970*4882a593Smuzhiyun 	req->plen = cpu_to_be32(mpalen);
971*4882a593Smuzhiyun 	req->tunnel_to_proxy = cpu_to_be32(
972*4882a593Smuzhiyun 		FW_OFLD_TX_DATA_WR_FLUSH_F |
973*4882a593Smuzhiyun 		FW_OFLD_TX_DATA_WR_SHOVE_F);
974*4882a593Smuzhiyun 
975*4882a593Smuzhiyun 	mpa = (struct mpa_message *)(req + 1);
976*4882a593Smuzhiyun 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 	mpa->flags = 0;
979*4882a593Smuzhiyun 	if (crc_enabled)
980*4882a593Smuzhiyun 		mpa->flags |= MPA_CRC;
981*4882a593Smuzhiyun 	if (markers_enabled) {
982*4882a593Smuzhiyun 		mpa->flags |= MPA_MARKERS;
983*4882a593Smuzhiyun 		ep->mpa_attr.recv_marker_enabled = 1;
984*4882a593Smuzhiyun 	} else {
985*4882a593Smuzhiyun 		ep->mpa_attr.recv_marker_enabled = 0;
986*4882a593Smuzhiyun 	}
987*4882a593Smuzhiyun 	if (mpa_rev_to_use == 2)
988*4882a593Smuzhiyun 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun 	mpa->private_data_size = htons(ep->plen);
991*4882a593Smuzhiyun 	mpa->revision = mpa_rev_to_use;
992*4882a593Smuzhiyun 	if (mpa_rev_to_use == 1) {
993*4882a593Smuzhiyun 		ep->tried_with_mpa_v1 = 1;
994*4882a593Smuzhiyun 		ep->retry_with_mpa_v1 = 0;
995*4882a593Smuzhiyun 	}
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 	if (mpa_rev_to_use == 2) {
998*4882a593Smuzhiyun 		mpa->private_data_size =
999*4882a593Smuzhiyun 			htons(ntohs(mpa->private_data_size) +
1000*4882a593Smuzhiyun 			      sizeof(struct mpa_v2_conn_params));
1001*4882a593Smuzhiyun 		pr_debug("initiator ird %u ord %u\n", ep->ird,
1002*4882a593Smuzhiyun 			 ep->ord);
1003*4882a593Smuzhiyun 		mpa_v2_params.ird = htons((u16)ep->ird);
1004*4882a593Smuzhiyun 		mpa_v2_params.ord = htons((u16)ep->ord);
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 		if (peer2peer) {
1007*4882a593Smuzhiyun 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1008*4882a593Smuzhiyun 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
1009*4882a593Smuzhiyun 				mpa_v2_params.ord |=
1010*4882a593Smuzhiyun 					htons(MPA_V2_RDMA_WRITE_RTR);
1011*4882a593Smuzhiyun 			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
1012*4882a593Smuzhiyun 				mpa_v2_params.ord |=
1013*4882a593Smuzhiyun 					htons(MPA_V2_RDMA_READ_RTR);
1014*4882a593Smuzhiyun 		}
1015*4882a593Smuzhiyun 		memcpy(mpa->private_data, &mpa_v2_params,
1016*4882a593Smuzhiyun 		       sizeof(struct mpa_v2_conn_params));
1017*4882a593Smuzhiyun 
1018*4882a593Smuzhiyun 		if (ep->plen)
1019*4882a593Smuzhiyun 			memcpy(mpa->private_data +
1020*4882a593Smuzhiyun 			       sizeof(struct mpa_v2_conn_params),
1021*4882a593Smuzhiyun 			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
1022*4882a593Smuzhiyun 	} else
1023*4882a593Smuzhiyun 		if (ep->plen)
1024*4882a593Smuzhiyun 			memcpy(mpa->private_data,
1025*4882a593Smuzhiyun 					ep->mpa_pkt + sizeof(*mpa), ep->plen);
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	/*
1028*4882a593Smuzhiyun 	 * Reference the mpa skb.  This ensures the data area
1029*4882a593Smuzhiyun 	 * will remain in memory until the hw acks the tx.
1030*4882a593Smuzhiyun 	 * Function fw4_ack() will deref it.
1031*4882a593Smuzhiyun 	 */
1032*4882a593Smuzhiyun 	skb_get(skb);
1033*4882a593Smuzhiyun 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
1034*4882a593Smuzhiyun 	ep->mpa_skb = skb;
1035*4882a593Smuzhiyun 	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1036*4882a593Smuzhiyun 	if (ret)
1037*4882a593Smuzhiyun 		return ret;
1038*4882a593Smuzhiyun 	start_ep_timer(ep);
1039*4882a593Smuzhiyun 	__state_set(&ep->com, MPA_REQ_SENT);
1040*4882a593Smuzhiyun 	ep->mpa_attr.initiator = 1;
1041*4882a593Smuzhiyun 	ep->snd_seq += mpalen;
1042*4882a593Smuzhiyun 	return ret;
1043*4882a593Smuzhiyun }
1044*4882a593Smuzhiyun 
send_mpa_reject(struct c4iw_ep * ep,const void * pdata,u8 plen)1045*4882a593Smuzhiyun static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun 	int mpalen, wrlen;
1048*4882a593Smuzhiyun 	struct fw_ofld_tx_data_wr *req;
1049*4882a593Smuzhiyun 	struct mpa_message *mpa;
1050*4882a593Smuzhiyun 	struct sk_buff *skb;
1051*4882a593Smuzhiyun 	struct mpa_v2_conn_params mpa_v2_params;
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	pr_debug("ep %p tid %u pd_len %d\n",
1054*4882a593Smuzhiyun 		 ep, ep->hwtid, ep->plen);
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	mpalen = sizeof(*mpa) + plen;
1057*4882a593Smuzhiyun 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
1058*4882a593Smuzhiyun 		mpalen += sizeof(struct mpa_v2_conn_params);
1059*4882a593Smuzhiyun 	wrlen = roundup(mpalen + sizeof(*req), 16);
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
1062*4882a593Smuzhiyun 	if (!skb) {
1063*4882a593Smuzhiyun 		pr_err("%s - cannot alloc skb!\n", __func__);
1064*4882a593Smuzhiyun 		return -ENOMEM;
1065*4882a593Smuzhiyun 	}
1066*4882a593Smuzhiyun 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	req = skb_put_zero(skb, wrlen);
1069*4882a593Smuzhiyun 	req->op_to_immdlen = cpu_to_be32(
1070*4882a593Smuzhiyun 		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
1071*4882a593Smuzhiyun 		FW_WR_COMPL_F |
1072*4882a593Smuzhiyun 		FW_WR_IMMDLEN_V(mpalen));
1073*4882a593Smuzhiyun 	req->flowid_len16 = cpu_to_be32(
1074*4882a593Smuzhiyun 		FW_WR_FLOWID_V(ep->hwtid) |
1075*4882a593Smuzhiyun 		FW_WR_LEN16_V(wrlen >> 4));
1076*4882a593Smuzhiyun 	req->plen = cpu_to_be32(mpalen);
1077*4882a593Smuzhiyun 	req->tunnel_to_proxy = cpu_to_be32(
1078*4882a593Smuzhiyun 		FW_OFLD_TX_DATA_WR_FLUSH_F |
1079*4882a593Smuzhiyun 		FW_OFLD_TX_DATA_WR_SHOVE_F);
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 	mpa = (struct mpa_message *)(req + 1);
1082*4882a593Smuzhiyun 	memset(mpa, 0, sizeof(*mpa));
1083*4882a593Smuzhiyun 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1084*4882a593Smuzhiyun 	mpa->flags = MPA_REJECT;
1085*4882a593Smuzhiyun 	mpa->revision = ep->mpa_attr.version;
1086*4882a593Smuzhiyun 	mpa->private_data_size = htons(plen);
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1089*4882a593Smuzhiyun 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1090*4882a593Smuzhiyun 		mpa->private_data_size =
1091*4882a593Smuzhiyun 			htons(ntohs(mpa->private_data_size) +
1092*4882a593Smuzhiyun 			      sizeof(struct mpa_v2_conn_params));
1093*4882a593Smuzhiyun 		mpa_v2_params.ird = htons(((u16)ep->ird) |
1094*4882a593Smuzhiyun 					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
1095*4882a593Smuzhiyun 					   0));
1096*4882a593Smuzhiyun 		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1097*4882a593Smuzhiyun 					  (p2p_type ==
1098*4882a593Smuzhiyun 					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
1099*4882a593Smuzhiyun 					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
1100*4882a593Smuzhiyun 					   FW_RI_INIT_P2PTYPE_READ_REQ ?
1101*4882a593Smuzhiyun 					   MPA_V2_RDMA_READ_RTR : 0) : 0));
1102*4882a593Smuzhiyun 		memcpy(mpa->private_data, &mpa_v2_params,
1103*4882a593Smuzhiyun 		       sizeof(struct mpa_v2_conn_params));
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 		if (ep->plen)
1106*4882a593Smuzhiyun 			memcpy(mpa->private_data +
1107*4882a593Smuzhiyun 			       sizeof(struct mpa_v2_conn_params), pdata, plen);
1108*4882a593Smuzhiyun 	} else
1109*4882a593Smuzhiyun 		if (plen)
1110*4882a593Smuzhiyun 			memcpy(mpa->private_data, pdata, plen);
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	/*
1113*4882a593Smuzhiyun 	 * Reference the mpa skb again.  This ensures the data area
1114*4882a593Smuzhiyun 	 * will remain in memory until the hw acks the tx.
1115*4882a593Smuzhiyun 	 * Function fw4_ack() will deref it.
1116*4882a593Smuzhiyun 	 */
1117*4882a593Smuzhiyun 	skb_get(skb);
1118*4882a593Smuzhiyun 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1119*4882a593Smuzhiyun 	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
1120*4882a593Smuzhiyun 	ep->mpa_skb = skb;
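	/*
	 * The MPA bytes go out on the TCP stream, so advance our copy of
	 * the send sequence number by the amount of payload queued here.
	 */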
1121*4882a593Smuzhiyun 	ep->snd_seq += mpalen;
1122*4882a593Smuzhiyun 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	int mpalen, wrlen;
1128*4882a593Smuzhiyun 	struct fw_ofld_tx_data_wr *req;
1129*4882a593Smuzhiyun 	struct mpa_message *mpa;
1130*4882a593Smuzhiyun 	struct sk_buff *skb;
1131*4882a593Smuzhiyun 	struct mpa_v2_conn_params mpa_v2_params;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	pr_debug("ep %p tid %u pd_len %d\n",
1134*4882a593Smuzhiyun 		 ep, ep->hwtid, ep->plen);
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	mpalen = sizeof(*mpa) + plen;
1137*4882a593Smuzhiyun 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
1138*4882a593Smuzhiyun 		mpalen += sizeof(struct mpa_v2_conn_params);
1139*4882a593Smuzhiyun 	wrlen = roundup(mpalen + sizeof(*req), 16);
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
1142*4882a593Smuzhiyun 	if (!skb) {
1143*4882a593Smuzhiyun 		pr_err("%s - cannot alloc skb!\n", __func__);
1144*4882a593Smuzhiyun 		return -ENOMEM;
1145*4882a593Smuzhiyun 	}
1146*4882a593Smuzhiyun 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	req = skb_put_zero(skb, wrlen);
1149*4882a593Smuzhiyun 	req->op_to_immdlen = cpu_to_be32(
1150*4882a593Smuzhiyun 		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
1151*4882a593Smuzhiyun 		FW_WR_COMPL_F |
1152*4882a593Smuzhiyun 		FW_WR_IMMDLEN_V(mpalen));
1153*4882a593Smuzhiyun 	req->flowid_len16 = cpu_to_be32(
1154*4882a593Smuzhiyun 		FW_WR_FLOWID_V(ep->hwtid) |
1155*4882a593Smuzhiyun 		FW_WR_LEN16_V(wrlen >> 4));
1156*4882a593Smuzhiyun 	req->plen = cpu_to_be32(mpalen);
1157*4882a593Smuzhiyun 	req->tunnel_to_proxy = cpu_to_be32(
1158*4882a593Smuzhiyun 		FW_OFLD_TX_DATA_WR_FLUSH_F |
1159*4882a593Smuzhiyun 		FW_OFLD_TX_DATA_WR_SHOVE_F);
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	mpa = (struct mpa_message *)(req + 1);
1162*4882a593Smuzhiyun 	memset(mpa, 0, sizeof(*mpa));
1163*4882a593Smuzhiyun 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1164*4882a593Smuzhiyun 	mpa->flags = 0;
1165*4882a593Smuzhiyun 	if (ep->mpa_attr.crc_enabled)
1166*4882a593Smuzhiyun 		mpa->flags |= MPA_CRC;
1167*4882a593Smuzhiyun 	if (ep->mpa_attr.recv_marker_enabled)
1168*4882a593Smuzhiyun 		mpa->flags |= MPA_MARKERS;
1169*4882a593Smuzhiyun 	mpa->revision = ep->mpa_attr.version;
1170*4882a593Smuzhiyun 	mpa->private_data_size = htons(plen);
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1173*4882a593Smuzhiyun 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1174*4882a593Smuzhiyun 		mpa->private_data_size =
1175*4882a593Smuzhiyun 			htons(ntohs(mpa->private_data_size) +
1176*4882a593Smuzhiyun 			      sizeof(struct mpa_v2_conn_params));
1177*4882a593Smuzhiyun 		mpa_v2_params.ird = htons((u16)ep->ird);
1178*4882a593Smuzhiyun 		mpa_v2_params.ord = htons((u16)ep->ord);
1179*4882a593Smuzhiyun 		if (peer2peer && (ep->mpa_attr.p2p_type !=
1180*4882a593Smuzhiyun 					FW_RI_INIT_P2PTYPE_DISABLED)) {
1181*4882a593Smuzhiyun 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
1184*4882a593Smuzhiyun 				mpa_v2_params.ord |=
1185*4882a593Smuzhiyun 					htons(MPA_V2_RDMA_WRITE_RTR);
1186*4882a593Smuzhiyun 			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
1187*4882a593Smuzhiyun 				mpa_v2_params.ord |=
1188*4882a593Smuzhiyun 					htons(MPA_V2_RDMA_READ_RTR);
1189*4882a593Smuzhiyun 		}
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 		memcpy(mpa->private_data, &mpa_v2_params,
1192*4882a593Smuzhiyun 		       sizeof(struct mpa_v2_conn_params));
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 		if (ep->plen)
1195*4882a593Smuzhiyun 			memcpy(mpa->private_data +
1196*4882a593Smuzhiyun 			       sizeof(struct mpa_v2_conn_params), pdata, plen);
1197*4882a593Smuzhiyun 	} else
1198*4882a593Smuzhiyun 		if (plen)
1199*4882a593Smuzhiyun 			memcpy(mpa->private_data, pdata, plen);
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	/*
1202*4882a593Smuzhiyun 	 * Reference the mpa skb.  This ensures the data area
1203*4882a593Smuzhiyun 	 * will remain in memory until the hw acks the tx.
1204*4882a593Smuzhiyun 	 * Function fw4_ack() will deref it.
1205*4882a593Smuzhiyun 	 */
1206*4882a593Smuzhiyun 	skb_get(skb);
1207*4882a593Smuzhiyun 	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
1208*4882a593Smuzhiyun 	ep->mpa_skb = skb;
1209*4882a593Smuzhiyun 	__state_set(&ep->com, MPA_REP_SENT);
1210*4882a593Smuzhiyun 	ep->snd_seq += mpalen;
1211*4882a593Smuzhiyun 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun 	struct c4iw_ep *ep;
1217*4882a593Smuzhiyun 	struct cpl_act_establish *req = cplhdr(skb);
1218*4882a593Smuzhiyun 	unsigned short tcp_opt = ntohs(req->tcp_opt);
1219*4882a593Smuzhiyun 	unsigned int tid = GET_TID(req);
1220*4882a593Smuzhiyun 	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
1221*4882a593Smuzhiyun 	struct tid_info *t = dev->rdev.lldi.tids;
1222*4882a593Smuzhiyun 	int ret;
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	ep = lookup_atid(t, atid);
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
1227*4882a593Smuzhiyun 		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
1230*4882a593Smuzhiyun 	dst_confirm(ep->dst);
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	/* setup the hwtid for this connection */
1233*4882a593Smuzhiyun 	ep->hwtid = tid;
1234*4882a593Smuzhiyun 	cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
1235*4882a593Smuzhiyun 	insert_ep_tid(ep);
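	/*
	 * From this point the connection is tracked by its hardware tid:
	 * later CPL handlers look the ep up via get_ep_from_tid().  The
	 * temporary atid is released further down.
	 */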
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	ep->snd_seq = be32_to_cpu(req->snd_isn);
1238*4882a593Smuzhiyun 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1239*4882a593Smuzhiyun 	ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	set_emss(ep, tcp_opt);
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	/* dealloc the atid */
1244*4882a593Smuzhiyun 	xa_erase_irq(&ep->com.dev->atids, atid);
1245*4882a593Smuzhiyun 	cxgb4_free_atid(t, atid);
1246*4882a593Smuzhiyun 	set_bit(ACT_ESTAB, &ep->com.history);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	/* start MPA negotiation */
1249*4882a593Smuzhiyun 	ret = send_flowc(ep);
1250*4882a593Smuzhiyun 	if (ret)
1251*4882a593Smuzhiyun 		goto err;
1252*4882a593Smuzhiyun 	if (ep->retry_with_mpa_v1)
1253*4882a593Smuzhiyun 		ret = send_mpa_req(ep, skb, 1);
1254*4882a593Smuzhiyun 	else
1255*4882a593Smuzhiyun 		ret = send_mpa_req(ep, skb, mpa_rev);
1256*4882a593Smuzhiyun 	if (ret)
1257*4882a593Smuzhiyun 		goto err;
1258*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
1259*4882a593Smuzhiyun 	return 0;
1260*4882a593Smuzhiyun err:
1261*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
1262*4882a593Smuzhiyun 	connect_reply_upcall(ep, -ENOMEM);
1263*4882a593Smuzhiyun 	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1264*4882a593Smuzhiyun 	return 0;
1265*4882a593Smuzhiyun }
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun static void close_complete_upcall(struct c4iw_ep *ep, int status)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun 	struct iw_cm_event event;
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1272*4882a593Smuzhiyun 	memset(&event, 0, sizeof(event));
1273*4882a593Smuzhiyun 	event.event = IW_CM_EVENT_CLOSE;
1274*4882a593Smuzhiyun 	event.status = status;
1275*4882a593Smuzhiyun 	if (ep->com.cm_id) {
1276*4882a593Smuzhiyun 		pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
1277*4882a593Smuzhiyun 			 ep, ep->com.cm_id, ep->hwtid);
1278*4882a593Smuzhiyun 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1279*4882a593Smuzhiyun 		deref_cm_id(&ep->com);
1280*4882a593Smuzhiyun 		set_bit(CLOSE_UPCALL, &ep->com.history);
1281*4882a593Smuzhiyun 	}
1282*4882a593Smuzhiyun }
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun static void peer_close_upcall(struct c4iw_ep *ep)
1285*4882a593Smuzhiyun {
1286*4882a593Smuzhiyun 	struct iw_cm_event event;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1289*4882a593Smuzhiyun 	memset(&event, 0, sizeof(event));
1290*4882a593Smuzhiyun 	event.event = IW_CM_EVENT_DISCONNECT;
1291*4882a593Smuzhiyun 	if (ep->com.cm_id) {
1292*4882a593Smuzhiyun 		pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
1293*4882a593Smuzhiyun 			 ep, ep->com.cm_id, ep->hwtid);
1294*4882a593Smuzhiyun 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1295*4882a593Smuzhiyun 		set_bit(DISCONN_UPCALL, &ep->com.history);
1296*4882a593Smuzhiyun 	}
1297*4882a593Smuzhiyun }
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun static void peer_abort_upcall(struct c4iw_ep *ep)
1300*4882a593Smuzhiyun {
1301*4882a593Smuzhiyun 	struct iw_cm_event event;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1304*4882a593Smuzhiyun 	memset(&event, 0, sizeof(event));
1305*4882a593Smuzhiyun 	event.event = IW_CM_EVENT_CLOSE;
1306*4882a593Smuzhiyun 	event.status = -ECONNRESET;
1307*4882a593Smuzhiyun 	if (ep->com.cm_id) {
1308*4882a593Smuzhiyun 		pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
1309*4882a593Smuzhiyun 			 ep->com.cm_id, ep->hwtid);
1310*4882a593Smuzhiyun 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1311*4882a593Smuzhiyun 		deref_cm_id(&ep->com);
1312*4882a593Smuzhiyun 		set_bit(ABORT_UPCALL, &ep->com.history);
1313*4882a593Smuzhiyun 	}
1314*4882a593Smuzhiyun }
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1317*4882a593Smuzhiyun {
1318*4882a593Smuzhiyun 	struct iw_cm_event event;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	pr_debug("ep %p tid %u status %d\n",
1321*4882a593Smuzhiyun 		 ep, ep->hwtid, status);
1322*4882a593Smuzhiyun 	memset(&event, 0, sizeof(event));
1323*4882a593Smuzhiyun 	event.event = IW_CM_EVENT_CONNECT_REPLY;
1324*4882a593Smuzhiyun 	event.status = status;
1325*4882a593Smuzhiyun 	memcpy(&event.local_addr, &ep->com.local_addr,
1326*4882a593Smuzhiyun 	       sizeof(ep->com.local_addr));
1327*4882a593Smuzhiyun 	memcpy(&event.remote_addr, &ep->com.remote_addr,
1328*4882a593Smuzhiyun 	       sizeof(ep->com.remote_addr));
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	if ((status == 0) || (status == -ECONNREFUSED)) {
1331*4882a593Smuzhiyun 		if (!ep->tried_with_mpa_v1) {
1332*4882a593Smuzhiyun 			/* this means MPA_v2 is used */
1333*4882a593Smuzhiyun 			event.ord = ep->ird;
1334*4882a593Smuzhiyun 			event.ird = ep->ord;
1335*4882a593Smuzhiyun 			event.private_data_len = ep->plen -
1336*4882a593Smuzhiyun 				sizeof(struct mpa_v2_conn_params);
1337*4882a593Smuzhiyun 			event.private_data = ep->mpa_pkt +
1338*4882a593Smuzhiyun 				sizeof(struct mpa_message) +
1339*4882a593Smuzhiyun 				sizeof(struct mpa_v2_conn_params);
1340*4882a593Smuzhiyun 		} else {
1341*4882a593Smuzhiyun 			/* this means MPA_v1 is used */
1342*4882a593Smuzhiyun 			event.ord = cur_max_read_depth(ep->com.dev);
1343*4882a593Smuzhiyun 			event.ird = cur_max_read_depth(ep->com.dev);
1344*4882a593Smuzhiyun 			event.private_data_len = ep->plen;
1345*4882a593Smuzhiyun 			event.private_data = ep->mpa_pkt +
1346*4882a593Smuzhiyun 				sizeof(struct mpa_message);
1347*4882a593Smuzhiyun 		}
1348*4882a593Smuzhiyun 	}
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	pr_debug("ep %p tid %u status %d\n", ep,
1351*4882a593Smuzhiyun 		 ep->hwtid, status);
1352*4882a593Smuzhiyun 	set_bit(CONN_RPL_UPCALL, &ep->com.history);
1353*4882a593Smuzhiyun 	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 	if (status < 0)
1356*4882a593Smuzhiyun 		deref_cm_id(&ep->com);
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun static int connect_request_upcall(struct c4iw_ep *ep)
1360*4882a593Smuzhiyun {
1361*4882a593Smuzhiyun 	struct iw_cm_event event;
1362*4882a593Smuzhiyun 	int ret;
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1365*4882a593Smuzhiyun 	memset(&event, 0, sizeof(event));
1366*4882a593Smuzhiyun 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
1367*4882a593Smuzhiyun 	memcpy(&event.local_addr, &ep->com.local_addr,
1368*4882a593Smuzhiyun 	       sizeof(ep->com.local_addr));
1369*4882a593Smuzhiyun 	memcpy(&event.remote_addr, &ep->com.remote_addr,
1370*4882a593Smuzhiyun 	       sizeof(ep->com.remote_addr));
1371*4882a593Smuzhiyun 	event.provider_data = ep;
1372*4882a593Smuzhiyun 	if (!ep->tried_with_mpa_v1) {
1373*4882a593Smuzhiyun 		/* this means MPA_v2 is used */
1374*4882a593Smuzhiyun 		event.ord = ep->ord;
1375*4882a593Smuzhiyun 		event.ird = ep->ird;
1376*4882a593Smuzhiyun 		event.private_data_len = ep->plen -
1377*4882a593Smuzhiyun 			sizeof(struct mpa_v2_conn_params);
1378*4882a593Smuzhiyun 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1379*4882a593Smuzhiyun 			sizeof(struct mpa_v2_conn_params);
1380*4882a593Smuzhiyun 	} else {
1381*4882a593Smuzhiyun 		/* this means MPA_v1 is used. Send max supported */
1382*4882a593Smuzhiyun 		event.ord = cur_max_read_depth(ep->com.dev);
1383*4882a593Smuzhiyun 		event.ird = cur_max_read_depth(ep->com.dev);
1384*4882a593Smuzhiyun 		event.private_data_len = ep->plen;
1385*4882a593Smuzhiyun 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1386*4882a593Smuzhiyun 	}
1387*4882a593Smuzhiyun 	c4iw_get_ep(&ep->com);
1388*4882a593Smuzhiyun 	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1389*4882a593Smuzhiyun 						      &event);
1390*4882a593Smuzhiyun 	if (ret)
1391*4882a593Smuzhiyun 		c4iw_put_ep(&ep->com);
1392*4882a593Smuzhiyun 	set_bit(CONNREQ_UPCALL, &ep->com.history);
1393*4882a593Smuzhiyun 	c4iw_put_ep(&ep->parent_ep->com);
1394*4882a593Smuzhiyun 	return ret;
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun static void established_upcall(struct c4iw_ep *ep)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun 	struct iw_cm_event event;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1402*4882a593Smuzhiyun 	memset(&event, 0, sizeof(event));
1403*4882a593Smuzhiyun 	event.event = IW_CM_EVENT_ESTABLISHED;
1404*4882a593Smuzhiyun 	event.ird = ep->ord;
1405*4882a593Smuzhiyun 	event.ord = ep->ird;
1406*4882a593Smuzhiyun 	if (ep->com.cm_id) {
1407*4882a593Smuzhiyun 		pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1408*4882a593Smuzhiyun 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1409*4882a593Smuzhiyun 		set_bit(ESTAB_UPCALL, &ep->com.history);
1410*4882a593Smuzhiyun 	}
1411*4882a593Smuzhiyun }
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1414*4882a593Smuzhiyun {
1415*4882a593Smuzhiyun 	struct sk_buff *skb;
1416*4882a593Smuzhiyun 	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
1417*4882a593Smuzhiyun 	u32 credit_dack;
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	pr_debug("ep %p tid %u credits %u\n",
1420*4882a593Smuzhiyun 		 ep, ep->hwtid, credits);
1421*4882a593Smuzhiyun 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
1422*4882a593Smuzhiyun 	if (!skb) {
1423*4882a593Smuzhiyun 		pr_err("update_rx_credits - cannot alloc skb!\n");
1424*4882a593Smuzhiyun 		return 0;
1425*4882a593Smuzhiyun 	}
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	/*
1428*4882a593Smuzhiyun 	 * If we couldn't specify the entire rcv window at connection setup
1429*4882a593Smuzhiyun 	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
1430*4882a593Smuzhiyun 	 * then add the overage in to the credits returned.
1431*4882a593Smuzhiyun 	 */
1432*4882a593Smuzhiyun 	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
1433*4882a593Smuzhiyun 		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
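	/*
	 * Worked example, assuming RCV_BUFSIZ is a 10-bit field in 1KB
	 * units (i.e. RCV_BUFSIZ_M of 0x3ff): with a 2MB rcv_win only
	 * 1023KB could be advertised in opt0 at connection setup, so the
	 * remaining ~1MB overage is handed back here as extra credits.
	 */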
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
1436*4882a593Smuzhiyun 		      RX_DACK_MODE_V(dack_mode);
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun 	cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
1439*4882a593Smuzhiyun 			    credit_dack);
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
1442*4882a593Smuzhiyun 	return credits;
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun 
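/*
 * With relaxed IRD negotiation the initiator adapts its IRD/ORD to the
 * responder's advertised values (within the adapter's limits) instead of
 * failing the connection outright on a mismatch; see process_mpa_reply().
 */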
1445*4882a593Smuzhiyun #define RELAXED_IRD_NEGOTIATION 1
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun /*
1448*4882a593Smuzhiyun  * process_mpa_reply - process streaming mode MPA reply
1449*4882a593Smuzhiyun  *
1450*4882a593Smuzhiyun  * Returns:
1451*4882a593Smuzhiyun  *
1452*4882a593Smuzhiyun  * 0 upon success, indicating the MPA reply was processed and the result
1453*4882a593Smuzhiyun  * delivered to the ULP, or the reply is incomplete but valid so far.
1454*4882a593Smuzhiyun  *
1455*4882a593Smuzhiyun  * 1 if a failure requires the caller to close the connection.
1456*4882a593Smuzhiyun  *
1457*4882a593Smuzhiyun  * 2 if a failure requires the caller to abort the connection.
1458*4882a593Smuzhiyun  */
1459*4882a593Smuzhiyun static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1460*4882a593Smuzhiyun {
1461*4882a593Smuzhiyun 	struct mpa_message *mpa;
1462*4882a593Smuzhiyun 	struct mpa_v2_conn_params *mpa_v2_params;
1463*4882a593Smuzhiyun 	u16 plen;
1464*4882a593Smuzhiyun 	u16 resp_ird, resp_ord;
1465*4882a593Smuzhiyun 	u8 rtr_mismatch = 0, insuff_ird = 0;
1466*4882a593Smuzhiyun 	struct c4iw_qp_attributes attrs;
1467*4882a593Smuzhiyun 	enum c4iw_qp_attr_mask mask;
1468*4882a593Smuzhiyun 	int err;
1469*4882a593Smuzhiyun 	int disconnect = 0;
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun 	/*
1474*4882a593Smuzhiyun 	 * If we get more than the supported amount of private data
1475*4882a593Smuzhiyun 	 * then we must fail this connection.
1476*4882a593Smuzhiyun 	 */
1477*4882a593Smuzhiyun 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1478*4882a593Smuzhiyun 		err = -EINVAL;
1479*4882a593Smuzhiyun 		goto err_stop_timer;
1480*4882a593Smuzhiyun 	}
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	/*
1483*4882a593Smuzhiyun 	 * copy the new data into our accumulation buffer.
1484*4882a593Smuzhiyun 	 */
1485*4882a593Smuzhiyun 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1486*4882a593Smuzhiyun 				  skb->len);
1487*4882a593Smuzhiyun 	ep->mpa_pkt_len += skb->len;
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	/*
1490*4882a593Smuzhiyun 	 * if we don't even have the mpa message, then bail.
1491*4882a593Smuzhiyun 	 */
1492*4882a593Smuzhiyun 	if (ep->mpa_pkt_len < sizeof(*mpa))
1493*4882a593Smuzhiyun 		return 0;
1494*4882a593Smuzhiyun 	mpa = (struct mpa_message *) ep->mpa_pkt;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	/* Validate MPA header. */
1497*4882a593Smuzhiyun 	if (mpa->revision > mpa_rev) {
1498*4882a593Smuzhiyun 		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
1499*4882a593Smuzhiyun 		       __func__, mpa_rev, mpa->revision);
1500*4882a593Smuzhiyun 		err = -EPROTO;
1501*4882a593Smuzhiyun 		goto err_stop_timer;
1502*4882a593Smuzhiyun 	}
1503*4882a593Smuzhiyun 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1504*4882a593Smuzhiyun 		err = -EPROTO;
1505*4882a593Smuzhiyun 		goto err_stop_timer;
1506*4882a593Smuzhiyun 	}
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	plen = ntohs(mpa->private_data_size);
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	/*
1511*4882a593Smuzhiyun 	 * Fail if there's too much private data.
1512*4882a593Smuzhiyun 	 */
1513*4882a593Smuzhiyun 	if (plen > MPA_MAX_PRIVATE_DATA) {
1514*4882a593Smuzhiyun 		err = -EPROTO;
1515*4882a593Smuzhiyun 		goto err_stop_timer;
1516*4882a593Smuzhiyun 	}
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	/*
1519*4882a593Smuzhiyun 	 * Fail if plen does not account for the full packet size.
1520*4882a593Smuzhiyun 	 */
1521*4882a593Smuzhiyun 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1522*4882a593Smuzhiyun 		err = -EPROTO;
1523*4882a593Smuzhiyun 		goto err_stop_timer;
1524*4882a593Smuzhiyun 	}
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun 	ep->plen = (u8) plen;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	/*
1529*4882a593Smuzhiyun 	 * If we don't have all the pdata yet, then bail.
1530*4882a593Smuzhiyun 	 * We'll continue processing when more data arrives.
1531*4882a593Smuzhiyun 	 */
1532*4882a593Smuzhiyun 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1533*4882a593Smuzhiyun 		return 0;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	if (mpa->flags & MPA_REJECT) {
1536*4882a593Smuzhiyun 		err = -ECONNREFUSED;
1537*4882a593Smuzhiyun 		goto err_stop_timer;
1538*4882a593Smuzhiyun 	}
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	/*
1541*4882a593Smuzhiyun 	 * Stop mpa timer.  If it expired, then
1542*4882a593Smuzhiyun 	 * we ignore the MPA reply.  process_timeout()
1543*4882a593Smuzhiyun 	 * will abort the connection.
1544*4882a593Smuzhiyun 	 */
1545*4882a593Smuzhiyun 	if (stop_ep_timer(ep))
1546*4882a593Smuzhiyun 		return 0;
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 	/*
1549*4882a593Smuzhiyun 	 * If we get here we have accumulated the entire mpa
1550*4882a593Smuzhiyun 	 * start reply message including private data. And
1551*4882a593Smuzhiyun 	 * the MPA header is valid.
1552*4882a593Smuzhiyun 	 */
1553*4882a593Smuzhiyun 	__state_set(&ep->com, FPDU_MODE);
1554*4882a593Smuzhiyun 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1555*4882a593Smuzhiyun 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1556*4882a593Smuzhiyun 	ep->mpa_attr.version = mpa->revision;
1557*4882a593Smuzhiyun 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	if (mpa->revision == 2) {
1560*4882a593Smuzhiyun 		ep->mpa_attr.enhanced_rdma_conn =
1561*4882a593Smuzhiyun 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1562*4882a593Smuzhiyun 		if (ep->mpa_attr.enhanced_rdma_conn) {
1563*4882a593Smuzhiyun 			mpa_v2_params = (struct mpa_v2_conn_params *)
1564*4882a593Smuzhiyun 				(ep->mpa_pkt + sizeof(*mpa));
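			/*
			 * The high bits of the wire ird/ord fields carry the
			 * peer-to-peer and RTR flags (tested further down),
			 * so mask them off to get the raw IRD/ORD counts.
			 */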
1565*4882a593Smuzhiyun 			resp_ird = ntohs(mpa_v2_params->ird) &
1566*4882a593Smuzhiyun 				MPA_V2_IRD_ORD_MASK;
1567*4882a593Smuzhiyun 			resp_ord = ntohs(mpa_v2_params->ord) &
1568*4882a593Smuzhiyun 				MPA_V2_IRD_ORD_MASK;
1569*4882a593Smuzhiyun 			pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
1570*4882a593Smuzhiyun 				 resp_ird, resp_ord, ep->ird, ep->ord);
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 			/*
1573*4882a593Smuzhiyun 			 * This is a double-check. Ideally the checks below
1574*4882a593Smuzhiyun 			 * are not required, since IRD/ORD negotiation has
1575*4882a593Smuzhiyun 			 * already been handled in c4iw_accept_cr().
1576*4882a593Smuzhiyun 			 */
1577*4882a593Smuzhiyun 			if (ep->ird < resp_ord) {
1578*4882a593Smuzhiyun 				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
1579*4882a593Smuzhiyun 				    ep->com.dev->rdev.lldi.max_ordird_qp)
1580*4882a593Smuzhiyun 					ep->ird = resp_ord;
1581*4882a593Smuzhiyun 				else
1582*4882a593Smuzhiyun 					insuff_ird = 1;
1583*4882a593Smuzhiyun 			} else if (ep->ird > resp_ord) {
1584*4882a593Smuzhiyun 				ep->ird = resp_ord;
1585*4882a593Smuzhiyun 			}
1586*4882a593Smuzhiyun 			if (ep->ord > resp_ird) {
1587*4882a593Smuzhiyun 				if (RELAXED_IRD_NEGOTIATION)
1588*4882a593Smuzhiyun 					ep->ord = resp_ird;
1589*4882a593Smuzhiyun 				else
1590*4882a593Smuzhiyun 					insuff_ird = 1;
1591*4882a593Smuzhiyun 			}
1592*4882a593Smuzhiyun 			if (insuff_ird) {
1593*4882a593Smuzhiyun 				err = -ENOMEM;
1594*4882a593Smuzhiyun 				ep->ird = resp_ord;
1595*4882a593Smuzhiyun 				ep->ord = resp_ird;
1596*4882a593Smuzhiyun 			}
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 			if (ntohs(mpa_v2_params->ird) &
1599*4882a593Smuzhiyun 					MPA_V2_PEER2PEER_MODEL) {
1600*4882a593Smuzhiyun 				if (ntohs(mpa_v2_params->ord) &
1601*4882a593Smuzhiyun 						MPA_V2_RDMA_WRITE_RTR)
1602*4882a593Smuzhiyun 					ep->mpa_attr.p2p_type =
1603*4882a593Smuzhiyun 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1604*4882a593Smuzhiyun 				else if (ntohs(mpa_v2_params->ord) &
1605*4882a593Smuzhiyun 						MPA_V2_RDMA_READ_RTR)
1606*4882a593Smuzhiyun 					ep->mpa_attr.p2p_type =
1607*4882a593Smuzhiyun 						FW_RI_INIT_P2PTYPE_READ_REQ;
1608*4882a593Smuzhiyun 			}
1609*4882a593Smuzhiyun 		}
1610*4882a593Smuzhiyun 	} else if (mpa->revision == 1)
1611*4882a593Smuzhiyun 		if (peer2peer)
1612*4882a593Smuzhiyun 			ep->mpa_attr.p2p_type = p2p_type;
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
1615*4882a593Smuzhiyun 		 ep->mpa_attr.crc_enabled,
1616*4882a593Smuzhiyun 		 ep->mpa_attr.recv_marker_enabled,
1617*4882a593Smuzhiyun 		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1618*4882a593Smuzhiyun 		 ep->mpa_attr.p2p_type, p2p_type);
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	/*
1621*4882a593Smuzhiyun 	 * If responder's RTR does not match with that of initiator, assign
1622*4882a593Smuzhiyun 	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
1623*4882a593Smuzhiyun 	 * generated when moving QP to RTS state.
1624*4882a593Smuzhiyun 	 * A TERM message will be sent after QP has moved to RTS state
1625*4882a593Smuzhiyun 	 */
1626*4882a593Smuzhiyun 	if ((ep->mpa_attr.version == 2) && peer2peer &&
1627*4882a593Smuzhiyun 			(ep->mpa_attr.p2p_type != p2p_type)) {
1628*4882a593Smuzhiyun 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1629*4882a593Smuzhiyun 		rtr_mismatch = 1;
1630*4882a593Smuzhiyun 	}
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 	attrs.mpa_attr = ep->mpa_attr;
1633*4882a593Smuzhiyun 	attrs.max_ird = ep->ird;
1634*4882a593Smuzhiyun 	attrs.max_ord = ep->ord;
1635*4882a593Smuzhiyun 	attrs.llp_stream_handle = ep;
1636*4882a593Smuzhiyun 	attrs.next_state = C4IW_QP_STATE_RTS;
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	mask = C4IW_QP_ATTR_NEXT_STATE |
1639*4882a593Smuzhiyun 	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
1640*4882a593Smuzhiyun 	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	/* bind QP and TID with INIT_WR */
1643*4882a593Smuzhiyun 	err = c4iw_modify_qp(ep->com.qp->rhp,
1644*4882a593Smuzhiyun 			     ep->com.qp, mask, &attrs, 1);
1645*4882a593Smuzhiyun 	if (err)
1646*4882a593Smuzhiyun 		goto err;
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	/*
1649*4882a593Smuzhiyun 	 * If responder's RTR requirement did not match with what initiator
1650*4882a593Smuzhiyun 	 * supports, generate TERM message
1651*4882a593Smuzhiyun 	 */
1652*4882a593Smuzhiyun 	if (rtr_mismatch) {
1653*4882a593Smuzhiyun 		pr_err("%s: RTR mismatch, sending TERM\n", __func__);
1654*4882a593Smuzhiyun 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
1655*4882a593Smuzhiyun 		attrs.ecode = MPA_NOMATCH_RTR;
1656*4882a593Smuzhiyun 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
1657*4882a593Smuzhiyun 		attrs.send_term = 1;
1658*4882a593Smuzhiyun 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1659*4882a593Smuzhiyun 				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1660*4882a593Smuzhiyun 		err = -ENOMEM;
1661*4882a593Smuzhiyun 		disconnect = 1;
1662*4882a593Smuzhiyun 		goto out;
1663*4882a593Smuzhiyun 	}
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 	/*
1666*4882a593Smuzhiyun 	 * Generate a TERM if the initiator's IRD is insufficient for the
1667*4882a593Smuzhiyun 	 * responder-provided ORD. We currently behave the same way even
1668*4882a593Smuzhiyun 	 * when the responder-provided IRD is insufficient for the
1669*4882a593Smuzhiyun 	 * initiator's ORD.
1670*4882a593Smuzhiyun 	 */
1671*4882a593Smuzhiyun 	if (insuff_ird) {
1672*4882a593Smuzhiyun 		pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
1673*4882a593Smuzhiyun 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
1674*4882a593Smuzhiyun 		attrs.ecode = MPA_INSUFF_IRD;
1675*4882a593Smuzhiyun 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
1676*4882a593Smuzhiyun 		attrs.send_term = 1;
1677*4882a593Smuzhiyun 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1678*4882a593Smuzhiyun 				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1679*4882a593Smuzhiyun 		err = -ENOMEM;
1680*4882a593Smuzhiyun 		disconnect = 1;
1681*4882a593Smuzhiyun 		goto out;
1682*4882a593Smuzhiyun 	}
1683*4882a593Smuzhiyun 	goto out;
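	/*
	 * Error cascade: err_stop_timer falls through to err, which asks
	 * the caller to abort (disconnect = 2); the success and TERM paths
	 * jump straight to out with disconnect of 0 or 1 respectively.
	 */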
1684*4882a593Smuzhiyun err_stop_timer:
1685*4882a593Smuzhiyun 	stop_ep_timer(ep);
1686*4882a593Smuzhiyun err:
1687*4882a593Smuzhiyun 	disconnect = 2;
1688*4882a593Smuzhiyun out:
1689*4882a593Smuzhiyun 	connect_reply_upcall(ep, err);
1690*4882a593Smuzhiyun 	return disconnect;
1691*4882a593Smuzhiyun }
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun /*
1694*4882a593Smuzhiyun  * process_mpa_request - process streaming mode MPA request
1695*4882a593Smuzhiyun  *
1696*4882a593Smuzhiyun  * Returns:
1697*4882a593Smuzhiyun  *
1698*4882a593Smuzhiyun  * 0 upon success indicating a connect request was delivered to the ULP
1699*4882a593Smuzhiyun  * or the mpa request is incomplete but valid so far.
1700*4882a593Smuzhiyun  *
1701*4882a593Smuzhiyun  * 1 if a failure requires the caller to close the connection.
1702*4882a593Smuzhiyun  *
1703*4882a593Smuzhiyun  * 2 if a failure requires the caller to abort the connection.
1704*4882a593Smuzhiyun  */
1705*4882a593Smuzhiyun static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1706*4882a593Smuzhiyun {
1707*4882a593Smuzhiyun 	struct mpa_message *mpa;
1708*4882a593Smuzhiyun 	struct mpa_v2_conn_params *mpa_v2_params;
1709*4882a593Smuzhiyun 	u16 plen;
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	/*
1714*4882a593Smuzhiyun 	 * If we get more than the supported amount of private data
1715*4882a593Smuzhiyun 	 * then we must fail this connection.
1716*4882a593Smuzhiyun 	 */
1717*4882a593Smuzhiyun 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
1718*4882a593Smuzhiyun 		goto err_stop_timer;
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun 	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	/*
1723*4882a593Smuzhiyun 	 * Copy the new data into our accumulation buffer.
1724*4882a593Smuzhiyun 	 */
1725*4882a593Smuzhiyun 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1726*4882a593Smuzhiyun 				  skb->len);
1727*4882a593Smuzhiyun 	ep->mpa_pkt_len += skb->len;
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	/*
1730*4882a593Smuzhiyun 	 * If we don't even have the mpa message, then bail.
1731*4882a593Smuzhiyun 	 * We'll continue processing when more data arrives.
1732*4882a593Smuzhiyun 	 */
1733*4882a593Smuzhiyun 	if (ep->mpa_pkt_len < sizeof(*mpa))
1734*4882a593Smuzhiyun 		return 0;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
1737*4882a593Smuzhiyun 	mpa = (struct mpa_message *) ep->mpa_pkt;
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	/*
1740*4882a593Smuzhiyun 	 * Validate MPA Header.
1741*4882a593Smuzhiyun 	 */
1742*4882a593Smuzhiyun 	if (mpa->revision > mpa_rev) {
1743*4882a593Smuzhiyun 		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
1744*4882a593Smuzhiyun 		       __func__, mpa_rev, mpa->revision);
1745*4882a593Smuzhiyun 		goto err_stop_timer;
1746*4882a593Smuzhiyun 	}
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
1749*4882a593Smuzhiyun 		goto err_stop_timer;
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	plen = ntohs(mpa->private_data_size);
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun 	/*
1754*4882a593Smuzhiyun 	 * Fail if there's too much private data.
1755*4882a593Smuzhiyun 	 */
1756*4882a593Smuzhiyun 	if (plen > MPA_MAX_PRIVATE_DATA)
1757*4882a593Smuzhiyun 		goto err_stop_timer;
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	/*
1760*4882a593Smuzhiyun 	 * Fail if plen does not account for the full packet size.
1761*4882a593Smuzhiyun 	 */
1762*4882a593Smuzhiyun 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
1763*4882a593Smuzhiyun 		goto err_stop_timer;
1764*4882a593Smuzhiyun 	ep->plen = (u8) plen;
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	/*
1767*4882a593Smuzhiyun 	 * If we don't have all the pdata yet, then bail.
1768*4882a593Smuzhiyun 	 */
1769*4882a593Smuzhiyun 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1770*4882a593Smuzhiyun 		return 0;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	/*
1773*4882a593Smuzhiyun 	 * If we get here we have accumulated the entire mpa
1774*4882a593Smuzhiyun 	 * start request message including private data.
1775*4882a593Smuzhiyun 	 */
1776*4882a593Smuzhiyun 	ep->mpa_attr.initiator = 0;
1777*4882a593Smuzhiyun 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1778*4882a593Smuzhiyun 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1779*4882a593Smuzhiyun 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1780*4882a593Smuzhiyun 	ep->mpa_attr.version = mpa->revision;
1781*4882a593Smuzhiyun 	if (mpa->revision == 1)
1782*4882a593Smuzhiyun 		ep->tried_with_mpa_v1 = 1;
1783*4882a593Smuzhiyun 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	if (mpa->revision == 2) {
1786*4882a593Smuzhiyun 		ep->mpa_attr.enhanced_rdma_conn =
1787*4882a593Smuzhiyun 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1788*4882a593Smuzhiyun 		if (ep->mpa_attr.enhanced_rdma_conn) {
1789*4882a593Smuzhiyun 			mpa_v2_params = (struct mpa_v2_conn_params *)
1790*4882a593Smuzhiyun 				(ep->mpa_pkt + sizeof(*mpa));
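			/*
			 * Extract the initiator's advertised IRD/ORD and
			 * clamp them to what this adapter supports.
			 */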
1791*4882a593Smuzhiyun 			ep->ird = ntohs(mpa_v2_params->ird) &
1792*4882a593Smuzhiyun 				MPA_V2_IRD_ORD_MASK;
1793*4882a593Smuzhiyun 			ep->ird = min_t(u32, ep->ird,
1794*4882a593Smuzhiyun 					cur_max_read_depth(ep->com.dev));
1795*4882a593Smuzhiyun 			ep->ord = ntohs(mpa_v2_params->ord) &
1796*4882a593Smuzhiyun 				MPA_V2_IRD_ORD_MASK;
1797*4882a593Smuzhiyun 			ep->ord = min_t(u32, ep->ord,
1798*4882a593Smuzhiyun 					cur_max_read_depth(ep->com.dev));
1799*4882a593Smuzhiyun 			pr_debug("initiator ird %u ord %u\n",
1800*4882a593Smuzhiyun 				 ep->ird, ep->ord);
1801*4882a593Smuzhiyun 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
1802*4882a593Smuzhiyun 				if (peer2peer) {
1803*4882a593Smuzhiyun 					if (ntohs(mpa_v2_params->ord) &
1804*4882a593Smuzhiyun 							MPA_V2_RDMA_WRITE_RTR)
1805*4882a593Smuzhiyun 						ep->mpa_attr.p2p_type =
1806*4882a593Smuzhiyun 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1807*4882a593Smuzhiyun 					else if (ntohs(mpa_v2_params->ord) &
1808*4882a593Smuzhiyun 							MPA_V2_RDMA_READ_RTR)
1809*4882a593Smuzhiyun 						ep->mpa_attr.p2p_type =
1810*4882a593Smuzhiyun 						FW_RI_INIT_P2PTYPE_READ_REQ;
1811*4882a593Smuzhiyun 				}
1812*4882a593Smuzhiyun 		}
1813*4882a593Smuzhiyun 	} else if (mpa->revision == 1)
1814*4882a593Smuzhiyun 		if (peer2peer)
1815*4882a593Smuzhiyun 			ep->mpa_attr.p2p_type = p2p_type;
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun 	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
1818*4882a593Smuzhiyun 		 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1819*4882a593Smuzhiyun 		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1820*4882a593Smuzhiyun 		 ep->mpa_attr.p2p_type);
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	__state_set(&ep->com, MPA_REQ_RCVD);
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	/* drive upcall */
1825*4882a593Smuzhiyun 	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
1826*4882a593Smuzhiyun 	if (ep->parent_ep->com.state != DEAD) {
1827*4882a593Smuzhiyun 		if (connect_request_upcall(ep))
1828*4882a593Smuzhiyun 			goto err_unlock_parent;
1829*4882a593Smuzhiyun 	} else {
1830*4882a593Smuzhiyun 		goto err_unlock_parent;
1831*4882a593Smuzhiyun 	}
1832*4882a593Smuzhiyun 	mutex_unlock(&ep->parent_ep->com.mutex);
1833*4882a593Smuzhiyun 	return 0;
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun err_unlock_parent:
1836*4882a593Smuzhiyun 	mutex_unlock(&ep->parent_ep->com.mutex);
1837*4882a593Smuzhiyun 	goto err_out;
1838*4882a593Smuzhiyun err_stop_timer:
1839*4882a593Smuzhiyun 	(void)stop_ep_timer(ep);
1840*4882a593Smuzhiyun err_out:
1841*4882a593Smuzhiyun 	return 2;
1842*4882a593Smuzhiyun }
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1845*4882a593Smuzhiyun {
1846*4882a593Smuzhiyun 	struct c4iw_ep *ep;
1847*4882a593Smuzhiyun 	struct cpl_rx_data *hdr = cplhdr(skb);
1848*4882a593Smuzhiyun 	unsigned int dlen = ntohs(hdr->len);
1849*4882a593Smuzhiyun 	unsigned int tid = GET_TID(hdr);
1850*4882a593Smuzhiyun 	__u8 status = hdr->status;
1851*4882a593Smuzhiyun 	int disconnect = 0;
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
1854*4882a593Smuzhiyun 	if (!ep)
1855*4882a593Smuzhiyun 		return 0;
1856*4882a593Smuzhiyun 	pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
1857*4882a593Smuzhiyun 	skb_pull(skb, sizeof(*hdr));
1858*4882a593Smuzhiyun 	skb_trim(skb, dlen);
1859*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	switch (ep->com.state) {
1862*4882a593Smuzhiyun 	case MPA_REQ_SENT:
1863*4882a593Smuzhiyun 		update_rx_credits(ep, dlen);
1864*4882a593Smuzhiyun 		ep->rcv_seq += dlen;
1865*4882a593Smuzhiyun 		disconnect = process_mpa_reply(ep, skb);
1866*4882a593Smuzhiyun 		break;
1867*4882a593Smuzhiyun 	case MPA_REQ_WAIT:
1868*4882a593Smuzhiyun 		update_rx_credits(ep, dlen);
1869*4882a593Smuzhiyun 		ep->rcv_seq += dlen;
1870*4882a593Smuzhiyun 		disconnect = process_mpa_request(ep, skb);
1871*4882a593Smuzhiyun 		break;
1872*4882a593Smuzhiyun 	case FPDU_MODE: {
1873*4882a593Smuzhiyun 		struct c4iw_qp_attributes attrs;
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 		update_rx_credits(ep, dlen);
1876*4882a593Smuzhiyun 		if (status)
1877*4882a593Smuzhiyun 			pr_err("%s Unexpected streaming data." \
1878*4882a593Smuzhiyun 			       " qpid %u ep %p state %d tid %u status %d\n",
1879*4882a593Smuzhiyun 			       __func__, ep->com.qp->wq.sq.qid, ep,
1880*4882a593Smuzhiyun 			       ep->com.state, ep->hwtid, status);
1881*4882a593Smuzhiyun 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
1882*4882a593Smuzhiyun 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1883*4882a593Smuzhiyun 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1884*4882a593Smuzhiyun 		disconnect = 1;
1885*4882a593Smuzhiyun 		break;
1886*4882a593Smuzhiyun 	}
1887*4882a593Smuzhiyun 	default:
1888*4882a593Smuzhiyun 		break;
1889*4882a593Smuzhiyun 	}
1890*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
1891*4882a593Smuzhiyun 	if (disconnect)
1892*4882a593Smuzhiyun 		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
1893*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
1894*4882a593Smuzhiyun 	return 0;
1895*4882a593Smuzhiyun }
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
1898*4882a593Smuzhiyun {
1899*4882a593Smuzhiyun 	enum chip_type adapter_type;
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 	adapter_type = ep->com.dev->rdev.lldi.adapter_type;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	/*
1904*4882a593Smuzhiyun 	 * If this TCB had a srq buffer cached, then we must complete
1905*4882a593Smuzhiyun 	 * it. For user mode, that means saving the srqidx in the
1906*4882a593Smuzhiyun 	 * user/kernel status page for this qp.  For kernel mode, just
1907*4882a593Smuzhiyun 	 * synthesize the CQE now.
1908*4882a593Smuzhiyun 	 */
1909*4882a593Smuzhiyun 	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) {
1910*4882a593Smuzhiyun 		if (ep->com.qp->ibqp.uobject)
1911*4882a593Smuzhiyun 			t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
1912*4882a593Smuzhiyun 		else
1913*4882a593Smuzhiyun 			c4iw_flush_srqidx(ep->com.qp, srqidx);
1914*4882a593Smuzhiyun 	}
1915*4882a593Smuzhiyun }
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1918*4882a593Smuzhiyun {
1919*4882a593Smuzhiyun 	u32 srqidx;
1920*4882a593Smuzhiyun 	struct c4iw_ep *ep;
1921*4882a593Smuzhiyun 	struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
1922*4882a593Smuzhiyun 	int release = 0;
1923*4882a593Smuzhiyun 	unsigned int tid = GET_TID(rpl);
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
1926*4882a593Smuzhiyun 	if (!ep) {
1927*4882a593Smuzhiyun 		pr_warn("Abort rpl to freed endpoint\n");
1928*4882a593Smuzhiyun 		return 0;
1929*4882a593Smuzhiyun 	}
1930*4882a593Smuzhiyun 
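	/*
	 * If the abort reply carries an SRQ index, complete that buffer;
	 * otherwise fall back to the index cached on the ep when the
	 * abort was generated.
	 */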
1931*4882a593Smuzhiyun 	if (ep->com.qp && ep->com.qp->srq) {
1932*4882a593Smuzhiyun 		srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status));
1933*4882a593Smuzhiyun 		complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
1934*4882a593Smuzhiyun 	}
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1937*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
1938*4882a593Smuzhiyun 	switch (ep->com.state) {
1939*4882a593Smuzhiyun 	case ABORTING:
1940*4882a593Smuzhiyun 		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
1941*4882a593Smuzhiyun 		__state_set(&ep->com, DEAD);
1942*4882a593Smuzhiyun 		release = 1;
1943*4882a593Smuzhiyun 		break;
1944*4882a593Smuzhiyun 	default:
1945*4882a593Smuzhiyun 		pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
1946*4882a593Smuzhiyun 		break;
1947*4882a593Smuzhiyun 	}
1948*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	if (release) {
1951*4882a593Smuzhiyun 		close_complete_upcall(ep, -ECONNRESET);
1952*4882a593Smuzhiyun 		release_ep_resources(ep);
1953*4882a593Smuzhiyun 	}
1954*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
1955*4882a593Smuzhiyun 	return 0;
1956*4882a593Smuzhiyun }
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1959*4882a593Smuzhiyun {
1960*4882a593Smuzhiyun 	struct sk_buff *skb;
1961*4882a593Smuzhiyun 	struct fw_ofld_connection_wr *req;
1962*4882a593Smuzhiyun 	unsigned int mtu_idx;
1963*4882a593Smuzhiyun 	u32 wscale;
1964*4882a593Smuzhiyun 	struct sockaddr_in *sin;
1965*4882a593Smuzhiyun 	int win;
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1968*4882a593Smuzhiyun 	req = __skb_put_zero(skb, sizeof(*req));
1969*4882a593Smuzhiyun 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
1970*4882a593Smuzhiyun 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
1971*4882a593Smuzhiyun 	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1972*4882a593Smuzhiyun 				     ep->com.dev->rdev.lldi.ports[0],
1973*4882a593Smuzhiyun 				     ep->l2t));
1974*4882a593Smuzhiyun 	sin = (struct sockaddr_in *)&ep->com.local_addr;
1975*4882a593Smuzhiyun 	req->le.lport = sin->sin_port;
1976*4882a593Smuzhiyun 	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
1977*4882a593Smuzhiyun 	sin = (struct sockaddr_in *)&ep->com.remote_addr;
1978*4882a593Smuzhiyun 	req->le.pport = sin->sin_port;
1979*4882a593Smuzhiyun 	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
1980*4882a593Smuzhiyun 	req->tcb.t_state_to_astid =
1981*4882a593Smuzhiyun 			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
1982*4882a593Smuzhiyun 			FW_OFLD_CONNECTION_WR_ASTID_V(atid));
1983*4882a593Smuzhiyun 	req->tcb.cplrxdataack_cplpassacceptrpl =
1984*4882a593Smuzhiyun 			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
1985*4882a593Smuzhiyun 	req->tcb.tx_max = (__force __be32) jiffies;
1986*4882a593Smuzhiyun 	req->tcb.rcv_adv = htons(1);
1987*4882a593Smuzhiyun 	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
1988*4882a593Smuzhiyun 		      enable_tcp_timestamps,
1989*4882a593Smuzhiyun 		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
1990*4882a593Smuzhiyun 	wscale = cxgb_compute_wscale(rcv_win);
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 	/*
1993*4882a593Smuzhiyun 	 * Specify the largest window that will fit in opt0. The
1994*4882a593Smuzhiyun 	 * remainder will be specified in the rx_data_ack.
1995*4882a593Smuzhiyun 	 */
1996*4882a593Smuzhiyun 	win = ep->rcv_win >> 10;
1997*4882a593Smuzhiyun 	if (win > RCV_BUFSIZ_M)
1998*4882a593Smuzhiyun 		win = RCV_BUFSIZ_M;
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
2001*4882a593Smuzhiyun 		(nocong ? NO_CONG_F : 0) |
2002*4882a593Smuzhiyun 		KEEP_ALIVE_F |
2003*4882a593Smuzhiyun 		DELACK_F |
2004*4882a593Smuzhiyun 		WND_SCALE_V(wscale) |
2005*4882a593Smuzhiyun 		MSS_IDX_V(mtu_idx) |
2006*4882a593Smuzhiyun 		L2T_IDX_V(ep->l2t->idx) |
2007*4882a593Smuzhiyun 		TX_CHAN_V(ep->tx_chan) |
2008*4882a593Smuzhiyun 		SMAC_SEL_V(ep->smac_idx) |
2009*4882a593Smuzhiyun 		DSCP_V(ep->tos >> 2) |
2010*4882a593Smuzhiyun 		ULP_MODE_V(ULP_MODE_TCPDDP) |
2011*4882a593Smuzhiyun 		RCV_BUFSIZ_V(win));
2012*4882a593Smuzhiyun 	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
2013*4882a593Smuzhiyun 		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
2014*4882a593Smuzhiyun 		RX_CHANNEL_V(0) |
2015*4882a593Smuzhiyun 		CCTRL_ECN_V(enable_ecn) |
2016*4882a593Smuzhiyun 		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
2017*4882a593Smuzhiyun 	if (enable_tcp_timestamps)
2018*4882a593Smuzhiyun 		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
2019*4882a593Smuzhiyun 	if (enable_tcp_sack)
2020*4882a593Smuzhiyun 		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
2021*4882a593Smuzhiyun 	if (wscale && enable_tcp_window_scaling)
2022*4882a593Smuzhiyun 		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
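	/*
	 * opt0/opt2 were assembled above in host byte order (hence the
	 * __force casts); convert them to big-endian once here before
	 * handing the request to hardware.
	 */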
2023*4882a593Smuzhiyun 	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
2024*4882a593Smuzhiyun 	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
2025*4882a593Smuzhiyun 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
2026*4882a593Smuzhiyun 	set_bit(ACT_OFLD_CONN, &ep->com.history);
2027*4882a593Smuzhiyun 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2028*4882a593Smuzhiyun }
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun /*
2031*4882a593Smuzhiyun  * Some of the error codes above implicitly indicate that there is no TID
2032*4882a593Smuzhiyun  * allocated with the result of an ACT_OPEN.  We use this predicate to make
2033*4882a593Smuzhiyun  * that explicit.
2034*4882a593Smuzhiyun  */
2035*4882a593Smuzhiyun static inline int act_open_has_tid(int status)
2036*4882a593Smuzhiyun {
2037*4882a593Smuzhiyun 	return (status != CPL_ERR_TCAM_PARITY &&
2038*4882a593Smuzhiyun 		status != CPL_ERR_TCAM_MISS &&
2039*4882a593Smuzhiyun 		status != CPL_ERR_TCAM_FULL &&
2040*4882a593Smuzhiyun 		status != CPL_ERR_CONN_EXIST_SYNRECV &&
2041*4882a593Smuzhiyun 		status != CPL_ERR_CONN_EXIST);
2042*4882a593Smuzhiyun }
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun static char *neg_adv_str(unsigned int status)
2045*4882a593Smuzhiyun {
2046*4882a593Smuzhiyun 	switch (status) {
2047*4882a593Smuzhiyun 	case CPL_ERR_RTX_NEG_ADVICE:
2048*4882a593Smuzhiyun 		return "Retransmit timeout";
2049*4882a593Smuzhiyun 	case CPL_ERR_PERSIST_NEG_ADVICE:
2050*4882a593Smuzhiyun 		return "Persist timeout";
2051*4882a593Smuzhiyun 	case CPL_ERR_KEEPALV_NEG_ADVICE:
2052*4882a593Smuzhiyun 		return "Keepalive timeout";
2053*4882a593Smuzhiyun 	default:
2054*4882a593Smuzhiyun 		return "Unknown";
2055*4882a593Smuzhiyun 	}
2056*4882a593Smuzhiyun }
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
2059*4882a593Smuzhiyun {
2060*4882a593Smuzhiyun 	ep->snd_win = snd_win;
2061*4882a593Smuzhiyun 	ep->rcv_win = rcv_win;
2062*4882a593Smuzhiyun 	pr_debug("snd_win %d rcv_win %d\n",
2063*4882a593Smuzhiyun 		 ep->snd_win, ep->rcv_win);
2064*4882a593Smuzhiyun }
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun #define ACT_OPEN_RETRY_COUNT 2
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
2069*4882a593Smuzhiyun 		     struct dst_entry *dst, struct c4iw_dev *cdev,
2070*4882a593Smuzhiyun 		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
2071*4882a593Smuzhiyun {
2072*4882a593Smuzhiyun 	struct neighbour *n;
2073*4882a593Smuzhiyun 	int err, step;
2074*4882a593Smuzhiyun 	struct net_device *pdev;
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun 	n = dst_neigh_lookup(dst, peer_ip);
2077*4882a593Smuzhiyun 	if (!n)
2078*4882a593Smuzhiyun 		return -ENODEV;
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun 	rcu_read_lock();
2081*4882a593Smuzhiyun 	err = -ENOMEM;
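	/*
	 * Loopback neighbours have no offload netdev behind them, so find
	 * the real port by address lookup; otherwise derive it from the
	 * neighbour's (possibly VLAN) device via get_real_dev().
	 */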
2082*4882a593Smuzhiyun 	if (n->dev->flags & IFF_LOOPBACK) {
2083*4882a593Smuzhiyun 		if (iptype == 4)
2084*4882a593Smuzhiyun 			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
2085*4882a593Smuzhiyun 		else if (IS_ENABLED(CONFIG_IPV6))
2086*4882a593Smuzhiyun 			for_each_netdev(&init_net, pdev) {
2087*4882a593Smuzhiyun 				if (ipv6_chk_addr(&init_net,
2088*4882a593Smuzhiyun 						  (struct in6_addr *)peer_ip,
2089*4882a593Smuzhiyun 						  pdev, 1))
2090*4882a593Smuzhiyun 					break;
2091*4882a593Smuzhiyun 			}
2092*4882a593Smuzhiyun 		else
2093*4882a593Smuzhiyun 			pdev = NULL;
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 		if (!pdev) {
2096*4882a593Smuzhiyun 			err = -ENODEV;
2097*4882a593Smuzhiyun 			goto out;
2098*4882a593Smuzhiyun 		}
2099*4882a593Smuzhiyun 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2100*4882a593Smuzhiyun 					n, pdev, rt_tos2priority(tos));
2101*4882a593Smuzhiyun 		if (!ep->l2t) {
2102*4882a593Smuzhiyun 			dev_put(pdev);
2103*4882a593Smuzhiyun 			goto out;
2104*4882a593Smuzhiyun 		}
2105*4882a593Smuzhiyun 		ep->mtu = pdev->mtu;
2106*4882a593Smuzhiyun 		ep->tx_chan = cxgb4_port_chan(pdev);
2107*4882a593Smuzhiyun 		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
2108*4882a593Smuzhiyun 		step = cdev->rdev.lldi.ntxq /
2109*4882a593Smuzhiyun 			cdev->rdev.lldi.nchan;
2110*4882a593Smuzhiyun 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
2111*4882a593Smuzhiyun 		step = cdev->rdev.lldi.nrxq /
2112*4882a593Smuzhiyun 			cdev->rdev.lldi.nchan;
2113*4882a593Smuzhiyun 		ep->ctrlq_idx = cxgb4_port_idx(pdev);
2114*4882a593Smuzhiyun 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2115*4882a593Smuzhiyun 			cxgb4_port_idx(pdev) * step];
2116*4882a593Smuzhiyun 		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2117*4882a593Smuzhiyun 		dev_put(pdev);
2118*4882a593Smuzhiyun 	} else {
2119*4882a593Smuzhiyun 		pdev = get_real_dev(n->dev);
2120*4882a593Smuzhiyun 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2121*4882a593Smuzhiyun 					n, pdev, rt_tos2priority(tos));
2122*4882a593Smuzhiyun 		if (!ep->l2t)
2123*4882a593Smuzhiyun 			goto out;
2124*4882a593Smuzhiyun 		ep->mtu = dst_mtu(dst);
2125*4882a593Smuzhiyun 		ep->tx_chan = cxgb4_port_chan(pdev);
2126*4882a593Smuzhiyun 		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
2127*4882a593Smuzhiyun 		step = cdev->rdev.lldi.ntxq /
2128*4882a593Smuzhiyun 			cdev->rdev.lldi.nchan;
2129*4882a593Smuzhiyun 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
2130*4882a593Smuzhiyun 		ep->ctrlq_idx = cxgb4_port_idx(pdev);
2131*4882a593Smuzhiyun 		step = cdev->rdev.lldi.nrxq /
2132*4882a593Smuzhiyun 			cdev->rdev.lldi.nchan;
2133*4882a593Smuzhiyun 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2134*4882a593Smuzhiyun 			cxgb4_port_idx(pdev) * step];
2135*4882a593Smuzhiyun 		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun 		if (clear_mpa_v1) {
2138*4882a593Smuzhiyun 			ep->retry_with_mpa_v1 = 0;
2139*4882a593Smuzhiyun 			ep->tried_with_mpa_v1 = 0;
2140*4882a593Smuzhiyun 		}
2141*4882a593Smuzhiyun 	}
2142*4882a593Smuzhiyun 	err = 0;
2143*4882a593Smuzhiyun out:
2144*4882a593Smuzhiyun 	rcu_read_unlock();
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	neigh_release(n);
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	return err;
2149*4882a593Smuzhiyun }
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun static int c4iw_reconnect(struct c4iw_ep *ep)
2152*4882a593Smuzhiyun {
2153*4882a593Smuzhiyun 	int err = 0;
2154*4882a593Smuzhiyun 	int size = 0;
2155*4882a593Smuzhiyun 	struct sockaddr_in *laddr = (struct sockaddr_in *)
2156*4882a593Smuzhiyun 				    &ep->com.cm_id->m_local_addr;
2157*4882a593Smuzhiyun 	struct sockaddr_in *raddr = (struct sockaddr_in *)
2158*4882a593Smuzhiyun 				    &ep->com.cm_id->m_remote_addr;
2159*4882a593Smuzhiyun 	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
2160*4882a593Smuzhiyun 				      &ep->com.cm_id->m_local_addr;
2161*4882a593Smuzhiyun 	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
2162*4882a593Smuzhiyun 				      &ep->com.cm_id->m_remote_addr;
2163*4882a593Smuzhiyun 	int iptype;
2164*4882a593Smuzhiyun 	__u8 *ra;
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun 	pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
2167*4882a593Smuzhiyun 	c4iw_init_wr_wait(ep->com.wr_waitp);
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	/* When the MPA revision differs between the two nodes, the node
2170*4882a593Smuzhiyun 	 * with MPA_rev=2 retries the connection with MPA_rev 1 on the same
2171*4882a593Smuzhiyun 	 * EP through c4iw_reconnect(), where the EP is assigned a new tid
2172*4882a593Smuzhiyun 	 * for the new connection attempt. Because the same EP pointer is
2173*4882a593Smuzhiyun 	 * reused, some skbs were consumed by the previous c4iw_connect(),
2174*4882a593Smuzhiyun 	 * leaving the EP with too few skbs for this c4iw_reconnect() and
2175*4882a593Smuzhiyun 	 * eventually crashing on an empty skb list in peer_abort().
2176*4882a593Smuzhiyun 	 * Replenish the skbs that have already been consumed.
2177*4882a593Smuzhiyun 	 */
2178*4882a593Smuzhiyun 	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
2179*4882a593Smuzhiyun 	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
2180*4882a593Smuzhiyun 		err = -ENOMEM;
2181*4882a593Smuzhiyun 		goto fail1;
2182*4882a593Smuzhiyun 	}
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun 	/*
2185*4882a593Smuzhiyun 	 * Allocate an active TID to initiate a TCP connection.
2186*4882a593Smuzhiyun 	 */
2187*4882a593Smuzhiyun 	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
2188*4882a593Smuzhiyun 	if (ep->atid == -1) {
2189*4882a593Smuzhiyun 		pr_err("%s - cannot alloc atid\n", __func__);
2190*4882a593Smuzhiyun 		err = -ENOMEM;
2191*4882a593Smuzhiyun 		goto fail2;
2192*4882a593Smuzhiyun 	}
2193*4882a593Smuzhiyun 	err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL);
2194*4882a593Smuzhiyun 	if (err)
2195*4882a593Smuzhiyun 		goto fail2a;
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun 	/* find a route */
2198*4882a593Smuzhiyun 	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
2199*4882a593Smuzhiyun 		ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
2200*4882a593Smuzhiyun 					  laddr->sin_addr.s_addr,
2201*4882a593Smuzhiyun 					  raddr->sin_addr.s_addr,
2202*4882a593Smuzhiyun 					  laddr->sin_port,
2203*4882a593Smuzhiyun 					  raddr->sin_port, ep->com.cm_id->tos);
2204*4882a593Smuzhiyun 		iptype = 4;
2205*4882a593Smuzhiyun 		ra = (__u8 *)&raddr->sin_addr;
2206*4882a593Smuzhiyun 	} else {
2207*4882a593Smuzhiyun 		ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
2208*4882a593Smuzhiyun 					   get_real_dev,
2209*4882a593Smuzhiyun 					   laddr6->sin6_addr.s6_addr,
2210*4882a593Smuzhiyun 					   raddr6->sin6_addr.s6_addr,
2211*4882a593Smuzhiyun 					   laddr6->sin6_port,
2212*4882a593Smuzhiyun 					   raddr6->sin6_port,
2213*4882a593Smuzhiyun 					   ep->com.cm_id->tos,
2214*4882a593Smuzhiyun 					   raddr6->sin6_scope_id);
2215*4882a593Smuzhiyun 		iptype = 6;
2216*4882a593Smuzhiyun 		ra = (__u8 *)&raddr6->sin6_addr;
2217*4882a593Smuzhiyun 	}
2218*4882a593Smuzhiyun 	if (!ep->dst) {
2219*4882a593Smuzhiyun 		pr_err("%s - cannot find route\n", __func__);
2220*4882a593Smuzhiyun 		err = -EHOSTUNREACH;
2221*4882a593Smuzhiyun 		goto fail3;
2222*4882a593Smuzhiyun 	}
2223*4882a593Smuzhiyun 	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
2224*4882a593Smuzhiyun 			ep->com.dev->rdev.lldi.adapter_type,
2225*4882a593Smuzhiyun 			ep->com.cm_id->tos);
2226*4882a593Smuzhiyun 	if (err) {
2227*4882a593Smuzhiyun 		pr_err("%s - cannot alloc l2e\n", __func__);
2228*4882a593Smuzhiyun 		goto fail4;
2229*4882a593Smuzhiyun 	}
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun 	pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2232*4882a593Smuzhiyun 		 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2233*4882a593Smuzhiyun 		 ep->l2t->idx);
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 	state_set(&ep->com, CONNECTING);
2236*4882a593Smuzhiyun 	ep->tos = ep->com.cm_id->tos;
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 	/* send connect request to rnic */
2239*4882a593Smuzhiyun 	err = send_connect(ep);
2240*4882a593Smuzhiyun 	if (!err)
2241*4882a593Smuzhiyun 		goto out;
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	cxgb4_l2t_release(ep->l2t);
2244*4882a593Smuzhiyun fail4:
2245*4882a593Smuzhiyun 	dst_release(ep->dst);
2246*4882a593Smuzhiyun fail3:
2247*4882a593Smuzhiyun 	xa_erase_irq(&ep->com.dev->atids, ep->atid);
2248*4882a593Smuzhiyun fail2a:
2249*4882a593Smuzhiyun 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2250*4882a593Smuzhiyun fail2:
2251*4882a593Smuzhiyun 	/*
2252*4882a593Smuzhiyun 	 * Remember to send a notification to the upper layer.
2253*4882a593Smuzhiyun 	 * The upper layer is not aware that this is a reconnect
2254*4882a593Smuzhiyun 	 * attempt, so it is still waiting for the response to the
2255*4882a593Smuzhiyun 	 * first connect request.
2256*4882a593Smuzhiyun 	 */
2257*4882a593Smuzhiyun 	connect_reply_upcall(ep, -ECONNRESET);
2258*4882a593Smuzhiyun fail1:
2259*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2260*4882a593Smuzhiyun out:
2261*4882a593Smuzhiyun 	return err;
2262*4882a593Smuzhiyun }
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2265*4882a593Smuzhiyun {
2266*4882a593Smuzhiyun 	struct c4iw_ep *ep;
2267*4882a593Smuzhiyun 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
2268*4882a593Smuzhiyun 	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
2269*4882a593Smuzhiyun 				      ntohl(rpl->atid_status)));
2270*4882a593Smuzhiyun 	struct tid_info *t = dev->rdev.lldi.tids;
2271*4882a593Smuzhiyun 	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
2272*4882a593Smuzhiyun 	struct sockaddr_in *la;
2273*4882a593Smuzhiyun 	struct sockaddr_in *ra;
2274*4882a593Smuzhiyun 	struct sockaddr_in6 *la6;
2275*4882a593Smuzhiyun 	struct sockaddr_in6 *ra6;
2276*4882a593Smuzhiyun 	int ret = 0;
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	ep = lookup_atid(t, atid);
2279*4882a593Smuzhiyun 	la = (struct sockaddr_in *)&ep->com.local_addr;
2280*4882a593Smuzhiyun 	ra = (struct sockaddr_in *)&ep->com.remote_addr;
2281*4882a593Smuzhiyun 	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
2282*4882a593Smuzhiyun 	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
2285*4882a593Smuzhiyun 		 status, status2errno(status));
2286*4882a593Smuzhiyun 
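	/*
	 * Negative advice from the hardware indicates a transient
	 * problem on the connection (e.g. excessive retransmits or
	 * unanswered keepalives); just count the event and ignore
	 * the reply.
	 */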
2287*4882a593Smuzhiyun 	if (cxgb_is_neg_adv(status)) {
2288*4882a593Smuzhiyun 		pr_debug("Connection problems for atid %u status %u (%s)\n",
2289*4882a593Smuzhiyun 			 atid, status, neg_adv_str(status));
2290*4882a593Smuzhiyun 		ep->stats.connect_neg_adv++;
2291*4882a593Smuzhiyun 		mutex_lock(&dev->rdev.stats.lock);
2292*4882a593Smuzhiyun 		dev->rdev.stats.neg_adv++;
2293*4882a593Smuzhiyun 		mutex_unlock(&dev->rdev.stats.lock);
2294*4882a593Smuzhiyun 		return 0;
2295*4882a593Smuzhiyun 	}
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun 	set_bit(ACT_OPEN_RPL, &ep->com.history);
2298*4882a593Smuzhiyun 
2299*4882a593Smuzhiyun 	/*
2300*4882a593Smuzhiyun 	 * Log interesting failures.
2301*4882a593Smuzhiyun 	 */
2302*4882a593Smuzhiyun 	switch (status) {
2303*4882a593Smuzhiyun 	case CPL_ERR_CONN_RESET:
2304*4882a593Smuzhiyun 	case CPL_ERR_CONN_TIMEDOUT:
2305*4882a593Smuzhiyun 		break;
2306*4882a593Smuzhiyun 	case CPL_ERR_TCAM_FULL:
2307*4882a593Smuzhiyun 		mutex_lock(&dev->rdev.stats.lock);
2308*4882a593Smuzhiyun 		dev->rdev.stats.tcam_full++;
2309*4882a593Smuzhiyun 		mutex_unlock(&dev->rdev.stats.lock);
2310*4882a593Smuzhiyun 		if (ep->com.local_addr.ss_family == AF_INET &&
2311*4882a593Smuzhiyun 		    dev->rdev.lldi.enable_fw_ofld_conn) {
2312*4882a593Smuzhiyun 			ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
2313*4882a593Smuzhiyun 						   ntohl(rpl->atid_status))));
2314*4882a593Smuzhiyun 			if (ret)
2315*4882a593Smuzhiyun 				goto fail;
2316*4882a593Smuzhiyun 			return 0;
2317*4882a593Smuzhiyun 		}
2318*4882a593Smuzhiyun 		break;
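	/*
	 * A connection with this 4-tuple already exists in the
	 * hardware.  Release the old atid, route and L2T entry and
	 * retry the active open from scratch via c4iw_reconnect(),
	 * up to ACT_OPEN_RETRY_COUNT times.
	 */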
2319*4882a593Smuzhiyun 	case CPL_ERR_CONN_EXIST:
2320*4882a593Smuzhiyun 		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2321*4882a593Smuzhiyun 			set_bit(ACT_RETRY_INUSE, &ep->com.history);
2322*4882a593Smuzhiyun 			if (ep->com.remote_addr.ss_family == AF_INET6) {
2323*4882a593Smuzhiyun 				struct sockaddr_in6 *sin6 =
2324*4882a593Smuzhiyun 						(struct sockaddr_in6 *)
2325*4882a593Smuzhiyun 						&ep->com.local_addr;
2326*4882a593Smuzhiyun 				cxgb4_clip_release(
2327*4882a593Smuzhiyun 						ep->com.dev->rdev.lldi.ports[0],
2328*4882a593Smuzhiyun 						(const u32 *)
2329*4882a593Smuzhiyun 						&sin6->sin6_addr.s6_addr, 1);
2330*4882a593Smuzhiyun 			}
2331*4882a593Smuzhiyun 			xa_erase_irq(&ep->com.dev->atids, atid);
2332*4882a593Smuzhiyun 			cxgb4_free_atid(t, atid);
2333*4882a593Smuzhiyun 			dst_release(ep->dst);
2334*4882a593Smuzhiyun 			cxgb4_l2t_release(ep->l2t);
2335*4882a593Smuzhiyun 			c4iw_reconnect(ep);
2336*4882a593Smuzhiyun 			return 0;
2337*4882a593Smuzhiyun 		}
2338*4882a593Smuzhiyun 		break;
2339*4882a593Smuzhiyun 	default:
2340*4882a593Smuzhiyun 		if (ep->com.local_addr.ss_family == AF_INET) {
2341*4882a593Smuzhiyun 			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
2342*4882a593Smuzhiyun 				atid, status, status2errno(status),
2343*4882a593Smuzhiyun 				&la->sin_addr.s_addr, ntohs(la->sin_port),
2344*4882a593Smuzhiyun 				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
2345*4882a593Smuzhiyun 		} else {
2346*4882a593Smuzhiyun 			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
2347*4882a593Smuzhiyun 				atid, status, status2errno(status),
2348*4882a593Smuzhiyun 				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
2349*4882a593Smuzhiyun 				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
2350*4882a593Smuzhiyun 		}
2351*4882a593Smuzhiyun 		break;
2352*4882a593Smuzhiyun 	}
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun fail:
2355*4882a593Smuzhiyun 	connect_reply_upcall(ep, status2errno(status));
2356*4882a593Smuzhiyun 	state_set(&ep->com, DEAD);
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun 	if (ep->com.remote_addr.ss_family == AF_INET6) {
2359*4882a593Smuzhiyun 		struct sockaddr_in6 *sin6 =
2360*4882a593Smuzhiyun 			(struct sockaddr_in6 *)&ep->com.local_addr;
2361*4882a593Smuzhiyun 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
2362*4882a593Smuzhiyun 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2363*4882a593Smuzhiyun 	}
2364*4882a593Smuzhiyun 	if (status && act_open_has_tid(status))
2365*4882a593Smuzhiyun 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
2366*4882a593Smuzhiyun 				 ep->com.local_addr.ss_family);
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun 	xa_erase_irq(&ep->com.dev->atids, atid);
2369*4882a593Smuzhiyun 	cxgb4_free_atid(t, atid);
2370*4882a593Smuzhiyun 	dst_release(ep->dst);
2371*4882a593Smuzhiyun 	cxgb4_l2t_release(ep->l2t);
2372*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 	return 0;
2375*4882a593Smuzhiyun }
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2378*4882a593Smuzhiyun {
2379*4882a593Smuzhiyun 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
2380*4882a593Smuzhiyun 	unsigned int stid = GET_TID(rpl);
2381*4882a593Smuzhiyun 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun 	if (!ep) {
2384*4882a593Smuzhiyun 		pr_warn("%s stid %d lookup failure!\n", __func__, stid);
2385*4882a593Smuzhiyun 		goto out;
2386*4882a593Smuzhiyun 	}
2387*4882a593Smuzhiyun 	pr_debug("ep %p status %d error %d\n", ep,
2388*4882a593Smuzhiyun 		 rpl->status, status2errno(rpl->status));
2389*4882a593Smuzhiyun 	c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
2390*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2391*4882a593Smuzhiyun out:
2392*4882a593Smuzhiyun 	return 0;
2393*4882a593Smuzhiyun }
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2396*4882a593Smuzhiyun {
2397*4882a593Smuzhiyun 	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
2398*4882a593Smuzhiyun 	unsigned int stid = GET_TID(rpl);
2399*4882a593Smuzhiyun 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 	if (!ep) {
2402*4882a593Smuzhiyun 		pr_warn("%s stid %d lookup failure!\n", __func__, stid);
2403*4882a593Smuzhiyun 		goto out;
2404*4882a593Smuzhiyun 	}
2405*4882a593Smuzhiyun 	pr_debug("ep %p\n", ep);
2406*4882a593Smuzhiyun 	c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
2407*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2408*4882a593Smuzhiyun out:
2409*4882a593Smuzhiyun 	return 0;
2410*4882a593Smuzhiyun }
2411*4882a593Smuzhiyun 
2412*4882a593Smuzhiyun static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2413*4882a593Smuzhiyun 		     struct cpl_pass_accept_req *req)
2414*4882a593Smuzhiyun {
2415*4882a593Smuzhiyun 	struct cpl_pass_accept_rpl *rpl;
2416*4882a593Smuzhiyun 	unsigned int mtu_idx;
2417*4882a593Smuzhiyun 	u64 opt0;
2418*4882a593Smuzhiyun 	u32 opt2;
2419*4882a593Smuzhiyun 	u32 wscale;
2420*4882a593Smuzhiyun 	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
2421*4882a593Smuzhiyun 	int win;
2422*4882a593Smuzhiyun 	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2425*4882a593Smuzhiyun 	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2426*4882a593Smuzhiyun 		      enable_tcp_timestamps && req->tcpopt.tstamp,
2427*4882a593Smuzhiyun 		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
2428*4882a593Smuzhiyun 	wscale = cxgb_compute_wscale(rcv_win);
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 	/*
2431*4882a593Smuzhiyun 	 * Specify the largest window that will fit in opt0. The
2432*4882a593Smuzhiyun 	 * remainder will be specified in the rx_data_ack.
2433*4882a593Smuzhiyun 	 */
2434*4882a593Smuzhiyun 	win = ep->rcv_win >> 10;
2435*4882a593Smuzhiyun 	if (win > RCV_BUFSIZ_M)
2436*4882a593Smuzhiyun 		win = RCV_BUFSIZ_M;
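	/*
	 * opt0/opt2 carry the per-connection parameters handed to the
	 * hardware in the CPL_PASS_ACCEPT_RPL: window scale, MSS index,
	 * L2T entry, TX channel, source MAC index, DSCP, ULP mode and
	 * the receive buffer size clamped above.
	 */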
2437*4882a593Smuzhiyun 	opt0 = (nocong ? NO_CONG_F : 0) |
2438*4882a593Smuzhiyun 	       KEEP_ALIVE_F |
2439*4882a593Smuzhiyun 	       DELACK_F |
2440*4882a593Smuzhiyun 	       WND_SCALE_V(wscale) |
2441*4882a593Smuzhiyun 	       MSS_IDX_V(mtu_idx) |
2442*4882a593Smuzhiyun 	       L2T_IDX_V(ep->l2t->idx) |
2443*4882a593Smuzhiyun 	       TX_CHAN_V(ep->tx_chan) |
2444*4882a593Smuzhiyun 	       SMAC_SEL_V(ep->smac_idx) |
2445*4882a593Smuzhiyun 	       DSCP_V(ep->tos >> 2) |
2446*4882a593Smuzhiyun 	       ULP_MODE_V(ULP_MODE_TCPDDP) |
2447*4882a593Smuzhiyun 	       RCV_BUFSIZ_V(win);
2448*4882a593Smuzhiyun 	opt2 = RX_CHANNEL_V(0) |
2449*4882a593Smuzhiyun 	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	if (enable_tcp_timestamps && req->tcpopt.tstamp)
2452*4882a593Smuzhiyun 		opt2 |= TSTAMPS_EN_F;
2453*4882a593Smuzhiyun 	if (enable_tcp_sack && req->tcpopt.sack)
2454*4882a593Smuzhiyun 		opt2 |= SACK_EN_F;
2455*4882a593Smuzhiyun 	if (wscale && enable_tcp_window_scaling)
2456*4882a593Smuzhiyun 		opt2 |= WND_SCALE_EN_F;
2457*4882a593Smuzhiyun 	if (enable_ecn) {
2458*4882a593Smuzhiyun 		const struct tcphdr *tcph;
2459*4882a593Smuzhiyun 		u32 hlen = ntohl(req->hdr_len);
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun 		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
2462*4882a593Smuzhiyun 			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
2463*4882a593Smuzhiyun 				IP_HDR_LEN_G(hlen);
2464*4882a593Smuzhiyun 		else
2465*4882a593Smuzhiyun 			tcph = (const void *)(req + 1) +
2466*4882a593Smuzhiyun 				T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
2467*4882a593Smuzhiyun 		if (tcph->ece && tcph->cwr)
2468*4882a593Smuzhiyun 			opt2 |= CCTRL_ECN_V(1);
2469*4882a593Smuzhiyun 	}
2470*4882a593Smuzhiyun 
2471*4882a593Smuzhiyun 	skb_get(skb);
2472*4882a593Smuzhiyun 	rpl = cplhdr(skb);
2473*4882a593Smuzhiyun 	if (!is_t4(adapter_type)) {
2474*4882a593Smuzhiyun 		skb_trim(skb, roundup(sizeof(*rpl5), 16));
2475*4882a593Smuzhiyun 		rpl5 = (void *)rpl;
2476*4882a593Smuzhiyun 		INIT_TP_WR(rpl5, ep->hwtid);
2477*4882a593Smuzhiyun 	} else {
2478*4882a593Smuzhiyun 		skb_trim(skb, sizeof(*rpl));
2479*4882a593Smuzhiyun 		INIT_TP_WR(rpl, ep->hwtid);
2480*4882a593Smuzhiyun 	}
2481*4882a593Smuzhiyun 	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2482*4882a593Smuzhiyun 						    ep->hwtid));
2483*4882a593Smuzhiyun 
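	/*
	 * T5 and later chips let the driver supply the initial send
	 * sequence number (T5_ISS_F) and select the congestion control
	 * algorithm; use a random ISS and the Tahoe algorithm here.
	 */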
2484*4882a593Smuzhiyun 	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
2485*4882a593Smuzhiyun 		u32 isn = (prandom_u32() & ~7UL) - 1;
2486*4882a593Smuzhiyun 		opt2 |= T5_OPT_2_VALID_F;
2487*4882a593Smuzhiyun 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
2488*4882a593Smuzhiyun 		opt2 |= T5_ISS_F;
2489*4882a593Smuzhiyun 		rpl5 = (void *)rpl;
2490*4882a593Smuzhiyun 		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2491*4882a593Smuzhiyun 		if (peer2peer)
2492*4882a593Smuzhiyun 			isn += 4;
2493*4882a593Smuzhiyun 		rpl5->iss = cpu_to_be32(isn);
2494*4882a593Smuzhiyun 		pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
2495*4882a593Smuzhiyun 	}
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 	rpl->opt0 = cpu_to_be64(opt0);
2498*4882a593Smuzhiyun 	rpl->opt2 = cpu_to_be32(opt2);
2499*4882a593Smuzhiyun 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
2500*4882a593Smuzhiyun 	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2503*4882a593Smuzhiyun }
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
2506*4882a593Smuzhiyun {
2507*4882a593Smuzhiyun 	pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
2508*4882a593Smuzhiyun 	skb_trim(skb, sizeof(struct cpl_tid_release));
2509*4882a593Smuzhiyun 	release_tid(&dev->rdev, hwtid, skb);
2510*4882a593Smuzhiyun 	return;
2511*4882a593Smuzhiyun }
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2514*4882a593Smuzhiyun {
2515*4882a593Smuzhiyun 	struct c4iw_ep *child_ep = NULL, *parent_ep;
2516*4882a593Smuzhiyun 	struct cpl_pass_accept_req *req = cplhdr(skb);
2517*4882a593Smuzhiyun 	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
2518*4882a593Smuzhiyun 	struct tid_info *t = dev->rdev.lldi.tids;
2519*4882a593Smuzhiyun 	unsigned int hwtid = GET_TID(req);
2520*4882a593Smuzhiyun 	struct dst_entry *dst;
2521*4882a593Smuzhiyun 	__u8 local_ip[16], peer_ip[16];
2522*4882a593Smuzhiyun 	__be16 local_port, peer_port;
2523*4882a593Smuzhiyun 	struct sockaddr_in6 *sin6;
2524*4882a593Smuzhiyun 	int err;
2525*4882a593Smuzhiyun 	u16 peer_mss = ntohs(req->tcpopt.mss);
2526*4882a593Smuzhiyun 	int iptype;
2527*4882a593Smuzhiyun 	unsigned short hdrs;
2528*4882a593Smuzhiyun 	u8 tos;
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
2531*4882a593Smuzhiyun 	if (!parent_ep) {
2532*4882a593Smuzhiyun 		pr_err("%s connect request on invalid stid %d\n",
2533*4882a593Smuzhiyun 		       __func__, stid);
2534*4882a593Smuzhiyun 		goto reject;
2535*4882a593Smuzhiyun 	}
2536*4882a593Smuzhiyun 
2537*4882a593Smuzhiyun 	if (state_read(&parent_ep->com) != LISTEN) {
2538*4882a593Smuzhiyun 		pr_err("%s - listening ep not in LISTEN\n", __func__);
2539*4882a593Smuzhiyun 		goto reject;
2540*4882a593Smuzhiyun 	}
2541*4882a593Smuzhiyun 
2542*4882a593Smuzhiyun 	if (parent_ep->com.cm_id->tos_set)
2543*4882a593Smuzhiyun 		tos = parent_ep->com.cm_id->tos;
2544*4882a593Smuzhiyun 	else
2545*4882a593Smuzhiyun 		tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun 	cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
2548*4882a593Smuzhiyun 			&iptype, local_ip, peer_ip, &local_port, &peer_port);
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 	/* Find output route */
2551*4882a593Smuzhiyun 	if (iptype == 4)  {
2552*4882a593Smuzhiyun 		pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2553*4882a593Smuzhiyun 			 , parent_ep, hwtid,
2554*4882a593Smuzhiyun 			 local_ip, peer_ip, ntohs(local_port),
2555*4882a593Smuzhiyun 			 ntohs(peer_port), peer_mss);
2556*4882a593Smuzhiyun 		dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
2557*4882a593Smuzhiyun 				      *(__be32 *)local_ip, *(__be32 *)peer_ip,
2558*4882a593Smuzhiyun 				      local_port, peer_port, tos);
2559*4882a593Smuzhiyun 	} else {
2560*4882a593Smuzhiyun 		pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2561*4882a593Smuzhiyun 			 , parent_ep, hwtid,
2562*4882a593Smuzhiyun 			 local_ip, peer_ip, ntohs(local_port),
2563*4882a593Smuzhiyun 			 ntohs(peer_port), peer_mss);
2564*4882a593Smuzhiyun 		dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
2565*4882a593Smuzhiyun 				local_ip, peer_ip, local_port, peer_port,
2566*4882a593Smuzhiyun 				tos,
2567*4882a593Smuzhiyun 				((struct sockaddr_in6 *)
2568*4882a593Smuzhiyun 				 &parent_ep->com.local_addr)->sin6_scope_id);
2569*4882a593Smuzhiyun 	}
2570*4882a593Smuzhiyun 	if (!dst) {
2571*4882a593Smuzhiyun 		pr_err("%s - failed to find dst entry!\n", __func__);
2572*4882a593Smuzhiyun 		goto reject;
2573*4882a593Smuzhiyun 	}
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
2576*4882a593Smuzhiyun 	if (!child_ep) {
2577*4882a593Smuzhiyun 		pr_err("%s - failed to allocate ep entry!\n", __func__);
2578*4882a593Smuzhiyun 		dst_release(dst);
2579*4882a593Smuzhiyun 		goto reject;
2580*4882a593Smuzhiyun 	}
2581*4882a593Smuzhiyun 
2582*4882a593Smuzhiyun 	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
2583*4882a593Smuzhiyun 			parent_ep->com.dev->rdev.lldi.adapter_type, tos);
2584*4882a593Smuzhiyun 	if (err) {
2585*4882a593Smuzhiyun 		pr_err("%s - failed to allocate l2t entry!\n", __func__);
2586*4882a593Smuzhiyun 		dst_release(dst);
2587*4882a593Smuzhiyun 		kfree(child_ep);
2588*4882a593Smuzhiyun 		goto reject;
2589*4882a593Smuzhiyun 	}
2590*4882a593Smuzhiyun 
2591*4882a593Smuzhiyun 	hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
2592*4882a593Smuzhiyun 	       sizeof(struct tcphdr) +
2593*4882a593Smuzhiyun 	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2594*4882a593Smuzhiyun 	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2595*4882a593Smuzhiyun 		child_ep->mtu = peer_mss + hdrs;
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun 	skb_queue_head_init(&child_ep->com.ep_skb_list);
2598*4882a593Smuzhiyun 	if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
2599*4882a593Smuzhiyun 		goto fail;
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 	state_set(&child_ep->com, CONNECTING);
2602*4882a593Smuzhiyun 	child_ep->com.dev = dev;
2603*4882a593Smuzhiyun 	child_ep->com.cm_id = NULL;
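	/*
	 * Record the connection 4-tuple from the CPL in the child
	 * endpoint; the local port is taken from the parent
	 * listener's address.
	 */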
2604*4882a593Smuzhiyun 
2605*4882a593Smuzhiyun 	if (iptype == 4) {
2606*4882a593Smuzhiyun 		struct sockaddr_in *sin = (struct sockaddr_in *)
2607*4882a593Smuzhiyun 			&child_ep->com.local_addr;
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 		sin->sin_family = AF_INET;
2610*4882a593Smuzhiyun 		sin->sin_port = local_port;
2611*4882a593Smuzhiyun 		sin->sin_addr.s_addr = *(__be32 *)local_ip;
2612*4882a593Smuzhiyun 
2613*4882a593Smuzhiyun 		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
2614*4882a593Smuzhiyun 		sin->sin_family = AF_INET;
2615*4882a593Smuzhiyun 		sin->sin_port = ((struct sockaddr_in *)
2616*4882a593Smuzhiyun 				 &parent_ep->com.local_addr)->sin_port;
2617*4882a593Smuzhiyun 		sin->sin_addr.s_addr = *(__be32 *)local_ip;
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
2620*4882a593Smuzhiyun 		sin->sin_family = AF_INET;
2621*4882a593Smuzhiyun 		sin->sin_port = peer_port;
2622*4882a593Smuzhiyun 		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
2623*4882a593Smuzhiyun 	} else {
2624*4882a593Smuzhiyun 		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2625*4882a593Smuzhiyun 		sin6->sin6_family = PF_INET6;
2626*4882a593Smuzhiyun 		sin6->sin6_port = local_port;
2627*4882a593Smuzhiyun 		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2628*4882a593Smuzhiyun 
2629*4882a593Smuzhiyun 		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2630*4882a593Smuzhiyun 		sin6->sin6_family = PF_INET6;
2631*4882a593Smuzhiyun 		sin6->sin6_port = ((struct sockaddr_in6 *)
2632*4882a593Smuzhiyun 				   &parent_ep->com.local_addr)->sin6_port;
2633*4882a593Smuzhiyun 		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun 		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
2636*4882a593Smuzhiyun 		sin6->sin6_family = PF_INET6;
2637*4882a593Smuzhiyun 		sin6->sin6_port = peer_port;
2638*4882a593Smuzhiyun 		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2639*4882a593Smuzhiyun 	}
2640*4882a593Smuzhiyun 
2641*4882a593Smuzhiyun 	c4iw_get_ep(&parent_ep->com);
2642*4882a593Smuzhiyun 	child_ep->parent_ep = parent_ep;
2643*4882a593Smuzhiyun 	child_ep->tos = tos;
2644*4882a593Smuzhiyun 	child_ep->dst = dst;
2645*4882a593Smuzhiyun 	child_ep->hwtid = hwtid;
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun 	pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
2648*4882a593Smuzhiyun 		 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
2649*4882a593Smuzhiyun 
2650*4882a593Smuzhiyun 	timer_setup(&child_ep->timer, ep_timeout, 0);
2651*4882a593Smuzhiyun 	cxgb4_insert_tid(t, child_ep, hwtid,
2652*4882a593Smuzhiyun 			 child_ep->com.local_addr.ss_family);
2653*4882a593Smuzhiyun 	insert_ep_tid(child_ep);
2654*4882a593Smuzhiyun 	if (accept_cr(child_ep, skb, req)) {
2655*4882a593Smuzhiyun 		c4iw_put_ep(&parent_ep->com);
2656*4882a593Smuzhiyun 		release_ep_resources(child_ep);
2657*4882a593Smuzhiyun 	} else {
2658*4882a593Smuzhiyun 		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
2659*4882a593Smuzhiyun 	}
2660*4882a593Smuzhiyun 	if (iptype == 6) {
2661*4882a593Smuzhiyun 		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2662*4882a593Smuzhiyun 		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
2663*4882a593Smuzhiyun 			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2664*4882a593Smuzhiyun 	}
2665*4882a593Smuzhiyun 	goto out;
2666*4882a593Smuzhiyun fail:
2667*4882a593Smuzhiyun 	c4iw_put_ep(&child_ep->com);
2668*4882a593Smuzhiyun reject:
2669*4882a593Smuzhiyun 	reject_cr(dev, hwtid, skb);
2670*4882a593Smuzhiyun out:
2671*4882a593Smuzhiyun 	if (parent_ep)
2672*4882a593Smuzhiyun 		c4iw_put_ep(&parent_ep->com);
2673*4882a593Smuzhiyun 	return 0;
2674*4882a593Smuzhiyun }
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2677*4882a593Smuzhiyun {
2678*4882a593Smuzhiyun 	struct c4iw_ep *ep;
2679*4882a593Smuzhiyun 	struct cpl_pass_establish *req = cplhdr(skb);
2680*4882a593Smuzhiyun 	unsigned int tid = GET_TID(req);
2681*4882a593Smuzhiyun 	int ret;
2682*4882a593Smuzhiyun 	u16 tcp_opt = ntohs(req->tcp_opt);
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
2685*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2686*4882a593Smuzhiyun 	ep->snd_seq = be32_to_cpu(req->snd_isn);
2687*4882a593Smuzhiyun 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2688*4882a593Smuzhiyun 	ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
2689*4882a593Smuzhiyun 
2690*4882a593Smuzhiyun 	pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);
2691*4882a593Smuzhiyun 
2692*4882a593Smuzhiyun 	set_emss(ep, tcp_opt);
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	dst_confirm(ep->dst);
2695*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
2696*4882a593Smuzhiyun 	ep->com.state = MPA_REQ_WAIT;
2697*4882a593Smuzhiyun 	start_ep_timer(ep);
2698*4882a593Smuzhiyun 	set_bit(PASS_ESTAB, &ep->com.history);
2699*4882a593Smuzhiyun 	ret = send_flowc(ep);
2700*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
2701*4882a593Smuzhiyun 	if (ret)
2702*4882a593Smuzhiyun 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2703*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 	return 0;
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun 
2708*4882a593Smuzhiyun static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2709*4882a593Smuzhiyun {
2710*4882a593Smuzhiyun 	struct cpl_peer_close *hdr = cplhdr(skb);
2711*4882a593Smuzhiyun 	struct c4iw_ep *ep;
2712*4882a593Smuzhiyun 	struct c4iw_qp_attributes attrs;
2713*4882a593Smuzhiyun 	int disconnect = 1;
2714*4882a593Smuzhiyun 	int release = 0;
2715*4882a593Smuzhiyun 	unsigned int tid = GET_TID(hdr);
2716*4882a593Smuzhiyun 	int ret;
2717*4882a593Smuzhiyun 
2718*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
2719*4882a593Smuzhiyun 	if (!ep)
2720*4882a593Smuzhiyun 		return 0;
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2723*4882a593Smuzhiyun 	dst_confirm(ep->dst);
2724*4882a593Smuzhiyun 
2725*4882a593Smuzhiyun 	set_bit(PEER_CLOSE, &ep->com.history);
2726*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
2727*4882a593Smuzhiyun 	switch (ep->com.state) {
2728*4882a593Smuzhiyun 	case MPA_REQ_WAIT:
2729*4882a593Smuzhiyun 		__state_set(&ep->com, CLOSING);
2730*4882a593Smuzhiyun 		break;
2731*4882a593Smuzhiyun 	case MPA_REQ_SENT:
2732*4882a593Smuzhiyun 		__state_set(&ep->com, CLOSING);
2733*4882a593Smuzhiyun 		connect_reply_upcall(ep, -ECONNRESET);
2734*4882a593Smuzhiyun 		break;
2735*4882a593Smuzhiyun 	case MPA_REQ_RCVD:
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun 		/*
2738*4882a593Smuzhiyun 		 * We're gonna mark this puppy DEAD, but keep
2739*4882a593Smuzhiyun 		 * the reference on it until the ULP accepts or
2740*4882a593Smuzhiyun 		 * rejects the CR. Also wake up anyone waiting
2741*4882a593Smuzhiyun 		 * in rdma connection migration (see c4iw_accept_cr()).
2742*4882a593Smuzhiyun 		 */
2743*4882a593Smuzhiyun 		__state_set(&ep->com, CLOSING);
2744*4882a593Smuzhiyun 		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2745*4882a593Smuzhiyun 		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2746*4882a593Smuzhiyun 		break;
2747*4882a593Smuzhiyun 	case MPA_REP_SENT:
2748*4882a593Smuzhiyun 		__state_set(&ep->com, CLOSING);
2749*4882a593Smuzhiyun 		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2750*4882a593Smuzhiyun 		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2751*4882a593Smuzhiyun 		break;
2752*4882a593Smuzhiyun 	case FPDU_MODE:
2753*4882a593Smuzhiyun 		start_ep_timer(ep);
2754*4882a593Smuzhiyun 		__state_set(&ep->com, CLOSING);
2755*4882a593Smuzhiyun 		attrs.next_state = C4IW_QP_STATE_CLOSING;
2756*4882a593Smuzhiyun 		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2757*4882a593Smuzhiyun 				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2758*4882a593Smuzhiyun 		if (ret != -ECONNRESET) {
2759*4882a593Smuzhiyun 			peer_close_upcall(ep);
2760*4882a593Smuzhiyun 			disconnect = 1;
2761*4882a593Smuzhiyun 		}
2762*4882a593Smuzhiyun 		break;
2763*4882a593Smuzhiyun 	case ABORTING:
2764*4882a593Smuzhiyun 		disconnect = 0;
2765*4882a593Smuzhiyun 		break;
2766*4882a593Smuzhiyun 	case CLOSING:
2767*4882a593Smuzhiyun 		__state_set(&ep->com, MORIBUND);
2768*4882a593Smuzhiyun 		disconnect = 0;
2769*4882a593Smuzhiyun 		break;
2770*4882a593Smuzhiyun 	case MORIBUND:
2771*4882a593Smuzhiyun 		(void)stop_ep_timer(ep);
2772*4882a593Smuzhiyun 		if (ep->com.cm_id && ep->com.qp) {
2773*4882a593Smuzhiyun 			attrs.next_state = C4IW_QP_STATE_IDLE;
2774*4882a593Smuzhiyun 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2775*4882a593Smuzhiyun 				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2776*4882a593Smuzhiyun 		}
2777*4882a593Smuzhiyun 		close_complete_upcall(ep, 0);
2778*4882a593Smuzhiyun 		__state_set(&ep->com, DEAD);
2779*4882a593Smuzhiyun 		release = 1;
2780*4882a593Smuzhiyun 		disconnect = 0;
2781*4882a593Smuzhiyun 		break;
2782*4882a593Smuzhiyun 	case DEAD:
2783*4882a593Smuzhiyun 		disconnect = 0;
2784*4882a593Smuzhiyun 		break;
2785*4882a593Smuzhiyun 	default:
2786*4882a593Smuzhiyun 		WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
2787*4882a593Smuzhiyun 	}
2788*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
2789*4882a593Smuzhiyun 	if (disconnect)
2790*4882a593Smuzhiyun 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2791*4882a593Smuzhiyun 	if (release)
2792*4882a593Smuzhiyun 		release_ep_resources(ep);
2793*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2794*4882a593Smuzhiyun 	return 0;
2795*4882a593Smuzhiyun }
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
2798*4882a593Smuzhiyun {
2799*4882a593Smuzhiyun 	complete_cached_srq_buffers(ep, ep->srqe_idx);
2800*4882a593Smuzhiyun 	if (ep->com.cm_id && ep->com.qp) {
2801*4882a593Smuzhiyun 		struct c4iw_qp_attributes attrs;
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun 		attrs.next_state = C4IW_QP_STATE_ERROR;
2804*4882a593Smuzhiyun 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2805*4882a593Smuzhiyun 			       C4IW_QP_ATTR_NEXT_STATE,	&attrs, 1);
2806*4882a593Smuzhiyun 	}
2807*4882a593Smuzhiyun 	peer_abort_upcall(ep);
2808*4882a593Smuzhiyun 	release_ep_resources(ep);
2809*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2810*4882a593Smuzhiyun }
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2813*4882a593Smuzhiyun {
2814*4882a593Smuzhiyun 	struct cpl_abort_req_rss6 *req = cplhdr(skb);
2815*4882a593Smuzhiyun 	struct c4iw_ep *ep;
2816*4882a593Smuzhiyun 	struct sk_buff *rpl_skb;
2817*4882a593Smuzhiyun 	struct c4iw_qp_attributes attrs;
2818*4882a593Smuzhiyun 	int ret;
2819*4882a593Smuzhiyun 	int release = 0;
2820*4882a593Smuzhiyun 	unsigned int tid = GET_TID(req);
2821*4882a593Smuzhiyun 	u8 status;
2822*4882a593Smuzhiyun 	u32 srqidx;
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun 	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
2825*4882a593Smuzhiyun 
2826*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
2827*4882a593Smuzhiyun 	if (!ep)
2828*4882a593Smuzhiyun 		return 0;
2829*4882a593Smuzhiyun 
2830*4882a593Smuzhiyun 	status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status));
2831*4882a593Smuzhiyun 
2832*4882a593Smuzhiyun 	if (cxgb_is_neg_adv(status)) {
2833*4882a593Smuzhiyun 		pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
2834*4882a593Smuzhiyun 			 ep->hwtid, status, neg_adv_str(status));
2835*4882a593Smuzhiyun 		ep->stats.abort_neg_adv++;
2836*4882a593Smuzhiyun 		mutex_lock(&dev->rdev.stats.lock);
2837*4882a593Smuzhiyun 		dev->rdev.stats.neg_adv++;
2838*4882a593Smuzhiyun 		mutex_unlock(&dev->rdev.stats.lock);
2839*4882a593Smuzhiyun 		goto deref_ep;
2840*4882a593Smuzhiyun 	}
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun 	pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
2843*4882a593Smuzhiyun 		 ep->com.state);
2844*4882a593Smuzhiyun 	set_bit(PEER_ABORT, &ep->com.history);
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun 	/*
2847*4882a593Smuzhiyun 	 * Wake up any threads in rdma_init() or rdma_fini().
2848*4882a593Smuzhiyun 	 * However, this is not needed if the com state is just
2849*4882a593Smuzhiyun 	 * MPA_REQ_SENT.
2850*4882a593Smuzhiyun 	 */
2851*4882a593Smuzhiyun 	if (ep->com.state != MPA_REQ_SENT)
2852*4882a593Smuzhiyun 		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2853*4882a593Smuzhiyun 
2854*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
2855*4882a593Smuzhiyun 	switch (ep->com.state) {
2856*4882a593Smuzhiyun 	case CONNECTING:
2857*4882a593Smuzhiyun 		c4iw_put_ep(&ep->parent_ep->com);
2858*4882a593Smuzhiyun 		break;
2859*4882a593Smuzhiyun 	case MPA_REQ_WAIT:
2860*4882a593Smuzhiyun 		(void)stop_ep_timer(ep);
2861*4882a593Smuzhiyun 		break;
2862*4882a593Smuzhiyun 	case MPA_REQ_SENT:
2863*4882a593Smuzhiyun 		(void)stop_ep_timer(ep);
2864*4882a593Smuzhiyun 		if (status != CPL_ERR_CONN_RESET || mpa_rev == 1 ||
2865*4882a593Smuzhiyun 		    (mpa_rev == 2 && ep->tried_with_mpa_v1))
2866*4882a593Smuzhiyun 			connect_reply_upcall(ep, -ECONNRESET);
2867*4882a593Smuzhiyun 		else {
2868*4882a593Smuzhiyun 			/*
2869*4882a593Smuzhiyun 			 * Don't send a notification upwards: we want
2870*4882a593Smuzhiyun 			 * to retry with MPA v1 without the upper
2871*4882a593Smuzhiyun 			 * layers even knowing it.
2872*4882a593Smuzhiyun 			 *
2873*4882a593Smuzhiyun 			 * Do some housekeeping so as to re-initiate
2874*4882a593Smuzhiyun 			 * the connection.
2875*4882a593Smuzhiyun 			 */
2876*4882a593Smuzhiyun 			pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
2877*4882a593Smuzhiyun 				__func__, mpa_rev);
2878*4882a593Smuzhiyun 			ep->retry_with_mpa_v1 = 1;
2879*4882a593Smuzhiyun 		}
2880*4882a593Smuzhiyun 		break;
2881*4882a593Smuzhiyun 	case MPA_REP_SENT:
2882*4882a593Smuzhiyun 		break;
2883*4882a593Smuzhiyun 	case MPA_REQ_RCVD:
2884*4882a593Smuzhiyun 		break;
2885*4882a593Smuzhiyun 	case MORIBUND:
2886*4882a593Smuzhiyun 	case CLOSING:
2887*4882a593Smuzhiyun 		stop_ep_timer(ep);
2888*4882a593Smuzhiyun 		fallthrough;
2889*4882a593Smuzhiyun 	case FPDU_MODE:
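		/*
		 * If the QP uses an SRQ, complete any cached SRQ buffers.
		 * When the abort CPL doesn't carry a valid SRQ index,
		 * fetch it asynchronously from the TCB and defer the
		 * teardown to finish_peer_abort().
		 */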
2890*4882a593Smuzhiyun 		if (ep->com.qp && ep->com.qp->srq) {
2891*4882a593Smuzhiyun 			srqidx = ABORT_RSS_SRQIDX_G(
2892*4882a593Smuzhiyun 					be32_to_cpu(req->srqidx_status));
2893*4882a593Smuzhiyun 			if (srqidx) {
2894*4882a593Smuzhiyun 				complete_cached_srq_buffers(ep, srqidx);
2895*4882a593Smuzhiyun 			} else {
2896*4882a593Smuzhiyun 				/* Hold ep ref until finish_peer_abort() */
2897*4882a593Smuzhiyun 				c4iw_get_ep(&ep->com);
2898*4882a593Smuzhiyun 				__state_set(&ep->com, ABORTING);
2899*4882a593Smuzhiyun 				set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
2900*4882a593Smuzhiyun 				read_tcb(ep);
2901*4882a593Smuzhiyun 				break;
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun 			}
2904*4882a593Smuzhiyun 		}
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun 		if (ep->com.cm_id && ep->com.qp) {
2907*4882a593Smuzhiyun 			attrs.next_state = C4IW_QP_STATE_ERROR;
2908*4882a593Smuzhiyun 			ret = c4iw_modify_qp(ep->com.qp->rhp,
2909*4882a593Smuzhiyun 				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2910*4882a593Smuzhiyun 				     &attrs, 1);
2911*4882a593Smuzhiyun 			if (ret)
2912*4882a593Smuzhiyun 				pr_err("%s - qp <- error failed!\n", __func__);
2913*4882a593Smuzhiyun 		}
2914*4882a593Smuzhiyun 		peer_abort_upcall(ep);
2915*4882a593Smuzhiyun 		break;
2916*4882a593Smuzhiyun 	case ABORTING:
2917*4882a593Smuzhiyun 		break;
2918*4882a593Smuzhiyun 	case DEAD:
2919*4882a593Smuzhiyun 		pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
2920*4882a593Smuzhiyun 		mutex_unlock(&ep->com.mutex);
2921*4882a593Smuzhiyun 		goto deref_ep;
2922*4882a593Smuzhiyun 	default:
2923*4882a593Smuzhiyun 		WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
2924*4882a593Smuzhiyun 		break;
2925*4882a593Smuzhiyun 	}
2926*4882a593Smuzhiyun 	dst_confirm(ep->dst);
2927*4882a593Smuzhiyun 	if (ep->com.state != ABORTING) {
2928*4882a593Smuzhiyun 		__state_set(&ep->com, DEAD);
2929*4882a593Smuzhiyun 		/* we don't release if we want to retry with mpa_v1 */
2930*4882a593Smuzhiyun 		if (!ep->retry_with_mpa_v1)
2931*4882a593Smuzhiyun 			release = 1;
2932*4882a593Smuzhiyun 	}
2933*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
2934*4882a593Smuzhiyun 
2935*4882a593Smuzhiyun 	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
2936*4882a593Smuzhiyun 	if (WARN_ON(!rpl_skb)) {
2937*4882a593Smuzhiyun 		release = 1;
2938*4882a593Smuzhiyun 		goto out;
2939*4882a593Smuzhiyun 	}
2940*4882a593Smuzhiyun 
2941*4882a593Smuzhiyun 	cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2944*4882a593Smuzhiyun out:
2945*4882a593Smuzhiyun 	if (release)
2946*4882a593Smuzhiyun 		release_ep_resources(ep);
2947*4882a593Smuzhiyun 	else if (ep->retry_with_mpa_v1) {
2948*4882a593Smuzhiyun 		if (ep->com.remote_addr.ss_family == AF_INET6) {
2949*4882a593Smuzhiyun 			struct sockaddr_in6 *sin6 =
2950*4882a593Smuzhiyun 					(struct sockaddr_in6 *)
2951*4882a593Smuzhiyun 					&ep->com.local_addr;
2952*4882a593Smuzhiyun 			cxgb4_clip_release(
2953*4882a593Smuzhiyun 					ep->com.dev->rdev.lldi.ports[0],
2954*4882a593Smuzhiyun 					(const u32 *)&sin6->sin6_addr.s6_addr,
2955*4882a593Smuzhiyun 					1);
2956*4882a593Smuzhiyun 		}
2957*4882a593Smuzhiyun 		xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid);
2958*4882a593Smuzhiyun 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
2959*4882a593Smuzhiyun 				 ep->com.local_addr.ss_family);
2960*4882a593Smuzhiyun 		dst_release(ep->dst);
2961*4882a593Smuzhiyun 		cxgb4_l2t_release(ep->l2t);
2962*4882a593Smuzhiyun 		c4iw_reconnect(ep);
2963*4882a593Smuzhiyun 	}
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun deref_ep:
2966*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2967*4882a593Smuzhiyun 	/* Dereferencing ep, referenced in peer_abort_intr() */
2968*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
2969*4882a593Smuzhiyun 	return 0;
2970*4882a593Smuzhiyun }
2971*4882a593Smuzhiyun 
2972*4882a593Smuzhiyun static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2973*4882a593Smuzhiyun {
2974*4882a593Smuzhiyun 	struct c4iw_ep *ep;
2975*4882a593Smuzhiyun 	struct c4iw_qp_attributes attrs;
2976*4882a593Smuzhiyun 	struct cpl_close_con_rpl *rpl = cplhdr(skb);
2977*4882a593Smuzhiyun 	int release = 0;
2978*4882a593Smuzhiyun 	unsigned int tid = GET_TID(rpl);
2979*4882a593Smuzhiyun 
2980*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
2981*4882a593Smuzhiyun 	if (!ep)
2982*4882a593Smuzhiyun 		return 0;
2983*4882a593Smuzhiyun 
2984*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2985*4882a593Smuzhiyun 
2986*4882a593Smuzhiyun 	/* The cm_id may be null if we failed to connect */
2987*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
2988*4882a593Smuzhiyun 	set_bit(CLOSE_CON_RPL, &ep->com.history);
2989*4882a593Smuzhiyun 	switch (ep->com.state) {
2990*4882a593Smuzhiyun 	case CLOSING:
2991*4882a593Smuzhiyun 		__state_set(&ep->com, MORIBUND);
2992*4882a593Smuzhiyun 		break;
2993*4882a593Smuzhiyun 	case MORIBUND:
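		/*
		 * Final close: stop the timer, move the QP back to IDLE
		 * and signal close completion to the ULP.
		 */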
2994*4882a593Smuzhiyun 		(void)stop_ep_timer(ep);
2995*4882a593Smuzhiyun 		if ((ep->com.cm_id) && (ep->com.qp)) {
2996*4882a593Smuzhiyun 			attrs.next_state = C4IW_QP_STATE_IDLE;
2997*4882a593Smuzhiyun 			c4iw_modify_qp(ep->com.qp->rhp,
2998*4882a593Smuzhiyun 					     ep->com.qp,
2999*4882a593Smuzhiyun 					     C4IW_QP_ATTR_NEXT_STATE,
3000*4882a593Smuzhiyun 					     &attrs, 1);
3001*4882a593Smuzhiyun 		}
3002*4882a593Smuzhiyun 		close_complete_upcall(ep, 0);
3003*4882a593Smuzhiyun 		__state_set(&ep->com, DEAD);
3004*4882a593Smuzhiyun 		release = 1;
3005*4882a593Smuzhiyun 		break;
3006*4882a593Smuzhiyun 	case ABORTING:
3007*4882a593Smuzhiyun 	case DEAD:
3008*4882a593Smuzhiyun 		break;
3009*4882a593Smuzhiyun 	default:
3010*4882a593Smuzhiyun 		WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
3011*4882a593Smuzhiyun 		break;
3012*4882a593Smuzhiyun 	}
3013*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
3014*4882a593Smuzhiyun 	if (release)
3015*4882a593Smuzhiyun 		release_ep_resources(ep);
3016*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3017*4882a593Smuzhiyun 	return 0;
3018*4882a593Smuzhiyun }
3019*4882a593Smuzhiyun 
3020*4882a593Smuzhiyun static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
3021*4882a593Smuzhiyun {
3022*4882a593Smuzhiyun 	struct cpl_rdma_terminate *rpl = cplhdr(skb);
3023*4882a593Smuzhiyun 	unsigned int tid = GET_TID(rpl);
3024*4882a593Smuzhiyun 	struct c4iw_ep *ep;
3025*4882a593Smuzhiyun 	struct c4iw_qp_attributes attrs;
3026*4882a593Smuzhiyun 
3027*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun 	if (ep) {
3030*4882a593Smuzhiyun 		if (ep->com.qp) {
3031*4882a593Smuzhiyun 			pr_warn("TERM received tid %u qpid %u\n", tid,
3032*4882a593Smuzhiyun 				ep->com.qp->wq.sq.qid);
3033*4882a593Smuzhiyun 			attrs.next_state = C4IW_QP_STATE_TERMINATE;
3034*4882a593Smuzhiyun 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
3035*4882a593Smuzhiyun 				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
3036*4882a593Smuzhiyun 		}
3037*4882a593Smuzhiyun 
3038*4882a593Smuzhiyun 		/* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
3039*4882a593Smuzhiyun 		 * when entering the TERM state the RNIC MUST initiate a CLOSE.
3040*4882a593Smuzhiyun 		 */
3041*4882a593Smuzhiyun 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3042*4882a593Smuzhiyun 		c4iw_put_ep(&ep->com);
3043*4882a593Smuzhiyun 	} else
3044*4882a593Smuzhiyun 		pr_warn("TERM received tid %u no ep/qp\n", tid);
3045*4882a593Smuzhiyun 
3046*4882a593Smuzhiyun 	return 0;
3047*4882a593Smuzhiyun }
3048*4882a593Smuzhiyun 
3049*4882a593Smuzhiyun /*
3050*4882a593Smuzhiyun  * Upcall from the adapter indicating data has been transmitted.
3051*4882a593Smuzhiyun  * For us it's just the single MPA request or reply.  We can now free
3052*4882a593Smuzhiyun  * the skb holding the mpa message.
3053*4882a593Smuzhiyun  */
3054*4882a593Smuzhiyun static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
3055*4882a593Smuzhiyun {
3056*4882a593Smuzhiyun 	struct c4iw_ep *ep;
3057*4882a593Smuzhiyun 	struct cpl_fw4_ack *hdr = cplhdr(skb);
3058*4882a593Smuzhiyun 	u8 credits = hdr->credits;
3059*4882a593Smuzhiyun 	unsigned int tid = GET_TID(hdr);
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 
3062*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
3063*4882a593Smuzhiyun 	if (!ep)
3064*4882a593Smuzhiyun 		return 0;
3065*4882a593Smuzhiyun 	pr_debug("ep %p tid %u credits %u\n",
3066*4882a593Smuzhiyun 		 ep, ep->hwtid, credits);
3067*4882a593Smuzhiyun 	if (credits == 0) {
3068*4882a593Smuzhiyun 		pr_debug("0 credit ack ep %p tid %u state %u\n",
3069*4882a593Smuzhiyun 			 ep, ep->hwtid, state_read(&ep->com));
3070*4882a593Smuzhiyun 		goto out;
3071*4882a593Smuzhiyun 	}
3072*4882a593Smuzhiyun 
3073*4882a593Smuzhiyun 	dst_confirm(ep->dst);
3074*4882a593Smuzhiyun 	if (ep->mpa_skb) {
3075*4882a593Smuzhiyun 		pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
3076*4882a593Smuzhiyun 			 ep, ep->hwtid, state_read(&ep->com),
3077*4882a593Smuzhiyun 			 ep->mpa_attr.initiator ? 1 : 0);
3078*4882a593Smuzhiyun 		mutex_lock(&ep->com.mutex);
3079*4882a593Smuzhiyun 		kfree_skb(ep->mpa_skb);
3080*4882a593Smuzhiyun 		ep->mpa_skb = NULL;
3081*4882a593Smuzhiyun 		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
3082*4882a593Smuzhiyun 			stop_ep_timer(ep);
3083*4882a593Smuzhiyun 		mutex_unlock(&ep->com.mutex);
3084*4882a593Smuzhiyun 	}
3085*4882a593Smuzhiyun out:
3086*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3087*4882a593Smuzhiyun 	return 0;
3088*4882a593Smuzhiyun }
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
3091*4882a593Smuzhiyun {
3092*4882a593Smuzhiyun 	int abort;
3093*4882a593Smuzhiyun 	struct c4iw_ep *ep = to_ep(cm_id);
3094*4882a593Smuzhiyun 
3095*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
3096*4882a593Smuzhiyun 
3097*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
3098*4882a593Smuzhiyun 	if (ep->com.state != MPA_REQ_RCVD) {
3099*4882a593Smuzhiyun 		mutex_unlock(&ep->com.mutex);
3100*4882a593Smuzhiyun 		c4iw_put_ep(&ep->com);
3101*4882a593Smuzhiyun 		return -ECONNRESET;
3102*4882a593Smuzhiyun 	}
3103*4882a593Smuzhiyun 	set_bit(ULP_REJECT, &ep->com.history);
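	/*
	 * With MPA disabled (mpa_rev 0) there is no way to convey the
	 * reject to the peer, so abort the connection instead.
	 */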
3104*4882a593Smuzhiyun 	if (mpa_rev == 0)
3105*4882a593Smuzhiyun 		abort = 1;
3106*4882a593Smuzhiyun 	else
3107*4882a593Smuzhiyun 		abort = send_mpa_reject(ep, pdata, pdata_len);
3108*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
3109*4882a593Smuzhiyun 
3110*4882a593Smuzhiyun 	stop_ep_timer(ep);
3111*4882a593Smuzhiyun 	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
3112*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3113*4882a593Smuzhiyun 	return 0;
3114*4882a593Smuzhiyun }
3115*4882a593Smuzhiyun 
3116*4882a593Smuzhiyun int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3117*4882a593Smuzhiyun {
3118*4882a593Smuzhiyun 	int err;
3119*4882a593Smuzhiyun 	struct c4iw_qp_attributes attrs;
3120*4882a593Smuzhiyun 	enum c4iw_qp_attr_mask mask;
3121*4882a593Smuzhiyun 	struct c4iw_ep *ep = to_ep(cm_id);
3122*4882a593Smuzhiyun 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
3123*4882a593Smuzhiyun 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
3124*4882a593Smuzhiyun 	int abort = 0;
3125*4882a593Smuzhiyun 
3126*4882a593Smuzhiyun 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
3127*4882a593Smuzhiyun 
3128*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
3129*4882a593Smuzhiyun 	if (ep->com.state != MPA_REQ_RCVD) {
3130*4882a593Smuzhiyun 		err = -ECONNRESET;
3131*4882a593Smuzhiyun 		goto err_out;
3132*4882a593Smuzhiyun 	}
3133*4882a593Smuzhiyun 
3134*4882a593Smuzhiyun 	if (!qp) {
3135*4882a593Smuzhiyun 		err = -EINVAL;
3136*4882a593Smuzhiyun 		goto err_out;
3137*4882a593Smuzhiyun 	}
3138*4882a593Smuzhiyun 
3139*4882a593Smuzhiyun 	set_bit(ULP_ACCEPT, &ep->com.history);
3140*4882a593Smuzhiyun 	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
3141*4882a593Smuzhiyun 	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
3142*4882a593Smuzhiyun 		err = -EINVAL;
3143*4882a593Smuzhiyun 		goto err_abort;
3144*4882a593Smuzhiyun 	}
3145*4882a593Smuzhiyun 
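	/*
	 * For MPA v2 enhanced RDMA connections, reconcile the local
	 * ORD/IRD with the values the peer advertised in its MPA
	 * request.  With relaxed negotiation the local values are
	 * adjusted to match; otherwise the accept fails and the
	 * connection is torn down.
	 */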
3146*4882a593Smuzhiyun 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
3147*4882a593Smuzhiyun 		if (conn_param->ord > ep->ird) {
3148*4882a593Smuzhiyun 			if (RELAXED_IRD_NEGOTIATION) {
3149*4882a593Smuzhiyun 				conn_param->ord = ep->ird;
3150*4882a593Smuzhiyun 			} else {
3151*4882a593Smuzhiyun 				ep->ird = conn_param->ird;
3152*4882a593Smuzhiyun 				ep->ord = conn_param->ord;
3153*4882a593Smuzhiyun 				send_mpa_reject(ep, conn_param->private_data,
3154*4882a593Smuzhiyun 						conn_param->private_data_len);
3155*4882a593Smuzhiyun 				err = -ENOMEM;
3156*4882a593Smuzhiyun 				goto err_abort;
3157*4882a593Smuzhiyun 			}
3158*4882a593Smuzhiyun 		}
3159*4882a593Smuzhiyun 		if (conn_param->ird < ep->ord) {
3160*4882a593Smuzhiyun 			if (RELAXED_IRD_NEGOTIATION &&
3161*4882a593Smuzhiyun 			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
3162*4882a593Smuzhiyun 				conn_param->ird = ep->ord;
3163*4882a593Smuzhiyun 			} else {
3164*4882a593Smuzhiyun 				err = -ENOMEM;
3165*4882a593Smuzhiyun 				goto err_abort;
3166*4882a593Smuzhiyun 			}
3167*4882a593Smuzhiyun 		}
3168*4882a593Smuzhiyun 	}
3169*4882a593Smuzhiyun 	ep->ird = conn_param->ird;
3170*4882a593Smuzhiyun 	ep->ord = conn_param->ord;
3171*4882a593Smuzhiyun 
3172*4882a593Smuzhiyun 	if (ep->mpa_attr.version == 1) {
3173*4882a593Smuzhiyun 		if (peer2peer && ep->ird == 0)
3174*4882a593Smuzhiyun 			ep->ird = 1;
3175*4882a593Smuzhiyun 	} else {
3176*4882a593Smuzhiyun 		if (peer2peer &&
3177*4882a593Smuzhiyun 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
3178*4882a593Smuzhiyun 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
3179*4882a593Smuzhiyun 			ep->ird = 1;
3180*4882a593Smuzhiyun 	}
3181*4882a593Smuzhiyun 
3182*4882a593Smuzhiyun 	pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
3183*4882a593Smuzhiyun 
3184*4882a593Smuzhiyun 	ep->com.cm_id = cm_id;
3185*4882a593Smuzhiyun 	ref_cm_id(&ep->com);
3186*4882a593Smuzhiyun 	ep->com.qp = qp;
3187*4882a593Smuzhiyun 	ref_qp(ep);
3188*4882a593Smuzhiyun 
3189*4882a593Smuzhiyun 	/* bind QP to EP and move to RTS */
3190*4882a593Smuzhiyun 	attrs.mpa_attr = ep->mpa_attr;
3191*4882a593Smuzhiyun 	attrs.max_ird = ep->ird;
3192*4882a593Smuzhiyun 	attrs.max_ord = ep->ord;
3193*4882a593Smuzhiyun 	attrs.llp_stream_handle = ep;
3194*4882a593Smuzhiyun 	attrs.next_state = C4IW_QP_STATE_RTS;
3195*4882a593Smuzhiyun 
3196*4882a593Smuzhiyun 	/* bind QP and TID with INIT_WR */
3197*4882a593Smuzhiyun 	mask = C4IW_QP_ATTR_NEXT_STATE |
3198*4882a593Smuzhiyun 			     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
3199*4882a593Smuzhiyun 			     C4IW_QP_ATTR_MPA_ATTR |
3200*4882a593Smuzhiyun 			     C4IW_QP_ATTR_MAX_IRD |
3201*4882a593Smuzhiyun 			     C4IW_QP_ATTR_MAX_ORD;
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun 	err = c4iw_modify_qp(ep->com.qp->rhp,
3204*4882a593Smuzhiyun 			     ep->com.qp, mask, &attrs, 1);
3205*4882a593Smuzhiyun 	if (err)
3206*4882a593Smuzhiyun 		goto err_deref_cm_id;
3207*4882a593Smuzhiyun 
3208*4882a593Smuzhiyun 	set_bit(STOP_MPA_TIMER, &ep->com.flags);
3209*4882a593Smuzhiyun 	err = send_mpa_reply(ep, conn_param->private_data,
3210*4882a593Smuzhiyun 			     conn_param->private_data_len);
3211*4882a593Smuzhiyun 	if (err)
3212*4882a593Smuzhiyun 		goto err_deref_cm_id;
3213*4882a593Smuzhiyun 
3214*4882a593Smuzhiyun 	__state_set(&ep->com, FPDU_MODE);
3215*4882a593Smuzhiyun 	established_upcall(ep);
3216*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
3217*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3218*4882a593Smuzhiyun 	return 0;
3219*4882a593Smuzhiyun err_deref_cm_id:
3220*4882a593Smuzhiyun 	deref_cm_id(&ep->com);
3221*4882a593Smuzhiyun err_abort:
3222*4882a593Smuzhiyun 	abort = 1;
3223*4882a593Smuzhiyun err_out:
3224*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
3225*4882a593Smuzhiyun 	if (abort)
3226*4882a593Smuzhiyun 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3227*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3228*4882a593Smuzhiyun 	return err;
3229*4882a593Smuzhiyun }
3230*4882a593Smuzhiyun 
3231*4882a593Smuzhiyun static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3232*4882a593Smuzhiyun {
3233*4882a593Smuzhiyun 	struct in_device *ind;
3234*4882a593Smuzhiyun 	int found = 0;
3235*4882a593Smuzhiyun 	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3236*4882a593Smuzhiyun 	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
3237*4882a593Smuzhiyun 	const struct in_ifaddr *ifa;
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun 	ind = in_dev_get(dev->rdev.lldi.ports[0]);
3240*4882a593Smuzhiyun 	if (!ind)
3241*4882a593Smuzhiyun 		return -EADDRNOTAVAIL;
3242*4882a593Smuzhiyun 	rcu_read_lock();
3243*4882a593Smuzhiyun 	in_dev_for_each_ifa_rcu(ifa, ind) {
3244*4882a593Smuzhiyun 		if (ifa->ifa_flags & IFA_F_SECONDARY)
3245*4882a593Smuzhiyun 			continue;
3246*4882a593Smuzhiyun 		laddr->sin_addr.s_addr = ifa->ifa_address;
3247*4882a593Smuzhiyun 		raddr->sin_addr.s_addr = ifa->ifa_address;
3248*4882a593Smuzhiyun 		found = 1;
3249*4882a593Smuzhiyun 		break;
3250*4882a593Smuzhiyun 	}
3251*4882a593Smuzhiyun 	rcu_read_unlock();
3252*4882a593Smuzhiyun 
3253*4882a593Smuzhiyun 	in_dev_put(ind);
3254*4882a593Smuzhiyun 	return found ? 0 : -EADDRNOTAVAIL;
3255*4882a593Smuzhiyun }
3256*4882a593Smuzhiyun 
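/*
 * Return the first link-local address on @dev whose flags don't match
 * @banned_flags.
 */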
3257*4882a593Smuzhiyun static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
3258*4882a593Smuzhiyun 		      unsigned char banned_flags)
3259*4882a593Smuzhiyun {
3260*4882a593Smuzhiyun 	struct inet6_dev *idev;
3261*4882a593Smuzhiyun 	int err = -EADDRNOTAVAIL;
3262*4882a593Smuzhiyun 
3263*4882a593Smuzhiyun 	rcu_read_lock();
3264*4882a593Smuzhiyun 	idev = __in6_dev_get(dev);
3265*4882a593Smuzhiyun 	if (idev != NULL) {
3266*4882a593Smuzhiyun 		struct inet6_ifaddr *ifp;
3267*4882a593Smuzhiyun 
3268*4882a593Smuzhiyun 		read_lock_bh(&idev->lock);
3269*4882a593Smuzhiyun 		list_for_each_entry(ifp, &idev->addr_list, if_list) {
3270*4882a593Smuzhiyun 			if (ifp->scope == IFA_LINK &&
3271*4882a593Smuzhiyun 			    !(ifp->flags & banned_flags)) {
3272*4882a593Smuzhiyun 				memcpy(addr, &ifp->addr, 16);
3273*4882a593Smuzhiyun 				err = 0;
3274*4882a593Smuzhiyun 				break;
3275*4882a593Smuzhiyun 			}
3276*4882a593Smuzhiyun 		}
3277*4882a593Smuzhiyun 		read_unlock_bh(&idev->lock);
3278*4882a593Smuzhiyun 	}
3279*4882a593Smuzhiyun 	rcu_read_unlock();
3280*4882a593Smuzhiyun 	return err;
3281*4882a593Smuzhiyun }
3282*4882a593Smuzhiyun 
3283*4882a593Smuzhiyun static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3284*4882a593Smuzhiyun {
3285*4882a593Smuzhiyun 	struct in6_addr addr;
3286*4882a593Smuzhiyun 	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3287*4882a593Smuzhiyun 	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
3288*4882a593Smuzhiyun 
3289*4882a593Smuzhiyun 	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
3290*4882a593Smuzhiyun 		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
3291*4882a593Smuzhiyun 		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
3292*4882a593Smuzhiyun 		return 0;
3293*4882a593Smuzhiyun 	}
3294*4882a593Smuzhiyun 	return -EADDRNOTAVAIL;
3295*4882a593Smuzhiyun }
3296*4882a593Smuzhiyun 
3297*4882a593Smuzhiyun int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3298*4882a593Smuzhiyun {
3299*4882a593Smuzhiyun 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3300*4882a593Smuzhiyun 	struct c4iw_ep *ep;
3301*4882a593Smuzhiyun 	int err = 0;
3302*4882a593Smuzhiyun 	struct sockaddr_in *laddr;
3303*4882a593Smuzhiyun 	struct sockaddr_in *raddr;
3304*4882a593Smuzhiyun 	struct sockaddr_in6 *laddr6;
3305*4882a593Smuzhiyun 	struct sockaddr_in6 *raddr6;
3306*4882a593Smuzhiyun 	__u8 *ra;
3307*4882a593Smuzhiyun 	int iptype;
3308*4882a593Smuzhiyun 
3309*4882a593Smuzhiyun 	if ((conn_param->ord > cur_max_read_depth(dev)) ||
3310*4882a593Smuzhiyun 	    (conn_param->ird > cur_max_read_depth(dev))) {
3311*4882a593Smuzhiyun 		err = -EINVAL;
3312*4882a593Smuzhiyun 		goto out;
3313*4882a593Smuzhiyun 	}
3314*4882a593Smuzhiyun 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3315*4882a593Smuzhiyun 	if (!ep) {
3316*4882a593Smuzhiyun 		pr_err("%s - cannot alloc ep\n", __func__);
3317*4882a593Smuzhiyun 		err = -ENOMEM;
3318*4882a593Smuzhiyun 		goto out;
3319*4882a593Smuzhiyun 	}
3320*4882a593Smuzhiyun 
3321*4882a593Smuzhiyun 	skb_queue_head_init(&ep->com.ep_skb_list);
3322*4882a593Smuzhiyun 	if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
3323*4882a593Smuzhiyun 		err = -ENOMEM;
3324*4882a593Smuzhiyun 		goto fail1;
3325*4882a593Smuzhiyun 	}
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun 	timer_setup(&ep->timer, ep_timeout, 0);
3328*4882a593Smuzhiyun 	ep->plen = conn_param->private_data_len;
3329*4882a593Smuzhiyun 	if (ep->plen)
3330*4882a593Smuzhiyun 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
3331*4882a593Smuzhiyun 		       conn_param->private_data, ep->plen);
3332*4882a593Smuzhiyun 	ep->ird = conn_param->ird;
3333*4882a593Smuzhiyun 	ep->ord = conn_param->ord;
3334*4882a593Smuzhiyun 
3335*4882a593Smuzhiyun 	if (peer2peer && ep->ord == 0)
3336*4882a593Smuzhiyun 		ep->ord = 1;
3337*4882a593Smuzhiyun 
3338*4882a593Smuzhiyun 	ep->com.cm_id = cm_id;
3339*4882a593Smuzhiyun 	ref_cm_id(&ep->com);
3340*4882a593Smuzhiyun 	cm_id->provider_data = ep;
3341*4882a593Smuzhiyun 	ep->com.dev = dev;
3342*4882a593Smuzhiyun 	ep->com.qp = get_qhp(dev, conn_param->qpn);
3343*4882a593Smuzhiyun 	if (!ep->com.qp) {
3344*4882a593Smuzhiyun 		pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
3345*4882a593Smuzhiyun 		err = -EINVAL;
3346*4882a593Smuzhiyun 		goto fail2;
3347*4882a593Smuzhiyun 	}
3348*4882a593Smuzhiyun 	ref_qp(ep);
3349*4882a593Smuzhiyun 	pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
3350*4882a593Smuzhiyun 		 ep->com.qp, cm_id);
3351*4882a593Smuzhiyun 
3352*4882a593Smuzhiyun 	/*
3353*4882a593Smuzhiyun 	 * Allocate an active TID to initiate a TCP connection.
3354*4882a593Smuzhiyun 	 */
3355*4882a593Smuzhiyun 	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
3356*4882a593Smuzhiyun 	if (ep->atid == -1) {
3357*4882a593Smuzhiyun 		pr_err("%s - cannot alloc atid\n", __func__);
3358*4882a593Smuzhiyun 		err = -ENOMEM;
3359*4882a593Smuzhiyun 		goto fail2;
3360*4882a593Smuzhiyun 	}
3361*4882a593Smuzhiyun 	err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL);
3362*4882a593Smuzhiyun 	if (err)
3363*4882a593Smuzhiyun 		goto fail5;
3364*4882a593Smuzhiyun 
3365*4882a593Smuzhiyun 	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3366*4882a593Smuzhiyun 	       sizeof(ep->com.local_addr));
3367*4882a593Smuzhiyun 	memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
3368*4882a593Smuzhiyun 	       sizeof(ep->com.remote_addr));
3369*4882a593Smuzhiyun 
3370*4882a593Smuzhiyun 	laddr = (struct sockaddr_in *)&ep->com.local_addr;
3371*4882a593Smuzhiyun 	raddr = (struct sockaddr_in *)&ep->com.remote_addr;
3372*4882a593Smuzhiyun 	laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3373*4882a593Smuzhiyun 	raddr6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
3374*4882a593Smuzhiyun 
3375*4882a593Smuzhiyun 	if (cm_id->m_remote_addr.ss_family == AF_INET) {
3376*4882a593Smuzhiyun 		iptype = 4;
3377*4882a593Smuzhiyun 		ra = (__u8 *)&raddr->sin_addr;
3378*4882a593Smuzhiyun 
3379*4882a593Smuzhiyun 		/*
3380*4882a593Smuzhiyun 		 * Handle loopback requests to INADDR_ANY.
3381*4882a593Smuzhiyun 		 */
3382*4882a593Smuzhiyun 		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
3383*4882a593Smuzhiyun 			err = pick_local_ipaddrs(dev, cm_id);
3384*4882a593Smuzhiyun 			if (err)
3385*4882a593Smuzhiyun 				goto fail3;
3386*4882a593Smuzhiyun 		}
3387*4882a593Smuzhiyun 
3388*4882a593Smuzhiyun 		/* find a route */
3389*4882a593Smuzhiyun 		pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
3390*4882a593Smuzhiyun 			 &laddr->sin_addr, ntohs(laddr->sin_port),
3391*4882a593Smuzhiyun 			 ra, ntohs(raddr->sin_port));
3392*4882a593Smuzhiyun 		ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3393*4882a593Smuzhiyun 					  laddr->sin_addr.s_addr,
3394*4882a593Smuzhiyun 					  raddr->sin_addr.s_addr,
3395*4882a593Smuzhiyun 					  laddr->sin_port,
3396*4882a593Smuzhiyun 					  raddr->sin_port, cm_id->tos);
3397*4882a593Smuzhiyun 	} else {
3398*4882a593Smuzhiyun 		iptype = 6;
3399*4882a593Smuzhiyun 		ra = (__u8 *)&raddr6->sin6_addr;
3400*4882a593Smuzhiyun 
3401*4882a593Smuzhiyun 		/*
3402*4882a593Smuzhiyun 		 * Handle loopback requests to the IPv6 unspecified address (::).
3403*4882a593Smuzhiyun 		 */
3404*4882a593Smuzhiyun 		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
3405*4882a593Smuzhiyun 			err = pick_local_ip6addrs(dev, cm_id);
3406*4882a593Smuzhiyun 			if (err)
3407*4882a593Smuzhiyun 				goto fail3;
3408*4882a593Smuzhiyun 		}
3409*4882a593Smuzhiyun 
3410*4882a593Smuzhiyun 		/* find a route */
3411*4882a593Smuzhiyun 		pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
3412*4882a593Smuzhiyun 			 laddr6->sin6_addr.s6_addr,
3413*4882a593Smuzhiyun 			 ntohs(laddr6->sin6_port),
3414*4882a593Smuzhiyun 			 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
3415*4882a593Smuzhiyun 		ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
3416*4882a593Smuzhiyun 					   laddr6->sin6_addr.s6_addr,
3417*4882a593Smuzhiyun 					   raddr6->sin6_addr.s6_addr,
3418*4882a593Smuzhiyun 					   laddr6->sin6_port,
3419*4882a593Smuzhiyun 					   raddr6->sin6_port, cm_id->tos,
3420*4882a593Smuzhiyun 					   raddr6->sin6_scope_id);
3421*4882a593Smuzhiyun 	}
3422*4882a593Smuzhiyun 	if (!ep->dst) {
3423*4882a593Smuzhiyun 		pr_err("%s - cannot find route\n", __func__);
3424*4882a593Smuzhiyun 		err = -EHOSTUNREACH;
3425*4882a593Smuzhiyun 		goto fail3;
3426*4882a593Smuzhiyun 	}
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun 	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
3429*4882a593Smuzhiyun 			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
3430*4882a593Smuzhiyun 	if (err) {
3431*4882a593Smuzhiyun 		pr_err("%s - cannot alloc l2e\n", __func__);
3432*4882a593Smuzhiyun 		goto fail4;
3433*4882a593Smuzhiyun 	}
3434*4882a593Smuzhiyun 
3435*4882a593Smuzhiyun 	pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
3436*4882a593Smuzhiyun 		 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
3437*4882a593Smuzhiyun 		 ep->l2t->idx);
3438*4882a593Smuzhiyun 
3439*4882a593Smuzhiyun 	state_set(&ep->com, CONNECTING);
3440*4882a593Smuzhiyun 	ep->tos = cm_id->tos;
3441*4882a593Smuzhiyun 
3442*4882a593Smuzhiyun 	/* send connect request to rnic */
3443*4882a593Smuzhiyun 	err = send_connect(ep);
3444*4882a593Smuzhiyun 	if (!err)
3445*4882a593Smuzhiyun 		goto out;
3446*4882a593Smuzhiyun 
3447*4882a593Smuzhiyun 	cxgb4_l2t_release(ep->l2t);
3448*4882a593Smuzhiyun fail4:
3449*4882a593Smuzhiyun 	dst_release(ep->dst);
3450*4882a593Smuzhiyun fail3:
3451*4882a593Smuzhiyun 	xa_erase_irq(&ep->com.dev->atids, ep->atid);
3452*4882a593Smuzhiyun fail5:
3453*4882a593Smuzhiyun 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
3454*4882a593Smuzhiyun fail2:
3455*4882a593Smuzhiyun 	skb_queue_purge(&ep->com.ep_skb_list);
3456*4882a593Smuzhiyun 	deref_cm_id(&ep->com);
3457*4882a593Smuzhiyun fail1:
3458*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3459*4882a593Smuzhiyun out:
3460*4882a593Smuzhiyun 	return err;
3461*4882a593Smuzhiyun }
3462*4882a593Smuzhiyun 
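/*
 * Editor's note: a minimal, hedged illustration (not driver code) of the
 * staged goto-unwind idiom c4iw_connect() uses above: each acquisition has
 * a matching cleanup label, and a failure at step N jumps to the label that
 * releases steps N-1..1 in reverse order. All names below are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

static int connect_sketch(void)
{
	int err = 0;
	void *ep = NULL, *tid = NULL, *route = NULL;

	ep = malloc(64);		/* like alloc_ep() */
	if (!ep) {
		err = -1;
		goto out;
	}
	tid = malloc(8);		/* like cxgb4_alloc_atid() */
	if (!tid) {
		err = -2;
		goto fail_ep;
	}
	route = malloc(32);		/* like cxgb_find_route() */
	if (!route) {
		err = -3;
		goto fail_tid;
	}

	/* success: the real driver keeps these live for the connection;
	 * here we just release them and skip the unwind ladder.
	 */
	printf("connected\n");
	free(route);
	free(tid);
	free(ep);
	return 0;

fail_tid:
	free(tid);
fail_ep:
	free(ep);
out:
	return err;
}

int main(void)
{
	return connect_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}
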
3463*4882a593Smuzhiyun static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3464*4882a593Smuzhiyun {
3465*4882a593Smuzhiyun 	int err;
3466*4882a593Smuzhiyun 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
3467*4882a593Smuzhiyun 				    &ep->com.local_addr;
3468*4882a593Smuzhiyun 
3469*4882a593Smuzhiyun 	if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
3470*4882a593Smuzhiyun 		err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
3471*4882a593Smuzhiyun 				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3472*4882a593Smuzhiyun 		if (err)
3473*4882a593Smuzhiyun 			return err;
3474*4882a593Smuzhiyun 	}
3475*4882a593Smuzhiyun 	c4iw_init_wr_wait(ep->com.wr_waitp);
3476*4882a593Smuzhiyun 	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3477*4882a593Smuzhiyun 				   ep->stid, &sin6->sin6_addr,
3478*4882a593Smuzhiyun 				   sin6->sin6_port,
3479*4882a593Smuzhiyun 				   ep->com.dev->rdev.lldi.rxq_ids[0]);
3480*4882a593Smuzhiyun 	if (!err)
3481*4882a593Smuzhiyun 		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3482*4882a593Smuzhiyun 					  ep->com.wr_waitp,
3483*4882a593Smuzhiyun 					  0, 0, __func__);
3484*4882a593Smuzhiyun 	else if (err > 0)
3485*4882a593Smuzhiyun 		err = net_xmit_errno(err);
3486*4882a593Smuzhiyun 	if (err) {
3487*4882a593Smuzhiyun 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3488*4882a593Smuzhiyun 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3489*4882a593Smuzhiyun 		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
3490*4882a593Smuzhiyun 		       err, ep->stid,
3491*4882a593Smuzhiyun 		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
3492*4882a593Smuzhiyun 	}
3493*4882a593Smuzhiyun 	return err;
3494*4882a593Smuzhiyun }
3495*4882a593Smuzhiyun 
3496*4882a593Smuzhiyun static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3497*4882a593Smuzhiyun {
3498*4882a593Smuzhiyun 	int err;
3499*4882a593Smuzhiyun 	struct sockaddr_in *sin = (struct sockaddr_in *)
3500*4882a593Smuzhiyun 				  &ep->com.local_addr;
3501*4882a593Smuzhiyun 
3502*4882a593Smuzhiyun 	if (dev->rdev.lldi.enable_fw_ofld_conn) {
3503*4882a593Smuzhiyun 		do {
3504*4882a593Smuzhiyun 			err = cxgb4_create_server_filter(
3505*4882a593Smuzhiyun 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
3506*4882a593Smuzhiyun 				sin->sin_addr.s_addr, sin->sin_port, 0,
3507*4882a593Smuzhiyun 				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3508*4882a593Smuzhiyun 			if (err == -EBUSY) {
3509*4882a593Smuzhiyun 				if (c4iw_fatal_error(&ep->com.dev->rdev)) {
3510*4882a593Smuzhiyun 					err = -EIO;
3511*4882a593Smuzhiyun 					break;
3512*4882a593Smuzhiyun 				}
3513*4882a593Smuzhiyun 				set_current_state(TASK_UNINTERRUPTIBLE);
3514*4882a593Smuzhiyun 				schedule_timeout(usecs_to_jiffies(100));
3515*4882a593Smuzhiyun 			}
3516*4882a593Smuzhiyun 		} while (err == -EBUSY);
3517*4882a593Smuzhiyun 	} else {
3518*4882a593Smuzhiyun 		c4iw_init_wr_wait(ep->com.wr_waitp);
3519*4882a593Smuzhiyun 		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3520*4882a593Smuzhiyun 				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3521*4882a593Smuzhiyun 				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3522*4882a593Smuzhiyun 		if (!err)
3523*4882a593Smuzhiyun 			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3524*4882a593Smuzhiyun 						  ep->com.wr_waitp,
3525*4882a593Smuzhiyun 						  0, 0, __func__);
3526*4882a593Smuzhiyun 		else if (err > 0)
3527*4882a593Smuzhiyun 			err = net_xmit_errno(err);
3528*4882a593Smuzhiyun 	}
3529*4882a593Smuzhiyun 	if (err)
3530*4882a593Smuzhiyun 		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n",
3531*4882a593Smuzhiyun 		       err, ep->stid,
3532*4882a593Smuzhiyun 		       &sin->sin_addr, ntohs(sin->sin_port));
3533*4882a593Smuzhiyun 	return err;
3534*4882a593Smuzhiyun }
3535*4882a593Smuzhiyun 
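/*
 * Editor's note: a hedged, standalone sketch (not driver code) of the
 * retry-on--EBUSY loop create_server4() uses above: keep retrying a
 * transiently busy operation with a short pause, but bail out permanently
 * if the device has hit a fatal error. try_create() and fatal_error() are
 * hypothetical stand-ins for the cxgb4 calls.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int attempts;

static int try_create(void)
{
	/* pretend the resource is busy for the first two attempts */
	return (++attempts < 3) ? -EBUSY : 0;
}

static bool fatal_error(void)
{
	return false;	/* the real check inspects adapter state */
}

int main(void)
{
	int err;

	do {
		err = try_create();
		if (err == -EBUSY) {
			if (fatal_error()) {
				err = -EIO;
				break;
			}
			/* ~100us pause, like schedule_timeout() above */
			nanosleep(&(struct timespec){ .tv_nsec = 100000 },
				  NULL);
		}
	} while (err == -EBUSY);

	printf("finished after %d attempt(s), err=%d\n", attempts, err);
	return 0;
}
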
3536*4882a593Smuzhiyun int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3537*4882a593Smuzhiyun {
3538*4882a593Smuzhiyun 	int err = 0;
3539*4882a593Smuzhiyun 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3540*4882a593Smuzhiyun 	struct c4iw_listen_ep *ep;
3541*4882a593Smuzhiyun 
3542*4882a593Smuzhiyun 	might_sleep();
3543*4882a593Smuzhiyun 
3544*4882a593Smuzhiyun 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3545*4882a593Smuzhiyun 	if (!ep) {
3546*4882a593Smuzhiyun 		pr_err("%s - cannot alloc ep\n", __func__);
3547*4882a593Smuzhiyun 		err = -ENOMEM;
3548*4882a593Smuzhiyun 		goto fail1;
3549*4882a593Smuzhiyun 	}
3550*4882a593Smuzhiyun 	skb_queue_head_init(&ep->com.ep_skb_list);
3551*4882a593Smuzhiyun 	pr_debug("ep %p\n", ep);
3552*4882a593Smuzhiyun 	ep->com.cm_id = cm_id;
3553*4882a593Smuzhiyun 	ref_cm_id(&ep->com);
3554*4882a593Smuzhiyun 	ep->com.dev = dev;
3555*4882a593Smuzhiyun 	ep->backlog = backlog;
3556*4882a593Smuzhiyun 	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3557*4882a593Smuzhiyun 	       sizeof(ep->com.local_addr));
3558*4882a593Smuzhiyun 
3559*4882a593Smuzhiyun 	/*
3560*4882a593Smuzhiyun 	 * Allocate a server TID.
3561*4882a593Smuzhiyun 	 */
3562*4882a593Smuzhiyun 	if (dev->rdev.lldi.enable_fw_ofld_conn &&
3563*4882a593Smuzhiyun 	    ep->com.local_addr.ss_family == AF_INET)
3564*4882a593Smuzhiyun 		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
3565*4882a593Smuzhiyun 					     cm_id->m_local_addr.ss_family, ep);
3566*4882a593Smuzhiyun 	else
3567*4882a593Smuzhiyun 		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
3568*4882a593Smuzhiyun 					    cm_id->m_local_addr.ss_family, ep);
3569*4882a593Smuzhiyun 
3570*4882a593Smuzhiyun 	if (ep->stid == -1) {
3571*4882a593Smuzhiyun 		pr_err("%s - cannot alloc stid\n", __func__);
3572*4882a593Smuzhiyun 		err = -ENOMEM;
3573*4882a593Smuzhiyun 		goto fail2;
3574*4882a593Smuzhiyun 	}
3575*4882a593Smuzhiyun 	err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL);
3576*4882a593Smuzhiyun 	if (err)
3577*4882a593Smuzhiyun 		goto fail3;
3578*4882a593Smuzhiyun 
3579*4882a593Smuzhiyun 	state_set(&ep->com, LISTEN);
3580*4882a593Smuzhiyun 	if (ep->com.local_addr.ss_family == AF_INET)
3581*4882a593Smuzhiyun 		err = create_server4(dev, ep);
3582*4882a593Smuzhiyun 	else
3583*4882a593Smuzhiyun 		err = create_server6(dev, ep);
3584*4882a593Smuzhiyun 	if (!err) {
3585*4882a593Smuzhiyun 		cm_id->provider_data = ep;
3586*4882a593Smuzhiyun 		goto out;
3587*4882a593Smuzhiyun 	}
3588*4882a593Smuzhiyun 	xa_erase_irq(&ep->com.dev->stids, ep->stid);
3589*4882a593Smuzhiyun fail3:
3590*4882a593Smuzhiyun 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3591*4882a593Smuzhiyun 			ep->com.local_addr.ss_family);
3592*4882a593Smuzhiyun fail2:
3593*4882a593Smuzhiyun 	deref_cm_id(&ep->com);
3594*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3595*4882a593Smuzhiyun fail1:
3596*4882a593Smuzhiyun out:
3597*4882a593Smuzhiyun 	return err;
3598*4882a593Smuzhiyun }
3599*4882a593Smuzhiyun 
3600*4882a593Smuzhiyun int c4iw_destroy_listen(struct iw_cm_id *cm_id)
3601*4882a593Smuzhiyun {
3602*4882a593Smuzhiyun 	int err;
3603*4882a593Smuzhiyun 	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3604*4882a593Smuzhiyun 
3605*4882a593Smuzhiyun 	pr_debug("ep %p\n", ep);
3606*4882a593Smuzhiyun 
3607*4882a593Smuzhiyun 	might_sleep();
3608*4882a593Smuzhiyun 	state_set(&ep->com, DEAD);
3609*4882a593Smuzhiyun 	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3610*4882a593Smuzhiyun 	    ep->com.local_addr.ss_family == AF_INET) {
3611*4882a593Smuzhiyun 		err = cxgb4_remove_server_filter(
3612*4882a593Smuzhiyun 			ep->com.dev->rdev.lldi.ports[0], ep->stid,
3613*4882a593Smuzhiyun 			ep->com.dev->rdev.lldi.rxq_ids[0], false);
3614*4882a593Smuzhiyun 	} else {
3615*4882a593Smuzhiyun 		struct sockaddr_in6 *sin6;
3616*4882a593Smuzhiyun 		c4iw_init_wr_wait(ep->com.wr_waitp);
3617*4882a593Smuzhiyun 		err = cxgb4_remove_server(
3618*4882a593Smuzhiyun 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
3619*4882a593Smuzhiyun 				ep->com.dev->rdev.lldi.rxq_ids[0],
3620*4882a593Smuzhiyun 				ep->com.local_addr.ss_family == AF_INET6);
3621*4882a593Smuzhiyun 		if (err)
3622*4882a593Smuzhiyun 			goto done;
3623*4882a593Smuzhiyun 		err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
3624*4882a593Smuzhiyun 					  0, 0, __func__);
3625*4882a593Smuzhiyun 		sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3626*4882a593Smuzhiyun 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3627*4882a593Smuzhiyun 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3628*4882a593Smuzhiyun 	}
3629*4882a593Smuzhiyun 	xa_erase_irq(&ep->com.dev->stids, ep->stid);
3630*4882a593Smuzhiyun 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3631*4882a593Smuzhiyun 			ep->com.local_addr.ss_family);
3632*4882a593Smuzhiyun done:
3633*4882a593Smuzhiyun 	deref_cm_id(&ep->com);
3634*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3635*4882a593Smuzhiyun 	return err;
3636*4882a593Smuzhiyun }
3637*4882a593Smuzhiyun 
3638*4882a593Smuzhiyun int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3639*4882a593Smuzhiyun {
3640*4882a593Smuzhiyun 	int ret = 0;
3641*4882a593Smuzhiyun 	int close = 0;
3642*4882a593Smuzhiyun 	int fatal = 0;
3643*4882a593Smuzhiyun 	struct c4iw_rdev *rdev;
3644*4882a593Smuzhiyun 
3645*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
3646*4882a593Smuzhiyun 
3647*4882a593Smuzhiyun 	pr_debug("ep %p state %s, abrupt %d\n", ep,
3648*4882a593Smuzhiyun 		 states[ep->com.state], abrupt);
3649*4882a593Smuzhiyun 
3650*4882a593Smuzhiyun 	/*
3651*4882a593Smuzhiyun 	 * Ref the ep here in case we have fatal errors causing the
3652*4882a593Smuzhiyun 	 * ep to be released and freed.
3653*4882a593Smuzhiyun 	 */
3654*4882a593Smuzhiyun 	c4iw_get_ep(&ep->com);
3655*4882a593Smuzhiyun 
3656*4882a593Smuzhiyun 	rdev = &ep->com.dev->rdev;
3657*4882a593Smuzhiyun 	if (c4iw_fatal_error(rdev)) {
3658*4882a593Smuzhiyun 		fatal = 1;
3659*4882a593Smuzhiyun 		close_complete_upcall(ep, -EIO);
3660*4882a593Smuzhiyun 		ep->com.state = DEAD;
3661*4882a593Smuzhiyun 	}
3662*4882a593Smuzhiyun 	switch (ep->com.state) {
3663*4882a593Smuzhiyun 	case MPA_REQ_WAIT:
3664*4882a593Smuzhiyun 	case MPA_REQ_SENT:
3665*4882a593Smuzhiyun 	case MPA_REQ_RCVD:
3666*4882a593Smuzhiyun 	case MPA_REP_SENT:
3667*4882a593Smuzhiyun 	case FPDU_MODE:
3668*4882a593Smuzhiyun 	case CONNECTING:
3669*4882a593Smuzhiyun 		close = 1;
3670*4882a593Smuzhiyun 		if (abrupt)
3671*4882a593Smuzhiyun 			ep->com.state = ABORTING;
3672*4882a593Smuzhiyun 		else {
3673*4882a593Smuzhiyun 			ep->com.state = CLOSING;
3674*4882a593Smuzhiyun 
3675*4882a593Smuzhiyun 			/*
3676*4882a593Smuzhiyun 			 * if we close before we see the fw4_ack() then we fix
3677*4882a593Smuzhiyun 			 * up the timer state since we're reusing it.
3678*4882a593Smuzhiyun 			 */
3679*4882a593Smuzhiyun 			if (ep->mpa_skb &&
3680*4882a593Smuzhiyun 			    test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
3681*4882a593Smuzhiyun 				clear_bit(STOP_MPA_TIMER, &ep->com.flags);
3682*4882a593Smuzhiyun 				stop_ep_timer(ep);
3683*4882a593Smuzhiyun 			}
3684*4882a593Smuzhiyun 			start_ep_timer(ep);
3685*4882a593Smuzhiyun 		}
3686*4882a593Smuzhiyun 		set_bit(CLOSE_SENT, &ep->com.flags);
3687*4882a593Smuzhiyun 		break;
3688*4882a593Smuzhiyun 	case CLOSING:
3689*4882a593Smuzhiyun 		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3690*4882a593Smuzhiyun 			close = 1;
3691*4882a593Smuzhiyun 			if (abrupt) {
3692*4882a593Smuzhiyun 				(void)stop_ep_timer(ep);
3693*4882a593Smuzhiyun 				ep->com.state = ABORTING;
3694*4882a593Smuzhiyun 			} else
3695*4882a593Smuzhiyun 				ep->com.state = MORIBUND;
3696*4882a593Smuzhiyun 		}
3697*4882a593Smuzhiyun 		break;
3698*4882a593Smuzhiyun 	case MORIBUND:
3699*4882a593Smuzhiyun 	case ABORTING:
3700*4882a593Smuzhiyun 	case DEAD:
3701*4882a593Smuzhiyun 		pr_debug("ignoring disconnect ep %p state %u\n",
3702*4882a593Smuzhiyun 			 ep, ep->com.state);
3703*4882a593Smuzhiyun 		break;
3704*4882a593Smuzhiyun 	default:
3705*4882a593Smuzhiyun 		WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
3706*4882a593Smuzhiyun 		break;
3707*4882a593Smuzhiyun 	}
3708*4882a593Smuzhiyun 
3709*4882a593Smuzhiyun 	if (close) {
3710*4882a593Smuzhiyun 		if (abrupt) {
3711*4882a593Smuzhiyun 			set_bit(EP_DISC_ABORT, &ep->com.history);
3712*4882a593Smuzhiyun 			ret = send_abort(ep);
3713*4882a593Smuzhiyun 		} else {
3714*4882a593Smuzhiyun 			set_bit(EP_DISC_CLOSE, &ep->com.history);
3715*4882a593Smuzhiyun 			ret = send_halfclose(ep);
3716*4882a593Smuzhiyun 		}
3717*4882a593Smuzhiyun 		if (ret) {
3718*4882a593Smuzhiyun 			set_bit(EP_DISC_FAIL, &ep->com.history);
3719*4882a593Smuzhiyun 			if (!abrupt) {
3720*4882a593Smuzhiyun 				stop_ep_timer(ep);
3721*4882a593Smuzhiyun 				close_complete_upcall(ep, -EIO);
3722*4882a593Smuzhiyun 			}
3723*4882a593Smuzhiyun 			if (ep->com.qp) {
3724*4882a593Smuzhiyun 				struct c4iw_qp_attributes attrs;
3725*4882a593Smuzhiyun 
3726*4882a593Smuzhiyun 				attrs.next_state = C4IW_QP_STATE_ERROR;
3727*4882a593Smuzhiyun 				ret = c4iw_modify_qp(ep->com.qp->rhp,
3728*4882a593Smuzhiyun 						     ep->com.qp,
3729*4882a593Smuzhiyun 						     C4IW_QP_ATTR_NEXT_STATE,
3730*4882a593Smuzhiyun 						     &attrs, 1);
3731*4882a593Smuzhiyun 				if (ret)
3732*4882a593Smuzhiyun 					pr_err("%s - qp <- error failed!\n",
3733*4882a593Smuzhiyun 					       __func__);
3734*4882a593Smuzhiyun 			}
3735*4882a593Smuzhiyun 			fatal = 1;
3736*4882a593Smuzhiyun 		}
3737*4882a593Smuzhiyun 	}
3738*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
3739*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3740*4882a593Smuzhiyun 	if (fatal)
3741*4882a593Smuzhiyun 		release_ep_resources(ep);
3742*4882a593Smuzhiyun 	return ret;
3743*4882a593Smuzhiyun }
3744*4882a593Smuzhiyun 
3745*4882a593Smuzhiyun static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3746*4882a593Smuzhiyun 			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3747*4882a593Smuzhiyun {
3748*4882a593Smuzhiyun 	struct c4iw_ep *ep;
3749*4882a593Smuzhiyun 	int atid = be32_to_cpu(req->tid);
3750*4882a593Smuzhiyun 
3751*4882a593Smuzhiyun 	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3752*4882a593Smuzhiyun 					   (__force u32) req->tid);
3753*4882a593Smuzhiyun 	if (!ep)
3754*4882a593Smuzhiyun 		return;
3755*4882a593Smuzhiyun 
3756*4882a593Smuzhiyun 	switch (req->retval) {
3757*4882a593Smuzhiyun 	case FW_ENOMEM:
3758*4882a593Smuzhiyun 		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3759*4882a593Smuzhiyun 		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3760*4882a593Smuzhiyun 			send_fw_act_open_req(ep, atid);
3761*4882a593Smuzhiyun 			return;
3762*4882a593Smuzhiyun 		}
3763*4882a593Smuzhiyun 		fallthrough;
3764*4882a593Smuzhiyun 	case FW_EADDRINUSE:
3765*4882a593Smuzhiyun 		set_bit(ACT_RETRY_INUSE, &ep->com.history);
3766*4882a593Smuzhiyun 		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3767*4882a593Smuzhiyun 			send_fw_act_open_req(ep, atid);
3768*4882a593Smuzhiyun 			return;
3769*4882a593Smuzhiyun 		}
3770*4882a593Smuzhiyun 		break;
3771*4882a593Smuzhiyun 	default:
3772*4882a593Smuzhiyun 		pr_info("%s unexpected ofld conn wr retval %d\n",
3773*4882a593Smuzhiyun 		       __func__, req->retval);
3774*4882a593Smuzhiyun 		break;
3775*4882a593Smuzhiyun 	}
3776*4882a593Smuzhiyun 	pr_err("active ofld_connect_wr failure %d atid %d\n",
3777*4882a593Smuzhiyun 	       req->retval, atid);
3778*4882a593Smuzhiyun 	mutex_lock(&dev->rdev.stats.lock);
3779*4882a593Smuzhiyun 	dev->rdev.stats.act_ofld_conn_fails++;
3780*4882a593Smuzhiyun 	mutex_unlock(&dev->rdev.stats.lock);
3781*4882a593Smuzhiyun 	connect_reply_upcall(ep, status2errno(req->retval));
3782*4882a593Smuzhiyun 	state_set(&ep->com, DEAD);
3783*4882a593Smuzhiyun 	if (ep->com.remote_addr.ss_family == AF_INET6) {
3784*4882a593Smuzhiyun 		struct sockaddr_in6 *sin6 =
3785*4882a593Smuzhiyun 			(struct sockaddr_in6 *)&ep->com.local_addr;
3786*4882a593Smuzhiyun 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3787*4882a593Smuzhiyun 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3788*4882a593Smuzhiyun 	}
3789*4882a593Smuzhiyun 	xa_erase_irq(&dev->atids, atid);
3790*4882a593Smuzhiyun 	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3791*4882a593Smuzhiyun 	dst_release(ep->dst);
3792*4882a593Smuzhiyun 	cxgb4_l2t_release(ep->l2t);
3793*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
3794*4882a593Smuzhiyun }
3795*4882a593Smuzhiyun 
3796*4882a593Smuzhiyun static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3797*4882a593Smuzhiyun 			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3798*4882a593Smuzhiyun {
3799*4882a593Smuzhiyun 	struct sk_buff *rpl_skb;
3800*4882a593Smuzhiyun 	struct cpl_pass_accept_req *cpl;
3801*4882a593Smuzhiyun 	int ret;
3802*4882a593Smuzhiyun 
3803*4882a593Smuzhiyun 	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
3804*4882a593Smuzhiyun 	if (req->retval) {
3805*4882a593Smuzhiyun 		pr_err("%s passive open failure %d\n", __func__, req->retval);
3806*4882a593Smuzhiyun 		mutex_lock(&dev->rdev.stats.lock);
3807*4882a593Smuzhiyun 		dev->rdev.stats.pas_ofld_conn_fails++;
3808*4882a593Smuzhiyun 		mutex_unlock(&dev->rdev.stats.lock);
3809*4882a593Smuzhiyun 		kfree_skb(rpl_skb);
3810*4882a593Smuzhiyun 	} else {
3811*4882a593Smuzhiyun 		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3812*4882a593Smuzhiyun 		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
3813*4882a593Smuzhiyun 					(__force u32) htonl(
3814*4882a593Smuzhiyun 					(__force u32) req->tid)));
3815*4882a593Smuzhiyun 		ret = pass_accept_req(dev, rpl_skb);
3816*4882a593Smuzhiyun 		if (!ret)
3817*4882a593Smuzhiyun 			kfree_skb(rpl_skb);
3818*4882a593Smuzhiyun 	}
3819*4882a593Smuzhiyun 	return;
3820*4882a593Smuzhiyun }
3821*4882a593Smuzhiyun 
3822*4882a593Smuzhiyun static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word)
3823*4882a593Smuzhiyun {
3824*4882a593Smuzhiyun 	u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]);
3825*4882a593Smuzhiyun 	u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]);
3826*4882a593Smuzhiyun 	u64 t;
3827*4882a593Smuzhiyun 	u32 shift = 32;
3828*4882a593Smuzhiyun 
3829*4882a593Smuzhiyun 	t = (thi << shift) | (tlo >> shift);
3830*4882a593Smuzhiyun 
3831*4882a593Smuzhiyun 	return t;
3832*4882a593Smuzhiyun }
3833*4882a593Smuzhiyun 
3834*4882a593Smuzhiyun static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift)
3835*4882a593Smuzhiyun {
3836*4882a593Smuzhiyun 	u32 v;
3837*4882a593Smuzhiyun 	u64 t = be64_to_cpu(tcb[(31 - word) / 2]);
3838*4882a593Smuzhiyun 
3839*4882a593Smuzhiyun 	if (word & 0x1)
3840*4882a593Smuzhiyun 		shift += 32;
3841*4882a593Smuzhiyun 	v = (t >> shift) & mask;
3842*4882a593Smuzhiyun 	return v;
3843*4882a593Smuzhiyun }
3844*4882a593Smuzhiyun 
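/*
 * Editor's note: a hedged, standalone demonstration (not driver code) of
 * the arithmetic in t4_tcb_get_field64()/t4_tcb_get_field32() above. The
 * TCB is treated as 16 big-endian 64-bit words already converted to CPU
 * order; all planted values, masks and shifts below are made up purely to
 * show how the indexing and shifting compose.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t get_field64(const uint64_t *tcb, uint16_t word)
{
	uint64_t tlo = tcb[(31 - word) / 2];
	uint64_t thi = tcb[(31 - word) / 2 - 1];

	return (thi << 32) | (tlo >> 32);
}

static uint32_t get_field32(const uint64_t *tcb, uint16_t word,
			    uint32_t mask, uint32_t shift)
{
	uint64_t t = tcb[(31 - word) / 2];

	if (word & 0x1)		/* odd 32-bit words sit in the upper half */
		shift += 32;
	return (uint32_t)((t >> shift) & mask);
}

int main(void)
{
	uint64_t tcb[16] = { 0 };

	tcb[9]  = 0x0000000055667788ULL;
	tcb[10] = 0xaabbccdd11223344ULL;

	/* a 64-bit field anchored at word 10 straddles qwords 9 and 10 */
	printf("field64(10)          = 0x%016llx\n",
	       (unsigned long long)get_field64(tcb, 10)); /* 0x55667788aabbccdd */

	/* 32-bit subfields within qword 10 */
	printf("field32(10, ffff, 8) = 0x%x\n",
	       get_field32(tcb, 10, 0xffff, 8));	/* 0x2233 */
	printf("field32(11, ff, 0)   = 0x%x\n",
	       get_field32(tcb, 11, 0xff, 0));		/* 0xdd */
	return 0;
}
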
3845*4882a593Smuzhiyun static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
3846*4882a593Smuzhiyun {
3847*4882a593Smuzhiyun 	struct cpl_get_tcb_rpl *rpl = cplhdr(skb);
3848*4882a593Smuzhiyun 	__be64 *tcb = (__be64 *)(rpl + 1);
3849*4882a593Smuzhiyun 	unsigned int tid = GET_TID(rpl);
3850*4882a593Smuzhiyun 	struct c4iw_ep *ep;
3851*4882a593Smuzhiyun 	u64 t_flags_64;
3852*4882a593Smuzhiyun 	u32 rx_pdu_out;
3853*4882a593Smuzhiyun 
3854*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
3855*4882a593Smuzhiyun 	if (!ep)
3856*4882a593Smuzhiyun 		return 0;
3857*4882a593Smuzhiyun 	/* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to
3858*4882a593Smuzhiyun 	 * determine if there's a rx PDU feedback event pending.
3859*4882a593Smuzhiyun 	 *
3860*4882a593Smuzhiyun 	 * If that bit is set, it means we'll need to re-read the TCB's
3861*4882a593Smuzhiyun 	 * rq_start value. The final value is the one present in a TCB
3862*4882a593Smuzhiyun 	 * with the TF_RX_PDU_OUT bit cleared.
3863*4882a593Smuzhiyun 	 */
3864*4882a593Smuzhiyun 
3865*4882a593Smuzhiyun 	t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W);
3866*4882a593Smuzhiyun 	rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S;
3867*4882a593Smuzhiyun 
3868*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
3869*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com); /* from read_tcb() */
3870*4882a593Smuzhiyun 
3871*4882a593Smuzhiyun 	/* If TF_RX_PDU_OUT bit is set, re-read the TCB */
3872*4882a593Smuzhiyun 	if (rx_pdu_out) {
3873*4882a593Smuzhiyun 		if (++ep->rx_pdu_out_cnt >= 2) {
3874*4882a593Smuzhiyun 			WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n");
3875*4882a593Smuzhiyun 			goto cleanup;
3876*4882a593Smuzhiyun 		}
3877*4882a593Smuzhiyun 		read_tcb(ep);
3878*4882a593Smuzhiyun 		return 0;
3879*4882a593Smuzhiyun 	}
3880*4882a593Smuzhiyun 
3881*4882a593Smuzhiyun 	ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
3882*4882a593Smuzhiyun 					  TCB_RQ_START_S);
3883*4882a593Smuzhiyun cleanup:
3884*4882a593Smuzhiyun 	pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
3885*4882a593Smuzhiyun 
3886*4882a593Smuzhiyun 	if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
3887*4882a593Smuzhiyun 		finish_peer_abort(dev, ep);
3888*4882a593Smuzhiyun 	else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
3889*4882a593Smuzhiyun 		send_abort_req(ep);
3890*4882a593Smuzhiyun 	else
3891*4882a593Smuzhiyun 		WARN_ONCE(1, "unexpected state!");
3892*4882a593Smuzhiyun 
3893*4882a593Smuzhiyun 	return 0;
3894*4882a593Smuzhiyun }
3895*4882a593Smuzhiyun 
3896*4882a593Smuzhiyun static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3897*4882a593Smuzhiyun {
3898*4882a593Smuzhiyun 	struct cpl_fw6_msg *rpl = cplhdr(skb);
3899*4882a593Smuzhiyun 	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3900*4882a593Smuzhiyun 
3901*4882a593Smuzhiyun 	switch (rpl->type) {
3902*4882a593Smuzhiyun 	case FW6_TYPE_CQE:
3903*4882a593Smuzhiyun 		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3904*4882a593Smuzhiyun 		break;
3905*4882a593Smuzhiyun 	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3906*4882a593Smuzhiyun 		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3907*4882a593Smuzhiyun 		switch (req->t_state) {
3908*4882a593Smuzhiyun 		case TCP_SYN_SENT:
3909*4882a593Smuzhiyun 			active_ofld_conn_reply(dev, skb, req);
3910*4882a593Smuzhiyun 			break;
3911*4882a593Smuzhiyun 		case TCP_SYN_RECV:
3912*4882a593Smuzhiyun 			passive_ofld_conn_reply(dev, skb, req);
3913*4882a593Smuzhiyun 			break;
3914*4882a593Smuzhiyun 		default:
3915*4882a593Smuzhiyun 			pr_err("%s unexpected ofld conn wr state %d\n",
3916*4882a593Smuzhiyun 			       __func__, req->t_state);
3917*4882a593Smuzhiyun 			break;
3918*4882a593Smuzhiyun 		}
3919*4882a593Smuzhiyun 		break;
3920*4882a593Smuzhiyun 	}
3921*4882a593Smuzhiyun 	return 0;
3922*4882a593Smuzhiyun }
3923*4882a593Smuzhiyun 
3924*4882a593Smuzhiyun static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
3925*4882a593Smuzhiyun {
3926*4882a593Smuzhiyun 	__be32 l2info;
3927*4882a593Smuzhiyun 	__be16 hdr_len, vlantag, len;
3928*4882a593Smuzhiyun 	u16 eth_hdr_len;
3929*4882a593Smuzhiyun 	int tcp_hdr_len, ip_hdr_len;
3930*4882a593Smuzhiyun 	u8 intf;
3931*4882a593Smuzhiyun 	struct cpl_rx_pkt *cpl = cplhdr(skb);
3932*4882a593Smuzhiyun 	struct cpl_pass_accept_req *req;
3933*4882a593Smuzhiyun 	struct tcp_options_received tmp_opt;
3934*4882a593Smuzhiyun 	struct c4iw_dev *dev;
3935*4882a593Smuzhiyun 	enum chip_type type;
3936*4882a593Smuzhiyun 
3937*4882a593Smuzhiyun 	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3938*4882a593Smuzhiyun 	/* Store values from cpl_rx_pkt in temporary variables. */
3939*4882a593Smuzhiyun 	vlantag = cpl->vlan;
3940*4882a593Smuzhiyun 	len = cpl->len;
3941*4882a593Smuzhiyun 	l2info  = cpl->l2info;
3942*4882a593Smuzhiyun 	hdr_len = cpl->hdr_len;
3943*4882a593Smuzhiyun 	intf = cpl->iff;
3944*4882a593Smuzhiyun 
3945*4882a593Smuzhiyun 	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3946*4882a593Smuzhiyun 
3947*4882a593Smuzhiyun 	/*
3948*4882a593Smuzhiyun 	 * We need to parse the TCP options from the SYN packet
3949*4882a593Smuzhiyun 	 * to generate the cpl_pass_accept_req.
3950*4882a593Smuzhiyun 	 */
3951*4882a593Smuzhiyun 	memset(&tmp_opt, 0, sizeof(tmp_opt));
3952*4882a593Smuzhiyun 	tcp_clear_options(&tmp_opt);
3953*4882a593Smuzhiyun 	tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
3954*4882a593Smuzhiyun 
3955*4882a593Smuzhiyun 	req = __skb_push(skb, sizeof(*req));
3956*4882a593Smuzhiyun 	memset(req, 0, sizeof(*req));
3957*4882a593Smuzhiyun 	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
3958*4882a593Smuzhiyun 			 SYN_MAC_IDX_V(RX_MACIDX_G(
3959*4882a593Smuzhiyun 			 be32_to_cpu(l2info))) |
3960*4882a593Smuzhiyun 			 SYN_XACT_MATCH_F);
3961*4882a593Smuzhiyun 	type = dev->rdev.lldi.adapter_type;
3962*4882a593Smuzhiyun 	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
3963*4882a593Smuzhiyun 	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
3964*4882a593Smuzhiyun 	req->hdr_len =
3965*4882a593Smuzhiyun 		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
3966*4882a593Smuzhiyun 	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
3967*4882a593Smuzhiyun 		eth_hdr_len = is_t4(type) ?
3968*4882a593Smuzhiyun 				RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
3969*4882a593Smuzhiyun 				RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
3970*4882a593Smuzhiyun 		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
3971*4882a593Smuzhiyun 					    IP_HDR_LEN_V(ip_hdr_len) |
3972*4882a593Smuzhiyun 					    ETH_HDR_LEN_V(eth_hdr_len));
3973*4882a593Smuzhiyun 	} else { /* T6 and later */
3974*4882a593Smuzhiyun 		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
3975*4882a593Smuzhiyun 		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
3976*4882a593Smuzhiyun 					    T6_IP_HDR_LEN_V(ip_hdr_len) |
3977*4882a593Smuzhiyun 					    T6_ETH_HDR_LEN_V(eth_hdr_len));
3978*4882a593Smuzhiyun 	}
3979*4882a593Smuzhiyun 	req->vlan = vlantag;
3980*4882a593Smuzhiyun 	req->len = len;
3981*4882a593Smuzhiyun 	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
3982*4882a593Smuzhiyun 				    PASS_OPEN_TOS_V(tos));
3983*4882a593Smuzhiyun 	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3984*4882a593Smuzhiyun 	if (tmp_opt.wscale_ok)
3985*4882a593Smuzhiyun 		req->tcpopt.wsf = tmp_opt.snd_wscale;
3986*4882a593Smuzhiyun 	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3987*4882a593Smuzhiyun 	if (tmp_opt.sack_ok)
3988*4882a593Smuzhiyun 		req->tcpopt.sack = 1;
3989*4882a593Smuzhiyun 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3990*4882a593Smuzhiyun 	return;
3991*4882a593Smuzhiyun }
3992*4882a593Smuzhiyun 
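/*
 * Editor's note: a hedged, standalone sketch (not driver code) of what
 * "parse the TCP options from the SYN" means in build_cpl_pass_accept_req()
 * above: walk the kind/length encoded option bytes after the fixed TCP
 * header and pick out MSS, window scale and SACK-permitted. The option
 * kind numbers are the standard IANA values; everything else is
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define TCPOPT_EOL	0
#define TCPOPT_NOP	1
#define TCPOPT_MSS	2
#define TCPOPT_WSCALE	3
#define TCPOPT_SACK_OK	4

static void parse_tcp_options(const uint8_t *opt, int len,
			      uint16_t *mss, uint8_t *wscale, int *sack_ok)
{
	while (len > 0) {
		uint8_t kind = opt[0], olen;

		if (kind == TCPOPT_EOL)
			break;
		if (kind == TCPOPT_NOP) {
			opt++; len--;
			continue;
		}
		if (len < 2 || (olen = opt[1]) < 2 || olen > len)
			break;		/* malformed option list */
		switch (kind) {
		case TCPOPT_MSS:
			if (olen == 4)
				*mss = (uint16_t)(opt[2] << 8 | opt[3]);
			break;
		case TCPOPT_WSCALE:
			if (olen == 3)
				*wscale = opt[2];
			break;
		case TCPOPT_SACK_OK:
			*sack_ok = 1;
			break;
		}
		opt += olen;
		len -= olen;
	}
}

int main(void)
{
	/* MSS=1460, NOP, WS=7, SACK-permitted -- a typical SYN layout */
	const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 3, 3, 7, 4, 2 };
	uint16_t mss = 0;
	uint8_t ws = 0;
	int sack = 0;

	parse_tcp_options(opts, sizeof(opts), &mss, &ws, &sack);
	printf("mss=%u wscale=%u sack_ok=%d\n", mss, ws, sack);
	return 0;
}
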
3993*4882a593Smuzhiyun static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3994*4882a593Smuzhiyun 				  __be32 laddr, __be16 lport,
3995*4882a593Smuzhiyun 				  __be32 raddr, __be16 rport,
3996*4882a593Smuzhiyun 				  u32 rcv_isn, u32 filter, u16 window,
3997*4882a593Smuzhiyun 				  u32 rss_qid, u8 port_id)
3998*4882a593Smuzhiyun {
3999*4882a593Smuzhiyun 	struct sk_buff *req_skb;
4000*4882a593Smuzhiyun 	struct fw_ofld_connection_wr *req;
4001*4882a593Smuzhiyun 	struct cpl_pass_accept_req *cpl = cplhdr(skb);
4002*4882a593Smuzhiyun 	int ret;
4003*4882a593Smuzhiyun 
4004*4882a593Smuzhiyun 	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
4005*4882a593Smuzhiyun 	if (!req_skb)
4006*4882a593Smuzhiyun 		return;
4007*4882a593Smuzhiyun 	req = __skb_put_zero(req_skb, sizeof(*req));
4008*4882a593Smuzhiyun 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
4009*4882a593Smuzhiyun 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
4010*4882a593Smuzhiyun 	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
4011*4882a593Smuzhiyun 	req->le.filter = (__force __be32) filter;
4012*4882a593Smuzhiyun 	req->le.lport = lport;
4013*4882a593Smuzhiyun 	req->le.pport = rport;
4014*4882a593Smuzhiyun 	req->le.u.ipv4.lip = laddr;
4015*4882a593Smuzhiyun 	req->le.u.ipv4.pip = raddr;
4016*4882a593Smuzhiyun 	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
4017*4882a593Smuzhiyun 	req->tcb.rcv_adv = htons(window);
4018*4882a593Smuzhiyun 	req->tcb.t_state_to_astid =
4019*4882a593Smuzhiyun 		 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
4020*4882a593Smuzhiyun 			FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
4021*4882a593Smuzhiyun 			FW_OFLD_CONNECTION_WR_ASTID_V(
4022*4882a593Smuzhiyun 			PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
4023*4882a593Smuzhiyun 
4024*4882a593Smuzhiyun 	/*
4025*4882a593Smuzhiyun 	 * We store the qid in opt2 which will be used by the firmware
4026*4882a593Smuzhiyun 	 * to send us the wr response.
4027*4882a593Smuzhiyun 	 */
4028*4882a593Smuzhiyun 	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
4029*4882a593Smuzhiyun 
4030*4882a593Smuzhiyun 	/*
4031*4882a593Smuzhiyun 	 * We initialize the MSS index in the TCB to 0xF so that when
4032*4882a593Smuzhiyun 	 * the driver sends the cpl_pass_accept_rpl, the TCB picks up
4033*4882a593Smuzhiyun 	 * the correct value. If this were 0, TP would ignore any
4034*4882a593Smuzhiyun 	 * value > 0 for the MSS index.
4035*4882a593Smuzhiyun 	 */
4036*4882a593Smuzhiyun 	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
4037*4882a593Smuzhiyun 	req->cookie = (uintptr_t)skb;
4038*4882a593Smuzhiyun 
4039*4882a593Smuzhiyun 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
4040*4882a593Smuzhiyun 	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
4041*4882a593Smuzhiyun 	if (ret < 0) {
4042*4882a593Smuzhiyun 		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
4043*4882a593Smuzhiyun 		       ret);
4044*4882a593Smuzhiyun 		kfree_skb(skb);
4045*4882a593Smuzhiyun 		kfree_skb(req_skb);
4046*4882a593Smuzhiyun 	}
4047*4882a593Smuzhiyun }
4048*4882a593Smuzhiyun 
4049*4882a593Smuzhiyun /*
4050*4882a593Smuzhiyun  * Handler for CPL_RX_PKT messages. These arrive when a filter,
4051*4882a593Smuzhiyun  * rather than a server TID, is used to redirect a SYN packet.
4052*4882a593Smuzhiyun  * Packets that hit the filter are redirected to the offload queue,
4053*4882a593Smuzhiyun  * and the driver then tries to establish the connection using a
4054*4882a593Smuzhiyun  * firmware work request.
4055*4882a593Smuzhiyun  */
4056*4882a593Smuzhiyun static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
4057*4882a593Smuzhiyun {
4058*4882a593Smuzhiyun 	int stid;
4059*4882a593Smuzhiyun 	unsigned int filter;
4060*4882a593Smuzhiyun 	struct ethhdr *eh = NULL;
4061*4882a593Smuzhiyun 	struct vlan_ethhdr *vlan_eh = NULL;
4062*4882a593Smuzhiyun 	struct iphdr *iph;
4063*4882a593Smuzhiyun 	struct tcphdr *tcph;
4064*4882a593Smuzhiyun 	struct rss_header *rss = (void *)skb->data;
4065*4882a593Smuzhiyun 	struct cpl_rx_pkt *cpl = (void *)skb->data;
4066*4882a593Smuzhiyun 	struct cpl_pass_accept_req *req = (void *)(rss + 1);
4067*4882a593Smuzhiyun 	struct l2t_entry *e;
4068*4882a593Smuzhiyun 	struct dst_entry *dst;
4069*4882a593Smuzhiyun 	struct c4iw_ep *lep = NULL;
4070*4882a593Smuzhiyun 	u16 window;
4071*4882a593Smuzhiyun 	struct port_info *pi;
4072*4882a593Smuzhiyun 	struct net_device *pdev;
4073*4882a593Smuzhiyun 	u16 rss_qid, eth_hdr_len;
4074*4882a593Smuzhiyun 	int step;
4075*4882a593Smuzhiyun 	struct neighbour *neigh;
4076*4882a593Smuzhiyun 
4077*4882a593Smuzhiyun 	/* Drop all non-SYN packets */
4078*4882a593Smuzhiyun 	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
4079*4882a593Smuzhiyun 		goto reject;
4080*4882a593Smuzhiyun 
4081*4882a593Smuzhiyun 	/*
4082*4882a593Smuzhiyun 	 * Drop all packets which did not hit the filter.
4083*4882a593Smuzhiyun 	 * Unlikely to happen.
4084*4882a593Smuzhiyun 	 */
4085*4882a593Smuzhiyun 	if (!(rss->filter_hit && rss->filter_tid))
4086*4882a593Smuzhiyun 		goto reject;
4087*4882a593Smuzhiyun 
4088*4882a593Smuzhiyun 	/*
4089*4882a593Smuzhiyun 	 * Calculate the server TID from the filter-hit index in cpl_rx_pkt.
4090*4882a593Smuzhiyun 	 */
4091*4882a593Smuzhiyun 	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
4092*4882a593Smuzhiyun 
4093*4882a593Smuzhiyun 	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
4094*4882a593Smuzhiyun 	if (!lep) {
4095*4882a593Smuzhiyun 		pr_warn("%s connect request on invalid stid %d\n",
4096*4882a593Smuzhiyun 			__func__, stid);
4097*4882a593Smuzhiyun 		goto reject;
4098*4882a593Smuzhiyun 	}
4099*4882a593Smuzhiyun 
4100*4882a593Smuzhiyun 	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
4101*4882a593Smuzhiyun 	case CHELSIO_T4:
4102*4882a593Smuzhiyun 		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
4103*4882a593Smuzhiyun 		break;
4104*4882a593Smuzhiyun 	case CHELSIO_T5:
4105*4882a593Smuzhiyun 		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
4106*4882a593Smuzhiyun 		break;
4107*4882a593Smuzhiyun 	case CHELSIO_T6:
4108*4882a593Smuzhiyun 		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
4109*4882a593Smuzhiyun 		break;
4110*4882a593Smuzhiyun 	default:
4111*4882a593Smuzhiyun 		pr_err("T%d Chip is not supported\n",
4112*4882a593Smuzhiyun 		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
4113*4882a593Smuzhiyun 		goto reject;
4114*4882a593Smuzhiyun 	}
4115*4882a593Smuzhiyun 
4116*4882a593Smuzhiyun 	if (eth_hdr_len == ETH_HLEN) {
4117*4882a593Smuzhiyun 		eh = (struct ethhdr *)(req + 1);
4118*4882a593Smuzhiyun 		iph = (struct iphdr *)(eh + 1);
4119*4882a593Smuzhiyun 	} else {
4120*4882a593Smuzhiyun 		vlan_eh = (struct vlan_ethhdr *)(req + 1);
4121*4882a593Smuzhiyun 		iph = (struct iphdr *)(vlan_eh + 1);
4122*4882a593Smuzhiyun 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
4123*4882a593Smuzhiyun 	}
4124*4882a593Smuzhiyun 
4125*4882a593Smuzhiyun 	if (iph->version != 0x4)
4126*4882a593Smuzhiyun 		goto reject;
4127*4882a593Smuzhiyun 
4128*4882a593Smuzhiyun 	tcph = (struct tcphdr *)(iph + 1);
4129*4882a593Smuzhiyun 	skb_set_network_header(skb, (void *)iph - (void *)rss);
4130*4882a593Smuzhiyun 	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
4131*4882a593Smuzhiyun 	skb_get(skb);
4132*4882a593Smuzhiyun 
4133*4882a593Smuzhiyun 	pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
4134*4882a593Smuzhiyun 		 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
4135*4882a593Smuzhiyun 		 ntohs(tcph->source), iph->tos);
4136*4882a593Smuzhiyun 
4137*4882a593Smuzhiyun 	dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
4138*4882a593Smuzhiyun 			      iph->daddr, iph->saddr, tcph->dest,
4139*4882a593Smuzhiyun 			      tcph->source, iph->tos);
4140*4882a593Smuzhiyun 	if (!dst) {
4141*4882a593Smuzhiyun 		pr_err("%s - failed to find dst entry!\n", __func__);
4142*4882a593Smuzhiyun 		goto reject;
4143*4882a593Smuzhiyun 	}
4144*4882a593Smuzhiyun 	neigh = dst_neigh_lookup_skb(dst, skb);
4145*4882a593Smuzhiyun 
4146*4882a593Smuzhiyun 	if (!neigh) {
4147*4882a593Smuzhiyun 		pr_err("%s - failed to allocate neigh!\n", __func__);
4148*4882a593Smuzhiyun 		goto free_dst;
4149*4882a593Smuzhiyun 	}
4150*4882a593Smuzhiyun 
4151*4882a593Smuzhiyun 	if (neigh->dev->flags & IFF_LOOPBACK) {
4152*4882a593Smuzhiyun 		pdev = ip_dev_find(&init_net, iph->daddr);
4153*4882a593Smuzhiyun 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
4154*4882a593Smuzhiyun 				    pdev, 0);
4155*4882a593Smuzhiyun 		pi = (struct port_info *)netdev_priv(pdev);
4156*4882a593Smuzhiyun 		dev_put(pdev);
4157*4882a593Smuzhiyun 	} else {
4158*4882a593Smuzhiyun 		pdev = get_real_dev(neigh->dev);
4159*4882a593Smuzhiyun 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
4160*4882a593Smuzhiyun 					pdev, 0);
4161*4882a593Smuzhiyun 		pi = (struct port_info *)netdev_priv(pdev);
4162*4882a593Smuzhiyun 	}
4163*4882a593Smuzhiyun 	neigh_release(neigh);
4164*4882a593Smuzhiyun 	if (!e) {
4165*4882a593Smuzhiyun 		pr_err("%s - failed to allocate l2t entry!\n",
4166*4882a593Smuzhiyun 		       __func__);
4167*4882a593Smuzhiyun 		goto free_dst;
4168*4882a593Smuzhiyun 	}
4169*4882a593Smuzhiyun 
4170*4882a593Smuzhiyun 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
4171*4882a593Smuzhiyun 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
4172*4882a593Smuzhiyun 	window = (__force u16) htons((__force u16)tcph->window);
4173*4882a593Smuzhiyun 
4174*4882a593Smuzhiyun 	/* Calculate the filter portion for the LE region. */
4175*4882a593Smuzhiyun 	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
4176*4882a593Smuzhiyun 						    dev->rdev.lldi.ports[0],
4177*4882a593Smuzhiyun 						    e));
4178*4882a593Smuzhiyun 
4179*4882a593Smuzhiyun 	/*
4180*4882a593Smuzhiyun 	 * Synthesize the cpl_pass_accept_req. We have everything except the
4181*4882a593Smuzhiyun 	 * TID. Once firmware sends a reply with TID we update the TID field
4182*4882a593Smuzhiyun 	 * in cpl and pass it through the regular cpl_pass_accept_req path.
4183*4882a593Smuzhiyun 	 */
4184*4882a593Smuzhiyun 	build_cpl_pass_accept_req(skb, stid, iph->tos);
4185*4882a593Smuzhiyun 	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
4186*4882a593Smuzhiyun 			      tcph->source, ntohl(tcph->seq), filter, window,
4187*4882a593Smuzhiyun 			      rss_qid, pi->port_id);
4188*4882a593Smuzhiyun 	cxgb4_l2t_release(e);
4189*4882a593Smuzhiyun free_dst:
4190*4882a593Smuzhiyun 	dst_release(dst);
4191*4882a593Smuzhiyun reject:
4192*4882a593Smuzhiyun 	if (lep)
4193*4882a593Smuzhiyun 		c4iw_put_ep(&lep->com);
4194*4882a593Smuzhiyun 	return 0;
4195*4882a593Smuzhiyun }
4196*4882a593Smuzhiyun 
4197*4882a593Smuzhiyun /*
4198*4882a593Smuzhiyun  * These are the real handlers that are called from a
4199*4882a593Smuzhiyun  * work queue.
4200*4882a593Smuzhiyun  */
4201*4882a593Smuzhiyun static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
4202*4882a593Smuzhiyun 	[CPL_ACT_ESTABLISH] = act_establish,
4203*4882a593Smuzhiyun 	[CPL_ACT_OPEN_RPL] = act_open_rpl,
4204*4882a593Smuzhiyun 	[CPL_RX_DATA] = rx_data,
4205*4882a593Smuzhiyun 	[CPL_ABORT_RPL_RSS] = abort_rpl,
4206*4882a593Smuzhiyun 	[CPL_ABORT_RPL] = abort_rpl,
4207*4882a593Smuzhiyun 	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
4208*4882a593Smuzhiyun 	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
4209*4882a593Smuzhiyun 	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
4210*4882a593Smuzhiyun 	[CPL_PASS_ESTABLISH] = pass_establish,
4211*4882a593Smuzhiyun 	[CPL_PEER_CLOSE] = peer_close,
4212*4882a593Smuzhiyun 	[CPL_ABORT_REQ_RSS] = peer_abort,
4213*4882a593Smuzhiyun 	[CPL_CLOSE_CON_RPL] = close_con_rpl,
4214*4882a593Smuzhiyun 	[CPL_RDMA_TERMINATE] = terminate,
4215*4882a593Smuzhiyun 	[CPL_FW4_ACK] = fw4_ack,
4216*4882a593Smuzhiyun 	[CPL_GET_TCB_RPL] = read_tcb_rpl,
4217*4882a593Smuzhiyun 	[CPL_FW6_MSG] = deferred_fw6_msg,
4218*4882a593Smuzhiyun 	[CPL_RX_PKT] = rx_pkt,
4219*4882a593Smuzhiyun 	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
4220*4882a593Smuzhiyun 	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
4221*4882a593Smuzhiyun };
4222*4882a593Smuzhiyun 
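/*
 * Editor's note: a hedged, standalone sketch (not driver code) of the
 * opcode-indexed dispatch used by work_handlers[] above: a sparse array
 * of function pointers with a bounds check and a NULL check, so unknown
 * opcodes are rejected rather than dereferenced. Opcode values and
 * handler bodies are hypothetical.
 */
#include <stdio.h>

#define OP_ESTABLISH	0
#define OP_CLOSE	3
#define NUM_OPS		8

typedef int (*handler_fn)(int arg);

static int on_establish(int arg) { printf("establish(%d)\n", arg); return 0; }
static int on_close(int arg)     { printf("close(%d)\n", arg);     return 0; }

static handler_fn handlers[NUM_OPS] = {
	[OP_ESTABLISH] = on_establish,
	[OP_CLOSE]     = on_close,
	/* other slots intentionally NULL */
};

static int dispatch(unsigned int opcode, int arg)
{
	if (opcode >= NUM_OPS || !handlers[opcode]) {
		fprintf(stderr, "No handler for opcode 0x%x\n", opcode);
		return -1;
	}
	return handlers[opcode](arg);
}

int main(void)
{
	dispatch(OP_ESTABLISH, 1);
	dispatch(OP_CLOSE, 2);
	dispatch(5, 3);		/* unhandled: rejected safely */
	return 0;
}
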
4223*4882a593Smuzhiyun static void process_timeout(struct c4iw_ep *ep)
4224*4882a593Smuzhiyun {
4225*4882a593Smuzhiyun 	struct c4iw_qp_attributes attrs;
4226*4882a593Smuzhiyun 	int abort = 1;
4227*4882a593Smuzhiyun 
4228*4882a593Smuzhiyun 	mutex_lock(&ep->com.mutex);
4229*4882a593Smuzhiyun 	pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
4230*4882a593Smuzhiyun 	set_bit(TIMEDOUT, &ep->com.history);
4231*4882a593Smuzhiyun 	switch (ep->com.state) {
4232*4882a593Smuzhiyun 	case MPA_REQ_SENT:
4233*4882a593Smuzhiyun 		connect_reply_upcall(ep, -ETIMEDOUT);
4234*4882a593Smuzhiyun 		break;
4235*4882a593Smuzhiyun 	case MPA_REQ_WAIT:
4236*4882a593Smuzhiyun 	case MPA_REQ_RCVD:
4237*4882a593Smuzhiyun 	case MPA_REP_SENT:
4238*4882a593Smuzhiyun 	case FPDU_MODE:
4239*4882a593Smuzhiyun 		break;
4240*4882a593Smuzhiyun 	case CLOSING:
4241*4882a593Smuzhiyun 	case MORIBUND:
4242*4882a593Smuzhiyun 		if (ep->com.cm_id && ep->com.qp) {
4243*4882a593Smuzhiyun 			attrs.next_state = C4IW_QP_STATE_ERROR;
4244*4882a593Smuzhiyun 			c4iw_modify_qp(ep->com.qp->rhp,
4245*4882a593Smuzhiyun 				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4246*4882a593Smuzhiyun 				     &attrs, 1);
4247*4882a593Smuzhiyun 		}
4248*4882a593Smuzhiyun 		close_complete_upcall(ep, -ETIMEDOUT);
4249*4882a593Smuzhiyun 		break;
4250*4882a593Smuzhiyun 	case ABORTING:
4251*4882a593Smuzhiyun 	case DEAD:
4252*4882a593Smuzhiyun 
4253*4882a593Smuzhiyun 		/*
4254*4882a593Smuzhiyun 		 * These states are expected if the ep timed out at the same
4255*4882a593Smuzhiyun 		 * time as another thread was calling stop_ep_timer().
4256*4882a593Smuzhiyun 		 * So we silently do nothing for these states.
4257*4882a593Smuzhiyun 		 */
4258*4882a593Smuzhiyun 		abort = 0;
4259*4882a593Smuzhiyun 		break;
4260*4882a593Smuzhiyun 	default:
4261*4882a593Smuzhiyun 		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
4262*4882a593Smuzhiyun 			__func__, ep, ep->hwtid, ep->com.state);
4263*4882a593Smuzhiyun 		abort = 0;
4264*4882a593Smuzhiyun 	}
4265*4882a593Smuzhiyun 	mutex_unlock(&ep->com.mutex);
4266*4882a593Smuzhiyun 	if (abort)
4267*4882a593Smuzhiyun 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
4268*4882a593Smuzhiyun 	c4iw_put_ep(&ep->com);
4269*4882a593Smuzhiyun }
4270*4882a593Smuzhiyun 
4271*4882a593Smuzhiyun static void process_timedout_eps(void)
4272*4882a593Smuzhiyun {
4273*4882a593Smuzhiyun 	struct c4iw_ep *ep;
4274*4882a593Smuzhiyun 
4275*4882a593Smuzhiyun 	spin_lock_irq(&timeout_lock);
4276*4882a593Smuzhiyun 	while (!list_empty(&timeout_list)) {
4277*4882a593Smuzhiyun 		struct list_head *tmp;
4278*4882a593Smuzhiyun 
4279*4882a593Smuzhiyun 		tmp = timeout_list.next;
4280*4882a593Smuzhiyun 		list_del(tmp);
4281*4882a593Smuzhiyun 		tmp->next = NULL;
4282*4882a593Smuzhiyun 		tmp->prev = NULL;
4283*4882a593Smuzhiyun 		spin_unlock_irq(&timeout_lock);
4284*4882a593Smuzhiyun 		ep = list_entry(tmp, struct c4iw_ep, entry);
4285*4882a593Smuzhiyun 		process_timeout(ep);
4286*4882a593Smuzhiyun 		spin_lock_irq(&timeout_lock);
4287*4882a593Smuzhiyun 	}
4288*4882a593Smuzhiyun 	spin_unlock_irq(&timeout_lock);
4289*4882a593Smuzhiyun }
4290*4882a593Smuzhiyun 
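/*
 * Editor's note: a hedged, standalone sketch (not driver code) of the
 * drain pattern in process_timedout_eps() above: pop one node while
 * holding the lock, drop the lock to do the potentially slow or
 * re-entrant work, then retake it for the next node. A pthread mutex
 * stands in for the kernel spinlock, and a plain singly-linked stack
 * stands in for the kernel list, purely for brevity.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *timeout_list;

static void process_one(struct node *n)
{
	/* runs without the lock held, like process_timeout() */
	printf("processing node %d\n", n->id);
	free(n);
}

static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (timeout_list) {
		struct node *n = timeout_list;

		timeout_list = n->next;
		n->next = NULL;
		pthread_mutex_unlock(&lock);
		process_one(n);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return EXIT_FAILURE;
		n->id = i;
		n->next = timeout_list;
		timeout_list = n;
	}
	drain();
	return 0;
}
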
4291*4882a593Smuzhiyun static void process_work(struct work_struct *work)
4292*4882a593Smuzhiyun {
4293*4882a593Smuzhiyun 	struct sk_buff *skb = NULL;
4294*4882a593Smuzhiyun 	struct c4iw_dev *dev;
4295*4882a593Smuzhiyun 	struct cpl_act_establish *rpl;
4296*4882a593Smuzhiyun 	unsigned int opcode;
4297*4882a593Smuzhiyun 	int ret;
4298*4882a593Smuzhiyun 
4299*4882a593Smuzhiyun 	process_timedout_eps();
4300*4882a593Smuzhiyun 	while ((skb = skb_dequeue(&rxq))) {
4301*4882a593Smuzhiyun 		rpl = cplhdr(skb);
4302*4882a593Smuzhiyun 		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
4303*4882a593Smuzhiyun 		opcode = rpl->ot.opcode;
4304*4882a593Smuzhiyun 
4305*4882a593Smuzhiyun 		if (opcode >= ARRAY_SIZE(work_handlers) ||
4306*4882a593Smuzhiyun 		    !work_handlers[opcode]) {
4307*4882a593Smuzhiyun 			pr_err("No handler for opcode 0x%x.\n", opcode);
4308*4882a593Smuzhiyun 			kfree_skb(skb);
4309*4882a593Smuzhiyun 		} else {
4310*4882a593Smuzhiyun 			ret = work_handlers[opcode](dev, skb);
4311*4882a593Smuzhiyun 			if (!ret)
4312*4882a593Smuzhiyun 				kfree_skb(skb);
4313*4882a593Smuzhiyun 		}
4314*4882a593Smuzhiyun 		process_timedout_eps();
4315*4882a593Smuzhiyun 	}
4316*4882a593Smuzhiyun }
4317*4882a593Smuzhiyun 
4318*4882a593Smuzhiyun static DECLARE_WORK(skb_work, process_work);
4319*4882a593Smuzhiyun 
4320*4882a593Smuzhiyun static void ep_timeout(struct timer_list *t)
4321*4882a593Smuzhiyun {
4322*4882a593Smuzhiyun 	struct c4iw_ep *ep = from_timer(ep, t, timer);
4323*4882a593Smuzhiyun 	int kickit = 0;
4324*4882a593Smuzhiyun 
4325*4882a593Smuzhiyun 	spin_lock(&timeout_lock);
4326*4882a593Smuzhiyun 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
4327*4882a593Smuzhiyun 		/*
4328*4882a593Smuzhiyun 		 * Only insert if it is not already on the list.
4329*4882a593Smuzhiyun 		 */
4330*4882a593Smuzhiyun 		if (!ep->entry.next) {
4331*4882a593Smuzhiyun 			list_add_tail(&ep->entry, &timeout_list);
4332*4882a593Smuzhiyun 			kickit = 1;
4333*4882a593Smuzhiyun 		}
4334*4882a593Smuzhiyun 	}
4335*4882a593Smuzhiyun 	spin_unlock(&timeout_lock);
4336*4882a593Smuzhiyun 	if (kickit)
4337*4882a593Smuzhiyun 		queue_work(workq, &skb_work);
4338*4882a593Smuzhiyun }
4339*4882a593Smuzhiyun 
4340*4882a593Smuzhiyun /*
4341*4882a593Smuzhiyun  * All the CM events are handled on a work queue to have a safe context.
4342*4882a593Smuzhiyun  */
4343*4882a593Smuzhiyun static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
4344*4882a593Smuzhiyun {
4345*4882a593Smuzhiyun 
4346*4882a593Smuzhiyun 	/*
4347*4882a593Smuzhiyun 	 * Save dev in the skb->cb area.
4348*4882a593Smuzhiyun 	 */
4349*4882a593Smuzhiyun 	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
4350*4882a593Smuzhiyun 
4351*4882a593Smuzhiyun 	/*
4352*4882a593Smuzhiyun 	 * Queue the skb and schedule the worker thread.
4353*4882a593Smuzhiyun 	 */
4354*4882a593Smuzhiyun 	skb_queue_tail(&rxq, skb);
4355*4882a593Smuzhiyun 	queue_work(workq, &skb_work);
4356*4882a593Smuzhiyun 	return 0;
4357*4882a593Smuzhiyun }
4358*4882a593Smuzhiyun 
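/*
 * Editor's note: a hedged, standalone sketch (not driver code) of the
 * idiom sched() uses above: stash a context pointer inside a small
 * scratch area carried with each message (skb->cb in the kernel, at
 * offset sizeof(void *)), so the worker that later dequeues the message
 * can recover which device it belongs to. The structures below are
 * hypothetical stand-ins; memcpy is used instead of the driver's raw
 * pointer cast to sidestep alignment concerns in portable C.
 */
#include <stdio.h>
#include <string.h>

struct fake_dev {
	const char *name;
};

struct fake_skb {
	char cb[48];		/* scratch area, like skb->cb */
	const char *payload;
};

static void stash_dev(struct fake_skb *skb, struct fake_dev *dev)
{
	memcpy(skb->cb + sizeof(void *), &dev, sizeof(dev));
}

static struct fake_dev *fetch_dev(const struct fake_skb *skb)
{
	struct fake_dev *dev;

	memcpy(&dev, skb->cb + sizeof(void *), sizeof(dev));
	return dev;
}

int main(void)
{
	struct fake_dev dev = { .name = "cxgb4-0" };
	struct fake_skb skb = { .payload = "cpl message" };

	stash_dev(&skb, &dev);
	/* ...later, on the work queue side... */
	printf("dequeued '%s' for dev %s\n", skb.payload,
	       fetch_dev(&skb)->name);
	return 0;
}
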
4359*4882a593Smuzhiyun static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
4360*4882a593Smuzhiyun {
4361*4882a593Smuzhiyun 	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
4362*4882a593Smuzhiyun 
4363*4882a593Smuzhiyun 	if (rpl->status != CPL_ERR_NONE) {
4364*4882a593Smuzhiyun 		pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
4365*4882a593Smuzhiyun 		       rpl->status, GET_TID(rpl));
4366*4882a593Smuzhiyun 	}
4367*4882a593Smuzhiyun 	kfree_skb(skb);
4368*4882a593Smuzhiyun 	return 0;
4369*4882a593Smuzhiyun }
4370*4882a593Smuzhiyun 
4371*4882a593Smuzhiyun static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
4372*4882a593Smuzhiyun {
4373*4882a593Smuzhiyun 	struct cpl_fw6_msg *rpl = cplhdr(skb);
4374*4882a593Smuzhiyun 	struct c4iw_wr_wait *wr_waitp;
4375*4882a593Smuzhiyun 	int ret;
4376*4882a593Smuzhiyun 
4377*4882a593Smuzhiyun 	pr_debug("type %u\n", rpl->type);
4378*4882a593Smuzhiyun 
4379*4882a593Smuzhiyun 	switch (rpl->type) {
4380*4882a593Smuzhiyun 	case FW6_TYPE_WR_RPL:
4381*4882a593Smuzhiyun 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
4382*4882a593Smuzhiyun 		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
4383*4882a593Smuzhiyun 		pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
4384*4882a593Smuzhiyun 		if (wr_waitp)
4385*4882a593Smuzhiyun 			c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
4386*4882a593Smuzhiyun 		kfree_skb(skb);
4387*4882a593Smuzhiyun 		break;
4388*4882a593Smuzhiyun 	case FW6_TYPE_CQE:
4389*4882a593Smuzhiyun 	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
4390*4882a593Smuzhiyun 		sched(dev, skb);
4391*4882a593Smuzhiyun 		break;
4392*4882a593Smuzhiyun 	default:
4393*4882a593Smuzhiyun 		pr_err("%s unexpected fw6 msg type %u\n",
4394*4882a593Smuzhiyun 		       __func__, rpl->type);
4395*4882a593Smuzhiyun 		kfree_skb(skb);
4396*4882a593Smuzhiyun 		break;
4397*4882a593Smuzhiyun 	}
4398*4882a593Smuzhiyun 	return 0;
4399*4882a593Smuzhiyun }
4400*4882a593Smuzhiyun 
4401*4882a593Smuzhiyun static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
4402*4882a593Smuzhiyun {
4403*4882a593Smuzhiyun 	struct cpl_abort_req_rss *req = cplhdr(skb);
4404*4882a593Smuzhiyun 	struct c4iw_ep *ep;
4405*4882a593Smuzhiyun 	unsigned int tid = GET_TID(req);
4406*4882a593Smuzhiyun 
4407*4882a593Smuzhiyun 	ep = get_ep_from_tid(dev, tid);
4408*4882a593Smuzhiyun 	/* This EP will be dereferenced in peer_abort() */
4409*4882a593Smuzhiyun 	if (!ep) {
4410*4882a593Smuzhiyun 		pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
4411*4882a593Smuzhiyun 		kfree_skb(skb);
4412*4882a593Smuzhiyun 		return 0;
4413*4882a593Smuzhiyun 	}
4414*4882a593Smuzhiyun 	if (cxgb_is_neg_adv(req->status)) {
4415*4882a593Smuzhiyun 		pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
4416*4882a593Smuzhiyun 			 ep->hwtid, req->status,
4417*4882a593Smuzhiyun 			 neg_adv_str(req->status));
4418*4882a593Smuzhiyun 		goto out;
4419*4882a593Smuzhiyun 	}
4420*4882a593Smuzhiyun 	pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
4421*4882a593Smuzhiyun 
4422*4882a593Smuzhiyun 	c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
4423*4882a593Smuzhiyun out:
4424*4882a593Smuzhiyun 	sched(dev, skb);
4425*4882a593Smuzhiyun 	return 0;
4426*4882a593Smuzhiyun }
4427*4882a593Smuzhiyun 
4428*4882a593Smuzhiyun /*
4429*4882a593Smuzhiyun  * Most upcalls from the T4 Core go to sched() to
4430*4882a593Smuzhiyun  * schedule the processing on a work queue.
4431*4882a593Smuzhiyun  */
4432*4882a593Smuzhiyun c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
4433*4882a593Smuzhiyun 	[CPL_ACT_ESTABLISH] = sched,
4434*4882a593Smuzhiyun 	[CPL_ACT_OPEN_RPL] = sched,
4435*4882a593Smuzhiyun 	[CPL_RX_DATA] = sched,
4436*4882a593Smuzhiyun 	[CPL_ABORT_RPL_RSS] = sched,
4437*4882a593Smuzhiyun 	[CPL_ABORT_RPL] = sched,
4438*4882a593Smuzhiyun 	[CPL_PASS_OPEN_RPL] = sched,
4439*4882a593Smuzhiyun 	[CPL_CLOSE_LISTSRV_RPL] = sched,
4440*4882a593Smuzhiyun 	[CPL_PASS_ACCEPT_REQ] = sched,
4441*4882a593Smuzhiyun 	[CPL_PASS_ESTABLISH] = sched,
4442*4882a593Smuzhiyun 	[CPL_PEER_CLOSE] = sched,
4443*4882a593Smuzhiyun 	[CPL_CLOSE_CON_RPL] = sched,
4444*4882a593Smuzhiyun 	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
4445*4882a593Smuzhiyun 	[CPL_RDMA_TERMINATE] = sched,
4446*4882a593Smuzhiyun 	[CPL_FW4_ACK] = sched,
4447*4882a593Smuzhiyun 	[CPL_SET_TCB_RPL] = set_tcb_rpl,
4448*4882a593Smuzhiyun 	[CPL_GET_TCB_RPL] = sched,
4449*4882a593Smuzhiyun 	[CPL_FW6_MSG] = fw6_msg,
4450*4882a593Smuzhiyun 	[CPL_RX_PKT] = sched
4451*4882a593Smuzhiyun };
4452*4882a593Smuzhiyun 
4453*4882a593Smuzhiyun int __init c4iw_cm_init(void)
4454*4882a593Smuzhiyun {
4455*4882a593Smuzhiyun 	spin_lock_init(&timeout_lock);
4456*4882a593Smuzhiyun 	skb_queue_head_init(&rxq);
4457*4882a593Smuzhiyun 
4458*4882a593Smuzhiyun 	workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
4459*4882a593Smuzhiyun 	if (!workq)
4460*4882a593Smuzhiyun 		return -ENOMEM;
4461*4882a593Smuzhiyun 
4462*4882a593Smuzhiyun 	return 0;
4463*4882a593Smuzhiyun }
4464*4882a593Smuzhiyun 
4465*4882a593Smuzhiyun void c4iw_cm_term(void)
4466*4882a593Smuzhiyun {
4467*4882a593Smuzhiyun 	WARN_ON(!list_empty(&timeout_list));
4468*4882a593Smuzhiyun 	flush_workqueue(workq);
4469*4882a593Smuzhiyun 	destroy_workqueue(workq);
4470*4882a593Smuzhiyun }
4471