/* xref: /OK3568_Linux_fs/kernel/net/ipv4/tcp_ipv4.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after a
 *					year-long coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

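/* Initial sequence numbers and timestamp offsets are derived from a keyed
 * hash over the connection 4-tuple (secure_tcp_seq()/secure_tcp_ts_off()),
 * so they are hard to predict off-path while staying stable per flow.
 */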
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;

		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
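			/* Step the new connection's first sequence number
			 * one maximal (unscaled) 64K window plus a margin
			 * past the old snd_nxt, so segments of the two
			 * incarnations cannot be confused.
			 */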
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

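/* Give attached cgroup BPF INET4_CONNECT programs a chance to inspect,
 * rewrite or reject the destination before tcp_v4_connect() proper runs.
 */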
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent the BPF program called below from accessing bytes that are
	 * out of the bounds specified by the user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			WRITE_ONCE(tp->write_seq, 0);
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete the initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcp_seq(inet->inet_saddr,
						  inet->inet_daddr,
						  inet->inet_sport,
						  usin->sin_port));
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

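/* On ICMP_REDIRECT, let the cached route update itself so that
 * subsequent packets take the new next hop.
 */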
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/* TCP-LD (RFC 6069) logic */
void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	s32 remaining;
	u32 delta_us;

	if (sock_owned_by_user(sk))
		return;

	if (seq != tp->snd_una || !icsk->icsk_retransmits ||
	    !icsk->icsk_backoff)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	icsk->icsk_backoff--;
	icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
	icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	tcp_mstamp_refresh(tp);
	delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
	remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);

	if (remaining > 0) {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  remaining, TCP_RTO_MAX);
	} else {
		/* RTO revert clocked out retransmission.
		 * Will retransmit now.
		 */
		tcp_retransmit_timer(sk);
	}
}
EXPORT_SYMBOL(tcp_ld_RTO_revert);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment,
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped,
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

int tcp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			WRITE_ONCE(tp->mtu_info, info);
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen &&
		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
			tcp_ld_RTO_revert(sk, seq);
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

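/* Set up a partially checksummed packet: seed th->check with the
 * pseudo-header sum and record where the payload checksum must be
 * folded in, by the device or by the software fallback.
 */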
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for the reset?
 *	Answer: if a packet caused the RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	u64 transmit_time = 0;
	struct sock *ctl_sk;
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		const union tcp_md5_addr *addr;
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	} else if (hash_location) {
		const union tcp_md5_addr *addr;
		int sdif = tcp_v4_sdif(skb);
		int dif = inet_iif(skb);
		int l3index;

		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * that listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), dif, sdif);
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = sdif ? dif : 0;
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	if (sk) {
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_priority : sk->sk_priority;
		transmit_time = tcp_transmit_time(sk);
	}
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len,
			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;
	u64 transmit_time;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_priority : sk->sk_priority;
	transmit_time = tcp_transmit_time(sk);
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len,
			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

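/* Reply with an ACK from TIME-WAIT state, using the sequence and
 * timestamp state preserved in the timewait bucket.
 */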
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	const union tcp_md5_addr *addr;
	int l3index;

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;
	u8 tos;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
				(inet_sk(sk)->tos & INET_ECN_MASK) :
				inet_sk(sk)->tos;

		if (!INET_ECN_is_capable(tos) &&
		    tcp_bpf_ca_needs_ecn((struct sock *)req))
			tos |= INET_ECN_ECT_0;

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt),
					    tos);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);

static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
{
	if (!old)
		return true;

	/* l3index always overrides non-l3index */
	if (old->l3index && new->l3index == 0)
		return false;
	if (old->l3index == 0 && new->l3index)
		return true;

	return old->prefixlen < new->prefixlen;
}

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
		if (key->family != family)
			continue;
		if (key->l3index && key->l3index != l3index)
			continue;
		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && better_md5_match(best_match, key))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(__tcp_md5_do_lookup);

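/* Exact-match variant of the lookup above: family, prefix length and
 * L3 domain must all match. Used when keys are added or removed.
 */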
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen,
						      int l3index)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
		if (key->family != family)
			continue;
		if (key->l3index != l3index)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1118*4882a593Smuzhiyun 					 const struct sock *addr_sk)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun 	const union tcp_md5_addr *addr;
1121*4882a593Smuzhiyun 	int l3index;
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1124*4882a593Smuzhiyun 						 addr_sk->sk_bound_dev_if);
1125*4882a593Smuzhiyun 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1126*4882a593Smuzhiyun 	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_v4_md5_lookup);
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun /* This can be called on a newly created socket, from other files */
1131*4882a593Smuzhiyun int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1132*4882a593Smuzhiyun 		   int family, u8 prefixlen, int l3index,
1133*4882a593Smuzhiyun 		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
1134*4882a593Smuzhiyun {
1135*4882a593Smuzhiyun 	/* Add Key to the list */
1136*4882a593Smuzhiyun 	struct tcp_md5sig_key *key;
1137*4882a593Smuzhiyun 	struct tcp_sock *tp = tcp_sk(sk);
1138*4882a593Smuzhiyun 	struct tcp_md5sig_info *md5sig;
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
1141*4882a593Smuzhiyun 	if (key) {
1142*4882a593Smuzhiyun 		/* Pre-existing entry - just update that one.
1143*4882a593Smuzhiyun 		 * Note that the key might be used concurrently.
1144*4882a593Smuzhiyun 		 * data_race() tells KCSAN that we do not care about
1145*4882a593Smuzhiyun 		 * key mismatches, since changing the MD5 key on live flows
1146*4882a593Smuzhiyun 		 * can lead to packet drops.
1147*4882a593Smuzhiyun 		 */
1148*4882a593Smuzhiyun 		data_race(memcpy(key->key, newkey, newkeylen));
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 		/* Pairs with READ_ONCE() in tcp_md5_hash_key().
1151*4882a593Smuzhiyun 		 * Also note that a reader could observe the new key->keylen value
1152*4882a593Smuzhiyun 		 * but the old key->key[]; this is why we use __GFP_ZERO
1153*4882a593Smuzhiyun 		 * in the sock_kmalloc() call below.
1154*4882a593Smuzhiyun 		 */
1155*4882a593Smuzhiyun 		WRITE_ONCE(key->keylen, newkeylen);
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 		return 0;
1158*4882a593Smuzhiyun 	}
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1161*4882a593Smuzhiyun 					   lockdep_sock_is_held(sk));
1162*4882a593Smuzhiyun 	if (!md5sig) {
1163*4882a593Smuzhiyun 		md5sig = kmalloc(sizeof(*md5sig), gfp);
1164*4882a593Smuzhiyun 		if (!md5sig)
1165*4882a593Smuzhiyun 			return -ENOMEM;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1168*4882a593Smuzhiyun 		INIT_HLIST_HEAD(&md5sig->head);
1169*4882a593Smuzhiyun 		rcu_assign_pointer(tp->md5sig_info, md5sig);
1170*4882a593Smuzhiyun 	}
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1173*4882a593Smuzhiyun 	if (!key)
1174*4882a593Smuzhiyun 		return -ENOMEM;
1175*4882a593Smuzhiyun 	if (!tcp_alloc_md5sig_pool()) {
1176*4882a593Smuzhiyun 		sock_kfree_s(sk, key, sizeof(*key));
1177*4882a593Smuzhiyun 		return -ENOMEM;
1178*4882a593Smuzhiyun 	}
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	memcpy(key->key, newkey, newkeylen);
1181*4882a593Smuzhiyun 	key->keylen = newkeylen;
1182*4882a593Smuzhiyun 	key->family = family;
1183*4882a593Smuzhiyun 	key->prefixlen = prefixlen;
1184*4882a593Smuzhiyun 	key->l3index = l3index;
1185*4882a593Smuzhiyun 	memcpy(&key->addr, addr,
1186*4882a593Smuzhiyun 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1187*4882a593Smuzhiyun 				      sizeof(struct in_addr));
1188*4882a593Smuzhiyun 	hlist_add_head_rcu(&key->node, &md5sig->head);
1189*4882a593Smuzhiyun 	return 0;
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_md5_do_add);
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1194*4882a593Smuzhiyun 		   u8 prefixlen, int l3index)
1195*4882a593Smuzhiyun {
1196*4882a593Smuzhiyun 	struct tcp_md5sig_key *key;
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
1199*4882a593Smuzhiyun 	if (!key)
1200*4882a593Smuzhiyun 		return -ENOENT;
1201*4882a593Smuzhiyun 	hlist_del_rcu(&key->node);
1202*4882a593Smuzhiyun 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1203*4882a593Smuzhiyun 	kfree_rcu(key, rcu);
1204*4882a593Smuzhiyun 	return 0;
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_md5_do_del);
1207*4882a593Smuzhiyun 
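/* The delete path above relies on the classic RCU unlink-then-defer-free
 * pattern: hlist_del_rcu() makes the key unreachable for new lookups, while
 * kfree_rcu() postpones the actual free until every reader that might still
 * hold a pointer has left its RCU read-side section. A generic sketch of
 * the same pattern ("struct item" and the helpers are made up for
 * illustration; the list/RCU primitives are the real kernel API):
 */
#include <linux/rculist.h>
#include <linux/slab.h>

struct item {
	struct hlist_node node;
	struct rcu_head rcu;
	int key;
};

/* Reader side: runs under rcu_read_lock(), may race with delete_item(). */
static struct item *find_item(struct hlist_head *head, int key)
{
	struct item *it;

	hlist_for_each_entry_rcu(it, head, node)
		if (it->key == key)
			return it;
	return NULL;
}

/* Writer side: writers are serialized by the caller (here, the socket lock). */
static void delete_item(struct item *it)
{
	hlist_del_rcu(&it->node);	/* new lookups can no longer find it */
	kfree_rcu(it, rcu);		/* freed only after a grace period */
}
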
1208*4882a593Smuzhiyun static void tcp_clear_md5_list(struct sock *sk)
1209*4882a593Smuzhiyun {
1210*4882a593Smuzhiyun 	struct tcp_sock *tp = tcp_sk(sk);
1211*4882a593Smuzhiyun 	struct tcp_md5sig_key *key;
1212*4882a593Smuzhiyun 	struct hlist_node *n;
1213*4882a593Smuzhiyun 	struct tcp_md5sig_info *md5sig;
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1218*4882a593Smuzhiyun 		hlist_del_rcu(&key->node);
1219*4882a593Smuzhiyun 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1220*4882a593Smuzhiyun 		kfree_rcu(key, rcu);
1221*4882a593Smuzhiyun 	}
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1225*4882a593Smuzhiyun 				 sockptr_t optval, int optlen)
1226*4882a593Smuzhiyun {
1227*4882a593Smuzhiyun 	struct tcp_md5sig cmd;
1228*4882a593Smuzhiyun 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1229*4882a593Smuzhiyun 	const union tcp_md5_addr *addr;
1230*4882a593Smuzhiyun 	u8 prefixlen = 32;
1231*4882a593Smuzhiyun 	int l3index = 0;
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	if (optlen < sizeof(cmd))
1234*4882a593Smuzhiyun 		return -EINVAL;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1237*4882a593Smuzhiyun 		return -EFAULT;
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	if (sin->sin_family != AF_INET)
1240*4882a593Smuzhiyun 		return -EINVAL;
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun 	if (optname == TCP_MD5SIG_EXT &&
1243*4882a593Smuzhiyun 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1244*4882a593Smuzhiyun 		prefixlen = cmd.tcpm_prefixlen;
1245*4882a593Smuzhiyun 		if (prefixlen > 32)
1246*4882a593Smuzhiyun 			return -EINVAL;
1247*4882a593Smuzhiyun 	}
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	if (optname == TCP_MD5SIG_EXT &&
1250*4882a593Smuzhiyun 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1251*4882a593Smuzhiyun 		struct net_device *dev;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 		rcu_read_lock();
1254*4882a593Smuzhiyun 		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1255*4882a593Smuzhiyun 		if (dev && netif_is_l3_master(dev))
1256*4882a593Smuzhiyun 			l3index = dev->ifindex;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 		rcu_read_unlock();
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 		/* ok to reference set/not set outside of rcu;
1261*4882a593Smuzhiyun 		 * right now device MUST be an L3 master
1262*4882a593Smuzhiyun 		 */
1263*4882a593Smuzhiyun 		if (!dev || !l3index)
1264*4882a593Smuzhiyun 			return -EINVAL;
1265*4882a593Smuzhiyun 	}
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	if (!cmd.tcpm_keylen)
1270*4882a593Smuzhiyun 		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1273*4882a593Smuzhiyun 		return -EINVAL;
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
1276*4882a593Smuzhiyun 			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
1277*4882a593Smuzhiyun }
1278*4882a593Smuzhiyun 
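/* tcp_v4_parse_md5_keys() above is the kernel side of the TCP_MD5SIG and
 * TCP_MD5SIG_EXT socket options. A hedged userspace sketch of the caller
 * side (struct tcp_md5sig and the flag/limit constants come from
 * <linux/tcp.h>; the helper name and addresses are illustrative, error
 * handling trimmed):
 */
#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG_EXT, ... */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Install an MD5 key for all peers in 192.0.2.0/24; this exercises the
 * TCP_MD5SIG_FLAG_PREFIX branch above. Passing keylen == 0 instead takes
 * the tcp_md5_do_del() path and removes the key.
 */
static int set_md5_key_for_prefix(int fd, const void *key, int keylen)
{
	struct tcp_md5sig md5 = { 0 };
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(0xc0000200);	/* 192.0.2.0 */

	md5.tcpm_flags = TCP_MD5SIG_FLAG_PREFIX;
	md5.tcpm_prefixlen = 24;			/* must be <= 32 */
	md5.tcpm_keylen = keylen;	/* <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &md5, sizeof(md5));
}
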
1279*4882a593Smuzhiyun static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1280*4882a593Smuzhiyun 				   __be32 daddr, __be32 saddr,
1281*4882a593Smuzhiyun 				   const struct tcphdr *th, int nbytes)
1282*4882a593Smuzhiyun {
1283*4882a593Smuzhiyun 	struct tcp4_pseudohdr *bp;
1284*4882a593Smuzhiyun 	struct scatterlist sg;
1285*4882a593Smuzhiyun 	struct tcphdr *_th;
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	bp = hp->scratch;
1288*4882a593Smuzhiyun 	bp->saddr = saddr;
1289*4882a593Smuzhiyun 	bp->daddr = daddr;
1290*4882a593Smuzhiyun 	bp->pad = 0;
1291*4882a593Smuzhiyun 	bp->protocol = IPPROTO_TCP;
1292*4882a593Smuzhiyun 	bp->len = cpu_to_be16(nbytes);
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	_th = (struct tcphdr *)(bp + 1);
1295*4882a593Smuzhiyun 	memcpy(_th, th, sizeof(*th));
1296*4882a593Smuzhiyun 	_th->check = 0;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1299*4882a593Smuzhiyun 	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1300*4882a593Smuzhiyun 				sizeof(*bp) + sizeof(*th));
1301*4882a593Smuzhiyun 	return crypto_ahash_update(hp->md5_req);
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun 
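/* For reference, the bytes the helper above feeds into MD5 (cf. RFC 2385)
 * are an IPv4 pseudo-header followed by a copy of the TCP header with its
 * checksum zeroed. Illustrative layout; field widths follow the
 * tcp4_pseudohdr construction above, and nbytes is the TCP length
 * (th->doff << 2 for the header-only caller below, skb->len when payload
 * is also hashed):
 *
 *   offset  size  field
 *   ------  ----  ---------------------------------------------
 *        0     4  bp->saddr     source IPv4 address
 *        4     4  bp->daddr     destination IPv4 address
 *        8     1  bp->pad       always 0
 *        9     1  bp->protocol  IPPROTO_TCP (6)
 *       10     2  bp->len       TCP length "nbytes", network order
 *       12   20+  TCP header copy, _th->check forced to 0
 */
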
1304*4882a593Smuzhiyun static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1305*4882a593Smuzhiyun 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1306*4882a593Smuzhiyun {
1307*4882a593Smuzhiyun 	struct tcp_md5sig_pool *hp;
1308*4882a593Smuzhiyun 	struct ahash_request *req;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	hp = tcp_get_md5sig_pool();
1311*4882a593Smuzhiyun 	if (!hp)
1312*4882a593Smuzhiyun 		goto clear_hash_noput;
1313*4882a593Smuzhiyun 	req = hp->md5_req;
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	if (crypto_ahash_init(req))
1316*4882a593Smuzhiyun 		goto clear_hash;
1317*4882a593Smuzhiyun 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1318*4882a593Smuzhiyun 		goto clear_hash;
1319*4882a593Smuzhiyun 	if (tcp_md5_hash_key(hp, key))
1320*4882a593Smuzhiyun 		goto clear_hash;
1321*4882a593Smuzhiyun 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1322*4882a593Smuzhiyun 	if (crypto_ahash_final(req))
1323*4882a593Smuzhiyun 		goto clear_hash;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	tcp_put_md5sig_pool();
1326*4882a593Smuzhiyun 	return 0;
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun clear_hash:
1329*4882a593Smuzhiyun 	tcp_put_md5sig_pool();
1330*4882a593Smuzhiyun clear_hash_noput:
1331*4882a593Smuzhiyun 	memset(md5_hash, 0, 16);
1332*4882a593Smuzhiyun 	return 1;
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1336*4882a593Smuzhiyun 			const struct sock *sk,
1337*4882a593Smuzhiyun 			const struct sk_buff *skb)
1338*4882a593Smuzhiyun {
1339*4882a593Smuzhiyun 	struct tcp_md5sig_pool *hp;
1340*4882a593Smuzhiyun 	struct ahash_request *req;
1341*4882a593Smuzhiyun 	const struct tcphdr *th = tcp_hdr(skb);
1342*4882a593Smuzhiyun 	__be32 saddr, daddr;
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 	if (sk) { /* valid for established/request sockets */
1345*4882a593Smuzhiyun 		saddr = sk->sk_rcv_saddr;
1346*4882a593Smuzhiyun 		daddr = sk->sk_daddr;
1347*4882a593Smuzhiyun 	} else {
1348*4882a593Smuzhiyun 		const struct iphdr *iph = ip_hdr(skb);
1349*4882a593Smuzhiyun 		saddr = iph->saddr;
1350*4882a593Smuzhiyun 		daddr = iph->daddr;
1351*4882a593Smuzhiyun 	}
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	hp = tcp_get_md5sig_pool();
1354*4882a593Smuzhiyun 	if (!hp)
1355*4882a593Smuzhiyun 		goto clear_hash_noput;
1356*4882a593Smuzhiyun 	req = hp->md5_req;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (crypto_ahash_init(req))
1359*4882a593Smuzhiyun 		goto clear_hash;
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1362*4882a593Smuzhiyun 		goto clear_hash;
1363*4882a593Smuzhiyun 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1364*4882a593Smuzhiyun 		goto clear_hash;
1365*4882a593Smuzhiyun 	if (tcp_md5_hash_key(hp, key))
1366*4882a593Smuzhiyun 		goto clear_hash;
1367*4882a593Smuzhiyun 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1368*4882a593Smuzhiyun 	if (crypto_ahash_final(req))
1369*4882a593Smuzhiyun 		goto clear_hash;
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	tcp_put_md5sig_pool();
1372*4882a593Smuzhiyun 	return 0;
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun clear_hash:
1375*4882a593Smuzhiyun 	tcp_put_md5sig_pool();
1376*4882a593Smuzhiyun clear_hash_noput:
1377*4882a593Smuzhiyun 	memset(md5_hash, 0, 16);
1378*4882a593Smuzhiyun 	return 1;
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun #endif
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun /* Called with rcu_read_lock() */
1385*4882a593Smuzhiyun static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1386*4882a593Smuzhiyun 				    const struct sk_buff *skb,
1387*4882a593Smuzhiyun 				    int dif, int sdif)
1388*4882a593Smuzhiyun {
1389*4882a593Smuzhiyun #ifdef CONFIG_TCP_MD5SIG
1390*4882a593Smuzhiyun 	/*
1391*4882a593Smuzhiyun 	 * This gets called for each TCP segment that arrives
1392*4882a593Smuzhiyun 	 * so we want to be efficient.
1393*4882a593Smuzhiyun 	 * We have 3 drop cases:
1394*4882a593Smuzhiyun 	 * o No MD5 hash and one expected.
1395*4882a593Smuzhiyun 	 * o MD5 hash and we're not expecting one.
1396*4882a593Smuzhiyun 	 * o MD5 hash and it's wrong.
1397*4882a593Smuzhiyun 	 */
1398*4882a593Smuzhiyun 	const __u8 *hash_location = NULL;
1399*4882a593Smuzhiyun 	struct tcp_md5sig_key *hash_expected;
1400*4882a593Smuzhiyun 	const struct iphdr *iph = ip_hdr(skb);
1401*4882a593Smuzhiyun 	const struct tcphdr *th = tcp_hdr(skb);
1402*4882a593Smuzhiyun 	const union tcp_md5_addr *addr;
1403*4882a593Smuzhiyun 	unsigned char newhash[16];
1404*4882a593Smuzhiyun 	int genhash, l3index;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	/* sdif set means the packet ingressed via a device
1407*4882a593Smuzhiyun 	 * in an L3 domain, and dif is set to the l3mdev
1408*4882a593Smuzhiyun 	 */
1409*4882a593Smuzhiyun 	l3index = sdif ? dif : 0;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	addr = (union tcp_md5_addr *)&iph->saddr;
1412*4882a593Smuzhiyun 	hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1413*4882a593Smuzhiyun 	hash_location = tcp_parse_md5sig_option(th);
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	/* We've parsed the options - do we have a hash? */
1416*4882a593Smuzhiyun 	if (!hash_expected && !hash_location)
1417*4882a593Smuzhiyun 		return false;
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	if (hash_expected && !hash_location) {
1420*4882a593Smuzhiyun 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1421*4882a593Smuzhiyun 		return true;
1422*4882a593Smuzhiyun 	}
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	if (!hash_expected && hash_location) {
1425*4882a593Smuzhiyun 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1426*4882a593Smuzhiyun 		return true;
1427*4882a593Smuzhiyun 	}
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	/* Okay, so both hash_expected and hash_location are set -
1430*4882a593Smuzhiyun 	 * we need to calculate the hash and compare.
1431*4882a593Smuzhiyun 	 */
1432*4882a593Smuzhiyun 	genhash = tcp_v4_md5_hash_skb(newhash,
1433*4882a593Smuzhiyun 				      hash_expected,
1434*4882a593Smuzhiyun 				      NULL, skb);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1437*4882a593Smuzhiyun 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1438*4882a593Smuzhiyun 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
1439*4882a593Smuzhiyun 				     &iph->saddr, ntohs(th->source),
1440*4882a593Smuzhiyun 				     &iph->daddr, ntohs(th->dest),
1441*4882a593Smuzhiyun 				     genhash ? " tcp_v4_calc_md5_hash failed"
1442*4882a593Smuzhiyun 				     : "", l3index);
1443*4882a593Smuzhiyun 		return true;
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun 	return false;
1446*4882a593Smuzhiyun #endif
1447*4882a593Smuzhiyun 	return false;
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun static void tcp_v4_init_req(struct request_sock *req,
1451*4882a593Smuzhiyun 			    const struct sock *sk_listener,
1452*4882a593Smuzhiyun 			    struct sk_buff *skb)
1453*4882a593Smuzhiyun {
1454*4882a593Smuzhiyun 	struct inet_request_sock *ireq = inet_rsk(req);
1455*4882a593Smuzhiyun 	struct net *net = sock_net(sk_listener);
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1458*4882a593Smuzhiyun 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1459*4882a593Smuzhiyun 	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1463*4882a593Smuzhiyun 					  struct flowi *fl,
1464*4882a593Smuzhiyun 					  const struct request_sock *req)
1465*4882a593Smuzhiyun {
1466*4882a593Smuzhiyun 	return inet_csk_route_req(sk, &fl->u.ip4, req);
1467*4882a593Smuzhiyun }
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1470*4882a593Smuzhiyun 	.family		=	PF_INET,
1471*4882a593Smuzhiyun 	.obj_size	=	sizeof(struct tcp_request_sock),
1472*4882a593Smuzhiyun 	.rtx_syn_ack	=	tcp_rtx_synack,
1473*4882a593Smuzhiyun 	.send_ack	=	tcp_v4_reqsk_send_ack,
1474*4882a593Smuzhiyun 	.destructor	=	tcp_v4_reqsk_destructor,
1475*4882a593Smuzhiyun 	.send_reset	=	tcp_v4_send_reset,
1476*4882a593Smuzhiyun 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1477*4882a593Smuzhiyun };
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1480*4882a593Smuzhiyun 	.mss_clamp	=	TCP_MSS_DEFAULT,
1481*4882a593Smuzhiyun #ifdef CONFIG_TCP_MD5SIG
1482*4882a593Smuzhiyun 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1483*4882a593Smuzhiyun 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1484*4882a593Smuzhiyun #endif
1485*4882a593Smuzhiyun 	.init_req	=	tcp_v4_init_req,
1486*4882a593Smuzhiyun #ifdef CONFIG_SYN_COOKIES
1487*4882a593Smuzhiyun 	.cookie_init_seq =	cookie_v4_init_sequence,
1488*4882a593Smuzhiyun #endif
1489*4882a593Smuzhiyun 	.route_req	=	tcp_v4_route_req,
1490*4882a593Smuzhiyun 	.init_seq	=	tcp_v4_init_seq,
1491*4882a593Smuzhiyun 	.init_ts_off	=	tcp_v4_init_ts_off,
1492*4882a593Smuzhiyun 	.send_synack	=	tcp_v4_send_synack,
1493*4882a593Smuzhiyun };
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1496*4882a593Smuzhiyun {
1497*4882a593Smuzhiyun 	/* Never answer SYNs sent to broadcast or multicast addresses */
1498*4882a593Smuzhiyun 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1499*4882a593Smuzhiyun 		goto drop;
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 	return tcp_conn_request(&tcp_request_sock_ops,
1502*4882a593Smuzhiyun 				&tcp_request_sock_ipv4_ops, sk, skb);
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun drop:
1505*4882a593Smuzhiyun 	tcp_listendrop(sk);
1506*4882a593Smuzhiyun 	return 0;
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_v4_conn_request);
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun /*
1512*4882a593Smuzhiyun  * The three way handshake has completed - we got a valid synack -
1513*4882a593Smuzhiyun  * now create the new socket.
1514*4882a593Smuzhiyun  */
1515*4882a593Smuzhiyun struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1516*4882a593Smuzhiyun 				  struct request_sock *req,
1517*4882a593Smuzhiyun 				  struct dst_entry *dst,
1518*4882a593Smuzhiyun 				  struct request_sock *req_unhash,
1519*4882a593Smuzhiyun 				  bool *own_req)
1520*4882a593Smuzhiyun {
1521*4882a593Smuzhiyun 	struct inet_request_sock *ireq;
1522*4882a593Smuzhiyun 	bool found_dup_sk = false;
1523*4882a593Smuzhiyun 	struct inet_sock *newinet;
1524*4882a593Smuzhiyun 	struct tcp_sock *newtp;
1525*4882a593Smuzhiyun 	struct sock *newsk;
1526*4882a593Smuzhiyun #ifdef CONFIG_TCP_MD5SIG
1527*4882a593Smuzhiyun 	const union tcp_md5_addr *addr;
1528*4882a593Smuzhiyun 	struct tcp_md5sig_key *key;
1529*4882a593Smuzhiyun 	int l3index;
1530*4882a593Smuzhiyun #endif
1531*4882a593Smuzhiyun 	struct ip_options_rcu *inet_opt;
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	if (sk_acceptq_is_full(sk))
1534*4882a593Smuzhiyun 		goto exit_overflow;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	newsk = tcp_create_openreq_child(sk, req, skb);
1537*4882a593Smuzhiyun 	if (!newsk)
1538*4882a593Smuzhiyun 		goto exit_nonewsk;
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1541*4882a593Smuzhiyun 	inet_sk_rx_dst_set(newsk, skb);
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	newtp		      = tcp_sk(newsk);
1544*4882a593Smuzhiyun 	newinet		      = inet_sk(newsk);
1545*4882a593Smuzhiyun 	ireq		      = inet_rsk(req);
1546*4882a593Smuzhiyun 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1547*4882a593Smuzhiyun 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1548*4882a593Smuzhiyun 	newsk->sk_bound_dev_if = ireq->ir_iif;
1549*4882a593Smuzhiyun 	newinet->inet_saddr   = ireq->ir_loc_addr;
1550*4882a593Smuzhiyun 	inet_opt	      = rcu_dereference(ireq->ireq_opt);
1551*4882a593Smuzhiyun 	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1552*4882a593Smuzhiyun 	newinet->mc_index     = inet_iif(skb);
1553*4882a593Smuzhiyun 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1554*4882a593Smuzhiyun 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1555*4882a593Smuzhiyun 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1556*4882a593Smuzhiyun 	if (inet_opt)
1557*4882a593Smuzhiyun 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1558*4882a593Smuzhiyun 	newinet->inet_id = prandom_u32();
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	/* Set ToS of the new socket based upon the value of incoming SYN.
1561*4882a593Smuzhiyun 	 * ECT bits are set later in tcp_init_transfer().
1562*4882a593Smuzhiyun 	 */
1563*4882a593Smuzhiyun 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1564*4882a593Smuzhiyun 		newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	if (!dst) {
1567*4882a593Smuzhiyun 		dst = inet_csk_route_child_sock(sk, newsk, req);
1568*4882a593Smuzhiyun 		if (!dst)
1569*4882a593Smuzhiyun 			goto put_and_exit;
1570*4882a593Smuzhiyun 	} else {
1571*4882a593Smuzhiyun 		/* syncookie case : see end of cookie_v4_check() */
1572*4882a593Smuzhiyun 	}
1573*4882a593Smuzhiyun 	sk_setup_caps(newsk, dst);
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 	tcp_ca_openreq_child(newsk, dst);
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	tcp_sync_mss(newsk, dst_mtu(dst));
1578*4882a593Smuzhiyun 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	tcp_initialize_rcv_mss(newsk);
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun #ifdef CONFIG_TCP_MD5SIG
1583*4882a593Smuzhiyun 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1584*4882a593Smuzhiyun 	/* Copy over the MD5 key from the original socket */
1585*4882a593Smuzhiyun 	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1586*4882a593Smuzhiyun 	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1587*4882a593Smuzhiyun 	if (key) {
1588*4882a593Smuzhiyun 		/*
1589*4882a593Smuzhiyun 		 * We're using one, so create a matching key
1590*4882a593Smuzhiyun 		 * on the newsk structure. If we fail to get
1591*4882a593Smuzhiyun 		 * memory, then we end up not copying the key
1592*4882a593Smuzhiyun 		 * across. Shucks.
1593*4882a593Smuzhiyun 		 */
1594*4882a593Smuzhiyun 		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
1595*4882a593Smuzhiyun 			       key->key, key->keylen, GFP_ATOMIC);
1596*4882a593Smuzhiyun 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1597*4882a593Smuzhiyun 	}
1598*4882a593Smuzhiyun #endif
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	if (__inet_inherit_port(sk, newsk) < 0)
1601*4882a593Smuzhiyun 		goto put_and_exit;
1602*4882a593Smuzhiyun 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1603*4882a593Smuzhiyun 				       &found_dup_sk);
1604*4882a593Smuzhiyun 	if (likely(*own_req)) {
1605*4882a593Smuzhiyun 		tcp_move_syn(newtp, req);
1606*4882a593Smuzhiyun 		ireq->ireq_opt = NULL;
1607*4882a593Smuzhiyun 	} else {
1608*4882a593Smuzhiyun 		newinet->inet_opt = NULL;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 		if (!req_unhash && found_dup_sk) {
1611*4882a593Smuzhiyun 			/* This code path should only be executed in the
1612*4882a593Smuzhiyun 			 * syncookie case
1613*4882a593Smuzhiyun 			 */
1614*4882a593Smuzhiyun 			bh_unlock_sock(newsk);
1615*4882a593Smuzhiyun 			sock_put(newsk);
1616*4882a593Smuzhiyun 			newsk = NULL;
1617*4882a593Smuzhiyun 		}
1618*4882a593Smuzhiyun 	}
1619*4882a593Smuzhiyun 	return newsk;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun exit_overflow:
1622*4882a593Smuzhiyun 	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1623*4882a593Smuzhiyun exit_nonewsk:
1624*4882a593Smuzhiyun 	dst_release(dst);
1625*4882a593Smuzhiyun exit:
1626*4882a593Smuzhiyun 	tcp_listendrop(sk);
1627*4882a593Smuzhiyun 	return NULL;
1628*4882a593Smuzhiyun put_and_exit:
1629*4882a593Smuzhiyun 	newinet->inet_opt = NULL;
1630*4882a593Smuzhiyun 	inet_csk_prepare_forced_close(newsk);
1631*4882a593Smuzhiyun 	tcp_done(newsk);
1632*4882a593Smuzhiyun 	goto exit;
1633*4882a593Smuzhiyun }
1634*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun #ifdef CONFIG_SYN_COOKIES
1639*4882a593Smuzhiyun 	const struct tcphdr *th = tcp_hdr(skb);
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun 	if (!th->syn)
1642*4882a593Smuzhiyun 		sk = cookie_v4_check(sk, skb);
1643*4882a593Smuzhiyun #endif
1644*4882a593Smuzhiyun 	return sk;
1645*4882a593Smuzhiyun }
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1648*4882a593Smuzhiyun 			 struct tcphdr *th, u32 *cookie)
1649*4882a593Smuzhiyun {
1650*4882a593Smuzhiyun 	u16 mss = 0;
1651*4882a593Smuzhiyun #ifdef CONFIG_SYN_COOKIES
1652*4882a593Smuzhiyun 	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1653*4882a593Smuzhiyun 				    &tcp_request_sock_ipv4_ops, sk, th);
1654*4882a593Smuzhiyun 	if (mss) {
1655*4882a593Smuzhiyun 		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
1656*4882a593Smuzhiyun 		tcp_synq_overflow(sk);
1657*4882a593Smuzhiyun 	}
1658*4882a593Smuzhiyun #endif
1659*4882a593Smuzhiyun 	return mss;
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun /* The socket must have its spinlock held when we get
1663*4882a593Smuzhiyun  * here, unless it is a TCP_LISTEN socket.
1664*4882a593Smuzhiyun  *
1665*4882a593Smuzhiyun  * We have a potential double-lock case here, so even when
1666*4882a593Smuzhiyun  * doing backlog processing we use the BH locking scheme.
1667*4882a593Smuzhiyun  * This is because we cannot sleep with the original spinlock
1668*4882a593Smuzhiyun  * held.
1669*4882a593Smuzhiyun  */
1670*4882a593Smuzhiyun int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1671*4882a593Smuzhiyun {
1672*4882a593Smuzhiyun 	struct sock *rsk;
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1675*4882a593Smuzhiyun 		struct dst_entry *dst;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 		dst = rcu_dereference_protected(sk->sk_rx_dst,
1678*4882a593Smuzhiyun 						lockdep_sock_is_held(sk));
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 		sock_rps_save_rxhash(sk, skb);
1681*4882a593Smuzhiyun 		sk_mark_napi_id(sk, skb);
1682*4882a593Smuzhiyun 		if (dst) {
1683*4882a593Smuzhiyun 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1684*4882a593Smuzhiyun 			    !dst->ops->check(dst, 0)) {
1685*4882a593Smuzhiyun 				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1686*4882a593Smuzhiyun 				dst_release(dst);
1687*4882a593Smuzhiyun 			}
1688*4882a593Smuzhiyun 		}
1689*4882a593Smuzhiyun 		tcp_rcv_established(sk, skb);
1690*4882a593Smuzhiyun 		return 0;
1691*4882a593Smuzhiyun 	}
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	if (tcp_checksum_complete(skb))
1694*4882a593Smuzhiyun 		goto csum_err;
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 	if (sk->sk_state == TCP_LISTEN) {
1697*4882a593Smuzhiyun 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 		if (!nsk)
1700*4882a593Smuzhiyun 			goto discard;
1701*4882a593Smuzhiyun 		if (nsk != sk) {
1702*4882a593Smuzhiyun 			if (tcp_child_process(sk, nsk, skb)) {
1703*4882a593Smuzhiyun 				rsk = nsk;
1704*4882a593Smuzhiyun 				goto reset;
1705*4882a593Smuzhiyun 			}
1706*4882a593Smuzhiyun 			return 0;
1707*4882a593Smuzhiyun 		}
1708*4882a593Smuzhiyun 	} else
1709*4882a593Smuzhiyun 		sock_rps_save_rxhash(sk, skb);
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun 	if (tcp_rcv_state_process(sk, skb)) {
1712*4882a593Smuzhiyun 		rsk = sk;
1713*4882a593Smuzhiyun 		goto reset;
1714*4882a593Smuzhiyun 	}
1715*4882a593Smuzhiyun 	return 0;
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun reset:
1718*4882a593Smuzhiyun 	tcp_v4_send_reset(rsk, skb);
1719*4882a593Smuzhiyun discard:
1720*4882a593Smuzhiyun 	kfree_skb(skb);
1721*4882a593Smuzhiyun 	/* Be careful here. If this function gets more complicated and
1722*4882a593Smuzhiyun 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1723*4882a593Smuzhiyun 	 * might be destroyed here. This current version compiles correctly,
1724*4882a593Smuzhiyun 	 * but you have been warned.
1725*4882a593Smuzhiyun 	 */
1726*4882a593Smuzhiyun 	return 0;
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun csum_err:
1729*4882a593Smuzhiyun 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1730*4882a593Smuzhiyun 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1731*4882a593Smuzhiyun 	goto discard;
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_v4_do_rcv);
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun int tcp_v4_early_demux(struct sk_buff *skb)
1736*4882a593Smuzhiyun {
1737*4882a593Smuzhiyun 	const struct iphdr *iph;
1738*4882a593Smuzhiyun 	const struct tcphdr *th;
1739*4882a593Smuzhiyun 	struct sock *sk;
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	if (skb->pkt_type != PACKET_HOST)
1742*4882a593Smuzhiyun 		return 0;
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1745*4882a593Smuzhiyun 		return 0;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	iph = ip_hdr(skb);
1748*4882a593Smuzhiyun 	th = tcp_hdr(skb);
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	if (th->doff < sizeof(struct tcphdr) / 4)
1751*4882a593Smuzhiyun 		return 0;
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1754*4882a593Smuzhiyun 				       iph->saddr, th->source,
1755*4882a593Smuzhiyun 				       iph->daddr, ntohs(th->dest),
1756*4882a593Smuzhiyun 				       skb->skb_iif, inet_sdif(skb));
1757*4882a593Smuzhiyun 	if (sk) {
1758*4882a593Smuzhiyun 		skb->sk = sk;
1759*4882a593Smuzhiyun 		skb->destructor = sock_edemux;
1760*4882a593Smuzhiyun 		if (sk_fullsock(sk)) {
1761*4882a593Smuzhiyun 			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 			if (dst)
1764*4882a593Smuzhiyun 				dst = dst_check(dst, 0);
1765*4882a593Smuzhiyun 			if (dst &&
1766*4882a593Smuzhiyun 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1767*4882a593Smuzhiyun 				skb_dst_set_noref(skb, dst);
1768*4882a593Smuzhiyun 		}
1769*4882a593Smuzhiyun 	}
1770*4882a593Smuzhiyun 	return 0;
1771*4882a593Smuzhiyun }
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1774*4882a593Smuzhiyun {
1775*4882a593Smuzhiyun 	u32 limit, tail_gso_size, tail_gso_segs;
1776*4882a593Smuzhiyun 	struct skb_shared_info *shinfo;
1777*4882a593Smuzhiyun 	const struct tcphdr *th;
1778*4882a593Smuzhiyun 	struct tcphdr *thtail;
1779*4882a593Smuzhiyun 	struct sk_buff *tail;
1780*4882a593Smuzhiyun 	unsigned int hdrlen;
1781*4882a593Smuzhiyun 	bool fragstolen;
1782*4882a593Smuzhiyun 	u32 gso_segs;
1783*4882a593Smuzhiyun 	u32 gso_size;
1784*4882a593Smuzhiyun 	int delta;
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1787*4882a593Smuzhiyun 	 * we can fix skb->truesize to its real value to avoid future drops.
1788*4882a593Smuzhiyun 	 * This is valid because skb is not yet charged to the socket.
1789*4882a593Smuzhiyun 	 * It has been noticed that pure SACK packets were sometimes dropped
1790*4882a593Smuzhiyun 	 * (if cooked by drivers without the copybreak feature).
1791*4882a593Smuzhiyun 	 */
1792*4882a593Smuzhiyun 	skb_condense(skb);
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 	skb_dst_drop(skb);
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	if (unlikely(tcp_checksum_complete(skb))) {
1797*4882a593Smuzhiyun 		bh_unlock_sock(sk);
1798*4882a593Smuzhiyun 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1799*4882a593Smuzhiyun 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1800*4882a593Smuzhiyun 		return true;
1801*4882a593Smuzhiyun 	}
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	/* Attempt coalescing to last skb in backlog, even if we are
1804*4882a593Smuzhiyun 	 * above the limits.
1805*4882a593Smuzhiyun 	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1806*4882a593Smuzhiyun 	 */
1807*4882a593Smuzhiyun 	th = (const struct tcphdr *)skb->data;
1808*4882a593Smuzhiyun 	hdrlen = th->doff * 4;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	tail = sk->sk_backlog.tail;
1811*4882a593Smuzhiyun 	if (!tail)
1812*4882a593Smuzhiyun 		goto no_coalesce;
1813*4882a593Smuzhiyun 	thtail = (struct tcphdr *)tail->data;
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1816*4882a593Smuzhiyun 	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1817*4882a593Smuzhiyun 	    ((TCP_SKB_CB(tail)->tcp_flags |
1818*4882a593Smuzhiyun 	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1819*4882a593Smuzhiyun 	    !((TCP_SKB_CB(tail)->tcp_flags &
1820*4882a593Smuzhiyun 	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1821*4882a593Smuzhiyun 	    ((TCP_SKB_CB(tail)->tcp_flags ^
1822*4882a593Smuzhiyun 	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1823*4882a593Smuzhiyun #ifdef CONFIG_TLS_DEVICE
1824*4882a593Smuzhiyun 	    tail->decrypted != skb->decrypted ||
1825*4882a593Smuzhiyun #endif
1826*4882a593Smuzhiyun 	    thtail->doff != th->doff ||
1827*4882a593Smuzhiyun 	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1828*4882a593Smuzhiyun 		goto no_coalesce;
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	__skb_pull(skb, hdrlen);
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 	shinfo = skb_shinfo(skb);
1833*4882a593Smuzhiyun 	gso_size = shinfo->gso_size ?: skb->len;
1834*4882a593Smuzhiyun 	gso_segs = shinfo->gso_segs ?: 1;
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 	shinfo = skb_shinfo(tail);
1837*4882a593Smuzhiyun 	tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
1838*4882a593Smuzhiyun 	tail_gso_segs = shinfo->gso_segs ?: 1;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1841*4882a593Smuzhiyun 		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 		if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
1844*4882a593Smuzhiyun 			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1845*4882a593Smuzhiyun 			thtail->window = th->window;
1846*4882a593Smuzhiyun 		}
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1849*4882a593Smuzhiyun 		 * thtail->fin, so that the fast path in tcp_rcv_established()
1850*4882a593Smuzhiyun 		 * is not entered if we append a packet with a FIN.
1851*4882a593Smuzhiyun 		 * SYN, RST, URG are not present.
1852*4882a593Smuzhiyun 		 * ACK is set on both packets.
1853*4882a593Smuzhiyun 		 * PSH : we do not really care in TCP stack,
1854*4882a593Smuzhiyun 		 * PSH : we do not really care in the TCP stack,
1855*4882a593Smuzhiyun 		 */
1856*4882a593Smuzhiyun 		thtail->fin |= th->fin;
1857*4882a593Smuzhiyun 		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 		if (TCP_SKB_CB(skb)->has_rxtstamp) {
1860*4882a593Smuzhiyun 			TCP_SKB_CB(tail)->has_rxtstamp = true;
1861*4882a593Smuzhiyun 			tail->tstamp = skb->tstamp;
1862*4882a593Smuzhiyun 			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1863*4882a593Smuzhiyun 		}
1864*4882a593Smuzhiyun 
1865*4882a593Smuzhiyun 		/* Not as strict as GRO. We only need to carry mss max value */
1866*4882a593Smuzhiyun 		shinfo->gso_size = max(gso_size, tail_gso_size);
1867*4882a593Smuzhiyun 		shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 		sk->sk_backlog.len += delta;
1870*4882a593Smuzhiyun 		__NET_INC_STATS(sock_net(sk),
1871*4882a593Smuzhiyun 				LINUX_MIB_TCPBACKLOGCOALESCE);
1872*4882a593Smuzhiyun 		kfree_skb_partial(skb, fragstolen);
1873*4882a593Smuzhiyun 		return false;
1874*4882a593Smuzhiyun 	}
1875*4882a593Smuzhiyun 	__skb_push(skb, hdrlen);
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun no_coalesce:
1878*4882a593Smuzhiyun 	limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 	/* Only socket owner can try to collapse/prune rx queues
1881*4882a593Smuzhiyun 	 * to reduce memory overhead, so add a little headroom here.
1882*4882a593Smuzhiyun 	 * Few sockets backlog are possibly concurrently non empty.
1883*4882a593Smuzhiyun 	 * Only a few socket backlogs are likely to be non-empty concurrently.
1884*4882a593Smuzhiyun 	limit += 64 * 1024;
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1887*4882a593Smuzhiyun 		bh_unlock_sock(sk);
1888*4882a593Smuzhiyun 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1889*4882a593Smuzhiyun 		return true;
1890*4882a593Smuzhiyun 	}
1891*4882a593Smuzhiyun 	return false;
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_add_backlog);
1894*4882a593Smuzhiyun 
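/* A sketch mirroring the limit computed under no_coalesce above: the
 * receive buffer, plus half of the send buffer, plus 64KB of headroom.
 * With the illustrative values sk_rcvbuf = 131072 and sk_sndbuf = 16384,
 * the backlog may hold up to 131072 + 8192 + 65536 = 204800 bytes of
 * skb truesize before segments are dropped.
 */
static u32 backlog_limit(u32 rcvbuf, u32 sndbuf)
{
	return rcvbuf + (sndbuf >> 1) + 64 * 1024;
}
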
1895*4882a593Smuzhiyun int tcp_filter(struct sock *sk, struct sk_buff *skb)
1896*4882a593Smuzhiyun {
1897*4882a593Smuzhiyun 	struct tcphdr *th = (struct tcphdr *)skb->data;
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	return sk_filter_trim_cap(sk, skb, th->doff * 4);
1900*4882a593Smuzhiyun }
1901*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_filter);
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun static void tcp_v4_restore_cb(struct sk_buff *skb)
1904*4882a593Smuzhiyun {
1905*4882a593Smuzhiyun 	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1906*4882a593Smuzhiyun 		sizeof(struct inet_skb_parm));
1907*4882a593Smuzhiyun }
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1910*4882a593Smuzhiyun 			   const struct tcphdr *th)
1911*4882a593Smuzhiyun {
1912*4882a593Smuzhiyun 	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1913*4882a593Smuzhiyun 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1914*4882a593Smuzhiyun 	 */
1915*4882a593Smuzhiyun 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1916*4882a593Smuzhiyun 		sizeof(struct inet_skb_parm));
1917*4882a593Smuzhiyun 	barrier();
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1920*4882a593Smuzhiyun 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1921*4882a593Smuzhiyun 				    skb->len - th->doff * 4);
1922*4882a593Smuzhiyun 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1923*4882a593Smuzhiyun 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1924*4882a593Smuzhiyun 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1925*4882a593Smuzhiyun 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1926*4882a593Smuzhiyun 	TCP_SKB_CB(skb)->sacked	 = 0;
1927*4882a593Smuzhiyun 	TCP_SKB_CB(skb)->has_rxtstamp =
1928*4882a593Smuzhiyun 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1929*4882a593Smuzhiyun }
1930*4882a593Smuzhiyun 
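/* The end_seq arithmetic above, worked through: SYN and FIN each consume
 * one sequence number, so end_seq = seq + th->syn + th->fin + payload
 * bytes. Illustrative values:
 *
 *   pure SYN,  0 payload bytes, seq = 1000  ->  end_seq = 1001
 *   data seg, 10 payload bytes, seq = 1000  ->  end_seq = 1010
 *   FIN seg,   3 payload bytes, seq = 1000  ->  end_seq = 1004
 */
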
1931*4882a593Smuzhiyun /*
1932*4882a593Smuzhiyun  *	From tcp_input.c
1933*4882a593Smuzhiyun  */
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun int tcp_v4_rcv(struct sk_buff *skb)
1936*4882a593Smuzhiyun {
1937*4882a593Smuzhiyun 	struct net *net = dev_net(skb->dev);
1938*4882a593Smuzhiyun 	struct sk_buff *skb_to_free;
1939*4882a593Smuzhiyun 	int sdif = inet_sdif(skb);
1940*4882a593Smuzhiyun 	int dif = inet_iif(skb);
1941*4882a593Smuzhiyun 	const struct iphdr *iph;
1942*4882a593Smuzhiyun 	const struct tcphdr *th;
1943*4882a593Smuzhiyun 	bool refcounted;
1944*4882a593Smuzhiyun 	struct sock *sk;
1945*4882a593Smuzhiyun 	int ret;
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 	if (skb->pkt_type != PACKET_HOST)
1948*4882a593Smuzhiyun 		goto discard_it;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	/* Count it even if it's bad */
1951*4882a593Smuzhiyun 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1954*4882a593Smuzhiyun 		goto discard_it;
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	th = (const struct tcphdr *)skb->data;
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1959*4882a593Smuzhiyun 		goto bad_packet;
1960*4882a593Smuzhiyun 	if (!pskb_may_pull(skb, th->doff * 4))
1961*4882a593Smuzhiyun 		goto discard_it;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	/* An explanation is required here, I think.
1964*4882a593Smuzhiyun 	 * Packet length and doff are validated by header prediction,
1965*4882a593Smuzhiyun 	 * provided the case of th->doff == 0 is eliminated.
1966*4882a593Smuzhiyun 	 * So, we defer the checks. */
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1969*4882a593Smuzhiyun 		goto csum_error;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	th = (const struct tcphdr *)skb->data;
1972*4882a593Smuzhiyun 	iph = ip_hdr(skb);
1973*4882a593Smuzhiyun lookup:
1974*4882a593Smuzhiyun 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1975*4882a593Smuzhiyun 			       th->dest, sdif, &refcounted);
1976*4882a593Smuzhiyun 	if (!sk)
1977*4882a593Smuzhiyun 		goto no_tcp_socket;
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun process:
1980*4882a593Smuzhiyun 	if (sk->sk_state == TCP_TIME_WAIT)
1981*4882a593Smuzhiyun 		goto do_time_wait;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1984*4882a593Smuzhiyun 		struct request_sock *req = inet_reqsk(sk);
1985*4882a593Smuzhiyun 		bool req_stolen = false;
1986*4882a593Smuzhiyun 		struct sock *nsk;
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 		sk = req->rsk_listener;
1989*4882a593Smuzhiyun 		if (unlikely(!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb) ||
1990*4882a593Smuzhiyun 			     tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
1991*4882a593Smuzhiyun 			sk_drops_add(sk, skb);
1992*4882a593Smuzhiyun 			reqsk_put(req);
1993*4882a593Smuzhiyun 			goto discard_it;
1994*4882a593Smuzhiyun 		}
1995*4882a593Smuzhiyun 		if (tcp_checksum_complete(skb)) {
1996*4882a593Smuzhiyun 			reqsk_put(req);
1997*4882a593Smuzhiyun 			goto csum_error;
1998*4882a593Smuzhiyun 		}
1999*4882a593Smuzhiyun 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
2000*4882a593Smuzhiyun 			inet_csk_reqsk_queue_drop_and_put(sk, req);
2001*4882a593Smuzhiyun 			goto lookup;
2002*4882a593Smuzhiyun 		}
2003*4882a593Smuzhiyun 		/* We own a reference on the listener, increase it again
2004*4882a593Smuzhiyun 		 * as we might lose it too soon.
2005*4882a593Smuzhiyun 		 */
2006*4882a593Smuzhiyun 		sock_hold(sk);
2007*4882a593Smuzhiyun 		refcounted = true;
2008*4882a593Smuzhiyun 		nsk = NULL;
2009*4882a593Smuzhiyun 		if (!tcp_filter(sk, skb)) {
2010*4882a593Smuzhiyun 			th = (const struct tcphdr *)skb->data;
2011*4882a593Smuzhiyun 			iph = ip_hdr(skb);
2012*4882a593Smuzhiyun 			tcp_v4_fill_cb(skb, iph, th);
2013*4882a593Smuzhiyun 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
2014*4882a593Smuzhiyun 		}
2015*4882a593Smuzhiyun 		if (!nsk) {
2016*4882a593Smuzhiyun 			reqsk_put(req);
2017*4882a593Smuzhiyun 			if (req_stolen) {
2018*4882a593Smuzhiyun 				/* Another cpu got exclusive access to req
2019*4882a593Smuzhiyun 				 * and created a full-blown socket.
2020*4882a593Smuzhiyun 				 * Try to feed this packet to this socket
2021*4882a593Smuzhiyun 				 * instead of discarding it.
2022*4882a593Smuzhiyun 				 */
2023*4882a593Smuzhiyun 				tcp_v4_restore_cb(skb);
2024*4882a593Smuzhiyun 				sock_put(sk);
2025*4882a593Smuzhiyun 				goto lookup;
2026*4882a593Smuzhiyun 			}
2027*4882a593Smuzhiyun 			goto discard_and_relse;
2028*4882a593Smuzhiyun 		}
2029*4882a593Smuzhiyun 		nf_reset_ct(skb);
2030*4882a593Smuzhiyun 		if (nsk == sk) {
2031*4882a593Smuzhiyun 			reqsk_put(req);
2032*4882a593Smuzhiyun 			tcp_v4_restore_cb(skb);
2033*4882a593Smuzhiyun 		} else if (tcp_child_process(sk, nsk, skb)) {
2034*4882a593Smuzhiyun 			tcp_v4_send_reset(nsk, skb);
2035*4882a593Smuzhiyun 			goto discard_and_relse;
2036*4882a593Smuzhiyun 		} else {
2037*4882a593Smuzhiyun 			sock_put(sk);
2038*4882a593Smuzhiyun 			return 0;
2039*4882a593Smuzhiyun 		}
2040*4882a593Smuzhiyun 	}
2041*4882a593Smuzhiyun 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2042*4882a593Smuzhiyun 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
2043*4882a593Smuzhiyun 		goto discard_and_relse;
2044*4882a593Smuzhiyun 	}
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2047*4882a593Smuzhiyun 		goto discard_and_relse;
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
2050*4882a593Smuzhiyun 		goto discard_and_relse;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	nf_reset_ct(skb);
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	if (tcp_filter(sk, skb))
2055*4882a593Smuzhiyun 		goto discard_and_relse;
2056*4882a593Smuzhiyun 	th = (const struct tcphdr *)skb->data;
2057*4882a593Smuzhiyun 	iph = ip_hdr(skb);
2058*4882a593Smuzhiyun 	tcp_v4_fill_cb(skb, iph, th);
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 	skb->dev = NULL;
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	if (sk->sk_state == TCP_LISTEN) {
2063*4882a593Smuzhiyun 		ret = tcp_v4_do_rcv(sk, skb);
2064*4882a593Smuzhiyun 		goto put_and_return;
2065*4882a593Smuzhiyun 	}
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun 	sk_incoming_cpu_update(sk);
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun 	bh_lock_sock_nested(sk);
2070*4882a593Smuzhiyun 	tcp_segs_in(tcp_sk(sk), skb);
2071*4882a593Smuzhiyun 	ret = 0;
2072*4882a593Smuzhiyun 	if (!sock_owned_by_user(sk)) {
2073*4882a593Smuzhiyun 		skb_to_free = sk->sk_rx_skb_cache;
2074*4882a593Smuzhiyun 		sk->sk_rx_skb_cache = NULL;
2075*4882a593Smuzhiyun 		ret = tcp_v4_do_rcv(sk, skb);
2076*4882a593Smuzhiyun 	} else {
2077*4882a593Smuzhiyun 		if (tcp_add_backlog(sk, skb))
2078*4882a593Smuzhiyun 			goto discard_and_relse;
2079*4882a593Smuzhiyun 		skb_to_free = NULL;
2080*4882a593Smuzhiyun 	}
2081*4882a593Smuzhiyun 	bh_unlock_sock(sk);
2082*4882a593Smuzhiyun 	if (skb_to_free)
2083*4882a593Smuzhiyun 		__kfree_skb(skb_to_free);
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun put_and_return:
2086*4882a593Smuzhiyun 	if (refcounted)
2087*4882a593Smuzhiyun 		sock_put(sk);
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	return ret;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun no_tcp_socket:
2092*4882a593Smuzhiyun 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2093*4882a593Smuzhiyun 		goto discard_it;
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	tcp_v4_fill_cb(skb, iph, th);
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	if (tcp_checksum_complete(skb)) {
2098*4882a593Smuzhiyun csum_error:
2099*4882a593Smuzhiyun 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
2100*4882a593Smuzhiyun bad_packet:
2101*4882a593Smuzhiyun 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
2102*4882a593Smuzhiyun 	} else {
2103*4882a593Smuzhiyun 		tcp_v4_send_reset(NULL, skb);
2104*4882a593Smuzhiyun 	}
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun discard_it:
2107*4882a593Smuzhiyun 	/* Discard frame. */
2108*4882a593Smuzhiyun 	kfree_skb(skb);
2109*4882a593Smuzhiyun 	return 0;
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun discard_and_relse:
2112*4882a593Smuzhiyun 	sk_drops_add(sk, skb);
2113*4882a593Smuzhiyun 	if (refcounted)
2114*4882a593Smuzhiyun 		sock_put(sk);
2115*4882a593Smuzhiyun 	goto discard_it;
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun do_time_wait:
2118*4882a593Smuzhiyun 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2119*4882a593Smuzhiyun 		inet_twsk_put(inet_twsk(sk));
2120*4882a593Smuzhiyun 		goto discard_it;
2121*4882a593Smuzhiyun 	}
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	tcp_v4_fill_cb(skb, iph, th);
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 	if (tcp_checksum_complete(skb)) {
2126*4882a593Smuzhiyun 		inet_twsk_put(inet_twsk(sk));
2127*4882a593Smuzhiyun 		goto csum_error;
2128*4882a593Smuzhiyun 	}
2129*4882a593Smuzhiyun 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2130*4882a593Smuzhiyun 	case TCP_TW_SYN: {
2131*4882a593Smuzhiyun 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2132*4882a593Smuzhiyun 							&tcp_hashinfo, skb,
2133*4882a593Smuzhiyun 							__tcp_hdrlen(th),
2134*4882a593Smuzhiyun 							iph->saddr, th->source,
2135*4882a593Smuzhiyun 							iph->daddr, th->dest,
2136*4882a593Smuzhiyun 							inet_iif(skb),
2137*4882a593Smuzhiyun 							sdif);
2138*4882a593Smuzhiyun 		if (sk2) {
2139*4882a593Smuzhiyun 			inet_twsk_deschedule_put(inet_twsk(sk));
2140*4882a593Smuzhiyun 			sk = sk2;
2141*4882a593Smuzhiyun 			tcp_v4_restore_cb(skb);
2142*4882a593Smuzhiyun 			refcounted = false;
2143*4882a593Smuzhiyun 			goto process;
2144*4882a593Smuzhiyun 		}
2145*4882a593Smuzhiyun 	}
2146*4882a593Smuzhiyun 		/* to ACK */
2147*4882a593Smuzhiyun 		fallthrough;
2148*4882a593Smuzhiyun 	case TCP_TW_ACK:
2149*4882a593Smuzhiyun 		tcp_v4_timewait_ack(sk, skb);
2150*4882a593Smuzhiyun 		break;
2151*4882a593Smuzhiyun 	case TCP_TW_RST:
2152*4882a593Smuzhiyun 		tcp_v4_send_reset(sk, skb);
2153*4882a593Smuzhiyun 		inet_twsk_deschedule_put(inet_twsk(sk));
2154*4882a593Smuzhiyun 		goto discard_it;
2155*4882a593Smuzhiyun 	case TCP_TW_SUCCESS:;
2156*4882a593Smuzhiyun 	}
2157*4882a593Smuzhiyun 	goto discard_it;
2158*4882a593Smuzhiyun }
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun static struct timewait_sock_ops tcp_timewait_sock_ops = {
2161*4882a593Smuzhiyun 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
2162*4882a593Smuzhiyun 	.twsk_unique	= tcp_twsk_unique,
2163*4882a593Smuzhiyun 	.twsk_destructor= tcp_twsk_destructor,
2164*4882a593Smuzhiyun };
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2167*4882a593Smuzhiyun {
2168*4882a593Smuzhiyun 	struct dst_entry *dst = skb_dst(skb);
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	if (dst && dst_hold_safe(dst)) {
2171*4882a593Smuzhiyun 		rcu_assign_pointer(sk->sk_rx_dst, dst);
2172*4882a593Smuzhiyun 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2173*4882a593Smuzhiyun 	}
2174*4882a593Smuzhiyun }
2175*4882a593Smuzhiyun EXPORT_SYMBOL(inet_sk_rx_dst_set);
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun const struct inet_connection_sock_af_ops ipv4_specific = {
2178*4882a593Smuzhiyun 	.queue_xmit	   = ip_queue_xmit,
2179*4882a593Smuzhiyun 	.send_check	   = tcp_v4_send_check,
2180*4882a593Smuzhiyun 	.rebuild_header	   = inet_sk_rebuild_header,
2181*4882a593Smuzhiyun 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2182*4882a593Smuzhiyun 	.conn_request	   = tcp_v4_conn_request,
2183*4882a593Smuzhiyun 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
2184*4882a593Smuzhiyun 	.net_header_len	   = sizeof(struct iphdr),
2185*4882a593Smuzhiyun 	.setsockopt	   = ip_setsockopt,
2186*4882a593Smuzhiyun 	.getsockopt	   = ip_getsockopt,
2187*4882a593Smuzhiyun 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
2188*4882a593Smuzhiyun 	.sockaddr_len	   = sizeof(struct sockaddr_in),
2189*4882a593Smuzhiyun 	.mtu_reduced	   = tcp_v4_mtu_reduced,
2190*4882a593Smuzhiyun };
2191*4882a593Smuzhiyun EXPORT_SYMBOL(ipv4_specific);
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun #ifdef CONFIG_TCP_MD5SIG
2194*4882a593Smuzhiyun static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2195*4882a593Smuzhiyun 	.md5_lookup		= tcp_v4_md5_lookup,
2196*4882a593Smuzhiyun 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2197*4882a593Smuzhiyun 	.md5_parse		= tcp_v4_parse_md5_keys,
2198*4882a593Smuzhiyun };
2199*4882a593Smuzhiyun #endif
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun /* NOTE: A lot of things are set to zero explicitly by the call to
2202*4882a593Smuzhiyun  *       sk_alloc(), so they need not be done here.
2203*4882a593Smuzhiyun  */
2204*4882a593Smuzhiyun static int tcp_v4_init_sock(struct sock *sk)
2205*4882a593Smuzhiyun {
2206*4882a593Smuzhiyun 	struct inet_connection_sock *icsk = inet_csk(sk);
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 	tcp_init_sock(sk);
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun 	icsk->icsk_af_ops = &ipv4_specific;
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun #ifdef CONFIG_TCP_MD5SIG
2213*4882a593Smuzhiyun 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2214*4882a593Smuzhiyun #endif
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	return 0;
2217*4882a593Smuzhiyun }
2218*4882a593Smuzhiyun 
2219*4882a593Smuzhiyun void tcp_v4_destroy_sock(struct sock *sk)
2220*4882a593Smuzhiyun {
2221*4882a593Smuzhiyun 	struct tcp_sock *tp = tcp_sk(sk);
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 	trace_tcp_destroy_sock(sk);
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	tcp_clear_xmit_timers(sk);
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 	tcp_cleanup_congestion_control(sk);
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 	tcp_cleanup_ulp(sk);
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun 	/* Clean up the write buffer. */
2232*4882a593Smuzhiyun 	tcp_write_queue_purge(sk);
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun 	/* Check if we want to disable active TFO */
2235*4882a593Smuzhiyun 	tcp_fastopen_active_disable_ofo_check(sk);
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun 	/* Cleans up our, hopefully empty, out_of_order_queue. */
2238*4882a593Smuzhiyun 	skb_rbtree_purge(&tp->out_of_order_queue);
2239*4882a593Smuzhiyun 
2240*4882a593Smuzhiyun #ifdef CONFIG_TCP_MD5SIG
2241*4882a593Smuzhiyun 	/* Clean up the MD5 key list, if any */
2242*4882a593Smuzhiyun 	if (tp->md5sig_info) {
2243*4882a593Smuzhiyun 		tcp_clear_md5_list(sk);
2244*4882a593Smuzhiyun 		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2245*4882a593Smuzhiyun 		tp->md5sig_info = NULL;
2246*4882a593Smuzhiyun 	}
2247*4882a593Smuzhiyun #endif
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	/* Clean up a referenced TCP bind bucket. */
2250*4882a593Smuzhiyun 	if (inet_csk(sk)->icsk_bind_hash)
2251*4882a593Smuzhiyun 		inet_put_port(sk);
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2254*4882a593Smuzhiyun 
2255*4882a593Smuzhiyun 	/* If socket is aborted during connect operation */
2256*4882a593Smuzhiyun 	tcp_free_fastopen_req(tp);
2257*4882a593Smuzhiyun 	tcp_fastopen_destroy_cipher(sk);
2258*4882a593Smuzhiyun 	tcp_saved_syn_free(tp);
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 	sk_sockets_allocated_dec(sk);
2261*4882a593Smuzhiyun }
2262*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_v4_destroy_sock);
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
2265*4882a593Smuzhiyun /* Proc filesystem TCP sock list dumping. */
2266*4882a593Smuzhiyun 
2267*4882a593Smuzhiyun /*
2268*4882a593Smuzhiyun  * Get the next listener socket following cur.  If cur is NULL, get the first
2269*4882a593Smuzhiyun  * socket starting from the bucket given in st->bucket; when st->bucket is
2270*4882a593Smuzhiyun  * zero, the very first socket in the hash table is returned.
2271*4882a593Smuzhiyun  */
2272*4882a593Smuzhiyun static void *listening_get_next(struct seq_file *seq, void *cur)
2273*4882a593Smuzhiyun {
2274*4882a593Smuzhiyun 	struct tcp_seq_afinfo *afinfo;
2275*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2276*4882a593Smuzhiyun 	struct net *net = seq_file_net(seq);
2277*4882a593Smuzhiyun 	struct inet_listen_hashbucket *ilb;
2278*4882a593Smuzhiyun 	struct hlist_nulls_node *node;
2279*4882a593Smuzhiyun 	struct sock *sk = cur;
2280*4882a593Smuzhiyun 
2281*4882a593Smuzhiyun 	if (st->bpf_seq_afinfo)
2282*4882a593Smuzhiyun 		afinfo = st->bpf_seq_afinfo;
2283*4882a593Smuzhiyun 	else
2284*4882a593Smuzhiyun 		afinfo = PDE_DATA(file_inode(seq->file));
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun 	if (!sk) {
2287*4882a593Smuzhiyun get_head:
2288*4882a593Smuzhiyun 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2289*4882a593Smuzhiyun 		spin_lock(&ilb->lock);
2290*4882a593Smuzhiyun 		sk = sk_nulls_head(&ilb->nulls_head);
2291*4882a593Smuzhiyun 		st->offset = 0;
2292*4882a593Smuzhiyun 		goto get_sk;
2293*4882a593Smuzhiyun 	}
2294*4882a593Smuzhiyun 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2295*4882a593Smuzhiyun 	++st->num;
2296*4882a593Smuzhiyun 	++st->offset;
2297*4882a593Smuzhiyun 
2298*4882a593Smuzhiyun 	sk = sk_nulls_next(sk);
2299*4882a593Smuzhiyun get_sk:
2300*4882a593Smuzhiyun 	sk_nulls_for_each_from(sk, node) {
2301*4882a593Smuzhiyun 		if (!net_eq(sock_net(sk), net))
2302*4882a593Smuzhiyun 			continue;
2303*4882a593Smuzhiyun 		if (afinfo->family == AF_UNSPEC ||
2304*4882a593Smuzhiyun 		    sk->sk_family == afinfo->family)
2305*4882a593Smuzhiyun 			return sk;
2306*4882a593Smuzhiyun 	}
2307*4882a593Smuzhiyun 	spin_unlock(&ilb->lock);
2308*4882a593Smuzhiyun 	st->offset = 0;
2309*4882a593Smuzhiyun 	if (++st->bucket < INET_LHTABLE_SIZE)
2310*4882a593Smuzhiyun 		goto get_head;
2311*4882a593Smuzhiyun 	return NULL;
2312*4882a593Smuzhiyun }
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2315*4882a593Smuzhiyun {
2316*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2317*4882a593Smuzhiyun 	void *rc;
2318*4882a593Smuzhiyun 
2319*4882a593Smuzhiyun 	st->bucket = 0;
2320*4882a593Smuzhiyun 	st->offset = 0;
2321*4882a593Smuzhiyun 	rc = listening_get_next(seq, NULL);
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	while (rc && *pos) {
2324*4882a593Smuzhiyun 		rc = listening_get_next(seq, rc);
2325*4882a593Smuzhiyun 		--*pos;
2326*4882a593Smuzhiyun 	}
2327*4882a593Smuzhiyun 	return rc;
2328*4882a593Smuzhiyun }
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun static inline bool empty_bucket(const struct tcp_iter_state *st)
2331*4882a593Smuzhiyun {
2332*4882a593Smuzhiyun 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2333*4882a593Smuzhiyun }
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun /*
2336*4882a593Smuzhiyun  * Get first established socket starting from bucket given in st->bucket.
2337*4882a593Smuzhiyun  * If st->bucket is zero, the very first socket in the hash is returned.
2338*4882a593Smuzhiyun  */
2339*4882a593Smuzhiyun static void *established_get_first(struct seq_file *seq)
2340*4882a593Smuzhiyun {
2341*4882a593Smuzhiyun 	struct tcp_seq_afinfo *afinfo;
2342*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2343*4882a593Smuzhiyun 	struct net *net = seq_file_net(seq);
2344*4882a593Smuzhiyun 	void *rc = NULL;
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 	if (st->bpf_seq_afinfo)
2347*4882a593Smuzhiyun 		afinfo = st->bpf_seq_afinfo;
2348*4882a593Smuzhiyun 	else
2349*4882a593Smuzhiyun 		afinfo = PDE_DATA(file_inode(seq->file));
2350*4882a593Smuzhiyun 
2351*4882a593Smuzhiyun 	st->offset = 0;
2352*4882a593Smuzhiyun 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2353*4882a593Smuzhiyun 		struct sock *sk;
2354*4882a593Smuzhiyun 		struct hlist_nulls_node *node;
2355*4882a593Smuzhiyun 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 		/* Lockless fast path for the common case of empty buckets */
2358*4882a593Smuzhiyun 		if (empty_bucket(st))
2359*4882a593Smuzhiyun 			continue;
2360*4882a593Smuzhiyun 
2361*4882a593Smuzhiyun 		spin_lock_bh(lock);
2362*4882a593Smuzhiyun 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2363*4882a593Smuzhiyun 			if ((afinfo->family != AF_UNSPEC &&
2364*4882a593Smuzhiyun 			     sk->sk_family != afinfo->family) ||
2365*4882a593Smuzhiyun 			    !net_eq(sock_net(sk), net)) {
2366*4882a593Smuzhiyun 				continue;
2367*4882a593Smuzhiyun 			}
2368*4882a593Smuzhiyun 			rc = sk;
2369*4882a593Smuzhiyun 			goto out;
2370*4882a593Smuzhiyun 		}
2371*4882a593Smuzhiyun 		spin_unlock_bh(lock);
2372*4882a593Smuzhiyun 	}
2373*4882a593Smuzhiyun out:
2374*4882a593Smuzhiyun 	return rc;
2375*4882a593Smuzhiyun }
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun static void *established_get_next(struct seq_file *seq, void *cur)
2378*4882a593Smuzhiyun {
2379*4882a593Smuzhiyun 	struct tcp_seq_afinfo *afinfo;
2380*4882a593Smuzhiyun 	struct sock *sk = cur;
2381*4882a593Smuzhiyun 	struct hlist_nulls_node *node;
2382*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2383*4882a593Smuzhiyun 	struct net *net = seq_file_net(seq);
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 	if (st->bpf_seq_afinfo)
2386*4882a593Smuzhiyun 		afinfo = st->bpf_seq_afinfo;
2387*4882a593Smuzhiyun 	else
2388*4882a593Smuzhiyun 		afinfo = PDE_DATA(file_inode(seq->file));
2389*4882a593Smuzhiyun 
2390*4882a593Smuzhiyun 	++st->num;
2391*4882a593Smuzhiyun 	++st->offset;
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun 	sk = sk_nulls_next(sk);
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun 	sk_nulls_for_each_from(sk, node) {
2396*4882a593Smuzhiyun 		if ((afinfo->family == AF_UNSPEC ||
2397*4882a593Smuzhiyun 		     sk->sk_family == afinfo->family) &&
2398*4882a593Smuzhiyun 		    net_eq(sock_net(sk), net))
2399*4882a593Smuzhiyun 			return sk;
2400*4882a593Smuzhiyun 	}
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2403*4882a593Smuzhiyun 	++st->bucket;
2404*4882a593Smuzhiyun 	return established_get_first(seq);
2405*4882a593Smuzhiyun }
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun static void *established_get_idx(struct seq_file *seq, loff_t pos)
2408*4882a593Smuzhiyun {
2409*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2410*4882a593Smuzhiyun 	void *rc;
2411*4882a593Smuzhiyun 
2412*4882a593Smuzhiyun 	st->bucket = 0;
2413*4882a593Smuzhiyun 	rc = established_get_first(seq);
2414*4882a593Smuzhiyun 
2415*4882a593Smuzhiyun 	while (rc && pos) {
2416*4882a593Smuzhiyun 		rc = established_get_next(seq, rc);
2417*4882a593Smuzhiyun 		--pos;
2418*4882a593Smuzhiyun 	}
2419*4882a593Smuzhiyun 	return rc;
2420*4882a593Smuzhiyun }
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2423*4882a593Smuzhiyun {
2424*4882a593Smuzhiyun 	void *rc;
2425*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2426*4882a593Smuzhiyun 
2427*4882a593Smuzhiyun 	st->state = TCP_SEQ_STATE_LISTENING;
2428*4882a593Smuzhiyun 	rc	  = listening_get_idx(seq, &pos);
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 	if (!rc) {
2431*4882a593Smuzhiyun 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2432*4882a593Smuzhiyun 		rc	  = established_get_idx(seq, pos);
2433*4882a593Smuzhiyun 	}
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun 	return rc;
2436*4882a593Smuzhiyun }
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun static void *tcp_seek_last_pos(struct seq_file *seq)
2439*4882a593Smuzhiyun {
2440*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2441*4882a593Smuzhiyun 	int bucket = st->bucket;
2442*4882a593Smuzhiyun 	int offset = st->offset;
2443*4882a593Smuzhiyun 	int orig_num = st->num;
2444*4882a593Smuzhiyun 	void *rc = NULL;
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun 	switch (st->state) {
2447*4882a593Smuzhiyun 	case TCP_SEQ_STATE_LISTENING:
2448*4882a593Smuzhiyun 		if (st->bucket >= INET_LHTABLE_SIZE)
2449*4882a593Smuzhiyun 			break;
2450*4882a593Smuzhiyun 		st->state = TCP_SEQ_STATE_LISTENING;
2451*4882a593Smuzhiyun 		rc = listening_get_next(seq, NULL);
2452*4882a593Smuzhiyun 		while (offset-- && rc && bucket == st->bucket)
2453*4882a593Smuzhiyun 			rc = listening_get_next(seq, rc);
2454*4882a593Smuzhiyun 		if (rc)
2455*4882a593Smuzhiyun 			break;
2456*4882a593Smuzhiyun 		st->bucket = 0;
2457*4882a593Smuzhiyun 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2458*4882a593Smuzhiyun 		fallthrough;
2459*4882a593Smuzhiyun 	case TCP_SEQ_STATE_ESTABLISHED:
2460*4882a593Smuzhiyun 		if (st->bucket > tcp_hashinfo.ehash_mask)
2461*4882a593Smuzhiyun 			break;
2462*4882a593Smuzhiyun 		rc = established_get_first(seq);
2463*4882a593Smuzhiyun 		while (offset-- && rc && bucket == st->bucket)
2464*4882a593Smuzhiyun 			rc = established_get_next(seq, rc);
2465*4882a593Smuzhiyun 	}
2466*4882a593Smuzhiyun 
2467*4882a593Smuzhiyun 	st->num = orig_num;
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun 	return rc;
2470*4882a593Smuzhiyun }
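
/*
 * tcp_seek_last_pos() lets a dump spanning multiple read(2) calls resume
 * cheaply: st->bucket and st->offset record where the previous chunk
 * stopped, so only that one bucket is replayed instead of rescanning from
 * bucket zero.  For example, if the last read() ended at the third socket of
 * listening bucket 17, the next call re-locks bucket 17 and skips three
 * entries; if the bucket drained in the meantime and the replay runs past
 * it, the caller falls back to a full walk via tcp_get_idx().
 */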
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2473*4882a593Smuzhiyun {
2474*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2475*4882a593Smuzhiyun 	void *rc;
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 	if (*pos && *pos == st->last_pos) {
2478*4882a593Smuzhiyun 		rc = tcp_seek_last_pos(seq);
2479*4882a593Smuzhiyun 		if (rc)
2480*4882a593Smuzhiyun 			goto out;
2481*4882a593Smuzhiyun 	}
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 	st->state = TCP_SEQ_STATE_LISTENING;
2484*4882a593Smuzhiyun 	st->num = 0;
2485*4882a593Smuzhiyun 	st->bucket = 0;
2486*4882a593Smuzhiyun 	st->offset = 0;
2487*4882a593Smuzhiyun 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun out:
2490*4882a593Smuzhiyun 	st->last_pos = *pos;
2491*4882a593Smuzhiyun 	return rc;
2492*4882a593Smuzhiyun }
2493*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_seq_start);
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2496*4882a593Smuzhiyun {
2497*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2498*4882a593Smuzhiyun 	void *rc = NULL;
2499*4882a593Smuzhiyun 
2500*4882a593Smuzhiyun 	if (v == SEQ_START_TOKEN) {
2501*4882a593Smuzhiyun 		rc = tcp_get_idx(seq, 0);
2502*4882a593Smuzhiyun 		goto out;
2503*4882a593Smuzhiyun 	}
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun 	switch (st->state) {
2506*4882a593Smuzhiyun 	case TCP_SEQ_STATE_LISTENING:
2507*4882a593Smuzhiyun 		rc = listening_get_next(seq, v);
2508*4882a593Smuzhiyun 		if (!rc) {
2509*4882a593Smuzhiyun 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2510*4882a593Smuzhiyun 			st->bucket = 0;
2511*4882a593Smuzhiyun 			st->offset = 0;
2512*4882a593Smuzhiyun 			rc	  = established_get_first(seq);
2513*4882a593Smuzhiyun 		}
2514*4882a593Smuzhiyun 		break;
2515*4882a593Smuzhiyun 	case TCP_SEQ_STATE_ESTABLISHED:
2516*4882a593Smuzhiyun 		rc = established_get_next(seq, v);
2517*4882a593Smuzhiyun 		break;
2518*4882a593Smuzhiyun 	}
2519*4882a593Smuzhiyun out:
2520*4882a593Smuzhiyun 	++*pos;
2521*4882a593Smuzhiyun 	st->last_pos = *pos;
2522*4882a593Smuzhiyun 	return rc;
2523*4882a593Smuzhiyun }
2524*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_seq_next);
2525*4882a593Smuzhiyun 
2526*4882a593Smuzhiyun void tcp_seq_stop(struct seq_file *seq, void *v)
2527*4882a593Smuzhiyun {
2528*4882a593Smuzhiyun 	struct tcp_iter_state *st = seq->private;
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	switch (st->state) {
2531*4882a593Smuzhiyun 	case TCP_SEQ_STATE_LISTENING:
2532*4882a593Smuzhiyun 		if (v != SEQ_START_TOKEN)
2533*4882a593Smuzhiyun 			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2534*4882a593Smuzhiyun 		break;
2535*4882a593Smuzhiyun 	case TCP_SEQ_STATE_ESTABLISHED:
2536*4882a593Smuzhiyun 		if (v)
2537*4882a593Smuzhiyun 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2538*4882a593Smuzhiyun 		break;
2539*4882a593Smuzhiyun 	}
2540*4882a593Smuzhiyun }
2541*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_seq_stop);
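
/*
 * Together, tcp_seq_start/next/stop implement the seq_file protocol, so the
 * whole socket table can be consumed with an ordinary read loop from user
 * space.  A minimal user-space sketch (it assumes the "tcp" entry registered
 * below, i.e. /proc/net/tcp):
 *
 *	FILE *f = fopen("/proc/net/tcp", "r");
 *	char line[256];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);	// header line, then one socket per line
 *	if (f)
 *		fclose(f);
 */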
2542*4882a593Smuzhiyun 
2543*4882a593Smuzhiyun static void get_openreq4(const struct request_sock *req,
2544*4882a593Smuzhiyun 			 struct seq_file *f, int i)
2545*4882a593Smuzhiyun {
2546*4882a593Smuzhiyun 	const struct inet_request_sock *ireq = inet_rsk(req);
2547*4882a593Smuzhiyun 	long delta = req->rsk_timer.expires - jiffies;
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2550*4882a593Smuzhiyun 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2551*4882a593Smuzhiyun 		i,
2552*4882a593Smuzhiyun 		ireq->ir_loc_addr,
2553*4882a593Smuzhiyun 		ireq->ir_num,
2554*4882a593Smuzhiyun 		ireq->ir_rmt_addr,
2555*4882a593Smuzhiyun 		ntohs(ireq->ir_rmt_port),
2556*4882a593Smuzhiyun 		TCP_SYN_RECV,
2557*4882a593Smuzhiyun 		0, 0, /* could print option size, but that is af dependent. */
2558*4882a593Smuzhiyun 		1,    /* timers active (only the expire timer) */
2559*4882a593Smuzhiyun 		jiffies_delta_to_clock_t(delta),
2560*4882a593Smuzhiyun 		req->num_timeout,
2561*4882a593Smuzhiyun 		from_kuid_munged(seq_user_ns(f),
2562*4882a593Smuzhiyun 				 sock_i_uid(req->rsk_listener)),
2563*4882a593Smuzhiyun 		0,  /* non standard timer */
2564*4882a593Smuzhiyun 		0, /* open_requests have no inode */
2565*4882a593Smuzhiyun 		0,
2566*4882a593Smuzhiyun 		req);
2567*4882a593Smuzhiyun }
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2570*4882a593Smuzhiyun {
2571*4882a593Smuzhiyun 	int timer_active;
2572*4882a593Smuzhiyun 	unsigned long timer_expires;
2573*4882a593Smuzhiyun 	const struct tcp_sock *tp = tcp_sk(sk);
2574*4882a593Smuzhiyun 	const struct inet_connection_sock *icsk = inet_csk(sk);
2575*4882a593Smuzhiyun 	const struct inet_sock *inet = inet_sk(sk);
2576*4882a593Smuzhiyun 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2577*4882a593Smuzhiyun 	__be32 dest = inet->inet_daddr;
2578*4882a593Smuzhiyun 	__be32 src = inet->inet_rcv_saddr;
2579*4882a593Smuzhiyun 	__u16 destp = ntohs(inet->inet_dport);
2580*4882a593Smuzhiyun 	__u16 srcp = ntohs(inet->inet_sport);
2581*4882a593Smuzhiyun 	int rx_queue;
2582*4882a593Smuzhiyun 	int state;
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2585*4882a593Smuzhiyun 	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2586*4882a593Smuzhiyun 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2587*4882a593Smuzhiyun 		timer_active	= 1;
2588*4882a593Smuzhiyun 		timer_expires	= icsk->icsk_timeout;
2589*4882a593Smuzhiyun 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2590*4882a593Smuzhiyun 		timer_active	= 4;
2591*4882a593Smuzhiyun 		timer_expires	= icsk->icsk_timeout;
2592*4882a593Smuzhiyun 	} else if (timer_pending(&sk->sk_timer)) {
2593*4882a593Smuzhiyun 		timer_active	= 2;
2594*4882a593Smuzhiyun 		timer_expires	= sk->sk_timer.expires;
2595*4882a593Smuzhiyun 	} else {
2596*4882a593Smuzhiyun 		timer_active	= 0;
2597*4882a593Smuzhiyun 		timer_expires = jiffies;
2598*4882a593Smuzhiyun 	}
2599*4882a593Smuzhiyun 
2600*4882a593Smuzhiyun 	state = inet_sk_state_load(sk);
2601*4882a593Smuzhiyun 	if (state == TCP_LISTEN)
2602*4882a593Smuzhiyun 		rx_queue = READ_ONCE(sk->sk_ack_backlog);
2603*4882a593Smuzhiyun 	else
2604*4882a593Smuzhiyun 		/* Because we don't lock the socket,
2605*4882a593Smuzhiyun 		 * we might find a transient negative value.
2606*4882a593Smuzhiyun 		 */
2607*4882a593Smuzhiyun 		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2608*4882a593Smuzhiyun 				      READ_ONCE(tp->copied_seq), 0);
2609*4882a593Smuzhiyun 
2610*4882a593Smuzhiyun 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2611*4882a593Smuzhiyun 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2612*4882a593Smuzhiyun 		i, src, srcp, dest, destp, state,
2613*4882a593Smuzhiyun 		READ_ONCE(tp->write_seq) - tp->snd_una,
2614*4882a593Smuzhiyun 		rx_queue,
2615*4882a593Smuzhiyun 		timer_active,
2616*4882a593Smuzhiyun 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2617*4882a593Smuzhiyun 		icsk->icsk_retransmits,
2618*4882a593Smuzhiyun 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2619*4882a593Smuzhiyun 		icsk->icsk_probes_out,
2620*4882a593Smuzhiyun 		sock_i_ino(sk),
2621*4882a593Smuzhiyun 		refcount_read(&sk->sk_refcnt), sk,
2622*4882a593Smuzhiyun 		jiffies_to_clock_t(icsk->icsk_rto),
2623*4882a593Smuzhiyun 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2624*4882a593Smuzhiyun 		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2625*4882a593Smuzhiyun 		tp->snd_cwnd,
2626*4882a593Smuzhiyun 		state == TCP_LISTEN ?
2627*4882a593Smuzhiyun 		    fastopenq->max_qlen :
2628*4882a593Smuzhiyun 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2629*4882a593Smuzhiyun }
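
/*
 * The fields above print as fixed-width hex.  An illustrative (made-up,
 * truncated) line for a listener on 127.0.0.1:3306 would look like:
 *
 *	0: 0100007F:0CEA 00000000:0000 0A 00000000:00000000 00:00000000 ...
 *
 * where 0100007F is the raw __be32 for 127.0.0.1 printed as a host-endian
 * integer (hence byte-swapped on little-endian machines), 0CEA is port 3306,
 * and state 0A is TCP_LISTEN.
 */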
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2632*4882a593Smuzhiyun 			       struct seq_file *f, int i)
2633*4882a593Smuzhiyun {
2634*4882a593Smuzhiyun 	long delta = tw->tw_timer.expires - jiffies;
2635*4882a593Smuzhiyun 	__be32 dest, src;
2636*4882a593Smuzhiyun 	__u16 destp, srcp;
2637*4882a593Smuzhiyun 
2638*4882a593Smuzhiyun 	dest  = tw->tw_daddr;
2639*4882a593Smuzhiyun 	src   = tw->tw_rcv_saddr;
2640*4882a593Smuzhiyun 	destp = ntohs(tw->tw_dport);
2641*4882a593Smuzhiyun 	srcp  = ntohs(tw->tw_sport);
2642*4882a593Smuzhiyun 
2643*4882a593Smuzhiyun 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2644*4882a593Smuzhiyun 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2645*4882a593Smuzhiyun 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2646*4882a593Smuzhiyun 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2647*4882a593Smuzhiyun 		refcount_read(&tw->tw_refcnt), tw);
2648*4882a593Smuzhiyun }
2649*4882a593Smuzhiyun 
2650*4882a593Smuzhiyun #define TMPSZ 150
2651*4882a593Smuzhiyun 
2652*4882a593Smuzhiyun static int tcp4_seq_show(struct seq_file *seq, void *v)
2653*4882a593Smuzhiyun {
2654*4882a593Smuzhiyun 	struct tcp_iter_state *st;
2655*4882a593Smuzhiyun 	struct sock *sk = v;
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun 	seq_setwidth(seq, TMPSZ - 1);
2658*4882a593Smuzhiyun 	if (v == SEQ_START_TOKEN) {
2659*4882a593Smuzhiyun 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2660*4882a593Smuzhiyun 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2661*4882a593Smuzhiyun 			   "inode");
2662*4882a593Smuzhiyun 		goto out;
2663*4882a593Smuzhiyun 	}
2664*4882a593Smuzhiyun 	st = seq->private;
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun 	if (sk->sk_state == TCP_TIME_WAIT)
2667*4882a593Smuzhiyun 		get_timewait4_sock(v, seq, st->num);
2668*4882a593Smuzhiyun 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2669*4882a593Smuzhiyun 		get_openreq4(v, seq, st->num);
2670*4882a593Smuzhiyun 	else
2671*4882a593Smuzhiyun 		get_tcp4_sock(v, seq, st->num);
2672*4882a593Smuzhiyun out:
2673*4882a593Smuzhiyun 	seq_pad(seq, '\n');
2674*4882a593Smuzhiyun 	return 0;
2675*4882a593Smuzhiyun }
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun #ifdef CONFIG_BPF_SYSCALL
2678*4882a593Smuzhiyun struct bpf_iter__tcp {
2679*4882a593Smuzhiyun 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
2680*4882a593Smuzhiyun 	__bpf_md_ptr(struct sock_common *, sk_common);
2681*4882a593Smuzhiyun 	uid_t uid __aligned(8);
2682*4882a593Smuzhiyun };
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
2685*4882a593Smuzhiyun 			     struct sock_common *sk_common, uid_t uid)
2686*4882a593Smuzhiyun {
2687*4882a593Smuzhiyun 	struct bpf_iter__tcp ctx;
2688*4882a593Smuzhiyun 
2689*4882a593Smuzhiyun 	meta->seq_num--;  /* skip SEQ_START_TOKEN */
2690*4882a593Smuzhiyun 	ctx.meta = meta;
2691*4882a593Smuzhiyun 	ctx.sk_common = sk_common;
2692*4882a593Smuzhiyun 	ctx.uid = uid;
2693*4882a593Smuzhiyun 	return bpf_iter_run_prog(prog, &ctx);
2694*4882a593Smuzhiyun }
2695*4882a593Smuzhiyun 
2696*4882a593Smuzhiyun static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
2697*4882a593Smuzhiyun {
2698*4882a593Smuzhiyun 	struct bpf_iter_meta meta;
2699*4882a593Smuzhiyun 	struct bpf_prog *prog;
2700*4882a593Smuzhiyun 	struct sock *sk = v;
2701*4882a593Smuzhiyun 	uid_t uid;
2702*4882a593Smuzhiyun 
2703*4882a593Smuzhiyun 	if (v == SEQ_START_TOKEN)
2704*4882a593Smuzhiyun 		return 0;
2705*4882a593Smuzhiyun 
2706*4882a593Smuzhiyun 	if (sk->sk_state == TCP_TIME_WAIT) {
2707*4882a593Smuzhiyun 		uid = 0;
2708*4882a593Smuzhiyun 	} else if (sk->sk_state == TCP_NEW_SYN_RECV) {
2709*4882a593Smuzhiyun 		const struct request_sock *req = v;
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun 		uid = from_kuid_munged(seq_user_ns(seq),
2712*4882a593Smuzhiyun 				       sock_i_uid(req->rsk_listener));
2713*4882a593Smuzhiyun 	} else {
2714*4882a593Smuzhiyun 		uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
2715*4882a593Smuzhiyun 	}
2716*4882a593Smuzhiyun 
2717*4882a593Smuzhiyun 	meta.seq = seq;
2718*4882a593Smuzhiyun 	prog = bpf_iter_get_info(&meta, false);
2719*4882a593Smuzhiyun 	return tcp_prog_seq_show(prog, &meta, v, uid);
2720*4882a593Smuzhiyun }
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
2723*4882a593Smuzhiyun {
2724*4882a593Smuzhiyun 	struct bpf_iter_meta meta;
2725*4882a593Smuzhiyun 	struct bpf_prog *prog;
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	if (!v) {
2728*4882a593Smuzhiyun 		meta.seq = seq;
2729*4882a593Smuzhiyun 		prog = bpf_iter_get_info(&meta, true);
2730*4882a593Smuzhiyun 		if (prog)
2731*4882a593Smuzhiyun 			(void)tcp_prog_seq_show(prog, &meta, v, 0);
2732*4882a593Smuzhiyun 	}
2733*4882a593Smuzhiyun 
2734*4882a593Smuzhiyun 	tcp_seq_stop(seq, v);
2735*4882a593Smuzhiyun }
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun static const struct seq_operations bpf_iter_tcp_seq_ops = {
2738*4882a593Smuzhiyun 	.show		= bpf_iter_tcp_seq_show,
2739*4882a593Smuzhiyun 	.start		= tcp_seq_start,
2740*4882a593Smuzhiyun 	.next		= tcp_seq_next,
2741*4882a593Smuzhiyun 	.stop		= bpf_iter_tcp_seq_stop,
2742*4882a593Smuzhiyun };
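
/*
 * A BPF program attached to this iterator receives the bpf_iter__tcp context
 * defined above once per socket (and once with a NULL sk_common at the end
 * of the walk).  A minimal sketch of such a program, assuming the usual
 * libbpf helpers (illustrative, not part of this file):
 *
 *	SEC("iter/tcp")
 *	int dump_tcp(struct bpf_iter__tcp *ctx)
 *	{
 *		struct sock_common *sk_common = ctx->sk_common;
 *		struct seq_file *seq = ctx->meta->seq;
 *
 *		if (!sk_common)
 *			return 0;	// end-of-iteration callback
 *		BPF_SEQ_PRINTF(seq, "family %d uid %u\n",
 *			       sk_common->skc_family, ctx->uid);
 *		return 0;
 *	}
 */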
2743*4882a593Smuzhiyun #endif
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun static const struct seq_operations tcp4_seq_ops = {
2746*4882a593Smuzhiyun 	.show		= tcp4_seq_show,
2747*4882a593Smuzhiyun 	.start		= tcp_seq_start,
2748*4882a593Smuzhiyun 	.next		= tcp_seq_next,
2749*4882a593Smuzhiyun 	.stop		= tcp_seq_stop,
2750*4882a593Smuzhiyun };
2751*4882a593Smuzhiyun 
2752*4882a593Smuzhiyun static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2753*4882a593Smuzhiyun 	.family		= AF_INET,
2754*4882a593Smuzhiyun };
2755*4882a593Smuzhiyun 
2756*4882a593Smuzhiyun static int __net_init tcp4_proc_init_net(struct net *net)
2757*4882a593Smuzhiyun {
2758*4882a593Smuzhiyun 	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
2759*4882a593Smuzhiyun 			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
2760*4882a593Smuzhiyun 		return -ENOMEM;
2761*4882a593Smuzhiyun 	return 0;
2762*4882a593Smuzhiyun }
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun static void __net_exit tcp4_proc_exit_net(struct net *net)
2765*4882a593Smuzhiyun {
2766*4882a593Smuzhiyun 	remove_proc_entry("tcp", net->proc_net);
2767*4882a593Smuzhiyun }
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun static struct pernet_operations tcp4_net_ops = {
2770*4882a593Smuzhiyun 	.init = tcp4_proc_init_net,
2771*4882a593Smuzhiyun 	.exit = tcp4_proc_exit_net,
2772*4882a593Smuzhiyun };
2773*4882a593Smuzhiyun 
2774*4882a593Smuzhiyun int __init tcp4_proc_init(void)
2775*4882a593Smuzhiyun {
2776*4882a593Smuzhiyun 	return register_pernet_subsys(&tcp4_net_ops);
2777*4882a593Smuzhiyun }
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun void tcp4_proc_exit(void)
2780*4882a593Smuzhiyun {
2781*4882a593Smuzhiyun 	unregister_pernet_subsys(&tcp4_net_ops);
2782*4882a593Smuzhiyun }
2783*4882a593Smuzhiyun #endif /* CONFIG_PROC_FS */
2784*4882a593Smuzhiyun 
2785*4882a593Smuzhiyun struct proto tcp_prot = {
2786*4882a593Smuzhiyun 	.name			= "TCP",
2787*4882a593Smuzhiyun 	.owner			= THIS_MODULE,
2788*4882a593Smuzhiyun 	.close			= tcp_close,
2789*4882a593Smuzhiyun 	.pre_connect		= tcp_v4_pre_connect,
2790*4882a593Smuzhiyun 	.connect		= tcp_v4_connect,
2791*4882a593Smuzhiyun 	.disconnect		= tcp_disconnect,
2792*4882a593Smuzhiyun 	.accept			= inet_csk_accept,
2793*4882a593Smuzhiyun 	.ioctl			= tcp_ioctl,
2794*4882a593Smuzhiyun 	.init			= tcp_v4_init_sock,
2795*4882a593Smuzhiyun 	.destroy		= tcp_v4_destroy_sock,
2796*4882a593Smuzhiyun 	.shutdown		= tcp_shutdown,
2797*4882a593Smuzhiyun 	.setsockopt		= tcp_setsockopt,
2798*4882a593Smuzhiyun 	.getsockopt		= tcp_getsockopt,
2799*4882a593Smuzhiyun 	.keepalive		= tcp_set_keepalive,
2800*4882a593Smuzhiyun 	.recvmsg		= tcp_recvmsg,
2801*4882a593Smuzhiyun 	.sendmsg		= tcp_sendmsg,
2802*4882a593Smuzhiyun 	.sendpage		= tcp_sendpage,
2803*4882a593Smuzhiyun 	.backlog_rcv		= tcp_v4_do_rcv,
2804*4882a593Smuzhiyun 	.release_cb		= tcp_release_cb,
2805*4882a593Smuzhiyun 	.hash			= inet_hash,
2806*4882a593Smuzhiyun 	.unhash			= inet_unhash,
2807*4882a593Smuzhiyun 	.get_port		= inet_csk_get_port,
2808*4882a593Smuzhiyun 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2809*4882a593Smuzhiyun 	.leave_memory_pressure	= tcp_leave_memory_pressure,
2810*4882a593Smuzhiyun 	.stream_memory_free	= tcp_stream_memory_free,
2811*4882a593Smuzhiyun 	.sockets_allocated	= &tcp_sockets_allocated,
2812*4882a593Smuzhiyun 	.orphan_count		= &tcp_orphan_count,
2813*4882a593Smuzhiyun 	.memory_allocated	= &tcp_memory_allocated,
2814*4882a593Smuzhiyun 	.memory_pressure	= &tcp_memory_pressure,
2815*4882a593Smuzhiyun 	.sysctl_mem		= sysctl_tcp_mem,
2816*4882a593Smuzhiyun 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2817*4882a593Smuzhiyun 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2818*4882a593Smuzhiyun 	.max_header		= MAX_TCP_HEADER,
2819*4882a593Smuzhiyun 	.obj_size		= sizeof(struct tcp_sock),
2820*4882a593Smuzhiyun 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2821*4882a593Smuzhiyun 	.twsk_prot		= &tcp_timewait_sock_ops,
2822*4882a593Smuzhiyun 	.rsk_prot		= &tcp_request_sock_ops,
2823*4882a593Smuzhiyun 	.h.hashinfo		= &tcp_hashinfo,
2824*4882a593Smuzhiyun 	.no_autobind		= true,
2825*4882a593Smuzhiyun 	.diag_destroy		= tcp_abort,
2826*4882a593Smuzhiyun };
2827*4882a593Smuzhiyun EXPORT_SYMBOL(tcp_prot);
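
/*
 * This proto is what a user-space socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
 * call ultimately binds to: the inetsw tables map that triple to tcp_prot,
 * so connect(2) reaches tcp_v4_connect() through the .connect hook and
 * close(2) reaches tcp_close() through .close.  From user space, simply:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
 */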
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun static void __net_exit tcp_sk_exit(struct net *net)
2830*4882a593Smuzhiyun {
2831*4882a593Smuzhiyun 	int cpu;
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun 	if (net->ipv4.tcp_congestion_control)
2834*4882a593Smuzhiyun 		bpf_module_put(net->ipv4.tcp_congestion_control,
2835*4882a593Smuzhiyun 			       net->ipv4.tcp_congestion_control->owner);
2836*4882a593Smuzhiyun 
2837*4882a593Smuzhiyun 	for_each_possible_cpu(cpu)
2838*4882a593Smuzhiyun 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2839*4882a593Smuzhiyun 	free_percpu(net->ipv4.tcp_sk);
2840*4882a593Smuzhiyun }
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun static int __net_init tcp_sk_init(struct net *net)
2843*4882a593Smuzhiyun {
2844*4882a593Smuzhiyun 	int res, cpu, cnt;
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2847*4882a593Smuzhiyun 	if (!net->ipv4.tcp_sk)
2848*4882a593Smuzhiyun 		return -ENOMEM;
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 	for_each_possible_cpu(cpu) {
2851*4882a593Smuzhiyun 		struct sock *sk;
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2854*4882a593Smuzhiyun 					   IPPROTO_TCP, net);
2855*4882a593Smuzhiyun 		if (res)
2856*4882a593Smuzhiyun 			goto fail;
2857*4882a593Smuzhiyun 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun 		/* Enforce IP_DF and IPID==0 for the RST and
2860*4882a593Smuzhiyun 		 * ACK replies sent in SYN-RECV and TIME-WAIT states.
2861*4882a593Smuzhiyun 		 */
2862*4882a593Smuzhiyun 		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
2863*4882a593Smuzhiyun 
2864*4882a593Smuzhiyun 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2865*4882a593Smuzhiyun 	}
2866*4882a593Smuzhiyun 
2867*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_ecn = 2;
2868*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2869*4882a593Smuzhiyun 
2870*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2871*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
2872*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2873*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2874*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
2875*4882a593Smuzhiyun 
2876*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2877*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2878*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2879*4882a593Smuzhiyun 
2880*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2881*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2882*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_syncookies = 1;
2883*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2884*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2885*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2886*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_orphan_retries = 0;
2887*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2888*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2889*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_tw_reuse = 2;
2890*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun 	cnt = tcp_hashinfo.ehash_mask + 1;
2893*4882a593Smuzhiyun 	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
2894*4882a593Smuzhiyun 	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2895*4882a593Smuzhiyun 
2896*4882a593Smuzhiyun 	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
2897*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_sack = 1;
2898*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_window_scaling = 1;
2899*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_timestamps = 1;
2900*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_early_retrans = 3;
2901*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2902*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
2903*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_retrans_collapse = 1;
2904*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_max_reordering = 300;
2905*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_dsack = 1;
2906*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_app_win = 31;
2907*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_adv_win_scale = 1;
2908*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_frto = 2;
2909*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2910*4882a593Smuzhiyun 	/* This limits the percentage of the congestion window which we
2911*4882a593Smuzhiyun 	 * will allow a single TSO frame to consume.  Building TSO frames
2912*4882a593Smuzhiyun 	 * which are too large can cause TCP streams to be bursty.
2913*4882a593Smuzhiyun 	 */
2914*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
2915*4882a593Smuzhiyun 	/* Default TSQ limit of 16 TSO segments */
2916*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
2917*4882a593Smuzhiyun 	/* rfc5961 challenge ack rate limiting */
2918*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2919*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_min_tso_segs = 2;
2920*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2921*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_autocorking = 1;
2922*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2923*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2924*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2925*4882a593Smuzhiyun 	if (net != &init_net) {
2926*4882a593Smuzhiyun 		memcpy(net->ipv4.sysctl_tcp_rmem,
2927*4882a593Smuzhiyun 		       init_net.ipv4.sysctl_tcp_rmem,
2928*4882a593Smuzhiyun 		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
2929*4882a593Smuzhiyun 		memcpy(net->ipv4.sysctl_tcp_wmem,
2930*4882a593Smuzhiyun 		       init_net.ipv4.sysctl_tcp_wmem,
2931*4882a593Smuzhiyun 		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
2932*4882a593Smuzhiyun 	}
2933*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
2934*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
2935*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
2936*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2937*4882a593Smuzhiyun 	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2938*4882a593Smuzhiyun 	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
2939*4882a593Smuzhiyun 	atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2940*4882a593Smuzhiyun 
2941*4882a593Smuzhiyun 	/* Reno is always built in */
2942*4882a593Smuzhiyun 	if (!net_eq(net, &init_net) &&
2943*4882a593Smuzhiyun 	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
2944*4882a593Smuzhiyun 			       init_net.ipv4.tcp_congestion_control->owner))
2945*4882a593Smuzhiyun 		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
2946*4882a593Smuzhiyun 	else
2947*4882a593Smuzhiyun 		net->ipv4.tcp_congestion_control = &tcp_reno;
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 	return 0;
2950*4882a593Smuzhiyun fail:
2951*4882a593Smuzhiyun 	tcp_sk_exit(net);
2952*4882a593Smuzhiyun 
2953*4882a593Smuzhiyun 	return res;
2954*4882a593Smuzhiyun }
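
/*
 * Everything initialized above is per network namespace and shows up under
 * the net.ipv4.* sysctls, so a freshly created netns starts from these
 * defaults.  For instance (values as set above, via the standard sysctl
 * layout):
 *
 *	/proc/sys/net/ipv4/tcp_syncookies      -> 1
 *	/proc/sys/net/ipv4/tcp_tw_reuse        -> 2
 *	/proc/sys/net/ipv4/tcp_window_scaling  -> 1
 */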
2955*4882a593Smuzhiyun 
2956*4882a593Smuzhiyun static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2957*4882a593Smuzhiyun {
2958*4882a593Smuzhiyun 	struct net *net;
2959*4882a593Smuzhiyun 
2960*4882a593Smuzhiyun 	inet_twsk_purge(&tcp_hashinfo, AF_INET);
2961*4882a593Smuzhiyun 
2962*4882a593Smuzhiyun 	list_for_each_entry(net, net_exit_list, exit_list)
2963*4882a593Smuzhiyun 		tcp_fastopen_ctx_destroy(net);
2964*4882a593Smuzhiyun }
2965*4882a593Smuzhiyun 
2966*4882a593Smuzhiyun static struct pernet_operations __net_initdata tcp_sk_ops = {
2967*4882a593Smuzhiyun        .init	   = tcp_sk_init,
2968*4882a593Smuzhiyun        .exit	   = tcp_sk_exit,
2969*4882a593Smuzhiyun        .exit_batch = tcp_sk_exit_batch,
2970*4882a593Smuzhiyun };
2971*4882a593Smuzhiyun 
2972*4882a593Smuzhiyun #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2973*4882a593Smuzhiyun DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
2974*4882a593Smuzhiyun 		     struct sock_common *sk_common, uid_t uid)
2975*4882a593Smuzhiyun 
2976*4882a593Smuzhiyun static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
2977*4882a593Smuzhiyun {
2978*4882a593Smuzhiyun 	struct tcp_iter_state *st = priv_data;
2979*4882a593Smuzhiyun 	struct tcp_seq_afinfo *afinfo;
2980*4882a593Smuzhiyun 	int ret;
2981*4882a593Smuzhiyun 
2982*4882a593Smuzhiyun 	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
2983*4882a593Smuzhiyun 	if (!afinfo)
2984*4882a593Smuzhiyun 		return -ENOMEM;
2985*4882a593Smuzhiyun 
2986*4882a593Smuzhiyun 	afinfo->family = AF_UNSPEC;
2987*4882a593Smuzhiyun 	st->bpf_seq_afinfo = afinfo;
2988*4882a593Smuzhiyun 	ret = bpf_iter_init_seq_net(priv_data, aux);
2989*4882a593Smuzhiyun 	if (ret)
2990*4882a593Smuzhiyun 		kfree(afinfo);
2991*4882a593Smuzhiyun 	return ret;
2992*4882a593Smuzhiyun }
2993*4882a593Smuzhiyun 
2994*4882a593Smuzhiyun static void bpf_iter_fini_tcp(void *priv_data)
2995*4882a593Smuzhiyun {
2996*4882a593Smuzhiyun 	struct tcp_iter_state *st = priv_data;
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun 	kfree(st->bpf_seq_afinfo);
2999*4882a593Smuzhiyun 	bpf_iter_fini_seq_net(priv_data);
3000*4882a593Smuzhiyun }
3001*4882a593Smuzhiyun 
3002*4882a593Smuzhiyun static const struct bpf_iter_seq_info tcp_seq_info = {
3003*4882a593Smuzhiyun 	.seq_ops		= &bpf_iter_tcp_seq_ops,
3004*4882a593Smuzhiyun 	.init_seq_private	= bpf_iter_init_tcp,
3005*4882a593Smuzhiyun 	.fini_seq_private	= bpf_iter_fini_tcp,
3006*4882a593Smuzhiyun 	.seq_priv_size		= sizeof(struct tcp_iter_state),
3007*4882a593Smuzhiyun };
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun static struct bpf_iter_reg tcp_reg_info = {
3010*4882a593Smuzhiyun 	.target			= "tcp",
3011*4882a593Smuzhiyun 	.ctx_arg_info_size	= 1,
3012*4882a593Smuzhiyun 	.ctx_arg_info		= {
3013*4882a593Smuzhiyun 		{ offsetof(struct bpf_iter__tcp, sk_common),
3014*4882a593Smuzhiyun 		  PTR_TO_BTF_ID_OR_NULL },
3015*4882a593Smuzhiyun 	},
3016*4882a593Smuzhiyun 	.seq_info		= &tcp_seq_info,
3017*4882a593Smuzhiyun };
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun static void __init bpf_iter_register(void)
3020*4882a593Smuzhiyun {
3021*4882a593Smuzhiyun 	tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
3022*4882a593Smuzhiyun 	if (bpf_iter_reg_target(&tcp_reg_info))
3023*4882a593Smuzhiyun 		pr_warn("Warning: could not register bpf iterator tcp\n");
3024*4882a593Smuzhiyun }
3025*4882a593Smuzhiyun 
3026*4882a593Smuzhiyun #endif
3027*4882a593Smuzhiyun 
3028*4882a593Smuzhiyun void __init tcp_v4_init(void)
3029*4882a593Smuzhiyun {
3030*4882a593Smuzhiyun 	if (register_pernet_subsys(&tcp_sk_ops))
3031*4882a593Smuzhiyun 		panic("Failed to create the TCP control socket.\n");
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3034*4882a593Smuzhiyun 	bpf_iter_register();
3035*4882a593Smuzhiyun #endif
3036*4882a593Smuzhiyun }
3037