/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER		L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE	40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
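
/* With all 40 option bytes in use, TCP_MIN_SND_MSS leaves
 * 48 - 40 = 8 bytes of payload, which is what TCP_MIN_GSO_SIZE
 * evaluates to.
 */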

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal number of window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs. Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 * TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 * TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE	4
#define TCPOLEN_EXP_SMC_BASE	6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
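
/* Illustrative arithmetic only: a SYN offering MSS, window scaling,
 * SACK and timestamps needs at most TCPOLEN_MSS_ALIGNED (4) +
 * TCPOLEN_WSCALE_ALIGNED (4) + TCPOLEN_TSTAMP_ALIGNED (12) +
 * TCPOLEN_SACKPERM_ALIGNED (4) = 24 of the 40 bytes allowed by
 * MAX_TCP_OPTION_SPACE; the output path may pack some of these
 * options together more tightly than the aligned sizes suggest.
 */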

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION	0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND	0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH	0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
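
/* Illustrative only: the signed difference makes wraparound harmless,
 * so sequence numbers just past the 32-bit boundary still order
 * correctly:
 *
 *	before(0xfffffff0U, 0x10U)        -> true
 *	after(0x10U, 0xfffffff0U)         -> true
 *	between(5U, 0xfffffff0U, 0x10U)   -> true (5 lies inside the
 *	                                    wrapped window)
 */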

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    const struct tcp_request_sock_ops *af_ops,
					    struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
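
/* For example, a cookie generated right after a counter tick stays
 * valid for nearly 2 * 60 seconds, while one generated just before a
 * tick may expire after little more than 60 seconds.
 */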

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
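
/* Worked example (illustrative): with max_window = 65535
 * (> TCP_MSS_DEFAULT) the cutoff is the half window, 32767, so a
 * 48k pktsize is clamped to 32767; with a tiny max_window of 512
 * the cutoff is the whole window, so a 1460-byte pktsize is clamped
 * to 512 rather than 256.
 */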

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
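
/* In effect this is RFC 6298's RTO = SRTT + 4 * RTTVAR: srtt_us is
 * stored left-shifted by 3 (hence the >> 3), and rttvar_us already
 * carries the 4x scaling from the Jacobson/Karels estimator.
 */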

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
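
/* Illustrative: with rcv_wup = 1000, rcv_wnd = 500 and rcv_nxt = 1200,
 * 300 bytes of the advertised window remain; had the peer already
 * pushed past the offered window (say rcv_nxt = 1600), the result is
 * clamped to 0.
 */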
745*4882a593Smuzhiyun
746*4882a593Smuzhiyun /* Choose a new window, without checks for shrinking, and without
747*4882a593Smuzhiyun * scaling applied to the result. The caller does these things
748*4882a593Smuzhiyun * if necessary. This is a "raw" window selection.
749*4882a593Smuzhiyun */
750*4882a593Smuzhiyun u32 __tcp_select_window(struct sock *sk);
751*4882a593Smuzhiyun
752*4882a593Smuzhiyun void tcp_send_window_probe(struct sock *sk);
753*4882a593Smuzhiyun
754*4882a593Smuzhiyun /* TCP uses 32bit jiffies to save some space.
755*4882a593Smuzhiyun * Note that this is different from tcp_time_stamp, which
756*4882a593Smuzhiyun * historically has been the same until linux-4.13.
757*4882a593Smuzhiyun */
758*4882a593Smuzhiyun #define tcp_jiffies32 ((u32)jiffies)
759*4882a593Smuzhiyun
760*4882a593Smuzhiyun /*
761*4882a593Smuzhiyun * Deliver a 32bit value for TCP timestamp option (RFC 7323)
762*4882a593Smuzhiyun * It is no longer tied to jiffies, but to 1 ms clock.
763*4882a593Smuzhiyun * Note: double check if you want to use tcp_jiffies32 instead of this.
764*4882a593Smuzhiyun */
765*4882a593Smuzhiyun #define TCP_TS_HZ 1000
766*4882a593Smuzhiyun
tcp_clock_ns(void)767*4882a593Smuzhiyun static inline u64 tcp_clock_ns(void)
768*4882a593Smuzhiyun {
769*4882a593Smuzhiyun return ktime_get_ns();
770*4882a593Smuzhiyun }
771*4882a593Smuzhiyun
tcp_clock_us(void)772*4882a593Smuzhiyun static inline u64 tcp_clock_us(void)
773*4882a593Smuzhiyun {
774*4882a593Smuzhiyun return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun /* This should only be used in contexts where tp->tcp_mstamp is up to date */
tcp_time_stamp(const struct tcp_sock * tp)778*4882a593Smuzhiyun static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
779*4882a593Smuzhiyun {
780*4882a593Smuzhiyun return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
781*4882a593Smuzhiyun }
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
tcp_ns_to_ts(u64 ns)784*4882a593Smuzhiyun static inline u32 tcp_ns_to_ts(u64 ns)
785*4882a593Smuzhiyun {
786*4882a593Smuzhiyun return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
787*4882a593Smuzhiyun }
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun /* Could use tcp_clock_us() / 1000, but this version uses a single divide */
tcp_time_stamp_raw(void)790*4882a593Smuzhiyun static inline u32 tcp_time_stamp_raw(void)
791*4882a593Smuzhiyun {
792*4882a593Smuzhiyun return tcp_ns_to_ts(tcp_clock_ns());
793*4882a593Smuzhiyun }
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun void tcp_mstamp_refresh(struct tcp_sock *tp);
796*4882a593Smuzhiyun
tcp_stamp_us_delta(u64 t1,u64 t0)797*4882a593Smuzhiyun static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
798*4882a593Smuzhiyun {
799*4882a593Smuzhiyun return max_t(s64, t1 - t0, 0);
800*4882a593Smuzhiyun }
801*4882a593Smuzhiyun
tcp_skb_timestamp(const struct sk_buff * skb)802*4882a593Smuzhiyun static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
803*4882a593Smuzhiyun {
804*4882a593Smuzhiyun return tcp_ns_to_ts(skb->skb_mstamp_ns);
805*4882a593Smuzhiyun }
806*4882a593Smuzhiyun
807*4882a593Smuzhiyun /* provide the departure time in us unit */
tcp_skb_timestamp_us(const struct sk_buff * skb)808*4882a593Smuzhiyun static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
809*4882a593Smuzhiyun {
810*4882a593Smuzhiyun return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
811*4882a593Smuzhiyun }
812*4882a593Smuzhiyun
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun #define TCPHDR_FIN 0x01
817*4882a593Smuzhiyun #define TCPHDR_SYN 0x02
818*4882a593Smuzhiyun #define TCPHDR_RST 0x04
819*4882a593Smuzhiyun #define TCPHDR_PSH 0x08
820*4882a593Smuzhiyun #define TCPHDR_ACK 0x10
821*4882a593Smuzhiyun #define TCPHDR_URG 0x20
822*4882a593Smuzhiyun #define TCPHDR_ECE 0x40
823*4882a593Smuzhiyun #define TCPHDR_CWR 0x80
824*4882a593Smuzhiyun
825*4882a593Smuzhiyun #define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
826*4882a593Smuzhiyun
827*4882a593Smuzhiyun /* This is what the send packet queuing engine uses to pass
828*4882a593Smuzhiyun * TCP per-packet control information to the transmission code.
829*4882a593Smuzhiyun * We also store the host-order sequence numbers in here too.
830*4882a593Smuzhiyun * This is 44 bytes if IPV6 is enabled.
831*4882a593Smuzhiyun * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
832*4882a593Smuzhiyun */
833*4882a593Smuzhiyun struct tcp_skb_cb {
834*4882a593Smuzhiyun __u32 seq; /* Starting sequence number */
835*4882a593Smuzhiyun __u32 end_seq; /* SEQ + FIN + SYN + datalen */
836*4882a593Smuzhiyun union {
837*4882a593Smuzhiyun /* Note : tcp_tw_isn is used in input path only
838*4882a593Smuzhiyun * (isn chosen by tcp_timewait_state_process())
839*4882a593Smuzhiyun *
840*4882a593Smuzhiyun * tcp_gso_segs/size are used in write queue only,
841*4882a593Smuzhiyun * cf tcp_skb_pcount()/tcp_skb_mss()
842*4882a593Smuzhiyun */
843*4882a593Smuzhiyun __u32 tcp_tw_isn;
844*4882a593Smuzhiyun struct {
845*4882a593Smuzhiyun u16 tcp_gso_segs;
846*4882a593Smuzhiyun u16 tcp_gso_size;
847*4882a593Smuzhiyun };
848*4882a593Smuzhiyun };
849*4882a593Smuzhiyun __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
850*4882a593Smuzhiyun
851*4882a593Smuzhiyun __u8 sacked; /* State flags for SACK. */
852*4882a593Smuzhiyun #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
853*4882a593Smuzhiyun #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
854*4882a593Smuzhiyun #define TCPCB_LOST 0x04 /* SKB is lost */
855*4882a593Smuzhiyun #define TCPCB_TAGBITS 0x07 /* All tag bits */
856*4882a593Smuzhiyun #define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
857*4882a593Smuzhiyun #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
858*4882a593Smuzhiyun #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
859*4882a593Smuzhiyun TCPCB_REPAIRED)
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
862*4882a593Smuzhiyun __u8 txstamp_ack:1, /* Record TX timestamp for ack? */
863*4882a593Smuzhiyun eor:1, /* Is skb MSG_EOR marked? */
864*4882a593Smuzhiyun has_rxtstamp:1, /* SKB has a RX timestamp */
865*4882a593Smuzhiyun unused:5;
866*4882a593Smuzhiyun __u32 ack_seq; /* Sequence number ACK'd */
867*4882a593Smuzhiyun union {
868*4882a593Smuzhiyun struct {
869*4882a593Smuzhiyun /* There is space for up to 24 bytes */
870*4882a593Smuzhiyun __u32 in_flight:30,/* Bytes in flight at transmit */
871*4882a593Smuzhiyun is_app_limited:1, /* cwnd not fully used? */
872*4882a593Smuzhiyun unused:1;
873*4882a593Smuzhiyun /* pkts S/ACKed so far upon tx of skb, incl retrans: */
874*4882a593Smuzhiyun __u32 delivered;
875*4882a593Smuzhiyun /* start of send pipeline phase */
876*4882a593Smuzhiyun u64 first_tx_mstamp;
877*4882a593Smuzhiyun /* when we reached the "delivered" count */
878*4882a593Smuzhiyun u64 delivered_mstamp;
879*4882a593Smuzhiyun } tx; /* only used for outgoing skbs */
880*4882a593Smuzhiyun union {
881*4882a593Smuzhiyun struct inet_skb_parm h4;
882*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
883*4882a593Smuzhiyun struct inet6_skb_parm h6;
884*4882a593Smuzhiyun #endif
885*4882a593Smuzhiyun } header; /* For incoming skbs */
886*4882a593Smuzhiyun struct {
887*4882a593Smuzhiyun __u32 flags;
888*4882a593Smuzhiyun struct sock *sk_redir;
889*4882a593Smuzhiyun void *data_end;
890*4882a593Smuzhiyun } bpf;
891*4882a593Smuzhiyun };
892*4882a593Smuzhiyun };
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
895*4882a593Smuzhiyun
bpf_compute_data_end_sk_skb(struct sk_buff * skb)896*4882a593Smuzhiyun static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
897*4882a593Smuzhiyun {
898*4882a593Smuzhiyun TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun
tcp_skb_bpf_ingress(const struct sk_buff * skb)901*4882a593Smuzhiyun static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
902*4882a593Smuzhiyun {
903*4882a593Smuzhiyun return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
904*4882a593Smuzhiyun }
905*4882a593Smuzhiyun
tcp_skb_bpf_redirect_fetch(struct sk_buff * skb)906*4882a593Smuzhiyun static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
907*4882a593Smuzhiyun {
908*4882a593Smuzhiyun return TCP_SKB_CB(skb)->bpf.sk_redir;
909*4882a593Smuzhiyun }
910*4882a593Smuzhiyun
tcp_skb_bpf_redirect_clear(struct sk_buff * skb)911*4882a593Smuzhiyun static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
912*4882a593Smuzhiyun {
913*4882a593Smuzhiyun TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
914*4882a593Smuzhiyun }
915*4882a593Smuzhiyun
916*4882a593Smuzhiyun extern const struct inet_connection_sock_af_ops ipv4_specific;
917*4882a593Smuzhiyun
918*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
919*4882a593Smuzhiyun /* This is the variant of inet6_iif() that must be used by TCP,
920*4882a593Smuzhiyun * as TCP moves IP6CB into a different location in skb->cb[]
921*4882a593Smuzhiyun */
tcp_v6_iif(const struct sk_buff * skb)922*4882a593Smuzhiyun static inline int tcp_v6_iif(const struct sk_buff *skb)
923*4882a593Smuzhiyun {
924*4882a593Smuzhiyun return TCP_SKB_CB(skb)->header.h6.iif;
925*4882a593Smuzhiyun }
926*4882a593Smuzhiyun
tcp_v6_iif_l3_slave(const struct sk_buff * skb)927*4882a593Smuzhiyun static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
928*4882a593Smuzhiyun {
929*4882a593Smuzhiyun bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
930*4882a593Smuzhiyun
931*4882a593Smuzhiyun return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
932*4882a593Smuzhiyun }
933*4882a593Smuzhiyun
934*4882a593Smuzhiyun /* TCP_SKB_CB reference means this can not be used from early demux */
tcp_v6_sdif(const struct sk_buff * skb)935*4882a593Smuzhiyun static inline int tcp_v6_sdif(const struct sk_buff *skb)
936*4882a593Smuzhiyun {
937*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
938*4882a593Smuzhiyun if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
939*4882a593Smuzhiyun return TCP_SKB_CB(skb)->header.h6.iif;
940*4882a593Smuzhiyun #endif
941*4882a593Smuzhiyun return 0;
942*4882a593Smuzhiyun }
943*4882a593Smuzhiyun
944*4882a593Smuzhiyun extern const struct inet_connection_sock_af_ops ipv6_specific;
945*4882a593Smuzhiyun
946*4882a593Smuzhiyun INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
947*4882a593Smuzhiyun INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
948*4882a593Smuzhiyun void tcp_v6_early_demux(struct sk_buff *skb);
949*4882a593Smuzhiyun
950*4882a593Smuzhiyun #endif
951*4882a593Smuzhiyun
952*4882a593Smuzhiyun /* TCP_SKB_CB reference means this can not be used from early demux */
tcp_v4_sdif(struct sk_buff * skb)953*4882a593Smuzhiyun static inline int tcp_v4_sdif(struct sk_buff *skb)
954*4882a593Smuzhiyun {
955*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
956*4882a593Smuzhiyun if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
957*4882a593Smuzhiyun return TCP_SKB_CB(skb)->header.h4.iif;
958*4882a593Smuzhiyun #endif
959*4882a593Smuzhiyun return 0;
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun
962*4882a593Smuzhiyun /* Due to TSO, an SKB can be composed of multiple actual
963*4882a593Smuzhiyun * packets. To keep these tracked properly, we use this.
964*4882a593Smuzhiyun */
tcp_skb_pcount(const struct sk_buff * skb)965*4882a593Smuzhiyun static inline int tcp_skb_pcount(const struct sk_buff *skb)
966*4882a593Smuzhiyun {
967*4882a593Smuzhiyun return TCP_SKB_CB(skb)->tcp_gso_segs;
968*4882a593Smuzhiyun }
969*4882a593Smuzhiyun
tcp_skb_pcount_set(struct sk_buff * skb,int segs)970*4882a593Smuzhiyun static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
971*4882a593Smuzhiyun {
972*4882a593Smuzhiyun TCP_SKB_CB(skb)->tcp_gso_segs = segs;
973*4882a593Smuzhiyun }
974*4882a593Smuzhiyun
tcp_skb_pcount_add(struct sk_buff * skb,int segs)975*4882a593Smuzhiyun static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
976*4882a593Smuzhiyun {
977*4882a593Smuzhiyun TCP_SKB_CB(skb)->tcp_gso_segs += segs;
978*4882a593Smuzhiyun }
979*4882a593Smuzhiyun
980*4882a593Smuzhiyun /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
tcp_skb_mss(const struct sk_buff * skb)981*4882a593Smuzhiyun static inline int tcp_skb_mss(const struct sk_buff *skb)
982*4882a593Smuzhiyun {
983*4882a593Smuzhiyun return TCP_SKB_CB(skb)->tcp_gso_size;
984*4882a593Smuzhiyun }
985*4882a593Smuzhiyun
tcp_skb_can_collapse_to(const struct sk_buff * skb)986*4882a593Smuzhiyun static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
987*4882a593Smuzhiyun {
988*4882a593Smuzhiyun return likely(!TCP_SKB_CB(skb)->eor);
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun
tcp_skb_can_collapse(const struct sk_buff * to,const struct sk_buff * from)991*4882a593Smuzhiyun static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
992*4882a593Smuzhiyun const struct sk_buff *from)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun return likely(tcp_skb_can_collapse_to(to) &&
995*4882a593Smuzhiyun mptcp_skb_can_collapse(to, from));
996*4882a593Smuzhiyun }
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun /* Events passed to congestion control interface */
999*4882a593Smuzhiyun enum tcp_ca_event {
1000*4882a593Smuzhiyun CA_EVENT_TX_START, /* first transmit when no packets in flight */
1001*4882a593Smuzhiyun CA_EVENT_CWND_RESTART, /* congestion window restart */
1002*4882a593Smuzhiyun CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
1003*4882a593Smuzhiyun CA_EVENT_LOSS, /* loss timeout */
1004*4882a593Smuzhiyun CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
1005*4882a593Smuzhiyun CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
1006*4882a593Smuzhiyun };
1007*4882a593Smuzhiyun
1008*4882a593Smuzhiyun /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1009*4882a593Smuzhiyun enum tcp_ca_ack_event_flags {
1010*4882a593Smuzhiyun CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
1011*4882a593Smuzhiyun CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
1012*4882a593Smuzhiyun CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
1013*4882a593Smuzhiyun };
1014*4882a593Smuzhiyun
1015*4882a593Smuzhiyun /*
1016*4882a593Smuzhiyun * Interface for adding new TCP congestion control handlers
1017*4882a593Smuzhiyun */
1018*4882a593Smuzhiyun #define TCP_CA_NAME_MAX 16
1019*4882a593Smuzhiyun #define TCP_CA_MAX 128
1020*4882a593Smuzhiyun #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun #define TCP_CA_UNSPEC 0
1023*4882a593Smuzhiyun
1024*4882a593Smuzhiyun /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1025*4882a593Smuzhiyun #define TCP_CONG_NON_RESTRICTED 0x1
1026*4882a593Smuzhiyun /* Requires ECN/ECT set on all packets */
1027*4882a593Smuzhiyun #define TCP_CONG_NEEDS_ECN 0x2
1028*4882a593Smuzhiyun #define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
1029*4882a593Smuzhiyun
1030*4882a593Smuzhiyun union tcp_cc_info;
1031*4882a593Smuzhiyun
1032*4882a593Smuzhiyun struct ack_sample {
1033*4882a593Smuzhiyun u32 pkts_acked;
1034*4882a593Smuzhiyun s32 rtt_us;
1035*4882a593Smuzhiyun u32 in_flight;
1036*4882a593Smuzhiyun };
1037*4882a593Smuzhiyun
/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};
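
/*
 * Illustrative sketch (not part of this header): a cong_control hook can
 * turn a valid rate sample into a delivery-rate estimate in packets per
 * second. "example_delivery_rate" is a hypothetical helper, shown only
 * to make the "invalid if negative" convention above concrete:
 *
 *	static u64 example_delivery_rate(const struct rate_sample *rs)
 *	{
 *		if (rs->delivered < 0 || rs->interval_us <= 0)
 *			return 0;	(invalid sample, per the rule above)
 *		return div64_long((u64)rs->delivered * USEC_PER_SEC,
 *				  rs->interval_us);
 *	}
 */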

struct tcp_congestion_ops {
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);
	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);
	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
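
/*
 * Illustrative sketch (an example under stated assumptions, not a
 * declaration in this header): the minimal shape of a congestion control
 * module built on this interface. Only ssthresh, cong_avoid and
 * undo_cwnd are required; "example_cc" is a hypothetical name, and the
 * reno helpers it reuses are declared further down in this file.
 *
 *	static struct tcp_congestion_ops example_cc __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_cc_register(void)
 *	{
 *		return tcp_register_congestion_control(&example_cc);
 *	}
 */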

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves with respect to
 * SACK handling. SACK is negotiated with the peer, and therefore it can
 * vary between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
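
/*
 * Worked example (illustrative numbers): with packets_out = 10,
 * sacked_out = 2, lost_out = 1 and retrans_out = 1, tcp_left_out() is
 * 2 + 1 = 3, so tcp_packets_in_flight() is 10 - 3 + 1 = 8 packets
 * presumed still in the network.
 */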

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
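
/*
 * Worked example: (cwnd >> 1) + (cwnd >> 2) is 3/4 of cwnd, i.e. half-way
 * between a post-reduction ssthresh of cwnd/2 and cwnd itself. With
 * snd_cwnd = 40 and snd_ssthresh = 20, the helper returns
 * max(20, 20 + 10) = 30.
 */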

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start, which
 * already risks 100% overshoot. The advantage is that we discourage
 * applications from sending filler packets or extra data just to
 * artificially blow up the cwnd usage, and allow application-limited
 * processes to probe bw more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  max_when);
}

/* Something is really bad: we could not queue an additional packet
 * because the qdisc is full or the receiver sent a zero window, or we
 * are paced. We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms).
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
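
/*
 * Worked example: with icsk_rto around 2ms, tcp_probe0_base() floors the
 * probe interval at TCP_RTO_MIN (200ms); after three backoffs
 * tcp_probe0_when() yields 200ms << 3 = 1.6s, clamped to max_when.
 */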

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);

	return tcp_adv_win_scale <= 0 ?
		(space>>(-tcp_adv_win_scale)) :
		space - (space>>tcp_adv_win_scale);
}
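
/*
 * Worked example: tcp_adv_win_scale reserves part of the buffer for skb
 * overhead. With space = 64KB, a scale of 1 advertises
 * 64KB - 32KB = 32KB, while a scale of -2 would advertise
 * 64KB >> 2 = 16KB.
 */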

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
				  READ_ONCE(sk->sk_backlog.len) -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);

/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5% (7/8) of the space has been consumed, we want to override
 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
	int rcvbuf, threshold;

	if (tcp_under_memory_pressure(sk))
		return true;

	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	threshold = rcvbuf - (rcvbuf >> 3);

	return atomic_read(&sk->sk_rmem_alloc) > threshold;
}
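
/*
 * Worked example: rcvbuf - (rcvbuf >> 3) is the 7/8 mark. With
 * sk_rcvbuf = 128KB the threshold is 128KB - 16KB = 112KB, so pressure
 * is reported once more than 112KB of receive memory is allocated.
 */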

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_time ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_probes ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
		     tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then the following TCP messages have valid values. Ignore the
	 * 0 value, or else a 'negative' tsval might forbid us to accept
	 * their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
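
/*
 * Worked example: the (s32) cast makes the first comparison
 * wraparound-safe. With ts_recent = 0xFFFFFFF0 and rcv_tsval =
 * 0x00000010, the unsigned difference is 0xFFFFFFE0 but the signed
 * result is -32, so the fresher timestamp still passes the check even
 * though the 32-bit counter wrapped.
 */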

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the
	   reasons for this constraint to relax it: if peer reboots, clock
	   may go out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all the implementations
	   followed the draft about maintaining clock via reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && !time_before32(ktime_get_seconds(),
				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
		return false;
	return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

union tcp_md5_addr {
	struct in_addr  a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr	a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	u8			prefixlen;
	union tcp_md5_addr	addr;
	int			l3index; /* set if key added with L3 scope */
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct ahash_request	*md5_req;
	void			*scratch;
};

/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index,
		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
#include <linux/jump_label.h>
extern struct static_key_false tcp_md5_needed;
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed))
		return NULL;
	return __tcp_md5_do_lookup(sk, l3index, addr, family);
}

#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

bool tcp_alloc_md5sig_pool(void);

struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
}

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
			  unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
		     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;  /* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}

/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);

/* This helper is needed, because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}
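
/*
 * Illustrative usage (a sketch modeled on callers in tcp_output.c): the
 * two macros must always be used as a bracketing pair, because they open
 * and close a C scope holding the saved _skb_refdst value:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 *	} tcp_skb_tsorted_restore(skb);
 */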

void tcp_write_queue_purge(struct sock *sk);

static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

/**
 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
 * @sk: socket
 *
 * Since the write queue can have a temporary empty skb in it,
 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
 */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->write_seq == tp->snd_nxt;
}

static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	sk_wmem_free_skb(sk, skb);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}

/* Called when old skb is about to be deleted and replaced by new skb */
static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}

/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		return inet_twsk(sk)->tw_transparent;
	case TCP_NEW_SYN_RECV:
		return inet_rsk(inet_reqsk(sk))->no_srccheck;
	}
	return inet_sk(sk)->transparent;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t			family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	struct tcp_seq_afinfo	*bpf_seq_afinfo;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}

/* @wake is one when sk_stream_write_space() calls us.
 * This sends EPOLLOUT only if notsent_bytes is below half the limit.
 * This mimics the strategy used in sock_def_write_space().
 */
static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 notsent_bytes = READ_ONCE(tp->write_seq) -
			    READ_ONCE(tp->snd_nxt);

	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
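
/*
 * Worked example: with tcp_notsent_lowat = 128KB and 80KB not yet sent,
 * a plain check (wake = 0) reports memory free (80KB < 128KB), but the
 * wakeup path (wake = 1) compares 160KB < 128KB, holding off EPOLLOUT
 * until the unsent backlog drops below 64KB.
 */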

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*md5_lookup)(const struct sock *sk,
					     const struct sock *addr_sk);
	int (*calc_md5_hash)(char *location,
			     const struct tcp_md5sig_key *md5,
			     const struct sock *sk,
			     const struct sk_buff *skb);
	int (*md5_parse)(struct sock *sk,
			 int optname,
			 sockptr_t optval,
			 int optlen);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int (*calc_md5_hash)(char *location,
			     const struct tcp_md5sig_key *md5,
			     const struct sock *sk,
			     const struct sk_buff *skb);
#endif
	void (*init_req)(struct request_sock *req,
			 const struct sock *sk_listener,
			 struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
				       const struct request_sock *req);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type,
			   struct sk_buff *syn_skb);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif
#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif
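
/* Illustrative sketch (not the in-tree definition): an af-specific ops
 * table fills in the hooks above.  "my_req_ops" is a hypothetical name;
 * the initializers shown mirror what an IPv4 implementation would
 * plausibly use.
 *
 *	static const struct tcp_request_sock_ops my_req_ops = {
 *		.mss_clamp	 = TCP_MSS_DEFAULT,
 *	#ifdef CONFIG_SYN_COOKIES
 *		.cookie_init_seq = cookie_v4_init_sequence,
 *	#endif
 *	};
 *
 * cookie_init_sequence() above then records the listen-queue overflow,
 * bumps the LINUX_MIB_SYNCOOKIESSENT counter and dispatches through
 * ->cookie_init_seq when syncookies are compiled in; otherwise it
 * degrades to returning 0.
 */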

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);

/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;
	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}
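
/* Worked example (illustrative numbers): with HZ == 1000, an icsk_rto of
 * 200 jiffies converts to 200000 us.  If the head of the rtx queue was
 * sent 50000 us before tcp_mstamp (i.e. 50 ms ago), the RTO should fire
 * in 200000 - 50000 = 150000 us, which is what tcp_rto_delta_us()
 * returns.  A result <= 0 means the timer is already overdue.
 */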

/*
 * Save and compile IPv4 options, returning a pointer to them
 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}
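
/* Usage sketch (hedged; field names borrowed from the IPv4 request path
 * for illustration): a listener saves the peer's IP options so replies
 * can echo them back.  The helper allocates with GFP_ATOMIC because it
 * may run in softirq context.
 *
 *	struct ip_options_rcu *opts;
 *
 *	opts = tcp_v4_save_options(sock_net(sk), skb);
 *	RCU_INIT_POINTER(ireq->ireq_opt, opts);
 *
 * A NULL return means either "no options present" or "allocation/echo
 * failed"; callers treat both the same way.
 */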

/* Locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c).
 * This is much faster than dissecting the packet to find out
 * (think of GRE encapsulations, IPv4, IPv6, ...).
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}
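
/* Illustrative sketch: a hypothetical transmit-path predicate that never
 * holds back bare ACKs.  my_should_delay() is invented for this example;
 * the point is that the sentinel truesize of 2 makes the test O(1), with
 * no header dissection.
 *
 *	static bool my_should_delay(const struct sk_buff *skb)
 *	{
 *		return !skb_is_tcp_pure_ack(skb);
 *	}
 */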

static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}
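
/* Worked example (illustrative numbers): tcp_inq() is what backs the
 * SIOCINQ/FIONREAD ioctl on TCP sockets.  With copied_seq == 1000 and
 * rcv_nxt == 1005, five sequence numbers are pending; if SOCK_DONE is
 * set, one of them is the received FIN, so only 4 bytes are readable.
 * In SYN_SENT/SYN_RECV the answer is pinned to 0.
 */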

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	tp->segs_in += segs_in;
	if (skb->len > tcp_hdrlen(skb))
		tp->data_segs_in += segs_in;
}
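
/* Example: a GRO-aggregated skb covering three wire segments arrives
 * with gso_segs == 3, so segs_in grows by 3; because it carries payload
 * beyond the TCP header, data_segs_in grows by 3 as well.  A pure ACK
 * (skb->len == tcp_hdrlen(skb)) bumps only segs_in, by the clamped
 * minimum of 1.
 */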

/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops, which is an atomic_t, so we can safely
 * make the sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX * TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
	size_t (*get_info_size)(const struct sock *sk);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
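
/* Illustrative sketch of a minimal ULP module ("my_ulp" and my_ulp_init()
 * are hypothetical; in-tree users such as kTLS fill in more hooks).
 * Registration makes the name selectable through the TCP_ULP socket
 * option, which lands in tcp_set_ulp().
 *
 *	static int my_ulp_init(struct sock *sk)
 *	{
 *		return 0;	// take over sk callbacks here
 *	}
 *
 *	static struct tcp_ulp_ops my_ulp_ops __read_mostly = {
 *		.name	= "my_ulp",
 *		.owner	= THIS_MODULE,
 *		.init	= my_ulp_init,
 *	};
 *
 *	// module init/exit would call:
 *	//	tcp_register_ulp(&my_ulp_ops);
 *	//	tcp_unregister_ulp(&my_ulp_ops);
 *	MODULE_ALIAS_TCP_ULP("my_ulp");
 */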

struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_STREAM_PARSER
struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#else
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

#ifdef CONFIG_NET_SOCK_MSG
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
			  int flags);
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
		      struct msghdr *msg, int len, int flags);
#endif /* CONFIG_NET_SOCK_MSG */

#ifdef CONFIG_CGROUP_BPF
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
	skops->skb = skb;
	skops->skb_data_end = skb->data + end_offset;
}
#else
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
}
#endif

/* Call BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif
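
/* Usage sketch: callers fold the return bands into a default of their
 * own.  BPF_SOCK_OPS_TIMEOUT_INIT is a real op; MY_DEFAULT is a
 * hypothetical caller-owned fallback.  The helpers below follow exactly
 * this shape.
 *
 *	int v = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
 *	if (v < 0)	// no program loaded, op unsupported, or !CONFIG_BPF
 *		v = MY_DEFAULT;
 *	// otherwise v is the program's sock_ops.reply
 */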

static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return timeout;
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
}

#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
void clean_acked_data_flush(void);
#endif

DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

/* Compute the Earliest Departure Time for some control packets,
 * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			    tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}
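
/* Worked example (illustrative numbers): with the TCP_TX_DELAY socket
 * option set to 10000 usec, tcp_add_tx_delay() pushes a data skb's
 * departure time 10000 * NSEC_PER_USEC = 10 ms into the future, and
 * tcp_transmit_time() returns tcp_clock_ns() + 10 ms for control packets
 * from TIME_WAIT / non-ESTABLISHED sockets, keeping both paths
 * consistent.  A return of 0 means "no artificial delay configured".
 */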

#endif /* _TCP_H */