// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  net/dccp/timer.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/export.h>

#include "dccp.h"

/* sysctl variables governing numbers of retransmission attempts */
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_dccp_retries1        __read_mostly = TCP_RETR1;
int sysctl_dccp_retries2        __read_mostly = TCP_RETR2;

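/*
 * Report a fatal write timeout: surface the error to user space, send a
 * Reset(Aborted) to the peer and tear down the socket.
 */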
static void dccp_write_err(struct sock *sk)
{
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
        sk->sk_error_report(sk);

        dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
        dccp_done(sk);
        __DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
}

/* A write timeout has occurred. Process the after effects. */
static int dccp_write_timeout(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;

        if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
                if (icsk->icsk_retransmits != 0)
                        dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ?
                            : sysctl_dccp_request_retries;
        } else {
                if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
                        /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires PMTU
                           black hole detection. :-(

                           This is the place to do it, but it is not done. I do
                           not want to do it. It is disgusting. It does not work
                           in any case. Let me cite the same draft, which
                           requires us to implement this:

                   "The one security concern raised by this memo is that ICMP black holes
                    are often caused by over-zealous security administrators who block
                    all ICMP messages.  It is vitally important that those who design and
                    deploy security systems understand the impact of strict filtering on
                    upper-layer protocols.  The safest web site in the world is worthless
                    if most TCP implementations cannot transfer data from it.  It would
                    be far nicer to have all of the black holes fixed rather than fixing
                    all of the TCP implementations."

                           Golden words :-).
                         */

                        dst_negative_advice(sk);
                }

                retry_until = sysctl_dccp_retries2;
                /*
                 * FIXME: see tcp_write_timeout and tcp_out_of_resources
                 */
        }

        if (icsk->icsk_retransmits >= retry_until) {
                /* Has it gone just too far? */
                dccp_write_err(sk);
                return 1;
        }
        return 0;
}

/*
 * The DCCP retransmit timer.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        /*
         * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
         * sent, no need to retransmit, this sock is dead.
         */
        if (dccp_write_timeout(sk))
                return;

        /*
         * We want to know the number of packets retransmitted, not the
         * total number of retransmissions of clones of original packets.
         */
        if (icsk->icsk_retransmits == 0)
                __DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);

        if (dccp_retransmit_skb(sk) != 0) {
                /*
                 * Retransmission failed because of local congestion,
                 * do not backoff.
                 */
                if (--icsk->icsk_retransmits == 0)
                        icsk->icsk_retransmits = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          min(icsk->icsk_rto,
                                              TCP_RESOURCE_PROBE_INTERVAL),
                                          DCCP_RTO_MAX);
                return;
        }

        icsk->icsk_backoff++;

        icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
                                  DCCP_RTO_MAX);
        if (icsk->icsk_retransmits > sysctl_dccp_retries1)
                __sk_dst_reset(sk);
}

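/*
 * Retransmit-timer callback: re-arms itself if the socket is currently owned
 * by user context or the timeout has not yet expired, otherwise dispatches
 * the pending ICSK event (only ICSK_TIME_RETRANS is handled here).
 */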
static void dccp_write_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;
        int event = 0;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later */
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                               jiffies + (HZ / 20));
                goto out;
        }

        if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
                goto out;

        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                               icsk->icsk_timeout);
                goto out;
        }

        event = icsk->icsk_pending;
        icsk->icsk_pending = 0;

        switch (event) {
        case ICSK_TIME_RETRANS:
                dccp_retransmit_timer(sk);
                break;
        }
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

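/* DCCP sockets do not use the ICSK keepalive timer; this stub flags misuse. */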
static void dccp_keepalive_timer(struct timer_list *t)
{
        struct sock *sk = from_timer(sk, t, sk_timer);

        pr_err("dccp should not use a keepalive timer !\n");
        sock_put(sk);
}

/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
static void dccp_delack_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_delack_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               jiffies + TCP_DELACK_MIN);
                goto out;
        }

        if (sk->sk_state == DCCP_CLOSED ||
            !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;
        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               icsk->icsk_ack.timeout);
                goto out;
        }

        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (inet_csk_ack_scheduled(sk)) {
                if (!inet_csk_in_pingpong_mode(sk)) {
                        /* Delayed ACK missed: inflate ATO. */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
                                                 icsk->icsk_rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
                        inet_csk_exit_pingpong_mode(sk);
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                }
                dccp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

/**
 * dccp_write_xmitlet  -  Workhorse for CCID packet dequeueing interface
 * @data: Socket to act on
 *
 * See the comments above %ccid_dequeueing_decision for supported modes.
 */
static void dccp_write_xmitlet(unsigned long data)
{
        struct sock *sk = (struct sock *)data;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
        else
                dccp_write_xmit(sk);
        bh_unlock_sock(sk);
        sock_put(sk);
}

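/* Timer wrapper that runs the xmit tasklet body in timer context. */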
static void dccp_write_xmit_timer(struct timer_list *t)
{
        struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer);
        struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;

        dccp_write_xmitlet((unsigned long)sk);
}

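/*
 * Set up the per-socket transmit machinery: the CCID xmit tasklet/timer pair
 * plus the generic inet_connection_sock retransmit, delayed-ACK and
 * keepalive timers.
 */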
void dccp_init_xmit_timers(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
        timer_setup(&dp->dccps_xmit_timer, dccp_write_xmit_timer, 0);
        inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
                                  &dccp_keepalive_timer);
}

static ktime_t dccp_timestamp_seed;
/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since loading DCCP. This is native
 * DCCP time difference format (RFC 4340, sec. 13).
 * Please note: This will wrap around about every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
        u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

        do_div(delta, 10);
        return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);

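/* Record the reference time against which dccp_timestamp() is measured. */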
void __init dccp_timestamping_init(void)
{
        dccp_timestamp_seed = ktime_get_real();
}