xref: /OK3568_Linux_fs/kernel/net/ipv4/tcp_recovery.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

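/* Return true if the packet (re)sent at time t1 with end sequence seq1
 * was sent after the one sent at t2 with end sequence seq2; ties in the
 * timestamp are broken by sequence number.
 */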
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive
		 * during recovery or when starting recovery via the
		 * DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
		      TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more resilient to reordering, allow a settling delay of
	 * min_rtt/4. Use min_rtt instead of the smoothed RTT because
	 * reordering is often a path property and less related to queuing
	 * or delayed ACKs. Upon receiving DSACKs, linearly increase the
	 * window up to the smoothed RTT.
	 */
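	/* Illustrative numbers (not from the source): with min_rtt = 40ms,
	 * reo_wnd_steps = 2 and srtt = 50ms, the window is
	 * min((40ms / 4) * 2, 50ms) = 20ms.
	 */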
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}

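/* Return the time in usec that may still elapse before the given skb
 * should be marked lost: the RACK RTT plus the reordering window, minus
 * the time elapsed since the skb was (re)sent. A result <= 0 means the
 * skb has expired.
 */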
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but each looks at a different metric:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
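 * For example (illustrative numbers only): if P2, sent 10ms after P1,
 * is SACKed while P1 is still unacked, RACK declares P1 lost once
 * rack.rtt_us + reo_wnd has elapsed since P1 was sent, regardless of
 * how many duplicate ACKs have arrived.
 *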
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

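		/* The tsorted queue is in send order, so stop at the first
		 * skb that was not sent before the most recently (s)acked
		 * packet: neither it nor anything after it can be deemed
		 * lost yet.
		 */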
		if (!tcp_rack_sent_after(tp->rack.mstamp,
					 tcp_skb_timestamp_us(skb),
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked within
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

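/* Detect losses and, if any skb must still be waited on, arm the RACK
 * reordering timer. Returns true iff the timer was (re)armed.
 */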
bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
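		 *
		 * Illustrative numbers (not from the source): with
		 * min_rtt = 40ms, a SACK arriving 5ms after a retransmission
		 * gives rtt_us = 5ms < min_rtt, so it almost certainly
		 * acknowledges the original transmission; using it would
		 * corrupt the RACK RTT, hence we return without advancing.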
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
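	/* Marking packets lost raises tp->lost_out, which lowers
	 * tcp_packets_in_flight(); a change therefore means new losses
	 * were detected and must be retransmitted.
	 */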
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates RACK's reo_wnd based on DSACKs and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since a spurious retransmission may have been caused by a
 * reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) due to a spurious retransmission sent
 * after the last time reo_wnd was updated.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than as an
 * absolute value, to account for changes in the RTT.
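 *
 * Illustrative numbers (not from the source): with min_rtt = 40ms, each
 * step is 10ms; two DSACK-triggered increments take reo_wnd_steps from
 * 1 to 3, so reo_wnd becomes min(30ms, srtt). After 16 recoveries with
 * no further DSACKs, reo_wnd_steps falls back to 1 (10ms).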
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
	     TCP_RACK_STATIC_REO_WND) ||
	    !rs->prior_delivered)
		return;

	/* Disregard the DSACK if an RTT has not passed since reo_wnd was
	 * last adjusted.
	 */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if an update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}

/* RFC6582 NewReno recovery for non-SACK connections. It simply
 * retransmits the next unacked packet upon receiving
 * a) three or more DUPACKs to start fast recovery
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

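		/* NewReno retransmits one packet per event: split a
		 * multi-segment skb so that only the first MSS is marked
		 * lost and retransmitted.
		 */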
		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}