/* Bottleneck Bandwidth and RTT (BBR) congestion control
 *
 * BBR congestion control computes the sending rate based on the delivery
 * rate (throughput) estimated from ACKs. In a nutshell:
 *
 * On each ACK, update our model of the network path:
 *   bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
 *   min_rtt = windowed_min(rtt, 10 seconds)
 * pacing_rate = pacing_gain * bottleneck_bandwidth
 * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
 *
 * The core algorithm does not react directly to packet losses or delays,
 * although BBR may adjust the size of next send per ACK when loss is
 * observed, or adjust the sending rate if it estimates there is a
 * traffic policer, in order to keep the drop rate reasonable.
 *
 * Here is a state transition diagram for BBR:
 *
 *             |
 *             V
 *    +---> STARTUP  ----+
 *    |        |         |
 *    |        V         |
 *    |      DRAIN   ----+
 *    |        |         |
 *    |        V         |
 *    +---> PROBE_BW ----+
 *    |      ^    |      |
 *    |      |    |      |
 *    |      +----+      |
 *    |                  |
 *    +---- PROBE_RTT <--+
 *
 * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
 * When it estimates the pipe is full, it enters DRAIN to drain the queue.
 * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
 * A long-lived BBR flow spends the vast majority of its time remaining
 * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
 * in a fair manner, with a small, bounded queue. *If* a flow has been
 * continuously sending for the entire min_rtt window, and hasn't seen an RTT
 * sample that matches or decreases its min_rtt estimate for 10 seconds, then
 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
 * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
 * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
 * otherwise we enter STARTUP to try to fill the pipe.
 *
 * BBR is described in detail in:
 *   "BBR: Congestion-Based Congestion Control",
 *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
 *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
 *
 * There is a public e-mail list for discussing BBR development and testing:
 *   https://groups.google.com/forum/#!forum/bbr-dev
 *
 * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
 * otherwise the TCP stack falls back to internal pacing using one high
 * resolution timer per TCP socket, which may use more resources.
 */
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include <linux/inet.h>
#include <linux/random.h>
#include <linux/win_minmax.h>

/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
 * Since the minimum window is >=4 packets, the lower bound isn't
 * an issue. The upper bound isn't an issue with existing technologies.
 */
#define BW_SCALE 24
#define BW_UNIT (1 << BW_SCALE)

#define BBR_SCALE 8     /* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)
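/* BBR_UNIT is 256, so BBR gain values are effectively Q8.8 fixed point;
 * e.g. BBR_UNIT * 5 / 4 = 320 encodes a gain of 1.25x.
 */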

/* BBR has the following modes for deciding how fast to send: */
enum bbr_mode {
        BBR_STARTUP,    /* ramp up sending rate rapidly to fill pipe */
        BBR_DRAIN,      /* drain any queue created during startup */
        BBR_PROBE_BW,   /* discover, share bw: pace around estimated bw */
        BBR_PROBE_RTT,  /* cut inflight to min to probe min_rtt */
};

/* BBR congestion control block */
struct bbr {
        u32     min_rtt_us;             /* min RTT in min_rtt_win_sec window */
        u32     min_rtt_stamp;          /* timestamp of min_rtt_us */
        u32     probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
        struct minmax bw;       /* Max recent delivery rate in pkts/uS << 24 */
        u32     rtt_cnt;            /* count of packet-timed rounds elapsed */
        u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
        u64     cycle_mstamp;        /* time of this cycle phase start */
        u32     mode:3,              /* current bbr_mode in state machine */
                prev_ca_state:3,     /* CA state on previous ACK */
                packet_conservation:1,  /* use packet conservation? */
                round_start:1,       /* start of packet-timed tx->ack round? */
                idle_restart:1,      /* restarting after idle? */
                probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
                unused:13,
                lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
                lt_rtt_cnt:7,        /* round trips in long-term interval */
                lt_use_bw:1;         /* use lt_bw as our bw estimate? */
        u32     lt_bw;               /* LT est delivery rate in pkts/uS << 24 */
        u32     lt_last_delivered;   /* LT intvl start: tp->delivered */
        u32     lt_last_stamp;       /* LT intvl start: tp->delivered_mstamp */
        u32     lt_last_lost;        /* LT intvl start: tp->lost */
        u32     pacing_gain:10,      /* current gain for setting pacing rate */
                cwnd_gain:10,        /* current gain for setting cwnd */
                full_bw_reached:1,   /* reached full bw in Startup? */
                full_bw_cnt:2,       /* number of rounds without large bw gains */
                cycle_idx:3,         /* current index in pacing_gain cycle array */
                has_seen_rtt:1,      /* have we seen an RTT sample yet? */
                unused_b:5;
        u32     prior_cwnd;     /* prior cwnd upon entering loss recovery */
        u32     full_bw;        /* recent bw, to estimate if pipe is full */

        /* For tracking ACK aggregation: */
        u64     ack_epoch_mstamp;       /* start of ACK sampling epoch */
        u16     extra_acked[2];         /* max excess data ACKed in epoch */
        u32     ack_epoch_acked:20,     /* packets (S)ACKed in sampling epoch */
                extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
                extra_acked_win_idx:1,  /* current index in extra_acked array */
                unused_c:6;
};

#define CYCLE_LEN       8       /* number of phases in a pacing gain cycle */

/* Window length of bw filter (in rounds): */
static const int bbr_bw_rtts = CYCLE_LEN + 2;
/* Window length of min_rtt filter (in sec): */
static const u32 bbr_min_rtt_win_sec = 10;
/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;

/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
 * In order to help drive the network toward lower queues and low latency while
 * maintaining high utilization, the average pacing rate aims to be slightly
 * lower than the estimated bandwidth. This is an important aspect of the
 * design.
 */
static const int bbr_pacing_margin_percent = 1;

/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 * that will allow a smoothly increasing pacing rate that will double each RTT
 * and send the same number of packets per RTT that an un-paced, slow-starting
 * Reno or CUBIC flow would:
 */
static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 * the queue created in BBR_STARTUP in a single round:
 */
static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
static const int bbr_cwnd_gain  = BBR_UNIT * 2;
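/* In Q8.8 terms these work out to: bbr_high_gain = 739 (~2.89x),
 * bbr_drain_gain = 88 (~0.34x, i.e. ~1/2.89), bbr_cwnd_gain = 512 (2.0x).
 */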
/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
static const int bbr_pacing_gain[] = {
        BBR_UNIT * 5 / 4,       /* probe for more available bw */
        BBR_UNIT * 3 / 4,       /* drain queue and/or yield bw to other flows */
        BBR_UNIT, BBR_UNIT, BBR_UNIT,   /* cruise at 1.0*bw to utilize pipe, */
        BBR_UNIT, BBR_UNIT, BBR_UNIT    /* without creating excess queue... */
};
/* Randomize the starting gain cycling phase over N phases: */
static const u32 bbr_cycle_rand = 7;

/* Try to keep at least this many packets in flight, if things go smoothly. For
 * smooth functioning, a sliding window protocol ACKing every other packet
 * needs at least 4 packets in flight:
 */
static const u32 bbr_cwnd_min_target = 4;

/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
/* If bw has increased significantly (1.25x), there may be more bw available: */
static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
static const u32 bbr_full_bw_cnt = 3;

/* "long-term" ("LT") bandwidth estimator parameters... */
/* The minimum number of rounds in an LT bw sampling interval: */
static const u32 bbr_lt_intvl_min_rtts = 4;
/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
static const u32 bbr_lt_loss_thresh = 50;
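/* (50/BBR_UNIT = 50/256 ~= 19.5%, i.e. the "20%" mentioned above.) */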
/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;

/* Gain factor for adding extra_acked to target cwnd: */
static const int bbr_extra_acked_gain = BBR_UNIT;
/* Window length of extra_acked window. */
static const u32 bbr_extra_acked_win_rtts = 5;
/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
/* Time period for clamping cwnd increment due to ack aggregation */
static const u32 bbr_extra_acked_max_us = 100 * 1000;

static void bbr_check_probe_rtt_done(struct sock *sk);

/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
        const struct bbr *bbr = inet_csk_ca(sk);

        return bbr->full_bw_reached;
}

/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
static u32 bbr_max_bw(const struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        return minmax_get(&bbr->bw);
}

/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
static u32 bbr_bw(const struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

/* Return maximum extra acked in past k-2k round trips,
 * where k = bbr_extra_acked_win_rtts.
 */
static u16 bbr_extra_acked(const struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        return max(bbr->extra_acked[0], bbr->extra_acked[1]);
}

/* Return rate in bytes per second, optionally with a gain.
 * The order here is chosen carefully to avoid overflow of u64. This should
 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 */
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
        unsigned int mss = tcp_sk(sk)->mss_cache;

        rate *= mss;
        rate *= gain;
        rate >>= BBR_SCALE;
        rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
        return rate >> BW_SCALE;
}
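/* For example, with rate = BW_UNIT (1 pkt/usec), mss = 1500 and gain =
 * BBR_UNIT (1.0x): 1500 bytes/usec is 12 Gbit/sec, and the 1% pacing margin
 * brings the result to ~1.485e9 bytes/sec (~11.88 Gbit/sec).
 */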

/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
        u64 rate = bw;

        rate = bbr_rate_bytes_per_sec(sk, rate, gain);
        rate = min_t(u64, rate, sk->sk_max_pacing_rate);
        return rate;
}

/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u64 bw;
        u32 rtt_us;

        if (tp->srtt_us) {              /* any RTT sample yet? */
                rtt_us = max(tp->srtt_us >> 3, 1U);
                bbr->has_seen_rtt = 1;
        } else {                        /* no RTT sample yet */
                rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
        }
        bw = (u64)tp->snd_cwnd * BW_UNIT;
        do_div(bw, rtt_us);
        sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
}
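/* E.g. with the default rtt_us = 1000 (1 ms), snd_cwnd = 10 and mss = 1500,
 * the initial pacing rate is roughly 2.89 * 10 * 1500 bytes per 1 ms, i.e.
 * about 43 MB/sec before the 1% pacing margin.
 */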

/* Pace using current bw estimate and a gain factor. */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);

        if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
                bbr_init_pacing_rate_from_rtt(sk);
        if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
                sk->sk_pacing_rate = rate;
}

/* override sysctl_tcp_min_tso_segs */
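/* (sk_pacing_rate is in bytes/sec while bbr_min_tso_rate is in bits/sec,
 * hence the ">> 3" below.)
 */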
static u32 bbr_min_tso_segs(struct sock *sk)
{
        return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}

static u32 bbr_tso_segs_goal(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 segs, bytes;

        /* Sort of tcp_tso_autosize() but ignoring
         * driver provided sk_gso_max_size.
         */
        bytes = min_t(unsigned long,
                      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
                      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
        segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));

        return min(segs, 0x7FU);
}

/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
static void bbr_save_cwnd(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
                bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
        else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
                bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
}

static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        if (event == CA_EVENT_TX_START && tp->app_limited) {
                bbr->idle_restart = 1;
                bbr->ack_epoch_mstamp = tp->tcp_mstamp;
                bbr->ack_epoch_acked = 0;
                /* Avoid pointless buffer overflows: pace at est. bw if we don't
                 * need more speed (we're restarting from idle and app-limited).
                 */
                if (bbr->mode == BBR_PROBE_BW)
                        bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
                else if (bbr->mode == BBR_PROBE_RTT)
                        bbr_check_probe_rtt_done(sk);
        }
}

/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
 *
 * bdp = ceil(bw * min_rtt * gain)
 *
 * The key factor, gain, controls the amount of queue. While a small gain
 * builds a smaller queue, it becomes more vulnerable to noise in RTT
 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 * noise may cause BBR to under-estimate the rate.
 */
static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
        struct bbr *bbr = inet_csk_ca(sk);
        u32 bdp;
        u64 w;

        /* If we've never had a valid RTT sample, cap cwnd at the initial
         * default. This should only happen when the connection is not using TCP
         * timestamps and has retransmitted all of the SYN/SYNACK/data packets
         * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
         * case we need to slow-start up toward something safe: TCP_INIT_CWND.
         */
        if (unlikely(bbr->min_rtt_us == ~0U))   /* no valid RTT samples yet? */
                return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/

        w = (u64)bw * bbr->min_rtt_us;

        /* Apply a gain to the given value, remove the BW_SCALE shift, and
         * round the value up to avoid a negative feedback loop.
         */
        bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

        return bdp;
}
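/* E.g. with bw ~= 0.01 pkt/usec << BW_SCALE (10 pkts/msec), min_rtt_us =
 * 20000 and gain = 2 * BBR_UNIT, the result is ceil(0.01 * 20000 * 2) = 400
 * packets.
 */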

/* To achieve full performance in high-speed paths, we budget enough cwnd to
 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
 *   - one skb in sending host Qdisc,
 *   - one skb in sending host TSO/GSO engine
 *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
 * full even with ACK-every-other-packet delayed ACKs.
 */
static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
{
        struct bbr *bbr = inet_csk_ca(sk);

        /* Allow enough full-sized skbs in flight to utilize end systems. */
        cwnd += 3 * bbr_tso_segs_goal(sk);

        /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
        cwnd = (cwnd + 1) & ~1U;

        /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
        if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
                cwnd += 2;

        return cwnd;
}

/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
{
        u32 inflight;

        inflight = bbr_bdp(sk, bw, gain);
        inflight = bbr_quantization_budget(sk, inflight);

        return inflight;
}

/* With pacing at lower layers, there's often less data "in the network" than
 * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
 * we often have several skbs queued in the pacing layer with a pre-scheduled
 * earliest departure time (EDT). BBR adapts its pacing rate based on the
 * inflight level that it estimates has already been "baked in" by previous
 * departure time decisions. We calculate a rough estimate of the number of our
 * packets that might be in the network at the earliest departure time for the
 * next skb scheduled:
 *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
 * If we're increasing inflight, then we want to know if the transmit of the
 * EDT skb will push inflight above the target, so inflight_at_edt includes
 * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
 * then estimate if inflight will sink too low just before the EDT transmit.
 */
static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u64 now_ns, edt_ns, interval_us;
        u32 interval_delivered, inflight_at_edt;

        now_ns = tp->tcp_clock_cache;
        edt_ns = max(tp->tcp_wstamp_ns, now_ns);
        interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
        interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
        inflight_at_edt = inflight_now;
        if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
                inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
        if (interval_delivered >= inflight_at_edt)
                return 0;
        return inflight_at_edt - interval_delivered;
}

/* Find the cwnd increment based on estimate of ack aggregation */
static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
{
        u32 max_aggr_cwnd, aggr_cwnd = 0;

        if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
                max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
                                / BW_UNIT;
                aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
                             >> BBR_SCALE;
                aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
        }

        return aggr_cwnd;
}

/* An optimization in BBR to reduce losses: On the first round of recovery, we
 * follow the packet conservation principle: send P packets per P packets acked.
 * After that, we slow-start and send at most 2*P packets per P packets acked.
 * After recovery finishes, or upon undo, we restore the cwnd we had when
 * recovery started (capped by the target cwnd based on estimated BDP).
 *
 * TODO(ycheng/ncardwell): implement a rate-based approach.
 */
static bool bbr_set_cwnd_to_recover_or_restore(
        struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
        u32 cwnd = tp->snd_cwnd;

        /* An ACK for P pkts should release at most 2*P packets. We do this
         * in two steps. First, here we deduct the number of lost packets.
         * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
         */
        if (rs->losses > 0)
                cwnd = max_t(s32, cwnd - rs->losses, 1);

        if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
                /* Starting 1st round of Recovery, so do packet conservation. */
                bbr->packet_conservation = 1;
                bbr->next_rtt_delivered = tp->delivered;  /* start round now */
                /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
                cwnd = tcp_packets_in_flight(tp) + acked;
        } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
                /* Exiting loss recovery; restore cwnd saved before recovery. */
                cwnd = max(cwnd, bbr->prior_cwnd);
                bbr->packet_conservation = 0;
        }
        bbr->prev_ca_state = state;

        if (bbr->packet_conservation) {
                *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
                return true;    /* yes, using packet conservation */
        }
        *new_cwnd = cwnd;
        return false;
}

/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
 * has drawn us down below target), or snap down to target if we're above it.
 */
static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
                         u32 acked, u32 bw, int gain)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u32 cwnd = tp->snd_cwnd, target_cwnd = 0;

        if (!acked)
                goto done;  /* no packet fully ACKed; just apply caps */

        if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
                goto done;

        target_cwnd = bbr_bdp(sk, bw, gain);

        /* Increment the cwnd to account for excess ACKed data that seems
         * due to aggregation (of data and/or ACKs) visible in the ACK stream.
         */
        target_cwnd += bbr_ack_aggregation_cwnd(sk);
        target_cwnd = bbr_quantization_budget(sk, target_cwnd);

        /* If we're below target cwnd, slow start cwnd toward target cwnd. */
        if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
                cwnd = min(cwnd + acked, target_cwnd);
        else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
                cwnd = cwnd + acked;
        cwnd = max(cwnd, bbr_cwnd_min_target);

done:
        tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);   /* apply global cap */
        if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
                tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
}

/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
static bool bbr_is_next_cycle_phase(struct sock *sk,
                                    const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        bool is_full_length =
                tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
                bbr->min_rtt_us;
        u32 inflight, bw;

        /* The pacing_gain of 1.0 paces at the estimated bw to try to fully
         * use the pipe without increasing the queue.
         */
        if (bbr->pacing_gain == BBR_UNIT)
                return is_full_length;          /* just use wall clock time */

        inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
        bw = bbr_max_bw(sk);

        /* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
         * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
         * small (e.g. on a LAN). We do not persist if packets are lost, since
         * a path with small buffers may not hold that much.
         */
        if (bbr->pacing_gain > BBR_UNIT)
                return is_full_length &&
                        (rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
                         inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));

        /* A pacing_gain < 1.0 tries to drain extra queue we added if bw
         * probing didn't find more bw. If inflight falls to match BDP then we
         * estimate queue is drained; persisting would underutilize the pipe.
         */
        return is_full_length ||
                inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}

static void bbr_advance_cycle_phase(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
        bbr->cycle_mstamp = tp->delivered_mstamp;
}

/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
static void bbr_update_cycle_phase(struct sock *sk,
                                   const struct rate_sample *rs)
{
        struct bbr *bbr = inet_csk_ca(sk);

        if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
                bbr_advance_cycle_phase(sk);
}

static void bbr_reset_startup_mode(struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->mode = BBR_STARTUP;
}

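/* Enter PROBE_BW at a random point in the gain cycle. Note: the arithmetic
 * below yields cycle_idx values 1..7 and the subsequent advance maps them to
 * {2..7, 0}, so a fresh PROBE_BW cycle never starts in the 3/4 "drain" phase
 * (index 1).
 */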
static void bbr_reset_probe_bw_mode(struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->mode = BBR_PROBE_BW;
        bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
        bbr_advance_cycle_phase(sk);    /* flip to next phase of gain cycle */
}

static void bbr_reset_mode(struct sock *sk)
{
        if (!bbr_full_bw_reached(sk))
                bbr_reset_startup_mode(sk);
        else
                bbr_reset_probe_bw_mode(sk);
}

/* Start a new long-term sampling interval. */
static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
        bbr->lt_last_delivered = tp->delivered;
        bbr->lt_last_lost = tp->lost;
        bbr->lt_rtt_cnt = 0;
}

/* Completely reset long-term bandwidth sampling. */
static void bbr_reset_lt_bw_sampling(struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->lt_bw = 0;
        bbr->lt_use_bw = 0;
        bbr->lt_is_sampling = false;
        bbr_reset_lt_bw_sampling_interval(sk);
}

/* Long-term bw sampling interval is done. Estimate whether we're policed. */
static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
{
        struct bbr *bbr = inet_csk_ca(sk);
        u32 diff;

        if (bbr->lt_bw) {  /* do we have bw from a previous interval? */
                /* Is new bw close to the lt_bw from the previous interval? */
                diff = abs(bw - bbr->lt_bw);
                if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
                    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
                     bbr_lt_bw_diff)) {
                        /* All criteria are met; estimate we're policed. */
                        bbr->lt_bw = (bw + bbr->lt_bw) >> 1;  /* avg 2 intvls */
                        bbr->lt_use_bw = 1;
                        bbr->pacing_gain = BBR_UNIT;  /* try to avoid drops */
                        bbr->lt_rtt_cnt = 0;
                        return;
                }
        }
        bbr->lt_bw = bw;
        bbr_reset_lt_bw_sampling_interval(sk);
}

/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
 * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
 * explicitly models their policed rate, to reduce unnecessary losses. We
 * estimate that we're policed if we see 2 consecutive sampling intervals with
 * consistent throughput and high packet loss. If we think we're being policed,
 * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
 */
static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u32 lost, delivered;
        u64 bw;
        u32 t;

        if (bbr->lt_use_bw) {   /* already using long-term rate, lt_bw? */
                if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
                    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
                        bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
                        bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
                }
                return;
        }

        /* Wait for the first loss before sampling, to let the policer exhaust
         * its tokens and estimate the steady-state rate allowed by the policer.
         * Starting samples earlier includes bursts that over-estimate the bw.
         */
        if (!bbr->lt_is_sampling) {
                if (!rs->losses)
                        return;
                bbr_reset_lt_bw_sampling_interval(sk);
                bbr->lt_is_sampling = true;
        }

        /* To avoid underestimates, reset sampling if we run out of data. */
        if (rs->is_app_limited) {
                bbr_reset_lt_bw_sampling(sk);
                return;
        }

        if (bbr->round_start)
                bbr->lt_rtt_cnt++;      /* count round trips in this interval */
        if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
                return;         /* sampling interval needs to be longer */
        if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
                bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
                return;
        }

        /* End sampling interval when a packet is lost, so we estimate the
         * policer tokens were exhausted. Stopping the sampling before the
         * tokens are exhausted under-estimates the policed rate.
         */
        if (!rs->losses)
                return;

        /* Calculate packets lost and delivered in sampling interval. */
        lost = tp->lost - bbr->lt_last_lost;
        delivered = tp->delivered - bbr->lt_last_delivered;
        /* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
        if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
                return;

        /* Find average delivery rate in this sampling interval. */
        t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
        if ((s32)t < 1)
                return;         /* interval is less than one ms, so wait */
        /* Check if can multiply without overflow */
        if (t >= ~0U / USEC_PER_MSEC) {
                bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
                return;
        }
        t *= USEC_PER_MSEC;
        bw = (u64)delivered * BW_UNIT;
        do_div(bw, t);
        bbr_lt_bw_interval_done(sk, bw);
}

/* Estimate the bandwidth based on how fast packets are delivered */
static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u64 bw;

        bbr->round_start = 0;
        if (rs->delivered < 0 || rs->interval_us <= 0)
                return; /* Not a valid observation */

        /* See if we've reached the next RTT */
        if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
                bbr->next_rtt_delivered = tp->delivered;
                bbr->rtt_cnt++;
                bbr->round_start = 1;
                bbr->packet_conservation = 0;
        }

        bbr_lt_bw_sampling(sk, rs);

        /* Divide delivered by the interval to find a (lower bound) bottleneck
         * bandwidth sample. Delivered is in packets and interval_us in uS and
         * ratio will be <<1 for most connections. So delivered is first scaled.
         */
        bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);

        /* If this sample is application-limited, it is likely to have a very
         * low delivered count that represents application behavior rather than
         * the available network rate. Such a sample could drag down estimated
         * bw, causing needless slow-down. Thus, to continue to send at the
         * last measured network rate, we filter out app-limited samples unless
         * they describe the path bw at least as well as our bw model.
         *
         * So the goal during app-limited phase is to proceed with the best
         * network rate no matter how long. We automatically leave this
         * phase when app writes faster than the network can deliver :)
         */
        if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
                /* Incorporate new sample into our max bw filter. */
                minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
        }
}

/* Estimates the windowed max degree of ack aggregation.
 * This is used to provision extra in-flight data to keep sending during
 * inter-ACK silences.
 *
 * Degree of ack aggregation is estimated as extra data acked beyond expected.
 *
 * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
 * cwnd += max_extra_acked
 *
 * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
 * Max filter is an approximate sliding window of 5-10 (packet timed) round
 * trips.
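 *
 * For example, with bw ~= 10 pkts/msec and a 5 msec sampling epoch we expect
 * ~50 packets to be ACKed; if 80 packets are (S)ACKed in that epoch, the
 * excess of 30 packets (capped by cwnd) is fed into the extra_acked filter.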
813*4882a593Smuzhiyun */
bbr_update_ack_aggregation(struct sock * sk,const struct rate_sample * rs)814*4882a593Smuzhiyun static void bbr_update_ack_aggregation(struct sock *sk,
815*4882a593Smuzhiyun const struct rate_sample *rs)
816*4882a593Smuzhiyun {
817*4882a593Smuzhiyun u32 epoch_us, expected_acked, extra_acked;
818*4882a593Smuzhiyun struct bbr *bbr = inet_csk_ca(sk);
819*4882a593Smuzhiyun struct tcp_sock *tp = tcp_sk(sk);
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
822*4882a593Smuzhiyun rs->delivered < 0 || rs->interval_us <= 0)
823*4882a593Smuzhiyun return;
824*4882a593Smuzhiyun
825*4882a593Smuzhiyun if (bbr->round_start) {
826*4882a593Smuzhiyun bbr->extra_acked_win_rtts = min(0x1F,
827*4882a593Smuzhiyun bbr->extra_acked_win_rtts + 1);
828*4882a593Smuzhiyun if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
829*4882a593Smuzhiyun bbr->extra_acked_win_rtts = 0;
830*4882a593Smuzhiyun bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
831*4882a593Smuzhiyun 0 : 1;
832*4882a593Smuzhiyun bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
833*4882a593Smuzhiyun }
834*4882a593Smuzhiyun }
835*4882a593Smuzhiyun
836*4882a593Smuzhiyun /* Compute how many packets we expected to be delivered over epoch. */
837*4882a593Smuzhiyun epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
838*4882a593Smuzhiyun bbr->ack_epoch_mstamp);
839*4882a593Smuzhiyun expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
840*4882a593Smuzhiyun
841*4882a593Smuzhiyun /* Reset the aggregation epoch if ACK rate is below expected rate or
842*4882a593Smuzhiyun * significantly large no. of ack received since epoch (potentially
843*4882a593Smuzhiyun * quite old epoch).
844*4882a593Smuzhiyun */
845*4882a593Smuzhiyun if (bbr->ack_epoch_acked <= expected_acked ||
846*4882a593Smuzhiyun (bbr->ack_epoch_acked + rs->acked_sacked >=
847*4882a593Smuzhiyun bbr_ack_epoch_acked_reset_thresh)) {
848*4882a593Smuzhiyun bbr->ack_epoch_acked = 0;
849*4882a593Smuzhiyun bbr->ack_epoch_mstamp = tp->delivered_mstamp;
850*4882a593Smuzhiyun expected_acked = 0;
851*4882a593Smuzhiyun }
852*4882a593Smuzhiyun
853*4882a593Smuzhiyun /* Compute excess data delivered, beyond what was expected. */
854*4882a593Smuzhiyun bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
855*4882a593Smuzhiyun bbr->ack_epoch_acked + rs->acked_sacked);
856*4882a593Smuzhiyun extra_acked = bbr->ack_epoch_acked - expected_acked;
857*4882a593Smuzhiyun extra_acked = min(extra_acked, tp->snd_cwnd);
858*4882a593Smuzhiyun if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
859*4882a593Smuzhiyun bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
860*4882a593Smuzhiyun }
861*4882a593Smuzhiyun
862*4882a593Smuzhiyun /* Estimate when the pipe is full, using the change in delivery rate: BBR
863*4882a593Smuzhiyun * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
864*4882a593Smuzhiyun * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
865*4882a593Smuzhiyun * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
866*4882a593Smuzhiyun * higher rwin, 3: we get higher delivery rate samples. Or transient
867*4882a593Smuzhiyun * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
868*4882a593Smuzhiyun * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
869*4882a593Smuzhiyun */
bbr_check_full_bw_reached(struct sock * sk,const struct rate_sample * rs)870*4882a593Smuzhiyun static void bbr_check_full_bw_reached(struct sock *sk,
871*4882a593Smuzhiyun const struct rate_sample *rs)
872*4882a593Smuzhiyun {
873*4882a593Smuzhiyun struct bbr *bbr = inet_csk_ca(sk);
874*4882a593Smuzhiyun u32 bw_thresh;
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
877*4882a593Smuzhiyun return;
878*4882a593Smuzhiyun
879*4882a593Smuzhiyun bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
880*4882a593Smuzhiyun if (bbr_max_bw(sk) >= bw_thresh) {
881*4882a593Smuzhiyun bbr->full_bw = bbr_max_bw(sk);
882*4882a593Smuzhiyun bbr->full_bw_cnt = 0;
883*4882a593Smuzhiyun return;
884*4882a593Smuzhiyun }
885*4882a593Smuzhiyun ++bbr->full_bw_cnt;
886*4882a593Smuzhiyun bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
887*4882a593Smuzhiyun }
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun /* If pipe is probably full, drain the queue and then enter steady-state. */
bbr_check_drain(struct sock * sk,const struct rate_sample * rs)890*4882a593Smuzhiyun static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
891*4882a593Smuzhiyun {
892*4882a593Smuzhiyun struct bbr *bbr = inet_csk_ca(sk);
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
895*4882a593Smuzhiyun bbr->mode = BBR_DRAIN; /* drain queue we created */
		tcp_sk(sk)->snd_ssthresh =
				bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
	}	/* fall through to check if in-flight is already small: */
	if (bbr->mode == BBR_DRAIN &&
	    bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
	    bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
}

static void bbr_check_probe_rtt_done(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (!(bbr->probe_rtt_done_stamp &&
	      after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
		return;

	bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
	tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
	bbr_reset_mode(sk);
}

/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
 * periodically drain the bottleneck queue, to converge to measure the true
 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
 * small (reducing queuing delay and packet loss) and achieve fairness among
 * BBR flows.
 *
 * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
 * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
 * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
 * re-enter the previous mode. BBR uses 200ms to approximately bound the
 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
 *
 * Note that flows need only pay 2% if they are busy sending over the last 10
 * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
 * natural silences or low-rate periods within 10 seconds where the rate is low
 * enough for long enough to drain the flow's queue at the bottleneck. We pick
 * up these min RTT measurements opportunistically with our min_rtt filter. :-)
 */
static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool filter_expired;

	/* Track min RTT seen in the min_rtt_win_sec filter window: */
	filter_expired = after(tcp_jiffies32,
			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
	if (rs->rtt_us >= 0 &&
	    (rs->rtt_us < bbr->min_rtt_us ||
	     (filter_expired && !rs->is_ack_delayed))) {
		bbr->min_rtt_us = rs->rtt_us;
		bbr->min_rtt_stamp = tcp_jiffies32;
	}

	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
	    !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
		bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
		bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
		bbr->probe_rtt_done_stamp = 0;
	}

	if (bbr->mode == BBR_PROBE_RTT) {
		/* Ignore low rate samples during this mode. */
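		/* tp->app_limited holds a "delivered" count: rate samples are
		 * treated as app-limited until tp->delivered passes it, so the
		 * tiny-cwnd samples taken here cannot be mistaken for the
		 * path's real bandwidth. The "?: 1" keeps the marker nonzero
		 * (0 means "not app-limited") even if nothing has been
		 * delivered or is in flight yet.
		 */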
		tp->app_limited =
			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
		/* Maintain min packets in flight for max(200 ms, 1 round). */
		if (!bbr->probe_rtt_done_stamp &&
		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
			bbr->probe_rtt_done_stamp = tcp_jiffies32 +
				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
			bbr->probe_rtt_round_done = 0;
			bbr->next_rtt_delivered = tp->delivered;
		} else if (bbr->probe_rtt_done_stamp) {
			if (bbr->round_start)
				bbr->probe_rtt_round_done = 1;
			if (bbr->probe_rtt_round_done)
				bbr_check_probe_rtt_done(sk);
		}
	}
	/* Restart after idle ends only once we process a new S/ACK for data */
	if (rs->delivered > 0)
		bbr->idle_restart = 0;
}
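
/* A rough sketch of the PROBE_RTT sequence driven by the code above: when the
 * min_rtt filter expires we switch to BBR_PROBE_RTT and bbr_set_cwnd() caps
 * cwnd at bbr_cwnd_min_target; once inflight has actually dropped to that
 * level we arm probe_rtt_done_stamp 200 ms in the future; after that stamp
 * and at least one packet-timed round have both passed,
 * bbr_check_probe_rtt_done() restores the saved cwnd and bbr_reset_mode()
 * picks the next mode depending on whether full bandwidth was reached.
 */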

static void bbr_update_gains(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	switch (bbr->mode) {
	case BBR_STARTUP:
		bbr->pacing_gain = bbr_high_gain;
		bbr->cwnd_gain = bbr_high_gain;
		break;
	case BBR_DRAIN:
		bbr->pacing_gain = bbr_drain_gain;	/* slow, to drain */
		bbr->cwnd_gain = bbr_high_gain;		/* keep cwnd */
		break;
	case BBR_PROBE_BW:
		bbr->pacing_gain = (bbr->lt_use_bw ?
				    BBR_UNIT :
				    bbr_pacing_gain[bbr->cycle_idx]);
		bbr->cwnd_gain = bbr_cwnd_gain;
		break;
	case BBR_PROBE_RTT:
		bbr->pacing_gain = BBR_UNIT;
		bbr->cwnd_gain = BBR_UNIT;
		break;
	default:
		WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
		break;
	}
}

static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
	bbr_update_bw(sk, rs);
	bbr_update_ack_aggregation(sk, rs);
	bbr_update_cycle_phase(sk, rs);
	bbr_check_full_bw_reached(sk, rs);
	bbr_check_drain(sk, rs);
	bbr_update_min_rtt(sk, rs);
	bbr_update_gains(sk);
}

static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw;

	bbr_update_model(sk, rs);

	bw = bbr_bw(sk);
	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}

static void bbr_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->prior_cwnd = 0;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	bbr->rtt_cnt = 0;
	bbr->next_rtt_delivered = tp->delivered;
	bbr->prev_ca_state = TCP_CA_Open;
	bbr->packet_conservation = 0;

	bbr->probe_rtt_done_stamp = 0;
	bbr->probe_rtt_round_done = 0;
	bbr->min_rtt_us = tcp_min_rtt(tp);
	bbr->min_rtt_stamp = tcp_jiffies32;

	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */

	bbr->has_seen_rtt = 0;
	bbr_init_pacing_rate_from_rtt(sk);

	bbr->round_start = 0;
	bbr->idle_restart = 0;
	bbr->full_bw_reached = 0;
	bbr->full_bw = 0;
	bbr->full_bw_cnt = 0;
	bbr->cycle_mstamp = 0;
	bbr->cycle_idx = 0;
	bbr_reset_lt_bw_sampling(sk);
	bbr_reset_startup_mode(sk);

	bbr->ack_epoch_mstamp = tp->tcp_mstamp;
	bbr->ack_epoch_acked = 0;
	bbr->extra_acked_win_rtts = 0;
	bbr->extra_acked_win_idx = 0;
	bbr->extra_acked[0] = 0;
	bbr->extra_acked[1] = 0;

	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}

static u32 bbr_sndbuf_expand(struct sock *sk)
{
	/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
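	/* Note: the value returned here is used by the core TCP stack as a
	 * multiplier on (roughly) a cwnd's worth of buffer when auto-sizing
	 * the socket send buffer.
	 */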
	return 3;
}

/* In theory BBR does not need to undo the cwnd since it does not
 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
 */
static u32 bbr_undo_cwnd(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
	bbr->full_bw_cnt = 0;
	bbr_reset_lt_bw_sampling(sk);
	return tcp_sk(sk)->snd_cwnd;
}

/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
static u32 bbr_ssthresh(struct sock *sk)
{
	bbr_save_cwnd(sk);
	return tcp_sk(sk)->snd_ssthresh;
}

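/* Export BBR state for diagnostics via inet_diag, e.g. the bbr:(...) block
 * that "ss -ti" prints for sockets using this congestion control.
 */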
static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info)
{
	if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct bbr *bbr = inet_csk_ca(sk);
		u64 bw = bbr_bw(sk);

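		/* bbr_bw() is in packets per usec, scaled up by 2^BW_SCALE;
		 * convert it to bytes per second for reporting.
		 */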
		bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
		memset(&info->bbr, 0, sizeof(info->bbr));
		info->bbr.bbr_bw_lo		= (u32)bw;
		info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
		info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
		info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
		info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
		*attr = INET_DIAG_BBRINFO;
		return sizeof(info->bbr);
	}
	return 0;
}

static void bbr_set_state(struct sock *sk, u8 new_state)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		struct rate_sample rs = { .losses = 1 };

		bbr->prev_ca_state = TCP_CA_Loss;
		bbr->full_bw = 0;
		bbr->round_start = 1;	/* treat RTO like end of a round */
		bbr_lt_bw_sampling(sk, &rs);
	}
}

static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "bbr",
	.owner		= THIS_MODULE,
	.init		= bbr_init,
	.cong_control	= bbr_main,
	.sndbuf_expand	= bbr_sndbuf_expand,
	.undo_cwnd	= bbr_undo_cwnd,
	.cwnd_event	= bbr_cwnd_event,
	.ssthresh	= bbr_ssthresh,
	.min_tso_segs	= bbr_min_tso_segs,
	.get_info	= bbr_get_info,
	.set_state	= bbr_set_state,
};
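
/* A minimal usage sketch: once this module is loaded and the ops above are
 * registered, BBR can be selected system-wide with
 *	sysctl -w net.ipv4.tcp_congestion_control=bbr
 * or per socket with
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bbr", strlen("bbr"));
 */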

static int __init bbr_register(void)
{
	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
}

static void __exit bbr_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
}

module_init(bbr_register);
module_exit(bbr_unregister);

MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");