// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIA Delay-Gradient (CDG) congestion control
 *
 * This implementation is based on the paper:
 *   D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
 *   delay gradients." In IFIP Networking, pages 328-341. Springer, 2011.
 *
 * Scavenger traffic (Less-than-Best-Effort) should disable coexistence
 * heuristics using parameters use_shadow=0 and use_ineff=0.
 *
 * Parameters window, backoff_beta, and backoff_factor are crucial for
 * throughput and delay. Future work is needed to determine better defaults,
 * and to provide guidelines for use in different environments/contexts.
 *
 * Except for window, knobs are configured via /sys/module/tcp_cdg/parameters/.
 * Parameter window is only configurable when loading tcp_cdg as a module.
 *
 * Notable differences from paper/FreeBSD:
 *   o Using Hybrid Slow start and Proportional Rate Reduction.
 *   o Add toggle for shadow window mechanism. Suggested by David Hayes.
 *   o Add toggle for non-congestion loss tolerance.
 *   o Scaling parameter G is changed to a backoff factor;
 *     conversion is given by: backoff_factor = 1000/(G * window).
 *   o Limit shadow window to 2 * cwnd, or to cwnd when application limited.
 *   o More accurate e^-x.
 */
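
/* Worked example of the backoff_factor conversion above (a sketch, assuming
 * the paper/FreeBSD scaling parameter G = 3): with the default window = 8,
 * backoff_factor = 1000 / (3 * 8) ~= 42, which matches the module default
 * below.
 */
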
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/sched/clock.h>

#include <net/tcp.h>

#define HYSTART_ACK_TRAIN	1
#define HYSTART_DELAY		2

static int window __read_mostly = 8;
static unsigned int backoff_beta __read_mostly = 0.7071 * 1024; /* sqrt 0.5 */
static unsigned int backoff_factor __read_mostly = 42;
static unsigned int hystart_detect __read_mostly = 3;
static unsigned int use_ineff __read_mostly = 5;
static bool use_shadow __read_mostly = true;
static bool use_tolerance __read_mostly;

module_param(window, int, 0444);
MODULE_PARM_DESC(window, "gradient window size (power of two <= 256)");
module_param(backoff_beta, uint, 0644);
MODULE_PARM_DESC(backoff_beta, "backoff beta (0-1024)");
module_param(backoff_factor, uint, 0644);
MODULE_PARM_DESC(backoff_factor, "backoff probability scale factor");
module_param(hystart_detect, uint, 0644);
MODULE_PARM_DESC(hystart_detect, "use Hybrid Slow start "
		 "(0: disabled, 1: ACK train, 2: delay threshold, 3: both)");
module_param(use_ineff, uint, 0644);
MODULE_PARM_DESC(use_ineff, "use ineffectual backoff detection (threshold)");
module_param(use_shadow, bool, 0644);
MODULE_PARM_DESC(use_shadow, "use shadow window heuristic");
module_param(use_tolerance, bool, 0644);
MODULE_PARM_DESC(use_tolerance, "use loss tolerance heuristic");
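
/* Example usage (a sketch; assumes tcp_cdg is built as a module):
 *
 *   modprobe tcp_cdg window=16
 *   echo 1 > /sys/module/tcp_cdg/parameters/use_tolerance
 *   sysctl -w net.ipv4.tcp_congestion_control=cdg
 *
 * For scavenger (Less-than-Best-Effort) traffic, additionally set
 * use_shadow=0 and use_ineff=0, as noted in the header comment.
 */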

struct cdg_minmax {
	union {
		struct {
			s32 min;
			s32 max;
		};
		u64 v64;
	};
};

enum cdg_state {
	CDG_UNKNOWN = 0,
	CDG_NONFULL = 1,
	CDG_FULL    = 2,
	CDG_BACKOFF = 3,
};

struct cdg {
	struct cdg_minmax rtt;		/* RTT min/max for the current round */
	struct cdg_minmax rtt_prev;	/* RTT min/max for the previous round */
	struct cdg_minmax *gradients;	/* circular window of per-round gradients */
	struct cdg_minmax gsum;		/* moving sums over the gradient window */
	bool gfilled;			/* true once the gradient window is full */
	u8  tail;			/* next slot to overwrite in gradients[] */
	u8  state;			/* enum cdg_state */
	u8  delack;			/* delayed ACK estimation counter */
	u32 rtt_seq;			/* snd_nxt at the start of this round */
	u32 shadow_wnd;			/* shadow window (coexistence heuristic) */
	u16 backoff_cnt;		/* consecutive backoffs without RTT decrease */
	u16 sample_cnt;			/* HyStart delay samples in this round */
	s32 delay_min;			/* minimum RTT seen (usec) */
	u32 last_ack;			/* usec stamp of the last ACK in the train */
	u32 round_start;		/* usec stamp of the ACK train start */
};

/**
 * nexp_u32 - negative base-e exponential
 * @ux: x in units of micro
 *
 * Returns exp(ux * -1e-6) * U32_MAX.
 */
static u32 __pure nexp_u32(u32 ux)
{
	static const u16 v[] = {
		/* exp(-x)*65536-1 for x = 0, 0.000256, 0.000512, ... */
		65535,
		65518, 65501, 65468, 65401, 65267, 65001, 64470, 63422,
		61378, 57484, 50423, 38795, 22965, 8047,  987,   14,
	};
	u32 msb = ux >> 8;
	u32 res;
	int i;

	/* Cut off when ux >= 2^24 (actual result is <= 222/U32_MAX). */
	if (msb > U16_MAX)
		return 0;

	/* Scale first eight bits linearly: */
	res = U32_MAX - (ux & 0xff) * (U32_MAX / 1000000);

	/* Obtain e^(x + y + ...) by computing e^x * e^y * ...: */
	for (i = 1; msb; i++, msb >>= 1) {
		u32 y = v[i & -(msb & 1)] + U32_C(1);

		res = ((u64)res * y) >> 16;
	}

	return res;
}
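
/* Worked example (a sketch): nexp_u32(1000000) computes approximately
 * exp(-1.0) scaled by U32_MAX, i.e. roughly 0.3679 * 4294967295 ~= 1.58e9.
 * The low eight bits of ux are handled by the linear term, and each set bit
 * of ux >> 8 multiplies in the corresponding precomputed factor from v[].
 */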

/* Based on the HyStart algorithm (by Ha et al.) that is implemented in
 * tcp_cubic. Differences/experimental changes:
 *   o Using Hayes' delayed ACK filter.
 *   o Using a usec clock for the ACK train.
 *   o Reset ACK train when application limited.
 *   o Invoked at any cwnd (i.e. also when cwnd < 16).
 *   o Invoked only when cwnd < ssthresh (i.e. not when cwnd == ssthresh).
 */
static void tcp_cdg_hystart_update(struct sock *sk)
{
	struct cdg *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->delay_min = min_not_zero(ca->delay_min, ca->rtt.min);
	if (ca->delay_min == 0)
		return;

	if (hystart_detect & HYSTART_ACK_TRAIN) {
		u32 now_us = tp->tcp_mstamp;

		if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {
			ca->last_ack = now_us;
			ca->round_start = now_us;
		} else if (before(now_us, ca->last_ack + 3000)) {
			/* ACKs within 3 ms of each other extend the train. */
			u32 base_owd = max(ca->delay_min / 2U, 125U);

			ca->last_ack = now_us;
			if (after(now_us, ca->round_start + base_owd)) {
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
				NET_ADD_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTTRAINCWND,
					      tp->snd_cwnd);
				tp->snd_ssthresh = tp->snd_cwnd;
				return;
			}
		}
	}

	if (hystart_detect & HYSTART_DELAY) {
		if (ca->sample_cnt < 8) {
			ca->sample_cnt++;
		} else {
			s32 thresh = max(ca->delay_min + ca->delay_min / 8U,
					 125U);

			if (ca->rtt.min > thresh) {
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
				NET_ADD_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTDELAYCWND,
					      tp->snd_cwnd);
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}
}
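
/* Worked example for tcp_cdg_hystart_update() above (a sketch): with
 * delay_min = 20000 usec, the ACK-train heuristic exits slow start once the
 * train spans more than max(20000 / 2, 125) = 10000 usec, and the delay
 * heuristic exits once a round's minimum RTT exceeds
 * 20000 + 20000 / 8 = 22500 usec.
 */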

static s32 tcp_cdg_grad(struct cdg *ca)
{
	s32 gmin = ca->rtt.min - ca->rtt_prev.min;
	s32 gmax = ca->rtt.max - ca->rtt_prev.max;
	s32 grad;

	if (ca->gradients) {
		ca->gsum.min += gmin - ca->gradients[ca->tail].min;
		ca->gsum.max += gmax - ca->gradients[ca->tail].max;
		ca->gradients[ca->tail].min = gmin;
		ca->gradients[ca->tail].max = gmax;
		ca->tail = (ca->tail + 1) & (window - 1);
		gmin = ca->gsum.min;
		gmax = ca->gsum.max;
	}

	/* We keep sums to ignore gradients during cwnd reductions;
	 * the paper's smoothed gradients otherwise simplify to:
	 * (rtt_latest - rtt_oldest) / window.
	 *
	 * We also drop division by window here.
	 */
	grad = gmin > 0 ? gmin : gmax;

	/* Extrapolate missing values in gradient window: */
	if (!ca->gfilled) {
		if (!ca->gradients && window > 1)
			grad *= window; /* Memory allocation failed. */
		else if (ca->tail == 0)
			ca->gfilled = true;
		else
			grad = (grad * window) / (int)ca->tail;
	}

	/* Backoff was effectual: */
	if (gmin <= -32 || gmax <= -32)
		ca->backoff_cnt = 0;

	if (use_tolerance) {
		/* Reduce small variations to zero: */
		gmin = DIV_ROUND_CLOSEST(gmin, 64);
		gmax = DIV_ROUND_CLOSEST(gmax, 64);

		if (gmin > 0 && gmax <= 0)
			ca->state = CDG_FULL;
		else if ((gmin > 0 && gmax > 0) || gmax < 0)
			ca->state = CDG_NONFULL;
	}
	return grad;
}
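
/* Worked example for tcp_cdg_grad() (a sketch): with window = 8, if the
 * per-round RTT minimum rises by 100 usec for eight consecutive rounds,
 * gsum.min reaches 800. That windowed sum is what gets returned; the
 * division by window from the paper is folded into backoff_factor.
 */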

static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
{
	struct cdg *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (prandom_u32() <= nexp_u32(grad * backoff_factor))
		return false;

	if (use_ineff) {
		ca->backoff_cnt++;
		if (ca->backoff_cnt > use_ineff)
			return false;
	}

	ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
	ca->state = CDG_BACKOFF;
	tcp_enter_cwr(sk);
	return true;
}
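
/* Worked example for tcp_cdg_backoff() (a sketch): the connection backs off
 * with probability 1 - exp(-grad * backoff_factor / 1e6). For grad = 1000
 * (usec, summed over the window) and the default backoff_factor = 42, that
 * is 1 - exp(-0.042) ~= 4.1% per measurement round.
 */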

/* Not called in CWR or Recovery state. */
static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct cdg *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 prior_snd_cwnd;
	u32 incr;

	if (tcp_in_slow_start(tp) && hystart_detect)
		tcp_cdg_hystart_update(sk);

	if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
		s32 grad = 0;

		if (ca->rtt_prev.v64)
			grad = tcp_cdg_grad(ca);
		ca->rtt_seq = tp->snd_nxt;
		ca->rtt_prev = ca->rtt;
		ca->rtt.v64 = 0;
		ca->last_ack = 0;
		ca->sample_cnt = 0;

		if (grad > 0 && tcp_cdg_backoff(sk, grad))
			return;
	}

	if (!tcp_is_cwnd_limited(sk)) {
		ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd);
		return;
	}

	prior_snd_cwnd = tp->snd_cwnd;
	tcp_reno_cong_avoid(sk, ack, acked);

	incr = tp->snd_cwnd - prior_snd_cwnd;
	/* max() guards against u32 wraparound of the addition. */
	ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
}

static void tcp_cdg_acked(struct sock *sk, const struct ack_sample *sample)
{
	struct cdg *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (sample->rtt_us <= 0)
		return;

	/* A heuristic for filtering delayed ACKs, adapted from:
	 * D.A. Hayes. "Timing enhancements to the FreeBSD kernel to support
	 * delay and rate based TCP mechanisms." TR 100219A. CAIA, 2010.
	 */
	if (tp->sacked_out == 0) {
		if (sample->pkts_acked == 1 && ca->delack) {
			/* A delayed ACK is only used for the minimum if it is
			 * provably lower than an existing non-zero minimum.
			 */
			ca->rtt.min = min(ca->rtt.min, sample->rtt_us);
			ca->delack--;
			return;
		} else if (sample->pkts_acked > 1 && ca->delack < 5) {
			ca->delack++;
		}
	}

	ca->rtt.min = min_not_zero(ca->rtt.min, sample->rtt_us);
	ca->rtt.max = max(ca->rtt.max, sample->rtt_us);
}

static u32 tcp_cdg_ssthresh(struct sock *sk)
{
	struct cdg *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (ca->state == CDG_BACKOFF)
		return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);

	if (ca->state == CDG_NONFULL && use_tolerance)
		return tp->snd_cwnd;

	ca->shadow_wnd = min(ca->shadow_wnd >> 1, tp->snd_cwnd);
	if (use_shadow)
		return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1);
	return max(2U, tp->snd_cwnd >> 1);
}
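
/* Worked example for tcp_cdg_ssthresh() (a sketch): after a delay-gradient
 * backoff, cwnd is scaled by backoff_beta/1024. With the default
 * backoff_beta = 724 (~= sqrt(0.5) * 1024), cwnd = 100 becomes
 * (100 * 724) >> 10 = 70.
 */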

static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
{
	struct cdg *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct cdg_minmax *gradients;

	switch (ev) {
	case CA_EVENT_CWND_RESTART:
		gradients = ca->gradients;
		if (gradients)
			memset(gradients, 0, window * sizeof(gradients[0]));
		memset(ca, 0, sizeof(*ca));

		ca->gradients = gradients;
		ca->rtt_seq = tp->snd_nxt;
		ca->shadow_wnd = tp->snd_cwnd;
		break;
	case CA_EVENT_COMPLETE_CWR:
		ca->state = CDG_UNKNOWN;
		ca->rtt_seq = tp->snd_nxt;
		ca->rtt_prev = ca->rtt;
		ca->rtt.v64 = 0;
		break;
	default:
		break;
	}
}

static void tcp_cdg_init(struct sock *sk)
{
	struct cdg *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->gradients = NULL;
	/* We silently fall back to window = 1 if allocation fails. */
	if (window > 1)
		ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
					GFP_NOWAIT | __GFP_NOWARN);
	ca->rtt_seq = tp->snd_nxt;
	ca->shadow_wnd = tp->snd_cwnd;
}

static void tcp_cdg_release(struct sock *sk)
{
	struct cdg *ca = inet_csk_ca(sk);

	kfree(ca->gradients);
	ca->gradients = NULL;
}

static struct tcp_congestion_ops tcp_cdg __read_mostly = {
	.cong_avoid = tcp_cdg_cong_avoid,
	.cwnd_event = tcp_cdg_cwnd_event,
	.pkts_acked = tcp_cdg_acked,
	.undo_cwnd = tcp_reno_undo_cwnd,
	.ssthresh = tcp_cdg_ssthresh,
	.release = tcp_cdg_release,
	.init = tcp_cdg_init,
	.owner = THIS_MODULE,
	.name = "cdg",
};

static int __init tcp_cdg_register(void)
{
	if (backoff_beta > 1024 || window < 1 || window > 256)
		return -ERANGE;
	if (!is_power_of_2(window))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct cdg) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_cdg);
	return 0;
}

static void __exit tcp_cdg_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_cdg);
}

module_init(tcp_cdg_register);
module_exit(tcp_cdg_unregister);
MODULE_AUTHOR("Kenneth Klette Jonassen");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP CDG");