// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
 *
 *  Changes to meet Linux coding standards, and DCCP infrastructure fixes.
 *
 *  Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

/*
 * This implementation should follow RFC 4341
 */
#include <linux/slab.h>
#include "../feat.h"
#include "ccid2.h"


#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
static bool ccid2_debug;
#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
#else
#define ccid2_pr_debug(format, a...)
#endif

static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
{
        struct ccid2_seq *seqp;
        int i;

        /* check if we have space to preserve the pointer to the buffer */
        if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
                               sizeof(struct ccid2_seq *)))
                return -ENOMEM;

        /* allocate buffer and initialize linked list */
        seqp = kmalloc_array(CCID2_SEQBUF_LEN, sizeof(struct ccid2_seq),
                             gfp_any());
        if (seqp == NULL)
                return -ENOMEM;

        for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
                seqp[i].ccid2s_next = &seqp[i + 1];
                seqp[i + 1].ccid2s_prev = &seqp[i];
        }
        seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
        seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];

        /* This is the first allocation. Initiate the head and tail. */
        if (hc->tx_seqbufc == 0)
                hc->tx_seqh = hc->tx_seqt = seqp;
        else {
                /* link the existing list with the one we just created */
                hc->tx_seqh->ccid2s_next = seqp;
                seqp->ccid2s_prev = hc->tx_seqh;

                hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
                seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
        }

        /* store the original pointer to the buffer so we can free it */
        hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
        hc->tx_seqbufc++;

        return 0;
}

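/*
 * Layout sketch (added for clarity): the buffers allocated above are spliced
 * into a single circular, doubly-linked packet history. tx_seqh points at the
 * next free slot for an outgoing packet, tx_seqt at the oldest entry still
 * awaiting acknowledgement; when the slot after tx_seqh would collide with
 * tx_seqt, ccid2_hc_tx_packet_sent() grows the ring by another
 * CCID2_SEQBUF_LEN entries via this function.
 */
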
static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
        if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
                return CCID_PACKET_WILL_DEQUEUE_LATER;
        return CCID_PACKET_SEND_AT_ONCE;
}

static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
        u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);

        /*
         * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
         * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
         * acceptable, since this causes starvation/deadlock whenever cwnd < 2.
         * The same problem arises when Ack Ratio is 0 (i.e. Ack Ratio disabled).
         */
        if (val == 0 || val > max_ratio) {
                DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
                val = max_ratio;
        }
        dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
                                   min_t(u32, val, DCCPF_ACK_RATIO_MAX));
}

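/*
 * Worked example (added for illustration): with cwnd = 5, max_ratio is
 * DIV_ROUND_UP(5, 2) = 3, so requesting an Ack Ratio of 0 or of anything
 * above 3 is clamped to 3, guaranteeing at least one Ack per window of data
 * in flight.
 */
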
static void ccid2_check_l_ack_ratio(struct sock *sk)
{
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

        /*
         * After a loss, idle period, application-limited period, or RTO we
         * need to check that the Ack Ratio is still less than the congestion
         * window. Otherwise, we will send an entire congestion window of
         * packets and get no response, because we haven't sent "Ack Ratio"
         * packets yet.
         * If the Ack Ratio does need to be reduced, we reduce it to half of
         * the congestion window (or 1 if that's zero) instead of to the
         * congestion window. This prevents problems if one Ack is lost.
         */
        if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
                ccid2_change_l_ack_ratio(sk, hc->tx_cwnd / 2 ? : 1U);
}

static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
{
        dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
                                   clamp_val(val, DCCPF_SEQ_WMIN,
                                                  DCCPF_SEQ_WMAX));
}

static void dccp_tasklet_schedule(struct sock *sk)
{
        struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;

        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                sock_hold(sk);
                __tasklet_schedule(t);
        }
}

static void ccid2_hc_tx_rto_expire(struct timer_list *t)
{
        struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
        struct sock *sk = hc->sk;
        const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
                goto out;
        }

        ccid2_pr_debug("RTO_EXPIRE\n");

        if (sk->sk_state == DCCP_CLOSED)
                goto out;

        /* back-off timer */
        hc->tx_rto <<= 1;
        if (hc->tx_rto > DCCP_RTO_MAX)
                hc->tx_rto = DCCP_RTO_MAX;

        /* adjust pipe, cwnd etc. */
        hc->tx_ssthresh = hc->tx_cwnd / 2;
        if (hc->tx_ssthresh < 2)
                hc->tx_ssthresh = 2;
        hc->tx_cwnd = 1;
        hc->tx_pipe = 0;

        /* clear state about stuff we sent */
        hc->tx_seqt = hc->tx_seqh;
        hc->tx_packets_acked = 0;

        /* clear Ack Ratio state */
        hc->tx_rpseq = 0;
        hc->tx_rpdupack = -1;
        ccid2_change_l_ack_ratio(sk, 1);

        /* if we were blocked before, we may now send cwnd=1 packet */
        if (sender_was_blocked)
                dccp_tasklet_schedule(sk);
        /* restart backed-off timer */
        sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

/*
 * Congestion window validation (RFC 2861).
 */
static bool ccid2_do_cwv = true;
module_param(ccid2_do_cwv, bool, 0644);
MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");

/**
 * ccid2_update_used_window - Track how much of cwnd is actually used
 * @hc: socket to update window
 * @new_wnd: new window value to add into the filter
 *
 * This is done in addition to CWV. The sender needs to have an idea of how many
 * packets may be in flight, to set the local Sequence Window value accordingly
 * (RFC 4340, 7.5.2). The CWV mechanism is exploited to keep track of the
 * maximum-used window. We use an EWMA low-pass filter to filter out noise.
 */
static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd)
{
        hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4;
}

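/*
 * Numeric sketch of the filter above (added for illustration): with gain 1/4,
 * tx_expected_wnd moves a quarter of the way towards each new sample. E.g.
 * starting from 16 and repeatedly observing a used window of 8 gives
 * (3*16 + 8)/4 = 14, then (3*14 + 8)/4 = 12 (integer division), converging
 * towards 8.
 */
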
/* This borrows the code of tcp_cwnd_application_limited() */
static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
{
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        /* don't reduce cwnd below the initial window (IW) */
        u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache),
            win_used = max(hc->tx_cwnd_used, init_win);

        if (win_used < hc->tx_cwnd) {
                hc->tx_ssthresh = max(hc->tx_ssthresh,
                                      (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2));
                hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1;
        }
        hc->tx_cwnd_used = 0;
        hc->tx_cwnd_stamp = now;

        ccid2_check_l_ack_ratio(sk);
}

/* This borrows the code of tcp_cwnd_restart() */
static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
{
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        u32 cwnd = hc->tx_cwnd, restart_cwnd,
            iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
        s32 delta = now - hc->tx_lsndtime;

        hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));

        /* don't reduce cwnd below the initial window (IW) */
        restart_cwnd = min(cwnd, iwnd);

        while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        hc->tx_cwnd = max(cwnd, restart_cwnd);
        hc->tx_cwnd_stamp = now;
        hc->tx_cwnd_used = 0;

        ccid2_check_l_ack_ratio(sk);
}

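/*
 * Example of the restart decay above (added; assumes iwnd = 3 packets): after
 * an idle period of 2.5 * RTO with cwnd = 20, the loop halves cwnd twice,
 * from 20 to 10 to 5, while ssthresh is first raised to 3/4 of the old cwnd
 * (15). A longer idle period would keep halving cwnd, but never below
 * restart_cwnd = min(20, 3) = 3.
 */
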
static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        const u32 now = ccid2_jiffies32;
        struct ccid2_seq *next;

        /* slow-start after idle periods (RFC 2581, RFC 2861) */
        if (ccid2_do_cwv && !hc->tx_pipe &&
            (s32)(now - hc->tx_lsndtime) >= hc->tx_rto)
                ccid2_cwnd_restart(sk, now);

        hc->tx_lsndtime = now;
        hc->tx_pipe += 1;

        /* see whether cwnd was fully used (RFC 2861), update expected window */
        if (ccid2_cwnd_network_limited(hc)) {
                ccid2_update_used_window(hc, hc->tx_cwnd);
                hc->tx_cwnd_used = 0;
                hc->tx_cwnd_stamp = now;
        } else {
                if (hc->tx_pipe > hc->tx_cwnd_used)
                        hc->tx_cwnd_used = hc->tx_pipe;

                ccid2_update_used_window(hc, hc->tx_cwnd_used);

                if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto)
                        ccid2_cwnd_application_limited(sk, now);
        }

        hc->tx_seqh->ccid2s_seq   = dp->dccps_gss;
        hc->tx_seqh->ccid2s_acked = 0;
        hc->tx_seqh->ccid2s_sent  = now;

        next = hc->tx_seqh->ccid2s_next;
        /* check if we need to alloc more space */
        if (next == hc->tx_seqt) {
                if (ccid2_hc_tx_alloc_seq(hc)) {
                        DCCP_CRIT("packet history - out of memory!");
                        /* FIXME: find a more graceful way to bail out */
                        return;
                }
                next = hc->tx_seqh->ccid2s_next;
                BUG_ON(next == hc->tx_seqt);
        }
        hc->tx_seqh = next;

        ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);

        /*
         * FIXME: The code below is broken and the variables have been removed
         * from the socket struct. The `ackloss' variable was always set to 0,
         * and with arsent there are several problems:
         *  (i) it doesn't just count the number of Acks, but all sent packets;
         *  (ii) it is expressed in # of packets, not # of windows, so the
         *  comparison below uses the wrong formula: Appendix A of RFC 4341
         *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
         *  of data with no lost or marked Ack packets. If arsent were the # of
         *  consecutive Acks received without loss, then Ack Ratio needs to be
         *  decreased by 1 when
         *      arsent >= K * cwnd / R = cwnd^2 / (R^3 - R^2)
         *  where cwnd / R is the number of Acks received per window of data
         *  (cf. RFC 4341, App. A). The problems are that
         *  - arsent counts other packets as well;
         *  - the comparison uses a formula different from RFC 4341;
         *  - computing a cubic/quadratic equation each time is too complicated.
         *  Hence a different algorithm is needed.
         */
#if 0
        /* Ack Ratio. Need to maintain a concept of how many windows we sent */
        hc->tx_arsent++;
        /* We had an ack loss in this window... */
        if (hc->tx_ackloss) {
                if (hc->tx_arsent >= hc->tx_cwnd) {
                        hc->tx_arsent  = 0;
                        hc->tx_ackloss = 0;
                }
        } else {
                /* No acks lost up to now... */
                /* decrease ack ratio if enough packets were sent */
                if (dp->dccps_l_ack_ratio > 1) {
                        /* XXX don't calculate denominator each time */
                        int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
                                    dp->dccps_l_ack_ratio;

                        denom = hc->tx_cwnd * hc->tx_cwnd / denom;

                        if (hc->tx_arsent >= denom) {
                                ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
                                hc->tx_arsent = 0;
                        }
                } else {
                        /* we can't increase ack ratio further [1] */
                        hc->tx_arsent = 0; /* or maybe set it to cwnd */
                }
        }
#endif

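        /*
         * Worked instance of the Appendix A bound quoted in the FIXME above
         * (added for illustration): with cwnd = 20 and Ack Ratio R = 2,
         * K = 20 / (4 - 2) = 10 windows, i.e. Ack Ratio could only be lowered
         * after 10 * 20/2 = 100 consecutive Acks arrive without loss, matching
         * the threshold cwnd^2 / (R^3 - R^2) = 400 / 4 = 100.
         */
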
        sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
        do {
                struct ccid2_seq *seqp = hc->tx_seqt;

                while (seqp != hc->tx_seqh) {
                        ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
                                       (unsigned long long)seqp->ccid2s_seq,
                                       seqp->ccid2s_acked, seqp->ccid2s_sent);
                        seqp = seqp->ccid2s_next;
                }
        } while (0);
        ccid2_pr_debug("=========\n");
#endif
}

/**
 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
 * @sk: socket to perform estimator on
 * @mrtt: measured RTT
 *
 * This code is almost identical with TCP's tcp_rtt_estimator(), since
 * - it has a higher sampling frequency (recommended by RFC 1323),
 * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
 * - it is simple (cf. more complex proposals such as Eifel timer or research
 *   which suggests that the gain should be set according to window size),
 * - in tests it was found to work well with CCID2 [gerrit].
 */
static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
{
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        long m = mrtt ? : 1;

        if (hc->tx_srtt == 0) {
                /* First measurement m */
                hc->tx_srtt = m << 3;
                hc->tx_mdev = m << 1;

                hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
                hc->tx_rttvar   = hc->tx_mdev_max;

                hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
        } else {
                /* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
                m -= (hc->tx_srtt >> 3);
                hc->tx_srtt += m;

                /* Similarly, update scaled mdev with regard to |m| */
                if (m < 0) {
                        m = -m;
                        m -= (hc->tx_mdev >> 2);
                        /*
                         * This neutralises RTO increase when RTT < SRTT - mdev
                         * (see P. Sarolahti, A. Kuznetsov, "Congestion Control
                         * in Linux TCP", USENIX 2002, pp. 49-62).
                         */
                        if (m > 0)
                                m >>= 3;
                } else {
                        m -= (hc->tx_mdev >> 2);
                }
                hc->tx_mdev += m;

                if (hc->tx_mdev > hc->tx_mdev_max) {
                        hc->tx_mdev_max = hc->tx_mdev;
                        if (hc->tx_mdev_max > hc->tx_rttvar)
                                hc->tx_rttvar = hc->tx_mdev_max;
                }

                /*
                 * Decay RTTVAR at most once per flight, exploiting that
                 *  1) pipe <= cwnd <= Sequence_Window = W  (RFC 4340, 7.5.2)
                 *  2) AWL = GSS-W+1 <= GAR <= GSS          (RFC 4340, 7.5.1)
                 * GAR is a useful bound for FlightSize = pipe.
                 * AWL is probably too low here, as it over-estimates pipe.
                 */
                if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
                        if (hc->tx_mdev_max < hc->tx_rttvar)
                                hc->tx_rttvar -= (hc->tx_rttvar -
                                                  hc->tx_mdev_max) >> 2;
                        hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
                        hc->tx_mdev_max = tcp_rto_min(sk);
                }
        }

        /*
         * Set RTO from SRTT and RTTVAR
         * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
         * This agrees with RFC 4341, 5:
         *      "Because DCCP does not retransmit data, DCCP does not require
         *       TCP's recommended minimum timeout of one second".
         */
        hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;

        if (hc->tx_rto > DCCP_RTO_MAX)
                hc->tx_rto = DCCP_RTO_MAX;
}

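/*
 * Note on the fixed-point convention above (added for clarity): as in TCP,
 * tx_srtt stores 8 * SRTT and tx_rttvar stores 4 * RTTVAR, so the assignment
 * tx_rto = (tx_srtt >> 3) + tx_rttvar implements RFC 2988's
 * RTO = SRTT + 4 * RTTVAR without divisions. E.g. SRTT = 100 and RTTVAR = 25
 * (in the same time units) give RTO = 100 + 4 * 25 = 200.
 */
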
static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
                          unsigned int *maxincr)
{
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;

        if (hc->tx_cwnd < dp->dccps_l_seq_win &&
            r_seq_used < dp->dccps_r_seq_win) {
                if (hc->tx_cwnd < hc->tx_ssthresh) {
                        if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
                                hc->tx_cwnd += 1;
                                *maxincr    -= 1;
                                hc->tx_packets_acked = 0;
                        }
                } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
                        hc->tx_cwnd += 1;
                        hc->tx_packets_acked = 0;
                }
        }

        /*
         * Adjust the local sequence window and the Ack Ratio to allow about
         * 5 times the number of packets in the network (RFC 4340, 7.5.2)
         */
        if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
                ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
        else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win / 2)
                ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);

        if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
                ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
        else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win / 2)
                ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);

        /*
         * FIXME: RTT is sampled several times per acknowledgment (for each
         * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
         * This causes the RTT to be over-estimated, since the older entries
         * in the Ack Vector have earlier sending times.
         * The cleanest solution is to not use the ccid2s_sent field at all
         * and instead use DCCP timestamps: this requires changes in other
         * places.
         */
        ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
}

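/*
 * Illustration of the "about 5 times" rule above (added; assumes
 * CCID2_WIN_CHANGE_FACTOR == 5 as defined in ccid2.h): with cwnd = 50 and a
 * local Sequence Window of 200, 50 * 5 = 250 >= 200, so the Sequence Window
 * is doubled to 400; if cwnd later dropped so that 5 * cwnd < 200 / 2, the
 * window would be halved again.
 */
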
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
{
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

        if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
                ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
                return;
        }

        hc->tx_last_cong = ccid2_jiffies32;

        hc->tx_cwnd     = hc->tx_cwnd / 2 ? : 1U;
        hc->tx_ssthresh = max(hc->tx_cwnd, 2U);

        ccid2_check_l_ack_ratio(sk);
}

static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
                                     u8 option, u8 *optval, u8 optlen)
{
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

        switch (option) {
        case DCCPO_ACK_VECTOR_0:
        case DCCPO_ACK_VECTOR_1:
                return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen,
                                              option - DCCPO_ACK_VECTOR_0);
        }
        return 0;
}

static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
        struct dccp_ackvec_parsed *avp;
        u64 ackno, seqno;
        struct ccid2_seq *seqp;
        int done = 0;
        unsigned int maxincr = 0;

        /* check reverse path congestion */
        seqno = DCCP_SKB_CB(skb)->dccpd_seq;

        /* XXX this whole "algorithm" is broken. Need to fix it to keep track
         * of the seqnos of the dupacks so that rpseq and rpdupack are correct
         * -sorbo.
         */
        /* need to bootstrap */
        if (hc->tx_rpdupack == -1) {
                hc->tx_rpdupack = 0;
                hc->tx_rpseq    = seqno;
        } else {
                /* check if packet is consecutive */
                if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
                        hc->tx_rpseq = seqno;
                /* it's a later packet */
                else if (after48(seqno, hc->tx_rpseq)) {
                        hc->tx_rpdupack++;

                        /* check if we got enough dupacks */
                        if (hc->tx_rpdupack >= NUMDUPACK) {
                                hc->tx_rpdupack = -1; /* XXX lame */
                                hc->tx_rpseq    = 0;
#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
                                /*
                                 * FIXME: Ack Congestion Control is broken; in
                                 * the current state instabilities occurred with
                                 * Ack Ratios greater than 1, causing hang-ups
                                 * and long RTO timeouts. This needs to be fixed
                                 * before opening up dynamic changes. -- gerrit
                                 */
                                ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
#endif
                        }
                }
        }

        /* check forward path congestion */
        if (dccp_packet_without_ack(skb))
                return;

        /* still didn't send out new data packets */
        if (hc->tx_seqh == hc->tx_seqt)
                goto done;

        ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
        if (after48(ackno, hc->tx_high_ack))
                hc->tx_high_ack = ackno;

        seqp = hc->tx_seqt;
        while (before48(seqp->ccid2s_seq, ackno)) {
                seqp = seqp->ccid2s_next;
                if (seqp == hc->tx_seqh) {
                        seqp = hc->tx_seqh->ccid2s_prev;
                        break;
                }
        }

        /*
         * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
         * packets per acknowledgement. Rounding up avoids that cwnd is not
         * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
         */
        if (hc->tx_cwnd < hc->tx_ssthresh)
                maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);

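        /*
         * Ack Vector refresher (added for clarity): each vector cell packs a
         * 2-bit state and a 6-bit run length r, and covers the r + 1
         * consecutive seqnos from ackno down to ackno - r. E.g. a RECEIVED
         * cell with r = 3 at ackno = 20 acknowledges seqnos 20 down to 17;
         * the following cell then resumes at SUB48(ackno_end_rl, 1) = 16.
         */
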
        /* go through all ack vectors */
        list_for_each_entry(avp, &hc->tx_av_chunks, node) {
                /* go through this ack vector */
                for (; avp->len--; avp->vec++) {
                        u64 ackno_end_rl = SUB48(ackno,
                                                 dccp_ackvec_runlen(avp->vec));

                        ccid2_pr_debug("ackvec %llu |%u,%u|\n",
                                       (unsigned long long)ackno,
                                       dccp_ackvec_state(avp->vec) >> 6,
                                       dccp_ackvec_runlen(avp->vec));
                        /* if the seqno we are analyzing is larger than the
                         * current ackno, then move towards the tail of our
                         * seqnos.
                         */
                        while (after48(seqp->ccid2s_seq, ackno)) {
                                if (seqp == hc->tx_seqt) {
                                        done = 1;
                                        break;
                                }
                                seqp = seqp->ccid2s_prev;
                        }
                        if (done)
                                break;

                        /* check all seqnos in the range of the vector
                         * run length
                         */
                        while (between48(seqp->ccid2s_seq, ackno_end_rl, ackno)) {
                                const u8 state = dccp_ackvec_state(avp->vec);

                                /* new packet received or marked */
                                if (state != DCCPAV_NOT_RECEIVED &&
                                    !seqp->ccid2s_acked) {
                                        if (state == DCCPAV_ECN_MARKED)
                                                ccid2_congestion_event(sk,
                                                                       seqp);
                                        else
                                                ccid2_new_ack(sk, seqp,
                                                              &maxincr);

                                        seqp->ccid2s_acked = 1;
                                        ccid2_pr_debug("Got ack for %llu\n",
                                                       (unsigned long long)seqp->ccid2s_seq);
                                        hc->tx_pipe--;
                                }
                                if (seqp == hc->tx_seqt) {
                                        done = 1;
                                        break;
                                }
                                seqp = seqp->ccid2s_prev;
                        }
                        if (done)
                                break;

                        ackno = SUB48(ackno_end_rl, 1);
                }
                if (done)
                        break;
        }

        /* The state about what is acked should be correct now.
         * Check for NUMDUPACK.
         */
        seqp = hc->tx_seqt;
        while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
                seqp = seqp->ccid2s_next;
                if (seqp == hc->tx_seqh) {
                        seqp = hc->tx_seqh->ccid2s_prev;
                        break;
                }
        }
        done = 0;
        while (1) {
                if (seqp->ccid2s_acked) {
                        done++;
                        if (done == NUMDUPACK)
                                break;
                }
                if (seqp == hc->tx_seqt)
                        break;
                seqp = seqp->ccid2s_prev;
        }

        /* If there are at least 3 acknowledgements, anything unacknowledged
         * below the last sequence number is considered lost
         */
        if (done == NUMDUPACK) {
                struct ccid2_seq *last_acked = seqp;

                /* check for lost packets */
                while (1) {
                        if (!seqp->ccid2s_acked) {
                                ccid2_pr_debug("Packet lost: %llu\n",
                                               (unsigned long long)seqp->ccid2s_seq);
                                /* XXX need to traverse from tail -> head in
                                 * order to detect multiple congestion events in
                                 * one ack vector.
                                 */
                                ccid2_congestion_event(sk, seqp);
                                hc->tx_pipe--;
                        }
                        if (seqp == hc->tx_seqt)
                                break;
                        seqp = seqp->ccid2s_prev;
                }

                hc->tx_seqt = last_acked;
        }

        /* trim acked packets in tail */
        while (hc->tx_seqt != hc->tx_seqh) {
                if (!hc->tx_seqt->ccid2s_acked)
                        break;

                hc->tx_seqt = hc->tx_seqt->ccid2s_next;
        }

        /* restart RTO timer if not all outstanding data has been acked */
        if (hc->tx_pipe == 0)
                sk_stop_timer(sk, &hc->tx_rtotimer);
        else
                sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
done:
        /* check if incoming Acks allow pending packets to be sent */
        if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
                dccp_tasklet_schedule(sk);
        dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}

static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
        struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
        struct dccp_sock *dp = dccp_sk(sk);
        u32 max_ratio;

        /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
        hc->tx_ssthresh = ~0U;

        /* Use larger initial windows (RFC 4341, section 5). */
        hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
        hc->tx_expected_wnd = hc->tx_cwnd;

        /* Make sure that Ack Ratio is enabled and within bounds. */
        max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
        if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
                dp->dccps_l_ack_ratio = max_ratio;

        /* XXX init ~ to window size... */
        if (ccid2_hc_tx_alloc_seq(hc))
                return -ENOMEM;

        hc->tx_rto       = DCCP_TIMEOUT_INIT;
        hc->tx_rpdupack  = -1;
        hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32;
        hc->tx_cwnd_used = 0;
        hc->sk           = sk;
        timer_setup(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 0);
        INIT_LIST_HEAD(&hc->tx_av_chunks);
        return 0;
}

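/*
 * Sizing note (added; based on the RFC 3390 rule that the helper above
 * implements): rfc3390_bytes_to_packets() yields an initial window of
 * min(4*MSS, max(2*MSS, 4380 bytes)) expressed in packets, e.g. 3 packets
 * for a typical 1460-byte MSS, so ccid2_hc_tx_init() then starts with
 * cwnd = 3 and an Ack Ratio capped at ceil(3/2) = 2.
 */
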
static void ccid2_hc_tx_exit(struct sock *sk)
{
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        int i;

        sk_stop_timer(sk, &hc->tx_rtotimer);

        for (i = 0; i < hc->tx_seqbufc; i++)
                kfree(hc->tx_seqbuf[i]);
        hc->tx_seqbufc = 0;
        dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}

static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
        struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);

        if (!dccp_data_packet(skb))
                return;

        if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) {
                dccp_send_ack(sk);
                hc->rx_num_data_pkts = 0;
        }
}

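/*
 * Behavioural sketch (added for clarity): with a remote Ack Ratio of R = 2,
 * the receive half above sends one Ack for every second data packet, much
 * like TCP delayed Acks; R = 1 acknowledges every data packet.
 */
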
struct ccid_operations ccid2_ops = {
        .ccid_id                  = DCCPC_CCID2,
        .ccid_name                = "TCP-like",
        .ccid_hc_tx_obj_size      = sizeof(struct ccid2_hc_tx_sock),
        .ccid_hc_tx_init          = ccid2_hc_tx_init,
        .ccid_hc_tx_exit          = ccid2_hc_tx_exit,
        .ccid_hc_tx_send_packet   = ccid2_hc_tx_send_packet,
        .ccid_hc_tx_packet_sent   = ccid2_hc_tx_packet_sent,
        .ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
        .ccid_hc_tx_packet_recv   = ccid2_hc_tx_packet_recv,
        .ccid_hc_rx_obj_size      = sizeof(struct ccid2_hc_rx_sock),
        .ccid_hc_rx_packet_recv   = ccid2_hc_rx_packet_recv,
};

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
module_param(ccid2_debug, bool, 0644);
MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
#endif