#ifndef _TCP_DCTCP_H
#define _TCP_DCTCP_H

/* Mirror the receiver-side CE state into the TCP_ECN_DEMAND_CWR flag:
 * while the last segment seen was CE-marked (ce_state == 1), keep
 * demanding CWR from the peer; otherwise stop demanding it.
 */
static inline void dctcp_ece_ack_cwr(struct sock *sk, u32 ce_state)
{
	if (ce_state == 1)
		tcp_sk(sk)->ecn_flags |= TCP_ECN_DEMAND_CWR;
	else
		tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 */
/* Advance the CE state machine on a congestion-event notification.
 *
 * @evt: CA_EVENT_ECN_IS_CE drives the state to 1, anything else to 0.
 * @prior_rcv_nxt: rcv_nxt as of the previous call; updated here so a
 *	delayed ACK emitted on the next transition still acknowledges
 *	data received under the prior CE state.
 * @ce_state: current state (0 or 1); updated to the new state.
 */
static inline void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
					u32 *prior_rcv_nxt, u32 *ce_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 next_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;

	if (next_state != *ce_state) {
		/* CE state changed: an immediate ACK must reflect the new
		 * state. If an ACK is pending on the delayed-ACK timer,
		 * flush it first, stamped with the *prior* CE state and
		 * covering only data received under that state.
		 */
		if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
			dctcp_ece_ack_cwr(sk, *ce_state);
			__tcp_send_ack(sk, *prior_rcv_nxt);
		}
		icsk->icsk_ack.pending |= ICSK_ACK_NOW;
	}
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
	*ce_state = next_state;
	dctcp_ece_ack_cwr(sk, next_state);
}

#endif