1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * net/dccp/input.c
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * An implementation of the DCCP protocol
6*4882a593Smuzhiyun * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/dccp.h>
10*4882a593Smuzhiyun #include <linux/skbuff.h>
11*4882a593Smuzhiyun #include <linux/slab.h>
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <net/sock.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include "ackvec.h"
16*4882a593Smuzhiyun #include "ccid.h"
17*4882a593Smuzhiyun #include "dccp.h"
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun /* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
20*4882a593Smuzhiyun int sysctl_dccp_sync_ratelimit __read_mostly = HZ / 8;
21*4882a593Smuzhiyun
/*
 * Strip the DCCP header from @skb, append the remaining payload to the
 * socket's receive queue, account the buffer to @sk, and wake up readers.
 */
static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
	/* dccph_doff counts the header length in 32-bit words */
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk);
}
29*4882a593Smuzhiyun
/*
 * Handle a closing segment (Close/CloseReq/Reset): mark the connection as
 * finished and queue @skb so that a blocked dccp_recvmsg() exits its loop.
 */
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * On receiving Close/CloseReq, both RD/WR shutdown are performed.
	 * RFC 4340, 8.3 says that we MAY send further Data/DataAcks after
	 * receiving the closing segment, but there is no guarantee that such
	 * data will be processed at all.
	 */
	sk->sk_shutdown = SHUTDOWN_MASK;
	sock_set_flag(sk, SOCK_DONE);
	dccp_enqueue_skb(sk, skb);
}
42*4882a593Smuzhiyun
/*
 * Process a received Close packet according to the current socket state.
 *
 * Returns 1 if @skb was queued on the receive queue (caller must not free
 * it), 0 otherwise.
 */
static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	switch (sk->sk_state) {
	/*
	 * We ignore Close when received in one of the following states:
	 *  - CLOSED		(may be a late or duplicate packet)
	 *  - PASSIVE_CLOSEREQ	(the peer has sent a CloseReq earlier)
	 *  - RESPOND		(already handled by dccp_check_req)
	 */
	case DCCP_CLOSING:
		/*
		 * Simultaneous-close: receiving a Close after sending one. This
		 * can happen if both client and server perform active-close and
		 * will result in an endless ping-pong of crossing and retrans-
		 * mitted Close packets, which only terminates when one of the
		 * nodes times out (min. 64 seconds). Quicker convergence can be
		 * achieved when one of the nodes acts as tie-breaker.
		 * This is ok as both ends are done with data transfer and each
		 * end is just waiting for the other to acknowledge termination.
		 */
		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
			break;
		fallthrough;
	case DCCP_REQUESTING:
	case DCCP_ACTIVE_CLOSEREQ:
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_done(sk);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
		fallthrough;
	case DCCP_PASSIVE_CLOSE:
		/*
		 * Retransmitted Close: we have already enqueued the first one.
		 * Only (re-)notify waiters of the hangup condition.
		 */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
88*4882a593Smuzhiyun
/*
 * Process a received CloseReq packet. Only a client may act on a CloseReq
 * (RFC 4340, Step 7); a server answers with a Sync instead.
 *
 * Returns 1 if @skb was queued on the receive queue (caller must not free
 * it), 0 otherwise.
 */
static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return queued;
	}

	/* Step 13: process relevant Client states < CLOSEREQ */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		dccp_send_close(sk, 0);
		dccp_set_state(sk, DCCP_CLOSING);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
		fallthrough;
	case DCCP_PASSIVE_CLOSEREQ:
		/* Retransmitted CloseReq: the first one already queued a fin */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
122*4882a593Smuzhiyun
/*
 * Map a DCCP Reset code (RFC 4340, 5.6) to the errno value reported to the
 * socket user. Benign codes (normal termination, nothing known) and any
 * out-of-range or unassigned code map to 0.
 */
static u16 dccp_reset_code_convert(const u8 code)
{
	switch (code) {
	case DCCP_RESET_CODE_ABORTED:
		return ECONNRESET;
	case DCCP_RESET_CODE_NO_CONNECTION:
	case DCCP_RESET_CODE_CONNECTION_REFUSED:
		return ECONNREFUSED;
	case DCCP_RESET_CODE_TOO_BUSY:
		return EUSERS;
	case DCCP_RESET_CODE_AGGRESSION_PENALTY:
		return EDQUOT;
	case DCCP_RESET_CODE_PACKET_ERROR:
		return ENOMSG;
	case DCCP_RESET_CODE_BAD_INIT_COOKIE:
		return EBADR;
	case DCCP_RESET_CODE_BAD_SERVICE_CODE:
		return EBADRQC;
	case DCCP_RESET_CODE_OPTION_ERROR:
		return EILSEQ;
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		return EOPNOTSUPP;
	default:
		/* incl. CLOSED and UNSPECIFIED: no error to report */
		return 0;
	}
}
144*4882a593Smuzhiyun
/*
 * Step 9: Process a received Reset — report the mapped errno to the user,
 * queue an EOF marker, wake error-pollers, and move the socket to TIMEWAIT.
 */
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
	u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);

	sk->sk_err = err;

	/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
	dccp_fin(sk, skb);

	/* Only wake async waiters if there is an error and someone listening */
	if (err && !sock_flag(sk, SOCK_DEAD))
		sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	/* S.state := TIMEWAIT; Set TIMEWAIT timer (RFC 4340, 8.5 Step 9) */
	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}
158*4882a593Smuzhiyun
dccp_handle_ackvec_processing(struct sock * sk,struct sk_buff * skb)159*4882a593Smuzhiyun static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun if (av == NULL)
164*4882a593Smuzhiyun return;
165*4882a593Smuzhiyun if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
166*4882a593Smuzhiyun dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
167*4882a593Smuzhiyun dccp_ackvec_input(av, skb);
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun
dccp_deliver_input_to_ccids(struct sock * sk,struct sk_buff * skb)170*4882a593Smuzhiyun static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun const struct dccp_sock *dp = dccp_sk(sk);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun /* Don't deliver to RX CCID when node has shut down read end. */
175*4882a593Smuzhiyun if (!(sk->sk_shutdown & RCV_SHUTDOWN))
176*4882a593Smuzhiyun ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
177*4882a593Smuzhiyun /*
178*4882a593Smuzhiyun * Until the TX queue has been drained, we can not honour SHUT_WR, since
179*4882a593Smuzhiyun * we need received feedback as input to adjust congestion control.
180*4882a593Smuzhiyun */
181*4882a593Smuzhiyun if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
182*4882a593Smuzhiyun ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun
/*
 * Validate the sequence/acknowledgement numbers of an incoming packet
 * against the current send/receive windows (RFC 4340, 8.5, Steps 5 and 6).
 *
 * Returns 0 if the packet is sequence-valid (window state has been updated),
 * or -1 if the packet must be dropped (a rate-limited Sync may have been
 * sent in reply).
 */
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
			ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
		    dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
			dccp_update_gsr(sk, seqno);
		else
			return -1;
	}

	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	/* Connection-teardown packets get tighter windows (see above) */
	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = ADD48(dp->dccps_gsr, 1);
		lawl = dp->dccps_gar;
	}

	if (between48(seqno, lswl, dp->dccps_swh) &&
	    (ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(ackno, lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, seqno);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
		    after48(ackno, dp->dccps_gar))
			dp->dccps_gar = ackno;
	} else {
		unsigned long now = jiffies;
		/*
		 *   Step 6: Check sequence numbers
		 *      Otherwise,
		 *	 If P.type == Reset,
		 *	    Send Sync packet acknowledging S.GSR
		 *	 Otherwise,
		 *	    Send Sync packet acknowledging P.seqno
		 *      Drop packet and return
		 *
		 *   These Syncs are rate-limited as per RFC 4340, 7.5.4:
		 *   at most one Sync per sysctl_dccp_sync_ratelimit jiffies
		 *   (default HZ/8, i.e. 8 Syncs per second at most).
		 */
		if (time_before(now, (dp->dccps_rate_last +
				      sysctl_dccp_sync_ratelimit)))
			return -1;

		DCCP_WARN("Step 6 failed for %s packet, "
			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			  "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
			  "sending SYNC...\n",  dccp_packet_name(dh->dccph_type),
			  (unsigned long long) lswl, (unsigned long long) seqno,
			  (unsigned long long) dp->dccps_swh,
			  (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
							      : "exists",
			  (unsigned long long) lawl, (unsigned long long) ackno,
			  (unsigned long long) dp->dccps_awh);

		dp->dccps_rate_last = now;

		/* A sequence-invalid Reset is answered by ack'ing S.GSR */
		if (dh->dccph_type == DCCP_PKT_RESET)
			seqno = dp->dccps_gsr;
		dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}
282*4882a593Smuzhiyun
/*
 * Per-packet-type processing for a (quasi-)established connection, after
 * sequence-number and option checks have passed.
 *
 * Always consumes @skb — either by queueing it on the receive queue (Data /
 * DataAck / closing segments) or by freeing it — and returns 0.
 */
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
				  const struct dccp_hdr *dh, const unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when
		 * - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening"
		 * - sk_receive_queue is full, use Code 2, "Receive Buffer"
		 */
		dccp_enqueue_skb(sk, skb);
		return 0;
	case DCCP_PKT_ACK:
		/* Pure Acks carry no payload for the application */
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *   Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_rcv_reset(sk, skb);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		if (dccp_rcv_closereq(sk, skb))
			return 0;	/* skb was queued */
		goto discard;
	case DCCP_PKT_CLOSE:
		if (dccp_rcv_close(sk, skb))
			return 0;	/* skb was queued */
		goto discard;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *    or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *    or (S.state == RESPOND and P.type == Data),
		 *  Send Sync packet acknowledging P.seqno
		 *  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		/* Only Sync if the packet is at or past the old S.OSR */
		if (dccp_delta_seqno(dp->dccps_osr,
				     DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/*
		 * From RFC 4340, sec. 5.7
		 *
		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
		 * MAY have non-zero-length application data areas, whose
		 * contents receivers MUST ignore.
		 */
		goto discard;
	}

	/* Unhandled packet type: count it as an input error, then drop */
	DCCP_INC_STATS(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}
363*4882a593Smuzhiyun
dccp_rcv_established(struct sock * sk,struct sk_buff * skb,const struct dccp_hdr * dh,const unsigned int len)364*4882a593Smuzhiyun int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
365*4882a593Smuzhiyun const struct dccp_hdr *dh, const unsigned int len)
366*4882a593Smuzhiyun {
367*4882a593Smuzhiyun if (dccp_check_seqno(sk, skb))
368*4882a593Smuzhiyun goto discard;
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun if (dccp_parse_options(sk, NULL, skb))
371*4882a593Smuzhiyun return 1;
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun dccp_handle_ackvec_processing(sk, skb);
374*4882a593Smuzhiyun dccp_deliver_input_to_ccids(sk, skb);
375*4882a593Smuzhiyun
376*4882a593Smuzhiyun return __dccp_rcv_established(sk, skb, dh, len);
377*4882a593Smuzhiyun discard:
378*4882a593Smuzhiyun __kfree_skb(skb);
379*4882a593Smuzhiyun return 0;
380*4882a593Smuzhiyun }
381*4882a593Smuzhiyun
382*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dccp_rcv_established);
383*4882a593Smuzhiyun
/*
 * Handle packets arriving while the client is in REQUEST state, i.e. while
 * waiting for the server's Response (RFC 4340, 8.5, Steps 4 and 10).
 *
 * Return values:
 *   0  — @skb consumed (Ack deferred, handshake continues);
 *  -1  — Response accepted and an Ack was sent, @skb not queued;
 *   1  — invalid packet or activation failure; caller sends a Reset using
 *	  the reset code stored in DCCP_SKB_CB(skb).
 */
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned int len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		long tstamp = dccp_timestamp();

		/* The Response must acknowledge our Request's seqno window */
		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu\n",
				      (unsigned long long)dp->dccps_awl,
			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		/*
		 * If option processing (Step 8) failed, return 1 here so that
		 * dccp_v4_do_rcv() sends a Reset. The Reset code depends on
		 * the option type and is set in dccp_parse_options().
		 */
		if (dccp_parse_options(sk, NULL, skb))
			return 1;

		/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
		if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
			    dp->dccps_options_received.dccpor_timestamp_echo));

		/* Stop the REQUEST timer and free the retransmit Request */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		WARN_ON(sk->sk_send_head == NULL);
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		/*
		 * Set ISR, GSR from packet. ISS was set in dccp_v{4,6}_connect
		 * and GSS in dccp_transmit_skb(). Setting AWL/AWH and SWL/SWH
		 * is done as part of activating the feature values below, since
		 * these settings depend on the local/remote Sequence Window
		 * features, which were undefined or not confirmed until now.
		 */
		dp->dccps_gsr = dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;

		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state. PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/*
		 * If feature negotiation was successful, activate features now;
		 * an activation failure means that this host could not activate
		 * one ore more features (e.g. insufficient memory), which would
		 * leave at least one feature in an undefined state.
		 */
		if (dccp_feat_activate_values(sk, &dp->dccps_featneg))
			goto unable_to_proceed;

		/* Make sure socket is routed, for correct metrics. */
		icsk->icsk_af_ops->rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
		}

		if (sk->sk_write_pending || inet_csk_in_pingpong_mode(sk) ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can as well do a similar trick, its
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	/* dccp_v4_do_rcv will send a reset */
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
	return 1;

unable_to_proceed:
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_ABORTED;
	/*
	 * We mark this socket as no longer usable, so that the loop in
	 * dccp_sendmsg() terminates and the application gets notified.
	 */
	dccp_set_state(sk, DCCP_CLOSED);
	sk->sk_err = ECOMM;
	return 1;
}
516*4882a593Smuzhiyun
/*
 * Handle packets arriving in RESPOND (server) or PARTOPEN (client) state,
 * completing the handshake and moving the socket to OPEN where appropriate.
 *
 * Returns 1 if @skb was queued on the receive queue (by
 * __dccp_rcv_established), 0 otherwise.
 */
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		/* Reset terminates the handshake; cancel the delayed-Ack timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		/* In RESPOND state, Data before an Ack is ignored (Step 7) */
		if (sk->sk_state == DCCP_RESPOND)
			break;
		fallthrough;
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc, for now were not clearing it, sending an extra
		 * ACK when there is nothing else to do in DELACK is not a big
		 * deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
		if (likely(sample)) {
			long delta = dccp_timestamp() - sample;

			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
		}

		/* First data-bearing seqno from the peer: record S.OSR */
		dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by __dccp_rcv_established) */
		}
		break;
	}

	return queued;
}
570*4882a593Smuzhiyun
dccp_rcv_state_process(struct sock * sk,struct sk_buff * skb,struct dccp_hdr * dh,unsigned int len)571*4882a593Smuzhiyun int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
572*4882a593Smuzhiyun struct dccp_hdr *dh, unsigned int len)
573*4882a593Smuzhiyun {
574*4882a593Smuzhiyun struct dccp_sock *dp = dccp_sk(sk);
575*4882a593Smuzhiyun struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
576*4882a593Smuzhiyun const int old_state = sk->sk_state;
577*4882a593Smuzhiyun bool acceptable;
578*4882a593Smuzhiyun int queued = 0;
579*4882a593Smuzhiyun
580*4882a593Smuzhiyun /*
581*4882a593Smuzhiyun * Step 3: Process LISTEN state
582*4882a593Smuzhiyun *
583*4882a593Smuzhiyun * If S.state == LISTEN,
584*4882a593Smuzhiyun * If P.type == Request or P contains a valid Init Cookie option,
585*4882a593Smuzhiyun * (* Must scan the packet's options to check for Init
586*4882a593Smuzhiyun * Cookies. Only Init Cookies are processed here,
587*4882a593Smuzhiyun * however; other options are processed in Step 8. This
588*4882a593Smuzhiyun * scan need only be performed if the endpoint uses Init
589*4882a593Smuzhiyun * Cookies *)
590*4882a593Smuzhiyun * (* Generate a new socket and switch to that socket *)
591*4882a593Smuzhiyun * Set S := new socket for this port pair
592*4882a593Smuzhiyun * S.state = RESPOND
593*4882a593Smuzhiyun * Choose S.ISS (initial seqno) or set from Init Cookies
594*4882a593Smuzhiyun * Initialize S.GAR := S.ISS
595*4882a593Smuzhiyun * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
596*4882a593Smuzhiyun * Cookies Continue with S.state == RESPOND
597*4882a593Smuzhiyun * (* A Response packet will be generated in Step 11 *)
598*4882a593Smuzhiyun * Otherwise,
599*4882a593Smuzhiyun * Generate Reset(No Connection) unless P.type == Reset
600*4882a593Smuzhiyun * Drop packet and return
601*4882a593Smuzhiyun */
602*4882a593Smuzhiyun if (sk->sk_state == DCCP_LISTEN) {
603*4882a593Smuzhiyun if (dh->dccph_type == DCCP_PKT_REQUEST) {
604*4882a593Smuzhiyun /* It is possible that we process SYN packets from backlog,
605*4882a593Smuzhiyun * so we need to make sure to disable BH and RCU right there.
606*4882a593Smuzhiyun */
607*4882a593Smuzhiyun rcu_read_lock();
608*4882a593Smuzhiyun local_bh_disable();
609*4882a593Smuzhiyun acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
610*4882a593Smuzhiyun local_bh_enable();
611*4882a593Smuzhiyun rcu_read_unlock();
612*4882a593Smuzhiyun if (!acceptable)
613*4882a593Smuzhiyun return 1;
614*4882a593Smuzhiyun consume_skb(skb);
615*4882a593Smuzhiyun return 0;
616*4882a593Smuzhiyun }
617*4882a593Smuzhiyun if (dh->dccph_type == DCCP_PKT_RESET)
618*4882a593Smuzhiyun goto discard;
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun /* Caller (dccp_v4_do_rcv) will send Reset */
621*4882a593Smuzhiyun dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
622*4882a593Smuzhiyun return 1;
623*4882a593Smuzhiyun } else if (sk->sk_state == DCCP_CLOSED) {
624*4882a593Smuzhiyun dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
625*4882a593Smuzhiyun return 1;
626*4882a593Smuzhiyun }
627*4882a593Smuzhiyun
628*4882a593Smuzhiyun /* Step 6: Check sequence numbers (omitted in LISTEN/REQUEST state) */
629*4882a593Smuzhiyun if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
630*4882a593Smuzhiyun goto discard;
631*4882a593Smuzhiyun
632*4882a593Smuzhiyun /*
633*4882a593Smuzhiyun * Step 7: Check for unexpected packet types
634*4882a593Smuzhiyun * If (S.is_server and P.type == Response)
635*4882a593Smuzhiyun * or (S.is_client and P.type == Request)
636*4882a593Smuzhiyun * or (S.state == RESPOND and P.type == Data),
637*4882a593Smuzhiyun * Send Sync packet acknowledging P.seqno
638*4882a593Smuzhiyun * Drop packet and return
639*4882a593Smuzhiyun */
640*4882a593Smuzhiyun if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
641*4882a593Smuzhiyun dh->dccph_type == DCCP_PKT_RESPONSE) ||
642*4882a593Smuzhiyun (dp->dccps_role == DCCP_ROLE_CLIENT &&
643*4882a593Smuzhiyun dh->dccph_type == DCCP_PKT_REQUEST) ||
644*4882a593Smuzhiyun (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
645*4882a593Smuzhiyun dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
646*4882a593Smuzhiyun goto discard;
647*4882a593Smuzhiyun }
648*4882a593Smuzhiyun
649*4882a593Smuzhiyun /* Step 8: Process options */
650*4882a593Smuzhiyun if (dccp_parse_options(sk, NULL, skb))
651*4882a593Smuzhiyun return 1;
652*4882a593Smuzhiyun
653*4882a593Smuzhiyun /*
654*4882a593Smuzhiyun * Step 9: Process Reset
655*4882a593Smuzhiyun * If P.type == Reset,
656*4882a593Smuzhiyun * Tear down connection
657*4882a593Smuzhiyun * S.state := TIMEWAIT
658*4882a593Smuzhiyun * Set TIMEWAIT timer
659*4882a593Smuzhiyun * Drop packet and return
660*4882a593Smuzhiyun */
661*4882a593Smuzhiyun if (dh->dccph_type == DCCP_PKT_RESET) {
662*4882a593Smuzhiyun dccp_rcv_reset(sk, skb);
663*4882a593Smuzhiyun return 0;
664*4882a593Smuzhiyun } else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) { /* Step 13 */
665*4882a593Smuzhiyun if (dccp_rcv_closereq(sk, skb))
666*4882a593Smuzhiyun return 0;
667*4882a593Smuzhiyun goto discard;
668*4882a593Smuzhiyun } else if (dh->dccph_type == DCCP_PKT_CLOSE) { /* Step 14 */
669*4882a593Smuzhiyun if (dccp_rcv_close(sk, skb))
670*4882a593Smuzhiyun return 0;
671*4882a593Smuzhiyun goto discard;
672*4882a593Smuzhiyun }
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun switch (sk->sk_state) {
675*4882a593Smuzhiyun case DCCP_REQUESTING:
676*4882a593Smuzhiyun queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
677*4882a593Smuzhiyun if (queued >= 0)
678*4882a593Smuzhiyun return queued;
679*4882a593Smuzhiyun
680*4882a593Smuzhiyun __kfree_skb(skb);
681*4882a593Smuzhiyun return 0;
682*4882a593Smuzhiyun
683*4882a593Smuzhiyun case DCCP_PARTOPEN:
684*4882a593Smuzhiyun /* Step 8: if using Ack Vectors, mark packet acknowledgeable */
685*4882a593Smuzhiyun dccp_handle_ackvec_processing(sk, skb);
686*4882a593Smuzhiyun dccp_deliver_input_to_ccids(sk, skb);
687*4882a593Smuzhiyun fallthrough;
688*4882a593Smuzhiyun case DCCP_RESPOND:
689*4882a593Smuzhiyun queued = dccp_rcv_respond_partopen_state_process(sk, skb,
690*4882a593Smuzhiyun dh, len);
691*4882a593Smuzhiyun break;
692*4882a593Smuzhiyun }
693*4882a593Smuzhiyun
694*4882a593Smuzhiyun if (dh->dccph_type == DCCP_PKT_ACK ||
695*4882a593Smuzhiyun dh->dccph_type == DCCP_PKT_DATAACK) {
696*4882a593Smuzhiyun switch (old_state) {
697*4882a593Smuzhiyun case DCCP_PARTOPEN:
698*4882a593Smuzhiyun sk->sk_state_change(sk);
699*4882a593Smuzhiyun sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
700*4882a593Smuzhiyun break;
701*4882a593Smuzhiyun }
702*4882a593Smuzhiyun } else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
703*4882a593Smuzhiyun dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
704*4882a593Smuzhiyun goto discard;
705*4882a593Smuzhiyun }
706*4882a593Smuzhiyun
707*4882a593Smuzhiyun if (!queued) {
708*4882a593Smuzhiyun discard:
709*4882a593Smuzhiyun __kfree_skb(skb);
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun return 0;
712*4882a593Smuzhiyun }
713*4882a593Smuzhiyun
714*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
715*4882a593Smuzhiyun
716*4882a593Smuzhiyun /**
717*4882a593Smuzhiyun * dccp_sample_rtt - Validate and finalise computation of RTT sample
718*4882a593Smuzhiyun * @sk: socket structure
719*4882a593Smuzhiyun * @delta: number of microseconds between packet and acknowledgment
720*4882a593Smuzhiyun *
721*4882a593Smuzhiyun * The routine is kept generic to work in different contexts. It should be
722*4882a593Smuzhiyun * called immediately when the ACK used for the RTT sample arrives.
723*4882a593Smuzhiyun */
dccp_sample_rtt(struct sock * sk,long delta)724*4882a593Smuzhiyun u32 dccp_sample_rtt(struct sock *sk, long delta)
725*4882a593Smuzhiyun {
726*4882a593Smuzhiyun /* dccpor_elapsed_time is either zeroed out or set and > 0 */
727*4882a593Smuzhiyun delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun if (unlikely(delta <= 0)) {
730*4882a593Smuzhiyun DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
731*4882a593Smuzhiyun return DCCP_SANE_RTT_MIN;
732*4882a593Smuzhiyun }
733*4882a593Smuzhiyun if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
734*4882a593Smuzhiyun DCCP_WARN("RTT sample %ld too large, using max\n", delta);
735*4882a593Smuzhiyun return DCCP_SANE_RTT_MAX;
736*4882a593Smuzhiyun }
737*4882a593Smuzhiyun
738*4882a593Smuzhiyun return delta;
739*4882a593Smuzhiyun }
740