// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
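/* An Ack has just gone out: the delayed-Ack timer has served its purpose. */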
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
	return skb_clone(sk->sk_send_head, gfp_any());
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
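		/* Unless overridden below, acknowledge the Greatest Sequence
		 * number Received (GSR). */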
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			fallthrough;
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			fallthrough;

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			fallthrough;
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type = dcb->dccpd_type;
		dh->dccph_sport = inet->inet_sport;
		dh->dccph_dport = inet->inet_dport;
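		/* Data Offset is expressed in 32-bit words (RFC 4340, 5.1). */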
		dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x = 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

/**
 * dccp_determine_ccmps - Find out about CCID-specific packet-size limits
 * @dp: socket to find packet size limits of
 *
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets, as
	 * per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (e.g. Ack Vector which can take up to 255 bytes), it is better
	 * to schedule a separate Ack. Thus we leave headroom for the following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid - Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}

/**
 * dccp_xmit_packet - Send data packet under control of CCID
 * @sk: socket to send data packet on
 *
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = dccp_qpolicy_pop(sk);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
		/*
		 * See 8.1.5 - Handshake Completion.
		 *
		 * For robustness we resend Confirm options until the client has
		 * entered OPEN. During the initial feature negotiation, the MPS
		 * is smaller than usual, reduced by the Change/Confirm options.
		 */
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
	/*
	 * Register this one as sent even if an error occurred. To the remote
	 * end a local packet drop is indistinguishable from network loss, i.e.
	 * any local drop will eventually be reported via receiver feedback.
	 */
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
	 * flag to schedule a Sync. The Sync will automatically incorporate all
	 * currently pending header options, thus clearing the backlog.
	 */
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

/**
 * dccp_flush_write_queue - Drain queue at end of connection
 * @sk: socket to be drained
 * @time_budget: time allowed to drain the queue, in jiffies
 *
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}

void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
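		/* Ask the CCID for a verdict on the packet at the head of
		 * the queue: send now, delay, keep queued, or drop. */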
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}

/**
 * dccp_retransmit_skb - Retransmit Request, Close, or CloseReq packets
 * @sk: socket to perform retransmission on
 *
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in   node-CLOSING  state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

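	/*
	 * Transmit a clone: the original stays on sk_send_head, so it
	 * remains available for further retransmissions.
	 */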
	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb;

	/* sk is marked const to clearly express we don't hold socket lock.
	 * sock_wmalloc() will atomically change sk->sk_wmem_alloc,
	 * it is safe to promote sk to non const.
	 */
	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
			   GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, MAX_DCCP_HEADER);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase GSS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_gss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport = htons(inet_rsk(req)->ir_num);
	dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
	dh->dccph_doff = (dccp_header_size +
			  DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type = DCCP_PKT_RESPONSE;
	dh->dccph_x = 1;
	dccp_hdr_set_seq(dh, dreq->dreq_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @ctl */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type = DCCP_PKT_RESET;
	dh->dccph_sport = rxdh->dccph_dport;
	dh->dccph_dport = rxdh->dccph_sport;
	dh->dccph_doff = dccp_hdr_reset_len / 4;
	dh->dccph_x = 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

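	/* Fill in Reset Data 1..3 with diagnostic information (RFC 4340, 5.6). */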
	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

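	/* Queue the Request on sk_send_head for retransmission and transmit
	 * a clone of it right away. */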
	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

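		/* Allocation failed: fall back to the delayed-Ack timer so
		 * the Ack is retried later instead of being lost. */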
		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		skb = dccp_skb_entail(sk, skb);
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
		 * state can be left. The initial timeout is 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	}
	dccp_transmit_skb(sk, skb);
}