xref: /OK3568_Linux_fs/kernel/net/ipv4/tcp_fastopen.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);

	kfree_sensitive(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
				lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	ctx->key[0].key[0] = get_unaligned_le64(primary_key);
	ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
	if (backup_key) {
		ctx->key[1].key[0] = get_unaligned_le64(backup_key);
		ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
		ctx->num = 2;
	} else {
		ctx->num = 1;
	}

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}
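
/* Note on the two branches above: with sk != NULL the new keys are installed
 * on that listener's fastopen queue (the path taken by the TCP_FASTOPEN_KEY
 * setsockopt); with sk == NULL they become the per-netns keys (the
 * net.ipv4.tcp_fastopen_key sysctl and the lazy
 * tcp_fastopen_init_key_once() path above).
 */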

int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key)
{
	struct tcp_fastopen_context *ctx;
	int n_keys = 0, i;

	rcu_read_lock();
	if (icsk)
		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
	else
		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		n_keys = tcp_fastopen_context_len(ctx);
		for (i = 0; i < n_keys; i++) {
			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
		}
	}
	rcu_read_unlock();

	return n_keys;
}

static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     const siphash_key_t *key,
					     struct tcp_fastopen_cookie *foc)
{
	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

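	/* Each branch below hashes saddr and daddr with a single siphash()
	 * call over sizeof(saddr) + sizeof(daddr) bytes; this relies on
	 * daddr immediately following saddr in struct iphdr and in
	 * struct ipv6hdr.
	 */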
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
					  sizeof(iph->saddr) +
					  sizeof(iph->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
					  sizeof(ip6h->saddr) +
					  sizeof(ip6h->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
	rcu_read_unlock();
}
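
/* Note the asymmetry that makes key rotation work: generation above uses
 * only the primary key (ctx->key[0]), while validation in
 * tcp_fastopen_cookie_gen_check() below matches against every configured
 * key, so cookies minted under what is now the backup key stay valid until
 * clients refresh them.
 */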

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting.  Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen.  Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

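	/* Advance past the SYN's sequence slot and clear the SYN flag so the
	 * clone is accounted for as a pure data (and possibly FIN) segment.
	 */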
	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

/* Returns 0 if no key matched, 1 for the primary key, 2 for the backup key. */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	rcu_assign_pointer(tp->fastopen_rsk, req);
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

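	/* Two references on the request socket: one held by the child via
	 * tp->fastopen_rsk above, one by the listener's accept queue entry.
	 */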
	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into the listener's accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}
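
/* fastopenq->max_qlen above is set by the TCP_FASTOPEN socket option on the
 * listener socket. A minimal userspace sketch of enabling server-side TFO
 * (illustrative only; error handling elided):
 *
 *	int qlen = 128;	// max pending TFO requests; becomes max_qlen
 *	setsockopt(listen_fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(listen_fd, SOMAXCONN);
 */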

static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
 * may be updated and returned to the client in the SYN-ACK later, e.g. for
 * a Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* Cookie is valid. Create a (full) child socket to
			 * accept the data in SYN before returning a SYN-ACK to
			 * ack the data. If we fail to create the socket, fall
			 * back and ack the ISN only, but include the same
			 * cookie.
			 *
			 * Note: Data-less SYN with valid cookie is allowed to
			 * send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	if (cookie->len > 0)
		return true;
	tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
	return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
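
/* A minimal client-side sketch of the deferred-connect path above
 * (illustrative only; error handling elided):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	write(fd, buf, len);	// SYN goes out here, carrying data
 *				// if a cookie was already cached
 *
 * The older alternative is a single sendto() with MSG_FASTOPEN and no
 * prior connect().
 */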

/*
 * The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause a server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
		return;

	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&net->ipv4.tfo_active_disable_times);

	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout =
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
	unsigned long timeout;
	int tfo_da_times;
	int multiplier;

	if (!tfo_bh_timeout)
		return false;

	tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	if (!tfo_da_times)
		return false;

	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);

	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
		  multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}
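
/* Worked example of the backoff above: with tcp_fastopen_blackhole_timeout
 * set to 3600s, consecutive disable events yield pause periods of 1h, 2h,
 * 4h, ..., capped at 2^6 = 64h from the seventh event onward.
 */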

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connections during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}
609*4882a593Smuzhiyun }
610