xref: /OK3568_Linux_fs/kernel/drivers/net/wireguard/send.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "timers.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "messages.h"
#include "cookie.h"

#include <linux/uio.h>
#include <linux/inetdevice.h>
#include <linux/socket.h>
#include <net/ip_tunnels.h>
#include <net/udp.h>
#include <net/sock.h>
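
/* Builds and sends a single handshake initiation message to the peer. Calls
 * are rate limited to one per REKEY_TIMEOUT, tracked via last_sent_handshake.
 */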
static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
{
	struct message_handshake_initiation packet;

	if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
				      REKEY_TIMEOUT))
		return; /* This function is rate limited. */

	atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
	net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
			    peer->device->dev->name, peer->internal_id,
			    &peer->endpoint.addr);

	if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
		wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
		wg_timers_any_authenticated_packet_traversal(peer);
		wg_timers_any_authenticated_packet_sent(peer);
		atomic64_set(&peer->last_sent_handshake,
			     ktime_get_coarse_boottime_ns());
		wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
					      HANDSHAKE_DSCP);
		wg_timers_handshake_initiated(peer);
	}
}
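
/* Workqueue handler for peer->transmit_handshake_work: sends the handshake
 * initiation and drops the peer reference taken when the work was queued.
 */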
void wg_packet_handshake_send_worker(struct work_struct *work)
{
	struct wg_peer *peer = container_of(work, struct wg_peer,
					    transmit_handshake_work);

	wg_packet_send_handshake_initiation(peer);
	wg_peer_put(peer);
}
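
/* Queues a handshake initiation on the handshake_send_wq workqueue, unless
 * the rate limit has not yet expired or the peer is being removed.
 */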
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry)
{
	if (!is_retry)
		peer->timer_handshake_attempts = 0;

	rcu_read_lock_bh();
	/* We check last_sent_handshake here in addition to the actual function
	 * we're queueing up, so that we don't queue things if not strictly
	 * necessary:
	 */
	if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
				      REKEY_TIMEOUT) ||
			unlikely(READ_ONCE(peer->is_dead)))
		goto out;

	wg_peer_get(peer);
	/* Queues up wg_packet_handshake_send_worker(), which does a
	 * wg_peer_put(peer) when it is done:
	 */
	if (!queue_work(peer->device->handshake_send_wq,
			&peer->transmit_handshake_work))
		/* If the work was already queued, we want to drop the
		 * extra reference:
		 */
		wg_peer_put(peer);
out:
	rcu_read_unlock_bh();
}
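
/* Builds and sends a handshake response, and on success derives the new
 * transport session keys for the peer.
 */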
void wg_packet_send_handshake_response(struct wg_peer *peer)
{
	struct message_handshake_response packet;

	atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
	net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
			    peer->device->dev->name, peer->internal_id,
			    &peer->endpoint.addr);

	if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
		wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
		if (wg_noise_handshake_begin_session(&peer->handshake,
						     &peer->keypairs)) {
			wg_timers_session_derived(peer);
			wg_timers_any_authenticated_packet_traversal(peer);
			wg_timers_any_authenticated_packet_sent(peer);
			atomic64_set(&peer->last_sent_handshake,
				     ktime_get_coarse_boottime_ns());
			wg_socket_send_buffer_to_peer(peer, &packet,
						      sizeof(packet),
						      HANDSHAKE_DSCP);
		}
	}
}
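
/* Replies to a handshake message that was denied under load with a cookie
 * message, so the sender can prove ownership of its claimed source address.
 */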
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index)
{
	struct message_handshake_cookie packet;

	net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
				wg->dev->name, initiating_skb);
	wg_cookie_message_create(&packet, initiating_skb, sender_index,
				 &wg->cookie_checker);
	wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
					      sizeof(packet));
}
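
/* Triggers a new handshake when the current sending key is nearing its limits,
 * either by message count (REKEY_AFTER_MESSAGES) or, for the initiator, by age
 * (REKEY_AFTER_TIME).
 */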
static void keep_key_fresh(struct wg_peer *peer)
{
	struct noise_keypair *keypair;
	bool send;

	rcu_read_lock_bh();
	keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
	send = keypair && READ_ONCE(keypair->sending.is_valid) &&
	       (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES ||
		(keypair->i_am_the_initiator &&
		 wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
	rcu_read_unlock_bh();

	if (unlikely(send))
		wg_packet_send_queued_handshake_initiation(peer, false);
}
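
/* Returns the number of padding bytes needed to round the packet up to the
 * next MESSAGE_PADDING_MULTIPLE, without exceeding the MTU.
 */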
static unsigned int calculate_skb_padding(struct sk_buff *skb)
{
	unsigned int padded_size, last_unit = skb->len;

	if (unlikely(!PACKET_CB(skb)->mtu))
		return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;

	/* We do this modulo business with the MTU, just in case the networking
	 * layer gives us a packet that's bigger than the MTU. In that case, we
	 * wouldn't want the final subtraction to overflow in the case of the
	 * padded_size being clamped. Fortunately, that's very rarely the case,
	 * so we optimize for that not happening.
	 */
	if (unlikely(last_unit > PACKET_CB(skb)->mtu))
		last_unit %= PACKET_CB(skb)->mtu;

	padded_size = min(PACKET_CB(skb)->mtu,
			  ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
	return padded_size - last_unit;
}
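
/* Encrypts one skb in place for the given keypair: pads the plaintext,
 * prepends the data message header, appends room for the authentication tag,
 * and runs ChaCha20-Poly1305 over the resulting scatterlist.
 */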
static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
	unsigned int padding_len, plaintext_len, trailer_len;
	struct scatterlist sg[MAX_SKB_FRAGS + 8];
	struct message_data *header;
	struct sk_buff *trailer;
	int num_frags;

	/* Force hash calculation before encryption so that flow analysis is
	 * consistent over the inner packet.
	 */
	skb_get_hash(skb);

	/* Calculate lengths. */
	padding_len = calculate_skb_padding(skb);
	trailer_len = padding_len + noise_encrypted_len(0);
	plaintext_len = skb->len + padding_len;

	/* Expand data section to have room for padding and auth tag. */
	num_frags = skb_cow_data(skb, trailer_len, &trailer);
	if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
		return false;

	/* Set the padding to zeros, and make sure it and the auth tag are part
	 * of the skb.
	 */
	memset(skb_tail_pointer(trailer), 0, padding_len);

	/* Expand head section to have room for our header and the network
	 * stack's headers.
	 */
	if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
		return false;

	/* Finalize checksum calculation for the inner packet, if required. */
	if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
		return false;

	/* Only after checksumming can we safely add on the padding at the end
	 * and the header.
	 */
	skb_set_inner_network_header(skb, 0);
	header = (struct message_data *)skb_push(skb, sizeof(*header));
	header->header.type = cpu_to_le32(MESSAGE_DATA);
	header->key_idx = keypair->remote_index;
	header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
	pskb_put(skb, trailer, trailer_len);

	/* Now we can encrypt the scatter-gather segments. */
	sg_init_table(sg, num_frags);
	if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
			 noise_encrypted_len(plaintext_len)) <= 0)
		return false;
	return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
						   PACKET_CB(skb)->nonce,
						   keypair->sending.key);
}
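
/* Sends a keepalive (an encrypted packet with a zero-length payload) if no
 * data is already staged, then flushes the staged queue.
 */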
void wg_packet_send_keepalive(struct wg_peer *peer)
{
	struct sk_buff *skb;

	if (skb_queue_empty(&peer->staged_packet_queue)) {
		skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
				GFP_ATOMIC);
		if (unlikely(!skb))
			return;
		skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
		skb->dev = peer->device->dev;
		PACKET_CB(skb)->mtu = skb->dev->mtu;
		skb_queue_tail(&peer->staged_packet_queue, skb);
		net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
				    peer->device->dev->name, peer->internal_id,
				    &peer->endpoint.addr);
	}

	wg_packet_send_staged_packets(peer);
}
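
/* Called once a list of packets has been encrypted: transmits them to the
 * peer's endpoint, updates the timers, and checks whether a rekey is due.
 */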
static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first)
{
	struct sk_buff *skb, *next;
	bool is_keepalive, data_sent = false;

	wg_timers_any_authenticated_packet_traversal(peer);
	wg_timers_any_authenticated_packet_sent(peer);
	skb_list_walk_safe(first, skb, next) {
		is_keepalive = skb->len == message_data_len(0);
		if (likely(!wg_socket_send_skb_to_peer(peer, skb,
				PACKET_CB(skb)->ds) && !is_keepalive))
			data_sent = true;
	}

	if (likely(data_sent))
		wg_timers_data_sent(peer);

	keep_key_fresh(peer);
}
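
/* Per-peer transmit worker: pops packet bundles off the tx_queue in order once
 * their encryption has completed, sending them or dropping them if encryption
 * failed, and releasing the keypair and peer references.
 */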
void wg_packet_tx_worker(struct work_struct *work)
{
	struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
	struct noise_keypair *keypair;
	enum packet_state state;
	struct sk_buff *first;

	while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
	       (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
		       PACKET_STATE_UNCRYPTED) {
		wg_prev_queue_drop_peeked(&peer->tx_queue);
		keypair = PACKET_CB(first)->keypair;

		if (likely(state == PACKET_STATE_CRYPTED))
			wg_packet_create_data_done(peer, first);
		else
			kfree_skb_list(first);

		wg_noise_keypair_put(keypair, false);
		wg_peer_put(peer);
		if (need_resched())
			cond_resched();
	}
}
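
/* Per-CPU encryption worker: consumes packet bundles from the shared encrypt
 * queue, encrypts each skb, and hands the bundle back to the owning peer's
 * tx queue with the resulting state.
 */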
void wg_packet_encrypt_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct multicore_worker,
						 work)->ptr;
	struct sk_buff *first, *skb, *next;

	while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		enum packet_state state = PACKET_STATE_CRYPTED;

		skb_list_walk_safe(first, skb, next) {
			if (likely(encrypt_packet(skb,
					PACKET_CB(first)->keypair))) {
				wg_reset_packet(skb, true);
			} else {
				state = PACKET_STATE_DEAD;
				break;
			}
		}
		wg_queue_enqueue_per_peer_tx(first, state);
		if (need_resched())
			cond_resched();
	}
}
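
/* Hands a list of packets with assigned nonces to the parallel encryption
 * machinery, cleaning up the references if the peer is dead or queueing fails.
 */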
static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
{
	struct wg_device *wg = peer->device;
	int ret = -EINVAL;

	rcu_read_lock_bh();
	if (unlikely(READ_ONCE(peer->is_dead)))
		goto err;

	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
						   wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
	if (unlikely(ret == -EPIPE))
		wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
err:
	rcu_read_unlock_bh();
	if (likely(!ret || ret == -EPIPE))
		return;
	wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
	wg_peer_put(peer);
	kfree_skb_list(first);
}
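
/* Drops all packets currently waiting in the peer's staged queue, accounting
 * for them as tx_dropped.
 */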
void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
	spin_lock_bh(&peer->staged_packet_queue.lock);
	peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
	__skb_queue_purge(&peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);
}
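
/* Main transmit entry point for a peer: takes ownership of the staged packets,
 * assigns nonces under the current keypair, and dispatches them for
 * encryption; if no valid key is available, the packets are re-staged and a
 * handshake is initiated instead.
 */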
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
	struct noise_keypair *keypair;
	struct sk_buff_head packets;
	struct sk_buff *skb;

	/* Steal the current queue into our local one. */
	__skb_queue_head_init(&packets);
	spin_lock_bh(&peer->staged_packet_queue.lock);
	skb_queue_splice_init(&peer->staged_packet_queue, &packets);
	spin_unlock_bh(&peer->staged_packet_queue.lock);
	if (unlikely(skb_queue_empty(&packets)))
		return;

	/* First we make sure we have a valid reference to a valid key. */
	rcu_read_lock_bh();
	keypair = wg_noise_keypair_get(
		rcu_dereference_bh(peer->keypairs.current_keypair));
	rcu_read_unlock_bh();
	if (unlikely(!keypair))
		goto out_nokey;
	if (unlikely(!READ_ONCE(keypair->sending.is_valid)))
		goto out_nokey;
	if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
					      REJECT_AFTER_TIME)))
		goto out_invalid;

	/* After we know we have a somewhat valid key, we now try to assign
	 * nonces to all of the packets in the queue. If we can't assign nonces
	 * for all of them, we just consider it a failure and wait for the next
	 * handshake.
	 */
	skb_queue_walk(&packets, skb) {
		/* 0 for no outer TOS: no leak. TODO: at some later point, we
		 * might consider using flowi->tos as outer instead.
		 */
		PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
		PACKET_CB(skb)->nonce =
				atomic64_inc_return(&keypair->sending_counter) - 1;
		if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
			goto out_invalid;
	}

	packets.prev->next = NULL;
	wg_peer_get(keypair->entry.peer);
	PACKET_CB(packets.next)->keypair = keypair;
	wg_packet_create_data(peer, packets.next);
	return;

out_invalid:
	WRITE_ONCE(keypair->sending.is_valid, false);
out_nokey:
	wg_noise_keypair_put(keypair, false);

	/* We orphan the packets if we're waiting on a handshake, so that they
	 * don't block a socket's pool.
	 */
	skb_queue_walk(&packets, skb)
		skb_orphan(skb);
	/* Then we put them back on the top of the queue. We're not too
	 * concerned about accidentally getting things a little out of order if
	 * packets are being added really fast, because this queue is for before
	 * packets can even be sent and it's small anyway.
	 */
	spin_lock_bh(&peer->staged_packet_queue.lock);
	skb_queue_splice(&packets, &peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);

	/* If we're exiting because there's something wrong with the key, it
	 * means we should initiate a new handshake.
	 */
	wg_packet_send_queued_handshake_initiation(peer, false);
}