/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

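/* Lifecycle of a packet in the crypto queues: it is enqueued as UNCRYPTED,
 * the encryption/decryption worker advances it to CRYPTED on success or to
 * DEAD on failure, and the per-peer consumer acts on whichever final state
 * it observes.
 */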
enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

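/* Per-packet crypto metadata, stored in skb->cb while a packet is owned by
 * the queueing machinery: the counter used as the nonce, the keypair that
 * encrypts or decrypts it, its packet_state, the MTU used for padding
 * decisions, and the DS value carried to the outer header.
 */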
struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

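/* A packet is only acceptable if the protocol it claims in skb->protocol
 * matches the IP version actually found in its header, preventing an IPv4
 * packet from being passed off as IPv6 or vice versa.
 */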
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);

	return real_protocol && skb->protocol == real_protocol;
}

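/* Wipes the state an skb accumulated before crossing the tunnel boundary
 * (conntrack, dst, header offsets, scheduling metadata), leaving a clean skb
 * for re-injection. When encapsulating, the flow hash is preserved so the
 * resulting UDP flow keeps the inner flow's steering affinity.
 */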
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;

	skb_scrub_packet(skb, true);
	memset(&skb->headers_start, 0,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

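/* Picks an online CPU deterministically from @id and caches it in
 * @stored_cpu, so repeated calls keep using the same CPU until it goes
 * offline, at which point a replacement is chosen from the online mask.
 */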
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}

/* This function is racy, in the sense that next is unlocked, so it could return
 * the same CPU twice. A race-free version of this would be to instead store an
 * atomic sequence number, do an increment-and-return, and then iterate through
 * every possible CPU until we get to that index -- choose_cpu. However that's
 * a bit slower, and it doesn't seem like this potential race actually
 * introduces any performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}

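/* prev_queue is the bounded multi-producer, single-consumer queue used per
 * peer to keep packets in their original order while the crypto work itself
 * is farmed out to multiple CPUs.
 */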
void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}

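/* Places the packet on both queues that track it: the peer's ordering queue
 * first, then the shared device queue that the crypto workers drain, and
 * kicks a worker on the next online CPU. Returns -ENOSPC if the peer queue
 * is full (the packet was not queued anywhere) and -EPIPE if the device ring
 * is full (the packet already sits on the peer queue).
 */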
static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * consuming it.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}

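/* Transmit-side completion: records the packet's final state (CRYPTED or
 * DEAD) and schedules the peer's transmit worker on its serialized CPU so
 * finished packets leave in order.
 */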
static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}

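/* Receive-side counterpart: records the packet's final state and schedules
 * the peer's NAPI instance, which hands decrypted packets up the stack in
 * order.
 */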
static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */