// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

static struct kmem_cache *peer_cache;
static atomic64_t peer_counter = ATOMIC64_INIT(0);

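/* Allocate and initialize a new peer for @wg, keyed by @public_key. The
 * caller must hold wg->device_update_lock; on failure an ERR_PTR() is
 * returned (-ENOMEM, both when allocation fails and when the device is
 * already at MAX_PEERS_PER_DEVICE).
 *
 * A minimal caller sketch (hypothetical; the real call site is the netlink
 * configuration path), assuming the device pointer and key buffers are
 * already in hand:
 *
 *	struct wg_peer *peer;
 *
 *	mutex_lock(&wg->device_update_lock);
 *	peer = wg_peer_create(wg, public_key, preshared_key);
 *	mutex_unlock(&wg->device_update_lock);
 *	if (IS_ERR(peer))
 *		return PTR_ERR(peer);
 */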
struct wg_peer *wg_peer_create(struct wg_device *wg,
			       const u8 public_key[NOISE_PUBLIC_KEY_LEN],
			       const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
	struct wg_peer *peer;
	int ret = -ENOMEM;

	lockdep_assert_held(&wg->device_update_lock);

	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
		return ERR_PTR(ret);

	peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
	if (unlikely(!peer))
		return ERR_PTR(ret);
	if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
		goto err;

	peer->device = wg;
	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
				public_key, preshared_key, peer);
	peer->internal_id = atomic64_inc_return(&peer_counter);
	peer->serial_work_cpu = nr_cpumask_bits;
	wg_cookie_init(&peer->latest_cookie);
	wg_timers_init(peer);
	wg_cookie_checker_precompute_peer_keys(peer);
	spin_lock_init(&peer->keypairs.keypair_update_lock);
	INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
	INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
	wg_prev_queue_init(&peer->tx_queue);
	wg_prev_queue_init(&peer->rx_queue);
	rwlock_init(&peer->endpoint_lock);
	kref_init(&peer->refcount);
	skb_queue_head_init(&peer->staged_packet_queue);
	wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
	netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&peer->napi);
	list_add_tail(&peer->peer_list, &wg->peer_list);
	INIT_LIST_HEAD(&peer->allowedips_list);
	wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
	++wg->num_peers;
	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
	return peer;

err:
	kmem_cache_free(peer_cache, peer);
	return ERR_PTR(ret);
}

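/* Take a reference on @peer unless its refcount has already dropped to zero.
 * Must be called under the RCU BH read lock; returns NULL when @peer is NULL
 * or already on its way to destruction.
 */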
struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
			 "Taking peer reference without holding the RCU read lock");
	if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
		return NULL;
	return peer;
}

static void peer_make_dead(struct wg_peer *peer)
{
	/* Remove from configuration-time lookup structures. */
	list_del_init(&peer->peer_list);
	wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
				     &peer->device->device_update_lock);
	wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);

	/* Mark as dead, so that we don't allow jumping contexts after. */
	WRITE_ONCE(peer->is_dead, true);

	/* The caller must now synchronize_net() for this to take effect. */
}

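/* Tear down a peer that has already been marked dead by peer_make_dead() and
 * whose death has been made visible by a subsequent synchronize_net(). This
 * drains every context that may still hold a reference, then drops the peer
 * list's reference with wg_peer_put().
 */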
static void peer_remove_after_dead(struct wg_peer *peer)
{
	WARN_ON(!peer->is_dead);

	/* No more keypairs can be created for this peer, since is_dead protects
	 * add_new_keypair, so we can now destroy existing ones.
	 */
	wg_noise_keypairs_clear(&peer->keypairs);

	/* Destroy all ongoing timers that were in-flight at the beginning of
	 * this function.
	 */
	wg_timers_stop(peer);

	/* The transition between packet encryption/decryption queues isn't
	 * guarded by is_dead, but each reference's life is strictly bounded by
	 * two generations: once for parallel crypto and once for serial
	 * ingestion, so we can simply flush twice, and be sure that we no
	 * longer have references inside these queues.
	 */

	/* a) For encrypt/decrypt. */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.1) For send (but not receive, since that's napi). */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.2.1) For receive (but not send, since that's wq). */
	napi_disable(&peer->napi);
	/* b.2.2) It's now safe to remove the napi struct, which must be done
	 * here from process context.
	 */
	netif_napi_del(&peer->napi);

	/* Ensure any workstructs we own (like transmit_handshake_work or
	 * clear_peer_work) are no longer in use.
	 */
	flush_workqueue(peer->device->handshake_send_wq);

	/* After the above flushes, a peer might still be active in a few
	 * different contexts: 1) from xmit(), before hitting is_dead and
	 * returning, 2) from wg_packet_consume_data(), before hitting is_dead
	 * and returning, 3) from wg_receive_handshake_packet() after a point
	 * where it has processed an incoming handshake packet, but where
	 * all calls to pass it off to timers fail because of is_dead. We won't
	 * have new references in (1) eventually, because we're removed from
	 * allowedips; we won't have new references in (2) eventually, because
	 * wg_index_hashtable_lookup will always return NULL, since we removed
	 * all existing keypairs and no more can be created; we won't have new
	 * references in (3) eventually, because we're removed from the pubkey
	 * hash table, which allows for a maximum of one handshake response,
	 * via the still-uncleared index hashtable entry, but not more than one,
	 * and in wg_cookie_message_consume, the lookup eventually gets a peer
	 * with a refcount of zero, so no new reference is taken.
	 */

	--peer->device->num_peers;
	wg_peer_put(peer);
}

/* We have a separate "remove" function to make sure that all active places
 * where a peer is currently operating will eventually come to an end and not
 * pass their reference onto another context.
 */
void wg_peer_remove(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	lockdep_assert_held(&peer->device->device_update_lock);

	peer_make_dead(peer);
	synchronize_net();
	peer_remove_after_dead(peer);
}

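/* Like wg_peer_remove(), but for all peers on @wg at once: every peer is
 * first marked dead and moved onto a private list, so that a single
 * synchronize_net() grace period covers the whole batch rather than one
 * per peer.
 */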
void wg_peer_remove_all(struct wg_device *wg)
{
	struct wg_peer *peer, *temp;
	LIST_HEAD(dead_peers);

	lockdep_assert_held(&wg->device_update_lock);

	/* Avoid having to traverse individually for each one. */
	wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);

	list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
		peer_make_dead(peer);
		list_add_tail(&peer->peer_list, &dead_peers);
	}
	synchronize_net();
	list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
		peer_remove_after_dead(peer);
}

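/* Destruction happens in two deferred stages: kref_release() runs when the
 * last reference is dropped and unhooks the remaining runtime lookup state,
 * then rcu_release() frees the memory after an RCU grace period, zeroing the
 * whole structure first so that no key material lingers.
 */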
static void rcu_release(struct rcu_head *rcu)
{
	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

	dst_cache_destroy(&peer->endpoint_cache);
	WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));

	/* The final zeroing takes care of clearing any remaining handshake key
	 * material and other potentially sensitive information.
	 */
	memzero_explicit(peer, sizeof(*peer));
	kmem_cache_free(peer_cache, peer);
}

static void kref_release(struct kref *refcount)
{
	struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);

	pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
		 peer->device->dev->name, peer->internal_id,
		 &peer->endpoint.addr);

	/* Remove ourselves from dynamic runtime lookup structures, now that
	 * the last reference is gone.
	 */
	wg_index_hashtable_remove(peer->device->index_hashtable,
				  &peer->handshake.entry);

	/* Remove any lingering packets that didn't have a chance to be
	 * transmitted.
	 */
	wg_packet_purge_staged_packets(peer);

	/* Free the memory used. */
	call_rcu(&peer->rcu, rcu_release);
}

void wg_peer_put(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	kref_put(&peer->refcount, kref_release);
}

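/* Module init/exit: peer allocations are backed by a dedicated slab cache,
 * created once at load time and destroyed at unload.
 */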
int __init wg_peer_init(void)
{
	peer_cache = KMEM_CACHE(wg_peer, 0);
	return peer_cache ? 0 : -ENOMEM;
}

void wg_peer_uninit(void)
{
	kmem_cache_destroy(peer_cache);
}