// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peerlookup.h"
#include "peer.h"
#include "noise.h"

static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
					const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	/* siphash gives us a secure 64bit number based on a random key. Since
	 * the bits are uniformly distributed, we can then mask off to get the
	 * bits we need.
	 */
	const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);

	return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}

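/* Allocates the hashtable used to map static public keys to peers: a fresh
 * random siphash key is generated so bucket positions are unpredictable, the
 * buckets are initialized empty, and the mutex guarding writers is set up.
 * Returns NULL if the allocation fails.
 */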
struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
	struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	get_random_bytes(&table->key, sizeof(table->key));
	hash_init(table->hashtable);
	mutex_init(&table->lock);
	return table;
}

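/* Inserts @peer into the bucket selected by its static public key. Writers
 * serialize on table->lock; readers walk the bucket under RCU.
 */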
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
			     struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_add_head_rcu(&peer->pubkey_hash,
			   pubkey_bucket(table, peer->handshake.remote_static));
	mutex_unlock(&table->lock);
}

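/* Unhashes @peer from the public-key table. Safe to call even if the peer was
 * never added, since hlist_del_init_rcu() tolerates an unhashed node.
 */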
void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
				struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_del_init_rcu(&peer->pubkey_hash);
	mutex_unlock(&table->lock);
}

/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
			   const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wg_peer *iter_peer, *peer = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
				    pubkey_hash) {
		if (!memcmp(pubkey, iter_peer->handshake.remote_static,
			    NOISE_PUBLIC_KEY_LEN)) {
			peer = iter_peer;
			break;
		}
	}
	peer = wg_peer_get_maybe_zero(peer);
	rcu_read_unlock_bh();
	return peer;
}

static struct hlist_head *index_bucket(struct index_hashtable *table,
				       const __le32 index)
{
	/* Since the indices are random and thus all bits are uniformly
	 * distributed, we can find its bucket simply by masking.
	 */
	return &table->hashtable[(__force u32)index &
				 (HASH_SIZE(table->hashtable) - 1)];
}

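/* Allocates the hashtable used to map handshake/keypair indices to entries.
 * No hash key is needed here, because the indices themselves are random.
 * Returns NULL if the allocation fails.
 */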
struct index_hashtable *wg_index_hashtable_alloc(void)
{
	struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	hash_init(table->hashtable);
	spin_lock_init(&table->lock);
	return table;
}

/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
 * is another thing to consider moving forward.
 */

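/* Picks a random index not currently in use, assigns it to @entry, and
 * publishes the entry in the table (unhashing the entry first if it was
 * already hashed). Returns the chosen index.
 */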
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
				 struct index_hashtable_entry *entry)
{
	struct index_hashtable_entry *existing_entry;

	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);

	rcu_read_lock_bh();

search_unused_slot:
	/* First we try to find an unused slot, randomly, while unlocked. */
	entry->index = (__force __le32)get_random_u32();
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index)
			/* If it's already in use, we continue searching. */
			goto search_unused_slot;
	}

	/* Once we've found an unused slot, we lock it, and then double-check
	 * that nobody else stole it from us.
	 */
	spin_lock_bh(&table->lock);
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index) {
			spin_unlock_bh(&table->lock);
			/* If it was stolen, we start over. */
			goto search_unused_slot;
		}
	}
	/* Otherwise, we know we have it exclusively (since we're locked),
	 * so we insert.
	 */
	hlist_add_head_rcu(&entry->index_hash,
			   index_bucket(table, entry->index));
	spin_unlock_bh(&table->lock);

	rcu_read_unlock_bh();

	return entry->index;
}

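/* Hands @old's slot (and index) over to @new in a single RCU-visible step.
 * Returns false, without inserting @new, if @old had already been removed
 * from the table.
 */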
bool wg_index_hashtable_replace(struct index_hashtable *table,
				struct index_hashtable_entry *old,
				struct index_hashtable_entry *new)
{
	bool ret;

	spin_lock_bh(&table->lock);
	ret = !hlist_unhashed(&old->index_hash);
	if (unlikely(!ret))
		goto out;

	new->index = old->index;
	hlist_replace_rcu(&old->index_hash, &new->index_hash);

	/* Calling init here NULLs out index_hash, and in fact after this
	 * function returns, it's theoretically possible for this to get
	 * reinserted elsewhere. That means the RCU lookup below might either
	 * terminate early or jump between buckets, in which case the packet
	 * simply gets dropped, which isn't terrible.
	 */
	INIT_HLIST_NODE(&old->index_hash);
out:
	spin_unlock_bh(&table->lock);
	return ret;
}

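/* Unhashes @entry from the index table. Concurrent RCU readers that already
 * found the entry may keep using it, since nothing is freed here.
 */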
void wg_index_hashtable_remove(struct index_hashtable *table,
			       struct index_hashtable_entry *entry)
{
	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);
}

/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
			  const enum index_hashtable_type type_mask,
			  const __le32 index, struct wg_peer **peer)
{
	struct index_hashtable_entry *iter_entry, *entry = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
				    index_hash) {
		if (iter_entry->index == index) {
			if (likely(iter_entry->type & type_mask))
				entry = iter_entry;
			break;
		}
	}
	if (likely(entry)) {
		entry->peer = wg_peer_get_maybe_zero(entry->peer);
		if (likely(entry->peer))
			*peer = entry->peer;
		else
			entry = NULL;
	}
	rcu_read_unlock_bh();
	return entry;
}