// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peerlookup.h"
#include "peer.h"
#include "noise.h"

static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
					const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	/* siphash gives us a secure 64-bit number based on a random key. Since
	 * the bits are uniformly distributed, we can simply mask off the low
	 * bits to get the bucket we need; the table size is a power of two, so
	 * this is equivalent to reducing the hash modulo the bucket count.
	 */
	const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);

	return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}

struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
	struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

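	/* The siphash key is generated at random for each table, so public
	 * keys hash to buckets that cannot be predicted from the outside.
	 */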
	get_random_bytes(&table->key, sizeof(table->key));
	hash_init(table->hashtable);
	mutex_init(&table->lock);
	return table;
}

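/* Insertions and removals are serialized by table->lock; lookups walk the
 * buckets locklessly under the RCU-bh read lock.
 */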
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
			     struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_add_head_rcu(&peer->pubkey_hash,
			   pubkey_bucket(table, peer->handshake.remote_static));
	mutex_unlock(&table->lock);
}

void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
				struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_del_init_rcu(&peer->pubkey_hash);
	mutex_unlock(&table->lock);
}

/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
			   const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wg_peer *iter_peer, *peer = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
				    pubkey_hash) {
		if (!memcmp(pubkey, iter_peer->handshake.remote_static,
			    NOISE_PUBLIC_KEY_LEN)) {
			peer = iter_peer;
			break;
		}
	}
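	/* Only return the peer if a reference can still be taken; if its
	 * refcount has already dropped to zero, the lookup yields NULL.
	 */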
	peer = wg_peer_get_maybe_zero(peer);
	rcu_read_unlock_bh();
	return peer;
}

static struct hlist_head *index_bucket(struct index_hashtable *table,
				       const __le32 index)
{
	/* Since the indices are random and thus all bits are uniformly
	 * distributed, we can find its bucket simply by masking.
	 */
	return &table->hashtable[(__force u32)index &
				 (HASH_SIZE(table->hashtable) - 1)];
}

struct index_hashtable *wg_index_hashtable_alloc(void)
{
	struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	hash_init(table->hashtable);
	spin_lock_init(&table->lock);
	return table;
}

/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
 * is another thing to consider moving forward.
 */
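
/* Put differently, the number of tries follows a geometric distribution: with
 * a full table of 2^20 * 3 entries, the expected number of guesses is
 * 1 / (1 - 3/4096), roughly 1.0007, so the first guess nearly always wins.
 */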

__le32 wg_index_hashtable_insert(struct index_hashtable *table,
				 struct index_hashtable_entry *entry)
{
	struct index_hashtable_entry *existing_entry;

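	/* Unhash the entry first, in case it is still in the table under an
	 * index from an earlier insertion.
	 */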
	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);

	rcu_read_lock_bh();

search_unused_slot:
	/* First we try to find an unused slot, randomly, while unlocked. */
	entry->index = (__force __le32)get_random_u32();
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index)
			/* If it's already in use, we continue searching. */
			goto search_unused_slot;
	}

	/* Once we've found an unused slot, we lock it, and then double-check
	 * that nobody else stole it from us.
	 */
	spin_lock_bh(&table->lock);
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index) {
			spin_unlock_bh(&table->lock);
			/* If it was stolen, we start over. */
			goto search_unused_slot;
		}
	}
	/* Otherwise, we know we have it exclusively (since we're locked),
	 * so we insert.
	 */
	hlist_add_head_rcu(&entry->index_hash,
			   index_bucket(table, entry->index));
	spin_unlock_bh(&table->lock);

	rcu_read_unlock_bh();

	return entry->index;
}

bool wg_index_hashtable_replace(struct index_hashtable *table,
				struct index_hashtable_entry *old,
				struct index_hashtable_entry *new)
{
	bool ret;

	spin_lock_bh(&table->lock);
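	/* If the old entry has already been unhashed, there is nothing to take
	 * over, so report failure.
	 */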
	ret = !hlist_unhashed(&old->index_hash);
	if (unlikely(!ret))
		goto out;

	new->index = old->index;
	hlist_replace_rcu(&old->index_hash, &new->index_hash);

	/* Calling init here NULLs out index_hash, and in fact after this
	 * function returns, it's theoretically possible for this to get
	 * reinserted elsewhere. That means the RCU lookup below might either
	 * terminate early or jump between buckets, in which case the packet
	 * simply gets dropped, which isn't terrible.
	 */
	INIT_HLIST_NODE(&old->index_hash);
out:
	spin_unlock_bh(&table->lock);
	return ret;
}

void wg_index_hashtable_remove(struct index_hashtable *table,
			       struct index_hashtable_entry *entry)
{
	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);
}

/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
			  const enum index_hashtable_type type_mask,
			  const __le32 index, struct wg_peer **peer)
{
	struct index_hashtable_entry *iter_entry, *entry = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
				    index_hash) {
		if (iter_entry->index == index) {
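			/* The index matches, but only hand the entry back if
			 * its type is one of those the caller asked for.
			 */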
			if (likely(iter_entry->type & type_mask))
				entry = iter_entry;
			break;
		}
	}
	if (likely(entry)) {
		entry->peer = wg_peer_get_maybe_zero(entry->peer);
		if (likely(entry->peer))
			*peer = entry->peer;
		else
			entry = NULL;
	}
	rcu_read_unlock_bh();
	return entry;
}