// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
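
/*
 * Illustration (not part of the original file): from user space, a socket
 * joins a reuseport group simply by enabling SO_REUSEPORT before bind().
 * A minimal sketch, assuming <sys/socket.h> and a pre-filled sockaddr_in
 * named "addr" (both hypothetical here); every process repeating these
 * calls on the same address/port lands in the same sock_reuseport group
 * managed by the code below:
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, SOMAXCONN);
 */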

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					     lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);
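
/*
 * Illustration (not part of the original file): the usual in-kernel caller
 * of reuseport_add_sock() is the protocol bind/hash path.  A simplified
 * sketch, assuming sk2 has already been found in the bind bucket and that
 * inet_rcv_saddr_any() is available to report an INANY local address; the
 * bucket walk and error handling are elided:
 *
 *	if (sk2)
 *		return reuseport_add_sock(sk, sk2, inet_rcv_saddr_any(sk));
 *
 *	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
 */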

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* At least one of the sk in this reuseport group is added to
	 * a bpf map.  Notify the bpf side.  The bpf map logic will
	 * remove the sk if it is indeed added to a bpf map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2) {
			int i, j;

			i = j = reciprocal_scale(hash, socks);
			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
				i++;
				if (i >= socks)
					i = 0;
				if (i == j)
					goto out;
			}
			sk2 = reuse->socks[i];
		}
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
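
/*
 * Illustration (not part of the original file): the non-SK_REUSEPORT branch
 * above runs a socket-filter style program whose return value is used as an
 * index into the group's socks[] array.  A minimal user-space sketch,
 * assuming <linux/filter.h>, <sys/socket.h>, and a group member socket "fd";
 * this classic BPF program always returns 0, i.e. it always selects socks[0]:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD | BPF_W | BPF_IMM, 0, 0, 0 },	// A = 0
 *		{ BPF_RET | BPF_A, 0, 0, 0 },		// return A (the index)
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= 2,
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 */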

int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);


int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return sk->sk_reuseport ? -ENOENT : -EINVAL;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_swap_protected(reuse->prog, old_prog,
			   lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);