1 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful, but
8  * WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10  * General Public License for more details.
11  */
12 
13 /* A BPF sock_map is used to store sock objects. This is primarily used
14  * for doing socket redirect with BPF helper routines.
15  *
16  * A sock map may have BPF programs attached to it; currently a program
17  * used to parse packets and a program to provide a verdict and redirect
18  * decision on the packet are supported. Any programs attached to a sock
19  * map are inherited by sock objects when they are added to the map. If
20  * no BPF programs are attached the sock object may only be used for sock
21  * redirect.
22  *
23  * A sock object may be in multiple maps, but can only inherit a single
24  * parse or verdict program. If adding a sock object to a map would result
25  * in having multiple parsing programs the update will return an EBUSY error.
26  *
27  * For reference, this program is similar to devmap used in the XDP context;
28  * reviewing these together may be useful. For an example please review
29  * ./samples/bpf/sockmap/.
30  */
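
/* For illustration only: a minimal sketch of the parser and verdict programs
 * a user might attach to a sockmap, loosely following the conventions in the
 * samples directory referenced above. The map name, program names, and
 * section names are hypothetical, and the redirect helper signature may
 * differ between kernel versions.
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type		= BPF_MAP_TYPE_SOCKMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 20,
 *	};
 *
 *	SEC("sk_skb/parser")
 *	int bpf_prog_parser(struct __sk_buff *skb)
 *	{
 *		return skb->len;	// treat each skb as one complete record
 *	}
 *
 *	SEC("sk_skb/verdict")
 *	int bpf_prog_verdict(struct __sk_buff *skb)
 *	{
 *		// redirect every record to the socket stored at index 0
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 */
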
31 #include <linux/bpf.h>
32 #include <net/sock.h>
33 #include <linux/filter.h>
34 #include <linux/errno.h>
35 #include <linux/file.h>
36 #include <linux/kernel.h>
37 #include <linux/net.h>
38 #include <linux/skbuff.h>
39 #include <linux/workqueue.h>
40 #include <linux/list.h>
41 #include <net/strparser.h>
42 #include <net/tcp.h>
43 
44 #define SOCK_CREATE_FLAG_MASK \
45 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
46 
47 struct bpf_stab {
48 	struct bpf_map map;
49 	struct sock **sock_map;
50 	struct bpf_prog *bpf_parse;
51 	struct bpf_prog *bpf_verdict;
52 };
53 
54 enum smap_psock_state {
55 	SMAP_TX_RUNNING,
56 };
57 
58 struct smap_psock_map_entry {
59 	struct list_head list;
60 	struct sock **entry;
61 };
62 
63 struct smap_psock {
64 	struct rcu_head	rcu;
65 	/* refcnt is used inside sk_callback_lock */
66 	u32 refcnt;
67 
68 	/* datapath variables */
69 	struct sk_buff_head rxqueue;
70 	bool strp_enabled;
71 
72 	/* datapath error path cache across tx work invocations */
73 	int save_rem;
74 	int save_off;
75 	struct sk_buff *save_skb;
76 
77 	struct strparser strp;
78 	struct bpf_prog *bpf_parse;
79 	struct bpf_prog *bpf_verdict;
80 	struct list_head maps;
81 
82 	/* Back reference used when sock callbacks trigger sockmap operations */
83 	struct sock *sock;
84 	unsigned long state;
85 
86 	struct work_struct tx_work;
87 	struct work_struct gc_work;
88 
89 	void (*save_data_ready)(struct sock *sk);
90 	void (*save_write_space)(struct sock *sk);
91 	void (*save_state_change)(struct sock *sk);
92 };
93 
94 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
95 {
96 	return rcu_dereference_sk_user_data(sk);
97 }
98 
99 /* compute the linear packet data range [data, data_end) for skb when
100  * sk_skb type programs are in use.
101  */
102 static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
103 {
104 	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
105 }
106 
107 enum __sk_action {
108 	__SK_DROP = 0,
109 	__SK_PASS,
110 	__SK_REDIRECT,
111 };
112 
113 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
114 {
115 	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
116 	int rc;
117 
118 	if (unlikely(!prog))
119 		return __SK_DROP;
120 
121 	skb_orphan(skb);
122 	/* We need to ensure that BPF metadata for maps is also cleared
123 	 * when we orphan the skb so that we don't end up referencing
124 	 * a stale map.
125 	 */
126 	TCP_SKB_CB(skb)->bpf.map = NULL;
127 	skb->sk = psock->sock;
128 	bpf_compute_data_end_sk_skb(skb);
129 	preempt_disable();
130 	rc = (*prog->bpf_func)(skb, prog->insnsi);
131 	preempt_enable();
132 	skb->sk = NULL;
133 
134 	/* Moving return codes from UAPI namespace into internal namespace */
135 	return rc == SK_PASS ?
136 		(TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
137 		__SK_DROP;
138 }
139 
140 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
141 {
142 	struct sock *sk;
143 	int rc;
144 
145 	rc = smap_verdict_func(psock, skb);
146 	switch (rc) {
147 	case __SK_REDIRECT:
148 		sk = do_sk_redirect_map(skb);
149 		if (likely(sk)) {
150 			struct smap_psock *peer = smap_psock_sk(sk);
151 
152 			if (likely(peer &&
153 				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
154 				   !sock_flag(sk, SOCK_DEAD) &&
155 				   sock_writeable(sk))) {
156 				skb_set_owner_w(skb, sk);
157 				skb_queue_tail(&peer->rxqueue, skb);
158 				schedule_work(&peer->tx_work);
159 				break;
160 			}
161 		}
162 	/* Fall through and free skb otherwise */
163 	case __SK_DROP:
164 	default:
165 		kfree_skb(skb);
166 	}
167 }
168 
169 static void smap_report_sk_error(struct smap_psock *psock, int err)
170 {
171 	struct sock *sk = psock->sock;
172 
173 	sk->sk_err = err;
174 	sk->sk_error_report(sk);
175 }
176 
177 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
178 
179 /* Called with lock_sock(sk) held */
180 static void smap_state_change(struct sock *sk)
181 {
182 	struct smap_psock_map_entry *e, *tmp;
183 	struct smap_psock *psock;
184 	struct socket_wq *wq;
185 	struct sock *osk;
186 
187 	rcu_read_lock();
188 
189 	/* Allowing transitions into the established and syn_recv states allows
190 	 * for early binding of sockets to a smap object before the connection
191 	 * is established.
192 	 */
193 	switch (sk->sk_state) {
194 	case TCP_SYN_SENT:
195 	case TCP_SYN_RECV:
196 	case TCP_ESTABLISHED:
197 		break;
198 	case TCP_CLOSE_WAIT:
199 	case TCP_CLOSING:
200 	case TCP_LAST_ACK:
201 	case TCP_FIN_WAIT1:
202 	case TCP_FIN_WAIT2:
203 	case TCP_LISTEN:
204 		break;
205 	case TCP_CLOSE:
206 		/* Only release if the map entry is in fact the sock in
207 		 * question. There is a case where the operator deletes
208 		 * the sock from the map, but the TCP sock is closed before
209 		 * the psock is detached. Use cmpxchg to verify correct
210 		 * sock is removed.
211 		 */
212 		psock = smap_psock_sk(sk);
213 		if (unlikely(!psock))
214 			break;
215 		write_lock_bh(&sk->sk_callback_lock);
216 		list_for_each_entry_safe(e, tmp, &psock->maps, list) {
217 			osk = cmpxchg(e->entry, sk, NULL);
218 			if (osk == sk) {
219 				list_del(&e->list);
220 				smap_release_sock(psock, sk);
221 			}
222 		}
223 		write_unlock_bh(&sk->sk_callback_lock);
224 		break;
225 	default:
226 		psock = smap_psock_sk(sk);
227 		if (unlikely(!psock))
228 			break;
229 		smap_report_sk_error(psock, EPIPE);
230 		break;
231 	}
232 
233 	wq = rcu_dereference(sk->sk_wq);
234 	if (skwq_has_sleeper(wq))
235 		wake_up_interruptible_all(&wq->wait);
236 	rcu_read_unlock();
237 }
238 
239 static void smap_read_sock_strparser(struct strparser *strp,
240 				     struct sk_buff *skb)
241 {
242 	struct smap_psock *psock;
243 
244 	rcu_read_lock();
245 	psock = container_of(strp, struct smap_psock, strp);
246 	smap_do_verdict(psock, skb);
247 	rcu_read_unlock();
248 }
249 
250 /* Called with lock held on socket */
251 static void smap_data_ready(struct sock *sk)
252 {
253 	struct smap_psock *psock;
254 
255 	rcu_read_lock();
256 	psock = smap_psock_sk(sk);
257 	if (likely(psock)) {
258 		write_lock_bh(&sk->sk_callback_lock);
259 		strp_data_ready(&psock->strp);
260 		write_unlock_bh(&sk->sk_callback_lock);
261 	}
262 	rcu_read_unlock();
263 }
264 
265 static void smap_tx_work(struct work_struct *w)
266 {
267 	struct smap_psock *psock;
268 	struct sk_buff *skb;
269 	int rem, off, n;
270 
271 	psock = container_of(w, struct smap_psock, tx_work);
272 
273 	/* lock sock to avoid losing sk_socket at some point during loop */
274 	lock_sock(psock->sock);
275 	if (psock->save_skb) {
276 		skb = psock->save_skb;
277 		rem = psock->save_rem;
278 		off = psock->save_off;
279 		psock->save_skb = NULL;
280 		goto start;
281 	}
282 
283 	while ((skb = skb_dequeue(&psock->rxqueue))) {
284 		rem = skb->len;
285 		off = 0;
286 start:
287 		do {
288 			if (likely(psock->sock->sk_socket))
289 				n = skb_send_sock_locked(psock->sock,
290 							 skb, off, rem);
291 			else
292 				n = -EINVAL;
293 			if (n <= 0) {
294 				if (n == -EAGAIN) {
295 					/* Retry when space is available */
296 					psock->save_skb = skb;
297 					psock->save_rem = rem;
298 					psock->save_off = off;
299 					goto out;
300 				}
301 				/* Hard errors break pipe and stop xmit */
302 				smap_report_sk_error(psock, n ? -n : EPIPE);
303 				clear_bit(SMAP_TX_RUNNING, &psock->state);
304 				kfree_skb(skb);
305 				goto out;
306 			}
307 			rem -= n;
308 			off += n;
309 		} while (rem);
310 		kfree_skb(skb);
311 	}
312 out:
313 	release_sock(psock->sock);
314 }
315 
316 static void smap_write_space(struct sock *sk)
317 {
318 	struct smap_psock *psock;
319 	void (*write_space)(struct sock *sk);
320 
321 	rcu_read_lock();
322 	psock = smap_psock_sk(sk);
323 	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
324 		schedule_work(&psock->tx_work);
325 	write_space = psock->save_write_space;
326 	rcu_read_unlock();
327 	write_space(sk);
328 }
329 
330 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
331 {
332 	if (!psock->strp_enabled)
333 		return;
334 	sk->sk_data_ready = psock->save_data_ready;
335 	sk->sk_write_space = psock->save_write_space;
336 	sk->sk_state_change = psock->save_state_change;
337 	psock->save_data_ready = NULL;
338 	psock->save_write_space = NULL;
339 	psock->save_state_change = NULL;
340 	strp_stop(&psock->strp);
341 	psock->strp_enabled = false;
342 }
343 
344 static void smap_destroy_psock(struct rcu_head *rcu)
345 {
346 	struct smap_psock *psock = container_of(rcu,
347 						  struct smap_psock, rcu);
348 
349 	/* Now that a grace period has passed there is no longer
350 	 * any reference to this sock in the sockmap so we can
351 	 * destroy the psock, strparser, and bpf programs. But,
352 	 * because we use workqueue sync operations we cannot
353 	 * do it in rcu context.
354 	 */
355 	schedule_work(&psock->gc_work);
356 }
357 
358 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
359 {
360 	psock->refcnt--;
361 	if (psock->refcnt)
362 		return;
363 
364 	smap_stop_sock(psock, sock);
365 	clear_bit(SMAP_TX_RUNNING, &psock->state);
366 	rcu_assign_sk_user_data(sock, NULL);
367 	call_rcu_sched(&psock->rcu, smap_destroy_psock);
368 }
369 
370 static int smap_parse_func_strparser(struct strparser *strp,
371 				       struct sk_buff *skb)
372 {
373 	struct smap_psock *psock;
374 	struct bpf_prog *prog;
375 	int rc;
376 
377 	rcu_read_lock();
378 	psock = container_of(strp, struct smap_psock, strp);
379 	prog = READ_ONCE(psock->bpf_parse);
380 
381 	if (unlikely(!prog)) {
382 		rcu_read_unlock();
383 		return skb->len;
384 	}
385 
386 	/* Attach socket for bpf program to use if needed. We can do this
387 	 * because strparser clones the skb before handing it to an upper
388 	 * layer, meaning skb_orphan has been called. We NULL sk on the
389 	 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
390 	 * later and because we are not charging the memory of this skb to
391 	 * any socket yet.
392 	 */
393 	skb->sk = psock->sock;
394 	bpf_compute_data_end_sk_skb(skb);
395 	rc = (*prog->bpf_func)(skb, prog->insnsi);
396 	skb->sk = NULL;
397 	rcu_read_unlock();
398 	return rc;
399 }
400 
401 
402 static int smap_read_sock_done(struct strparser *strp, int err)
403 {
404 	return err;
405 }
406 
407 static int smap_init_sock(struct smap_psock *psock,
408 			  struct sock *sk)
409 {
410 	static const struct strp_callbacks cb = {
411 		.rcv_msg = smap_read_sock_strparser,
412 		.parse_msg = smap_parse_func_strparser,
413 		.read_sock_done = smap_read_sock_done,
414 	};
415 
416 	return strp_init(&psock->strp, sk, &cb);
417 }
418 
419 static void smap_init_progs(struct smap_psock *psock,
420 			    struct bpf_stab *stab,
421 			    struct bpf_prog *verdict,
422 			    struct bpf_prog *parse)
423 {
424 	struct bpf_prog *orig_parse, *orig_verdict;
425 
426 	orig_parse = xchg(&psock->bpf_parse, parse);
427 	orig_verdict = xchg(&psock->bpf_verdict, verdict);
428 
429 	if (orig_verdict)
430 		bpf_prog_put(orig_verdict);
431 	if (orig_parse)
432 		bpf_prog_put(orig_parse);
433 }
434 
435 static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
436 {
437 	if (sk->sk_data_ready == smap_data_ready)
438 		return;
439 	psock->save_data_ready = sk->sk_data_ready;
440 	psock->save_write_space = sk->sk_write_space;
441 	psock->save_state_change = sk->sk_state_change;
442 	sk->sk_data_ready = smap_data_ready;
443 	sk->sk_write_space = smap_write_space;
444 	sk->sk_state_change = smap_state_change;
445 	psock->strp_enabled = true;
446 }
447 
448 static void sock_map_remove_complete(struct bpf_stab *stab)
449 {
450 	bpf_map_area_free(stab->sock_map);
451 	kfree(stab);
452 }
453 
454 static void smap_gc_work(struct work_struct *w)
455 {
456 	struct smap_psock_map_entry *e, *tmp;
457 	struct smap_psock *psock;
458 
459 	psock = container_of(w, struct smap_psock, gc_work);
460 
461 	/* no callback lock needed because we already detached sockmap ops */
462 	if (psock->strp_enabled)
463 		strp_done(&psock->strp);
464 
465 	cancel_work_sync(&psock->tx_work);
466 	__skb_queue_purge(&psock->rxqueue);
467 
468 	/* At this point all strparser and xmit work must be complete */
469 	if (psock->bpf_parse)
470 		bpf_prog_put(psock->bpf_parse);
471 	if (psock->bpf_verdict)
472 		bpf_prog_put(psock->bpf_verdict);
473 
474 	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
475 		list_del(&e->list);
476 		kfree(e);
477 	}
478 
479 	sock_put(psock->sock);
480 	kfree(psock);
481 }
482 
483 static struct smap_psock *smap_init_psock(struct sock *sock,
484 					  struct bpf_stab *stab)
485 {
486 	struct smap_psock *psock;
487 
488 	psock = kzalloc_node(sizeof(struct smap_psock),
489 			     GFP_ATOMIC | __GFP_NOWARN,
490 			     stab->map.numa_node);
491 	if (!psock)
492 		return ERR_PTR(-ENOMEM);
493 
494 	psock->sock = sock;
495 	skb_queue_head_init(&psock->rxqueue);
496 	INIT_WORK(&psock->tx_work, smap_tx_work);
497 	INIT_WORK(&psock->gc_work, smap_gc_work);
498 	INIT_LIST_HEAD(&psock->maps);
499 	psock->refcnt = 1;
500 
501 	rcu_assign_sk_user_data(sock, psock);
502 	sock_hold(sock);
503 	return psock;
504 }
505 
506 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
507 {
508 	struct bpf_stab *stab;
509 	int err = -EINVAL;
510 	u64 cost;
511 
512 	if (!capable(CAP_NET_ADMIN))
513 		return ERR_PTR(-EPERM);
514 
515 	/* check sanity of attributes */
516 	if (attr->max_entries == 0 || attr->key_size != 4 ||
517 	    attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
518 		return ERR_PTR(-EINVAL);
519 
520 	if (attr->value_size > KMALLOC_MAX_SIZE)
521 		return ERR_PTR(-E2BIG);
522 
523 	stab = kzalloc(sizeof(*stab), GFP_USER);
524 	if (!stab)
525 		return ERR_PTR(-ENOMEM);
526 
527 	/* mandatory map attributes */
528 	stab->map.map_type = attr->map_type;
529 	stab->map.key_size = attr->key_size;
530 	stab->map.value_size = attr->value_size;
531 	stab->map.max_entries = attr->max_entries;
532 	stab->map.map_flags = attr->map_flags;
533 	stab->map.numa_node = bpf_map_attr_numa_node(attr);
534 
535 	/* make sure page count doesn't overflow */
536 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
537 	if (cost >= U32_MAX - PAGE_SIZE)
538 		goto free_stab;
539 
540 	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
541 
542 	/* if map size is larger than memlock limit, reject it early */
543 	err = bpf_map_precharge_memlock(stab->map.pages);
544 	if (err)
545 		goto free_stab;
546 
547 	err = -ENOMEM;
548 	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
549 					    sizeof(struct sock *),
550 					    stab->map.numa_node);
551 	if (!stab->sock_map)
552 		goto free_stab;
553 
554 	return &stab->map;
555 free_stab:
556 	kfree(stab);
557 	return ERR_PTR(err);
558 }
559 
560 static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
561 {
562 	struct smap_psock_map_entry *e, *tmp;
563 
564 	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
565 		if (e->entry == entry) {
566 			list_del(&e->list);
567 			break;
568 		}
569 	}
570 }
571 
572 static void sock_map_free(struct bpf_map *map)
573 {
574 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
575 	int i;
576 
577 	synchronize_rcu();
578 
579 	/* At this point no update, lookup or delete operations can happen.
580 	 * However, be aware we can still get socket state event updates
581 	 * and data ready callbacks that reference the psock from sk_user_data.
582 	 * Also psock worker threads are still in-flight. So smap_release_sock
583 	 * will only free the psock after cancel_sync on the worker threads
584 	 * and a grace period expires to ensure psock is really safe to remove.
585 	 */
586 	rcu_read_lock();
587 	for (i = 0; i < stab->map.max_entries; i++) {
588 		struct smap_psock *psock;
589 		struct sock *sock;
590 
591 		sock = xchg(&stab->sock_map[i], NULL);
592 		if (!sock)
593 			continue;
594 
595 		write_lock_bh(&sock->sk_callback_lock);
596 		psock = smap_psock_sk(sock);
597 		/* This check handles a racing sock event that can get the
598 		 * sk_callback_lock before this case but after xchg happens
599 		 * causing the refcnt to hit zero and sock user data (psock)
600 		 * to be null and queued for garbage collection.
601 		 */
602 		if (likely(psock)) {
603 			smap_list_remove(psock, &stab->sock_map[i]);
604 			smap_release_sock(psock, sock);
605 		}
606 		write_unlock_bh(&sock->sk_callback_lock);
607 	}
608 	rcu_read_unlock();
609 
610 	sock_map_remove_complete(stab);
611 }
612 
613 static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
614 {
615 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
616 	u32 i = key ? *(u32 *)key : U32_MAX;
617 	u32 *next = (u32 *)next_key;
618 
619 	if (i >= stab->map.max_entries) {
620 		*next = 0;
621 		return 0;
622 	}
623 
624 	if (i == stab->map.max_entries - 1)
625 		return -ENOENT;
626 
627 	*next = i + 1;
628 	return 0;
629 }
630 
631 struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
632 {
633 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
634 
635 	if (key >= map->max_entries)
636 		return NULL;
637 
638 	return READ_ONCE(stab->sock_map[key]);
639 }
640 
641 static int sock_map_delete_elem(struct bpf_map *map, void *key)
642 {
643 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
644 	struct smap_psock *psock;
645 	int k = *(u32 *)key;
646 	struct sock *sock;
647 
648 	if (k >= map->max_entries)
649 		return -EINVAL;
650 
651 	sock = xchg(&stab->sock_map[k], NULL);
652 	if (!sock)
653 		return -EINVAL;
654 
655 	write_lock_bh(&sock->sk_callback_lock);
656 	psock = smap_psock_sk(sock);
657 	if (!psock)
658 		goto out;
659 
660 	if (psock->bpf_parse)
661 		smap_stop_sock(psock, sock);
662 	smap_list_remove(psock, &stab->sock_map[k]);
663 	smap_release_sock(psock, sock);
664 out:
665 	write_unlock_bh(&sock->sk_callback_lock);
666 	return 0;
667 }
668 
669 /* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
670  * done inside rcu critical sections. This ensures on updates that the psock
671  * will not be released via smap_release_sock() until concurrent updates/deletes
672  * complete. All operations operate on sock_map using cmpxchg and xchg
673  * operations to ensure we do not get stale references. Any reads into the
674  * map must be done with READ_ONCE() because of this.
675  *
676  * A psock is destroyed via call_rcu and after any worker threads are cancelled
677  * and synced so we are certain all references from the update/lookup/delete
678  * operations as well as references in the data path are no longer in use.
679  *
680  * Psocks may exist in multiple maps, but only a single set of parse/verdict
681  * programs may be inherited from the maps it belongs to. A reference count
682  * is kept with the total number of references to the psock from all maps. The
683  * psock will not be released until this reaches zero. The psock and sock
684  * user data use the sk_callback_lock to protect critical data structures
685  * from concurrent access. This allows us to avoid two updates from modifying
686  * the user data in sock, and the lock is required anyway for modifying
687  * callbacks; we simply increase its scope slightly.
688  *
689  * Rules to follow,
690  *  - psock must always be read inside RCU critical section
691  *  - sk_user_data must only be modified inside sk_callback_lock and read
692  *    inside RCU critical section.
693  *  - psock->maps list must only be read & modified inside sk_callback_lock
694  *  - sock_map must use READ_ONCE and (cmp)xchg operations
695  *  - BPF verdict/parse programs must use READ_ONCE and xchg operations
696  */
697 static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
698 				    struct bpf_map *map,
699 				    void *key, u64 flags)
700 {
701 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
702 	struct smap_psock_map_entry *e = NULL;
703 	struct bpf_prog *verdict, *parse;
704 	struct sock *osock, *sock;
705 	struct smap_psock *psock;
706 	u32 i = *(u32 *)key;
707 	int err;
708 
709 	if (unlikely(flags > BPF_EXIST))
710 		return -EINVAL;
711 
712 	if (unlikely(i >= stab->map.max_entries))
713 		return -E2BIG;
714 
715 	sock = READ_ONCE(stab->sock_map[i]);
716 	if (flags == BPF_EXIST && !sock)
717 		return -ENOENT;
718 	else if (flags == BPF_NOEXIST && sock)
719 		return -EEXIST;
720 
721 	sock = skops->sk;
722 
723 	/* 1. If the sock map has BPF programs, those will be inherited by the
724 	 * sock being added. If the sock is already attached to BPF programs
725 	 * this results in an error.
726 	 */
727 	verdict = READ_ONCE(stab->bpf_verdict);
728 	parse = READ_ONCE(stab->bpf_parse);
729 
730 	if (parse && verdict) {
731 		/* bpf prog refcnt may be zero if a concurrent attach operation
732 		 * removes the program after the above READ_ONCE() but before
733 		 * we increment the refcnt. If this is the case abort with an
734 		 * error.
735 		 */
736 		verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
737 		if (IS_ERR(verdict))
738 			return PTR_ERR(verdict);
739 
740 		parse = bpf_prog_inc_not_zero(stab->bpf_parse);
741 		if (IS_ERR(parse)) {
742 			bpf_prog_put(verdict);
743 			return PTR_ERR(parse);
744 		}
745 	}
746 
747 	write_lock_bh(&sock->sk_callback_lock);
748 	psock = smap_psock_sk(sock);
749 
750 	/* 2. Do not allow inheriting programs if psock exists and has
751 	 * already inherited programs. This would create confusion on
752 	 * which parser/verdict program is running. If no psock exists
753 	 * create one. Inside sk_callback_lock to ensure concurrent create
754 	 * doesn't update user data.
755 	 */
756 	if (psock) {
757 		if (READ_ONCE(psock->bpf_parse) && parse) {
758 			err = -EBUSY;
759 			goto out_progs;
760 		}
761 		psock->refcnt++;
762 	} else {
763 		psock = smap_init_psock(sock, stab);
764 		if (IS_ERR(psock)) {
765 			err = PTR_ERR(psock);
766 			goto out_progs;
767 		}
768 
769 		set_bit(SMAP_TX_RUNNING, &psock->state);
770 	}
771 
772 	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
773 	if (!e) {
774 		err = -ENOMEM;
775 		goto out_progs;
776 	}
777 	e->entry = &stab->sock_map[i];
778 
779 	/* 3. At this point we have a reference to a valid psock that is
780 	 * running. Attach any BPF programs needed.
781 	 */
782 	if (parse && verdict && !psock->strp_enabled) {
783 		err = smap_init_sock(psock, sock);
784 		if (err)
785 			goto out_free;
786 		smap_init_progs(psock, stab, verdict, parse);
787 		smap_start_sock(psock, sock);
788 	}
789 
790 	/* 4. Place psock in sockmap for use and stop any programs on
791 	 * the old sock, assuming it's not the same sock we are replacing
792 	 * it with. Because we can only have a single set of programs, if
793 	 * old_sock has a strp we can stop it.
794 	 */
795 	list_add_tail(&e->list, &psock->maps);
796 	write_unlock_bh(&sock->sk_callback_lock);
797 
798 	osock = xchg(&stab->sock_map[i], sock);
799 	if (osock) {
800 		struct smap_psock *opsock = smap_psock_sk(osock);
801 
802 		write_lock_bh(&osock->sk_callback_lock);
803 		if (osock != sock && parse)
804 			smap_stop_sock(opsock, osock);
805 		smap_list_remove(opsock, &stab->sock_map[i]);
806 		smap_release_sock(opsock, osock);
807 		write_unlock_bh(&osock->sk_callback_lock);
808 	}
809 	return 0;
810 out_free:
811 	smap_release_sock(psock, sock);
812 out_progs:
813 	if (verdict)
814 		bpf_prog_put(verdict);
815 	if (parse)
816 		bpf_prog_put(parse);
817 	write_unlock_bh(&sock->sk_callback_lock);
818 	kfree(e);
819 	return err;
820 }
821 
822 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
823 {
824 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
825 	struct bpf_prog *orig;
826 
827 	if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
828 		return -EINVAL;
829 
830 	switch (type) {
831 	case BPF_SK_SKB_STREAM_PARSER:
832 		orig = xchg(&stab->bpf_parse, prog);
833 		break;
834 	case BPF_SK_SKB_STREAM_VERDICT:
835 		orig = xchg(&stab->bpf_verdict, prog);
836 		break;
837 	default:
838 		return -EOPNOTSUPP;
839 	}
840 
841 	if (orig)
842 		bpf_prog_put(orig);
843 
844 	return 0;
845 }
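
/* For illustration only: a sketch of how user space might attach the two
 * program types handled above, assuming libbpf-style wrappers; the fds are
 * hypothetical.
 *
 *	// parser_fd/verdict_fd: loaded BPF_PROG_TYPE_SK_SKB programs,
 *	// map_fd: a BPF_MAP_TYPE_SOCKMAP
 *	bpf_prog_attach(parser_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 */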
846 
847 static void *sock_map_lookup(struct bpf_map *map, void *key)
848 {
849 	return NULL;
850 }
851 
852 static int sock_map_update_elem(struct bpf_map *map,
853 				void *key, void *value, u64 flags)
854 {
855 	struct bpf_sock_ops_kern skops;
856 	u32 fd = *(u32 *)value;
857 	struct socket *socket;
858 	int err;
859 
860 	socket = sockfd_lookup(fd, &err);
861 	if (!socket)
862 		return err;
863 
864 	skops.sk = socket->sk;
865 	if (!skops.sk) {
866 		fput(socket->file);
867 		return -EINVAL;
868 	}
869 
870 	if (skops.sk->sk_type != SOCK_STREAM ||
871 	    skops.sk->sk_protocol != IPPROTO_TCP) {
872 		fput(socket->file);
873 		return -EOPNOTSUPP;
874 	}
875 
876 	err = sock_map_ctx_update_elem(&skops, map, key, flags);
877 	fput(socket->file);
878 	return err;
879 }
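
/* For illustration only: a sketch of how user space might drive the update
 * path above, assuming a libbpf-style wrapper. Note the value passed in is a
 * TCP socket fd, not a sock pointer; key, map_fd and sock_fd are
 * hypothetical.
 *
 *	int key = 0;
 *
 *	if (bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY))
 *		perror("bpf_map_update_elem");
 */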
880 
881 static void sock_map_release(struct bpf_map *map)
882 {
883 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
884 	struct bpf_prog *orig;
885 
886 	orig = xchg(&stab->bpf_parse, NULL);
887 	if (orig)
888 		bpf_prog_put(orig);
889 	orig = xchg(&stab->bpf_verdict, NULL);
890 	if (orig)
891 		bpf_prog_put(orig);
892 }
893 
894 const struct bpf_map_ops sock_map_ops = {
895 	.map_alloc = sock_map_alloc,
896 	.map_free = sock_map_free,
897 	.map_lookup_elem = sock_map_lookup,
898 	.map_get_next_key = sock_map_get_next_key,
899 	.map_update_elem = sock_map_update_elem,
900 	.map_delete_elem = sock_map_delete_elem,
901 	.map_release_uref = sock_map_release,
902 };
903 
904 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
905 	   struct bpf_map *, map, void *, key, u64, flags)
906 {
907 	WARN_ON_ONCE(!rcu_read_lock_held());
908 	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
909 }
910 
911 const struct bpf_func_proto bpf_sock_map_update_proto = {
912 	.func		= bpf_sock_map_update,
913 	.gpl_only	= false,
914 	.pkt_access	= true,
915 	.ret_type	= RET_INTEGER,
916 	.arg1_type	= ARG_PTR_TO_CTX,
917 	.arg2_type	= ARG_CONST_MAP_PTR,
918 	.arg3_type	= ARG_PTR_TO_MAP_KEY,
919 	.arg4_type	= ARG_ANYTHING,
920 };
921