// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	raw_spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK	\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
26
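/* A sockmap is a plain array of struct sock pointers.  The key is a u32
 * index and the value written from user space is a socket FD (u32 or u64);
 * lookups from user space return the socket cookie rather than a pointer.
 */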
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
28 {
29 struct bpf_stab *stab;
30 u64 cost;
31 int err;
32
33 if (!capable(CAP_NET_ADMIN))
34 return ERR_PTR(-EPERM);
35 if (attr->max_entries == 0 ||
36 attr->key_size != 4 ||
37 (attr->value_size != sizeof(u32) &&
38 attr->value_size != sizeof(u64)) ||
39 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
40 return ERR_PTR(-EINVAL);
41
42 stab = kzalloc(sizeof(*stab), GFP_USER);
43 if (!stab)
44 return ERR_PTR(-ENOMEM);
45
46 bpf_map_init_from_attr(&stab->map, attr);
47 raw_spin_lock_init(&stab->lock);
48
49 /* Make sure page count doesn't overflow. */
50 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
51 err = bpf_map_charge_init(&stab->map.memory, cost);
52 if (err)
53 goto free_stab;
54
55 stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
56 sizeof(struct sock *),
57 stab->map.numa_node);
58 if (stab->sks)
59 return &stab->map;
60 err = -ENOMEM;
61 bpf_map_charge_finish(&stab->map.memory);
62 free_stab:
63 kfree(stab);
64 return ERR_PTR(err);
65 }
66
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
68 {
69 u32 ufd = attr->target_fd;
70 struct bpf_map *map;
71 struct fd f;
72 int ret;
73
74 if (attr->attach_flags || attr->replace_bpf_fd)
75 return -EINVAL;
76
77 f = fdget(ufd);
78 map = __bpf_map_get(f);
79 if (IS_ERR(map))
80 return PTR_ERR(map);
81 ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
82 fdput(f);
83 return ret;
84 }
85
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
87 {
88 u32 ufd = attr->target_fd;
89 struct bpf_prog *prog;
90 struct bpf_map *map;
91 struct fd f;
92 int ret;
93
94 if (attr->attach_flags || attr->replace_bpf_fd)
95 return -EINVAL;
96
97 f = fdget(ufd);
98 map = __bpf_map_get(f);
99 if (IS_ERR(map))
100 return PTR_ERR(map);
101
102 prog = bpf_prog_get(attr->attach_bpf_fd);
103 if (IS_ERR(prog)) {
104 ret = PTR_ERR(prog);
105 goto put_map;
106 }
107
108 if (prog->type != ptype) {
109 ret = -EINVAL;
110 goto put_prog;
111 }
112
113 ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
114 put_prog:
115 bpf_prog_put(prog);
116 put_map:
117 fdput(f);
118 return ret;
119 }
120
static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
123 {
124 lock_sock(sk);
125 rcu_read_lock();
126 }
127
static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
130 {
131 rcu_read_unlock();
132 release_sock(sk);
133 }
134
static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
138 {
139 link->link_raw = link_raw;
140 link->map = map;
141 spin_lock_bh(&psock->link_lock);
142 list_add_tail(&link->list, &psock->link);
143 spin_unlock_bh(&psock->link_lock);
144 }
145
static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
148 {
149 bool strp_stop = false, verdict_stop = false;
150 struct sk_psock_link *link, *tmp;
151
152 spin_lock_bh(&psock->link_lock);
153 list_for_each_entry_safe(link, tmp, &psock->link, list) {
154 if (link->link_raw == link_raw) {
155 struct bpf_map *map = link->map;
156 struct bpf_stab *stab = container_of(map, struct bpf_stab,
157 map);
158 if (psock->parser.enabled && stab->progs.skb_parser)
159 strp_stop = true;
160 if (psock->parser.enabled && stab->progs.skb_verdict)
161 verdict_stop = true;
162 list_del(&link->list);
163 sk_psock_free_link(link);
164 }
165 }
166 spin_unlock_bh(&psock->link_lock);
167 if (strp_stop || verdict_stop) {
168 write_lock_bh(&sk->sk_callback_lock);
169 if (strp_stop)
170 sk_psock_stop_strp(sk, psock);
171 else
172 sk_psock_stop_verdict(sk, psock);
173 write_unlock_bh(&sk->sk_callback_lock);
174 }
175 }
176
static void sock_map_unref(struct sock *sk, void *link_raw)
178 {
179 struct sk_psock *psock = sk_psock(sk);
180
181 if (likely(psock)) {
182 sock_map_del_link(sk, psock, link_raw);
183 sk_psock_put(sk, psock);
184 }
185 }
186
static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
188 {
189 struct proto *prot;
190
191 switch (sk->sk_type) {
192 case SOCK_STREAM:
193 prot = tcp_bpf_get_proto(sk, psock);
194 break;
195
196 case SOCK_DGRAM:
197 prot = udp_bpf_get_proto(sk, psock);
198 break;
199
200 default:
201 return -EINVAL;
202 }
203
204 if (IS_ERR(prot))
205 return PTR_ERR(prot);
206
207 sk_psock_update_proto(sk, psock, prot);
208 return 0;
209 }
210
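/* Return the socket's existing psock if sockmap already manages it, NULL if
 * it has none yet, or ERR_PTR(-EBUSY) when the psock belongs to another user
 * (its close callback is not ours) or is already being torn down.
 */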
static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
212 {
213 struct sk_psock *psock;
214
215 rcu_read_lock();
216 psock = sk_psock(sk);
217 if (psock) {
218 if (sk->sk_prot->close != sock_map_close) {
219 psock = ERR_PTR(-EBUSY);
220 goto out;
221 }
222
223 if (!refcount_inc_not_zero(&psock->refcnt))
224 psock = ERR_PTR(-EBUSY);
225 }
226 out:
227 rcu_read_unlock();
228 return psock;
229 }
230
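/* Attach a socket that may serve as a redirect target.  Take references on
 * the map's msg/skb programs, create (or reuse) the socket's psock, switch
 * the socket to the BPF proto ops and, under sk_callback_lock, start either
 * the strparser path (parser + verdict) or the verdict-only path.  Any
 * failure unwinds the program references taken so far.
 */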
static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
			 struct sock *sk)
233 {
234 struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
235 struct sk_psock *psock;
236 int ret;
237
238 skb_verdict = READ_ONCE(progs->skb_verdict);
239 if (skb_verdict) {
240 skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
241 if (IS_ERR(skb_verdict))
242 return PTR_ERR(skb_verdict);
243 }
244
245 skb_parser = READ_ONCE(progs->skb_parser);
246 if (skb_parser) {
247 skb_parser = bpf_prog_inc_not_zero(skb_parser);
248 if (IS_ERR(skb_parser)) {
249 ret = PTR_ERR(skb_parser);
250 goto out_put_skb_verdict;
251 }
252 }
253
254 msg_parser = READ_ONCE(progs->msg_parser);
255 if (msg_parser) {
256 msg_parser = bpf_prog_inc_not_zero(msg_parser);
257 if (IS_ERR(msg_parser)) {
258 ret = PTR_ERR(msg_parser);
259 goto out_put_skb_parser;
260 }
261 }
262
263 psock = sock_map_psock_get_checked(sk);
264 if (IS_ERR(psock)) {
265 ret = PTR_ERR(psock);
266 goto out_progs;
267 }
268
269 if (psock) {
270 if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
271 (skb_parser && READ_ONCE(psock->progs.skb_parser)) ||
272 (skb_verdict && READ_ONCE(psock->progs.skb_verdict))) {
273 sk_psock_put(sk, psock);
274 ret = -EBUSY;
275 goto out_progs;
276 }
277 } else {
278 psock = sk_psock_init(sk, map->numa_node);
279 if (IS_ERR(psock)) {
280 ret = PTR_ERR(psock);
281 goto out_progs;
282 }
283 }
284
285 if (msg_parser)
286 psock_set_prog(&psock->progs.msg_parser, msg_parser);
287
288 ret = sock_map_init_proto(sk, psock);
289 if (ret < 0)
290 goto out_drop;
291
292 write_lock_bh(&sk->sk_callback_lock);
293 if (skb_parser && skb_verdict && !psock->parser.enabled) {
294 ret = sk_psock_init_strp(sk, psock);
295 if (ret)
296 goto out_unlock_drop;
297 psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
298 psock_set_prog(&psock->progs.skb_parser, skb_parser);
299 sk_psock_start_strp(sk, psock);
300 } else if (!skb_parser && skb_verdict && !psock->parser.enabled) {
301 psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
		sk_psock_start_verdict(sk, psock);
303 }
304 write_unlock_bh(&sk->sk_callback_lock);
305 return 0;
306 out_unlock_drop:
307 write_unlock_bh(&sk->sk_callback_lock);
308 out_drop:
309 sk_psock_put(sk, psock);
310 out_progs:
311 if (msg_parser)
312 bpf_prog_put(msg_parser);
313 out_put_skb_parser:
314 if (skb_parser)
315 bpf_prog_put(skb_parser);
316 out_put_skb_verdict:
317 if (skb_verdict)
318 bpf_prog_put(skb_verdict);
319 return ret;
320 }
321
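/* Attach a socket that cannot be a redirect target (e.g. a TCP listener):
 * only the psock and the BPF proto ops are set up, no skb programs taken.
 */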
static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
323 {
324 struct sk_psock *psock;
325 int ret;
326
327 psock = sock_map_psock_get_checked(sk);
328 if (IS_ERR(psock))
329 return PTR_ERR(psock);
330
331 if (!psock) {
332 psock = sk_psock_init(sk, map->numa_node);
333 if (IS_ERR(psock))
334 return PTR_ERR(psock);
335 }
336
337 ret = sock_map_init_proto(sk, psock);
338 if (ret < 0)
339 sk_psock_put(sk, psock);
340 return ret;
341 }
342
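/* Tear down the map: clear every slot with the socket held and locked, then
 * wait one more grace period for psock readers before freeing the array.
 */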
static void sock_map_free(struct bpf_map *map)
344 {
345 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
346 int i;
347
348 /* After the sync no updates or deletes will be in-flight so it
349 * is safe to walk map and remove entries without risking a race
350 * in EEXIST update case.
351 */
352 synchronize_rcu();
353 for (i = 0; i < stab->map.max_entries; i++) {
354 struct sock **psk = &stab->sks[i];
355 struct sock *sk;
356
357 sk = xchg(psk, NULL);
358 if (sk) {
359 sock_hold(sk);
360 lock_sock(sk);
361 rcu_read_lock();
362 sock_map_unref(sk, psk);
363 rcu_read_unlock();
364 release_sock(sk);
365 sock_put(sk);
366 }
367 }
368
369 /* wait for psock readers accessing its map link */
370 synchronize_rcu();
371
372 bpf_map_area_free(stab->sks);
373 kfree(stab);
374 }
375
static void sock_map_release_progs(struct bpf_map *map)
377 {
378 psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
379 }
380
static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
382 {
383 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
384
385 WARN_ON_ONCE(!rcu_read_lock_held());
386
387 if (unlikely(key >= map->max_entries))
388 return NULL;
389 return READ_ONCE(stab->sks[key]);
390 }
391
static void *sock_map_lookup(struct bpf_map *map, void *key)
393 {
394 struct sock *sk;
395
396 sk = __sock_map_lookup_elem(map, *(u32 *)key);
397 if (!sk)
398 return NULL;
399 if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
400 return NULL;
401 return sk;
402 }
403
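/* Lookup from the bpf() syscall: rather than exposing a kernel pointer,
 * return the socket cookie, which requires the map value size to be u64.
 */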
static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
405 {
406 struct sock *sk;
407
408 if (map->value_size != sizeof(u64))
409 return ERR_PTR(-ENOSPC);
410
411 sk = __sock_map_lookup_elem(map, *(u32 *)key);
412 if (!sk)
413 return ERR_PTR(-ENOENT);
414
415 __sock_gen_cookie(sk);
416 return &sk->sk_cookie;
417 }
418
static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
421 {
422 struct sock *sk;
423 int err = 0;
424
425 raw_spin_lock_bh(&stab->lock);
426 sk = *psk;
427 if (!sk_test || sk_test == sk)
428 sk = xchg(psk, NULL);
429
430 if (likely(sk))
431 sock_map_unref(sk, psk);
432 else
433 err = -EINVAL;
434
435 raw_spin_unlock_bh(&stab->lock);
436 return err;
437 }
438
static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
441 {
442 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
443
444 __sock_map_delete(stab, sk, link_raw);
445 }
446
static int sock_map_delete_elem(struct bpf_map *map, void *key)
448 {
449 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
450 u32 i = *(u32 *)key;
451 struct sock **psk;
452
453 if (unlikely(i >= map->max_entries))
454 return -EINVAL;
455
456 psk = &stab->sks[i];
457 return __sock_map_delete(stab, NULL, psk);
458 }
459
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
461 {
462 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
463 u32 i = key ? *(u32 *)key : U32_MAX;
464 u32 *key_next = next;
465
466 if (i == stab->map.max_entries - 1)
467 return -ENOENT;
468 if (i >= stab->map.max_entries)
469 *key_next = 0;
470 else
471 *key_next = i + 1;
472 return 0;
473 }
474
475 static bool sock_map_redirect_allowed(const struct sock *sk);
476
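/* Common update path for slot @idx: link the socket's psock to the map, then
 * swap the slot under stab->lock while honouring BPF_NOEXIST/BPF_EXIST, and
 * drop the reference on any socket that was replaced.
 */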
static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
479 {
480 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
481 struct sk_psock_link *link;
482 struct sk_psock *psock;
483 struct sock *osk;
484 int ret;
485
486 WARN_ON_ONCE(!rcu_read_lock_held());
487 if (unlikely(flags > BPF_EXIST))
488 return -EINVAL;
489 if (unlikely(idx >= map->max_entries))
490 return -E2BIG;
491
492 link = sk_psock_init_link();
493 if (!link)
494 return -ENOMEM;
495
496 /* Only sockets we can redirect into/from in BPF need to hold
497 * refs to parser/verdict progs and have their sk_data_ready
498 * and sk_write_space callbacks overridden.
499 */
500 if (sock_map_redirect_allowed(sk))
501 ret = sock_map_link(map, &stab->progs, sk);
502 else
503 ret = sock_map_link_no_progs(map, sk);
504 if (ret < 0)
505 goto out_free;
506
507 psock = sk_psock(sk);
508 WARN_ON_ONCE(!psock);
509
510 raw_spin_lock_bh(&stab->lock);
511 osk = stab->sks[idx];
512 if (osk && flags == BPF_NOEXIST) {
513 ret = -EEXIST;
514 goto out_unlock;
515 } else if (!osk && flags == BPF_EXIST) {
516 ret = -ENOENT;
517 goto out_unlock;
518 }
519
520 sock_map_add_link(psock, link, map, &stab->sks[idx]);
521 stab->sks[idx] = sk;
522 if (osk)
523 sock_map_unref(osk, &stab->sks[idx]);
524 raw_spin_unlock_bh(&stab->lock);
525 return 0;
526 out_unlock:
527 raw_spin_unlock_bh(&stab->lock);
528 if (psock)
529 sk_psock_put(sk, psock);
530 out_free:
531 sk_psock_free_link(link);
532 return ret;
533 }
534
static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
536 {
537 return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
538 ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
539 ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
540 }
541
static bool sk_is_tcp(const struct sock *sk)
543 {
544 return sk->sk_type == SOCK_STREAM &&
545 sk->sk_protocol == IPPROTO_TCP;
546 }
547
static bool sk_is_udp(const struct sock *sk)
549 {
550 return sk->sk_type == SOCK_DGRAM &&
551 sk->sk_protocol == IPPROTO_UDP;
552 }
553
static bool sock_map_redirect_allowed(const struct sock *sk)
555 {
556 return sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN;
557 }
558
static bool sock_map_sk_is_suitable(const struct sock *sk)
560 {
561 return sk_is_tcp(sk) || sk_is_udp(sk);
562 }
563
static bool sock_map_sk_state_allowed(const struct sock *sk)
565 {
566 if (sk_is_tcp(sk))
567 return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
568 else if (sk_is_udp(sk))
569 return sk_hashed(sk);
570
571 return false;
572 }
573
574 static int sock_hash_update_common(struct bpf_map *map, void *key,
575 struct sock *sk, u64 flags);
576
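/* Update called from the bpf() syscall: the value holds a socket FD.  The
 * socket is resolved, locked with lock_sock(), and only accepted when it is
 * in a state we can handle (established or listening TCP, hashed UDP).
 */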
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
579 {
580 struct socket *sock;
581 struct sock *sk;
582 int ret;
583 u64 ufd;
584
585 if (map->value_size == sizeof(u64))
586 ufd = *(u64 *)value;
587 else
588 ufd = *(u32 *)value;
589 if (ufd > S32_MAX)
590 return -EINVAL;
591
592 sock = sockfd_lookup(ufd, &ret);
593 if (!sock)
594 return ret;
595 sk = sock->sk;
596 if (!sk) {
597 ret = -EINVAL;
598 goto out;
599 }
600 if (!sock_map_sk_is_suitable(sk)) {
601 ret = -EOPNOTSUPP;
602 goto out;
603 }
604
605 sock_map_sk_acquire(sk);
606 if (!sock_map_sk_state_allowed(sk))
607 ret = -EOPNOTSUPP;
608 else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
609 ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
610 else
611 ret = sock_hash_update_common(map, key, sk, flags);
612 sock_map_sk_release(sk);
613 out:
614 fput(sock->file);
615 return ret;
616 }
617
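/* Update called from a BPF program: the value is a socket pointer.  This can
 * run in contexts where lock_sock() is not allowed, so the socket is pinned
 * with bh_lock_sock() instead.
 */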
static int sock_map_update_elem(struct bpf_map *map, void *key,
				void *value, u64 flags)
620 {
621 struct sock *sk = (struct sock *)value;
622 int ret;
623
624 if (unlikely(!sk || !sk_fullsock(sk)))
625 return -EINVAL;
626
627 if (!sock_map_sk_is_suitable(sk))
628 return -EOPNOTSUPP;
629
630 local_bh_disable();
631 bh_lock_sock(sk);
632 if (!sock_map_sk_state_allowed(sk))
633 ret = -EOPNOTSUPP;
634 else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
635 ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
636 else
637 ret = sock_hash_update_common(map, key, sk, flags);
638 bh_unlock_sock(sk);
639 local_bh_enable();
640 return ret;
641 }
642
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
645 {
646 WARN_ON_ONCE(!rcu_read_lock_held());
647
648 if (likely(sock_map_sk_is_suitable(sops->sk) &&
649 sock_map_op_okay(sops)))
650 return sock_map_update_common(map, *(u32 *)key, sops->sk,
651 flags);
652 return -EOPNOTSUPP;
653 }
654
655 const struct bpf_func_proto bpf_sock_map_update_proto = {
656 .func = bpf_sock_map_update,
657 .gpl_only = false,
658 .pkt_access = true,
659 .ret_type = RET_INTEGER,
660 .arg1_type = ARG_PTR_TO_CTX,
661 .arg2_type = ARG_CONST_MAP_PTR,
662 .arg3_type = ARG_PTR_TO_MAP_KEY,
663 .arg4_type = ARG_ANYTHING,
664 };
665
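/* bpf_sk_redirect_map(): used by SK_SKB verdict programs.  Stash the target
 * socket and flags in the skb's control block; the redirect itself happens
 * later when the verdict is applied, and only established TCP sockets are
 * valid targets here.
 */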
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
668 {
669 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
670 struct sock *sk;
671
672 if (unlikely(flags & ~(BPF_F_INGRESS)))
673 return SK_DROP;
674
675 sk = __sock_map_lookup_elem(map, key);
676 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
677 return SK_DROP;
678
679 tcb->bpf.flags = flags;
680 tcb->bpf.sk_redir = sk;
681 return SK_PASS;
682 }
683
684 const struct bpf_func_proto bpf_sk_redirect_map_proto = {
685 .func = bpf_sk_redirect_map,
686 .gpl_only = false,
687 .ret_type = RET_INTEGER,
688 .arg1_type = ARG_PTR_TO_CTX,
689 .arg2_type = ARG_CONST_MAP_PTR,
690 .arg3_type = ARG_ANYTHING,
691 .arg4_type = ARG_ANYTHING,
692 };
693
BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
696 {
697 struct sock *sk;
698
699 if (unlikely(flags & ~(BPF_F_INGRESS)))
700 return SK_DROP;
701
702 sk = __sock_map_lookup_elem(map, key);
703 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
704 return SK_DROP;
705
706 msg->flags = flags;
707 msg->sk_redir = sk;
708 return SK_PASS;
709 }
710
711 const struct bpf_func_proto bpf_msg_redirect_map_proto = {
712 .func = bpf_msg_redirect_map,
713 .gpl_only = false,
714 .ret_type = RET_INTEGER,
715 .arg1_type = ARG_PTR_TO_CTX,
716 .arg2_type = ARG_CONST_MAP_PTR,
717 .arg3_type = ARG_ANYTHING,
718 .arg4_type = ARG_ANYTHING,
719 };
720
721 struct sock_map_seq_info {
722 struct bpf_map *map;
723 struct sock *sk;
724 u32 index;
725 };
726
727 struct bpf_iter__sockmap {
728 __bpf_md_ptr(struct bpf_iter_meta *, meta);
729 __bpf_md_ptr(struct bpf_map *, map);
730 __bpf_md_ptr(void *, key);
731 __bpf_md_ptr(struct sock *, sk);
732 };
733
DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)
737
738 static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
739 {
740 if (unlikely(info->index >= info->map->max_entries))
741 return NULL;
742
743 info->sk = __sock_map_lookup_elem(info->map, info->index);
744
745 /* can't return sk directly, since that might be NULL */
746 return info;
747 }
748
static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
751 {
752 struct sock_map_seq_info *info = seq->private;
753
754 if (*pos == 0)
755 ++*pos;
756
757 /* pairs with sock_map_seq_stop */
758 rcu_read_lock();
759 return sock_map_seq_lookup_elem(info);
760 }
761
static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
764 {
765 struct sock_map_seq_info *info = seq->private;
766
767 ++*pos;
768 ++info->index;
769
770 return sock_map_seq_lookup_elem(info);
771 }
772
static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
775 {
776 struct sock_map_seq_info *info = seq->private;
777 struct bpf_iter__sockmap ctx = {};
778 struct bpf_iter_meta meta;
779 struct bpf_prog *prog;
780
781 meta.seq = seq;
782 prog = bpf_iter_get_info(&meta, !v);
783 if (!prog)
784 return 0;
785
786 ctx.meta = &meta;
787 ctx.map = info->map;
788 if (v) {
789 ctx.key = &info->index;
790 ctx.sk = info->sk;
791 }
792
793 return bpf_iter_run_prog(prog, &ctx);
794 }
795
static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
798 {
799 if (!v)
800 (void)sock_map_seq_show(seq, NULL);
801
802 /* pairs with sock_map_seq_start */
803 rcu_read_unlock();
804 }
805
806 static const struct seq_operations sock_map_seq_ops = {
807 .start = sock_map_seq_start,
808 .next = sock_map_seq_next,
809 .stop = sock_map_seq_stop,
810 .show = sock_map_seq_show,
811 };
812
static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
815 {
816 struct sock_map_seq_info *info = priv_data;
817
818 bpf_map_inc_with_uref(aux->map);
819 info->map = aux->map;
820 return 0;
821 }
822
static void sock_map_fini_seq_private(void *priv_data)
824 {
825 struct sock_map_seq_info *info = priv_data;
826
827 bpf_map_put_with_uref(info->map);
828 }
829
830 static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
831 .seq_ops = &sock_map_seq_ops,
832 .init_seq_private = sock_map_init_seq_private,
833 .fini_seq_private = sock_map_fini_seq_private,
834 .seq_priv_size = sizeof(struct sock_map_seq_info),
835 };
836
837 static int sock_map_btf_id;
838 const struct bpf_map_ops sock_map_ops = {
839 .map_meta_equal = bpf_map_meta_equal,
840 .map_alloc = sock_map_alloc,
841 .map_free = sock_map_free,
842 .map_get_next_key = sock_map_get_next_key,
843 .map_lookup_elem_sys_only = sock_map_lookup_sys,
844 .map_update_elem = sock_map_update_elem,
845 .map_delete_elem = sock_map_delete_elem,
846 .map_lookup_elem = sock_map_lookup,
847 .map_release_uref = sock_map_release_progs,
848 .map_check_btf = map_check_no_btf,
849 .map_btf_name = "bpf_stab",
850 .map_btf_id = &sock_map_btf_id,
851 .iter_seq_info = &sock_map_iter_seq_info,
852 };
853
854 struct bpf_shtab_elem {
855 struct rcu_head rcu;
856 u32 hash;
857 struct sock *sk;
858 struct hlist_node node;
859 u8 key[];
860 };
861
862 struct bpf_shtab_bucket {
863 struct hlist_head head;
864 raw_spinlock_t lock;
865 };
866
867 struct bpf_shtab {
868 struct bpf_map map;
869 struct bpf_shtab_bucket *buckets;
870 u32 buckets_num;
871 u32 elem_size;
872 struct sk_psock_progs progs;
873 atomic_t count;
874 };
875
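/* A sockhash hashes arbitrary keys with jhash into a power-of-two number of
 * buckets; each bucket is an RCU hlist protected by its own raw spinlock.
 */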
static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
877 {
878 return jhash(key, len, 0);
879 }
880
static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							 u32 hash)
883 {
884 return &htab->buckets[hash & (htab->buckets_num - 1)];
885 }
886
static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
890 {
891 struct bpf_shtab_elem *elem;
892
893 hlist_for_each_entry_rcu(elem, head, node) {
894 if (elem->hash == hash &&
895 !memcmp(&elem->key, key, key_size))
896 return elem;
897 }
898
899 return NULL;
900 }
901
static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
903 {
904 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
905 u32 key_size = map->key_size, hash;
906 struct bpf_shtab_bucket *bucket;
907 struct bpf_shtab_elem *elem;
908
909 WARN_ON_ONCE(!rcu_read_lock_held());
910
911 hash = sock_hash_bucket_hash(key, key_size);
912 bucket = sock_hash_select_bucket(htab, hash);
913 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
914
915 return elem ? elem->sk : NULL;
916 }
917
static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
920 {
921 atomic_dec(&htab->count);
922 kfree_rcu(elem, rcu);
923 }
924
static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
927 {
928 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
929 struct bpf_shtab_elem *elem_probe, *elem = link_raw;
930 struct bpf_shtab_bucket *bucket;
931
932 WARN_ON_ONCE(!rcu_read_lock_held());
933 bucket = sock_hash_select_bucket(htab, elem->hash);
934
935 /* elem may be deleted in parallel from the map, but access here
936 * is okay since it's going away only after RCU grace period.
937 * However, we need to check whether it's still present.
938 */
939 raw_spin_lock_bh(&bucket->lock);
940 elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
941 elem->key, map->key_size);
942 if (elem_probe && elem_probe == elem) {
943 hlist_del_rcu(&elem->node);
944 sock_map_unref(elem->sk, elem);
945 sock_hash_free_elem(htab, elem);
946 }
947 raw_spin_unlock_bh(&bucket->lock);
948 }
949
static int sock_hash_delete_elem(struct bpf_map *map, void *key)
951 {
952 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
953 u32 hash, key_size = map->key_size;
954 struct bpf_shtab_bucket *bucket;
955 struct bpf_shtab_elem *elem;
956 int ret = -ENOENT;
957
958 hash = sock_hash_bucket_hash(key, key_size);
959 bucket = sock_hash_select_bucket(htab, hash);
960
961 raw_spin_lock_bh(&bucket->lock);
962 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
963 if (elem) {
964 hlist_del_rcu(&elem->node);
965 sock_map_unref(elem->sk, elem);
966 sock_hash_free_elem(htab, elem);
967 ret = 0;
968 }
969 raw_spin_unlock_bh(&bucket->lock);
970 return ret;
971 }
972
static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
977 {
978 struct bpf_shtab_elem *new;
979
980 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
981 if (!old) {
982 atomic_dec(&htab->count);
983 return ERR_PTR(-E2BIG);
984 }
985 }
986
987 new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
988 htab->map.numa_node);
989 if (!new) {
990 atomic_dec(&htab->count);
991 return ERR_PTR(-ENOMEM);
992 }
993 memcpy(new->key, key, key_size);
994 new->sk = sk;
995 new->hash = hash;
996 return new;
997 }
998
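/* Common sockhash update: link the psock, then under the bucket lock insert
 * the new element at the head of the list so concurrent RCU lookups see it
 * before any element it replaces; the old element is then unlinked and freed.
 */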
static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
1001 {
1002 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1003 u32 key_size = map->key_size, hash;
1004 struct bpf_shtab_elem *elem, *elem_new;
1005 struct bpf_shtab_bucket *bucket;
1006 struct sk_psock_link *link;
1007 struct sk_psock *psock;
1008 int ret;
1009
1010 WARN_ON_ONCE(!rcu_read_lock_held());
1011 if (unlikely(flags > BPF_EXIST))
1012 return -EINVAL;
1013
1014 link = sk_psock_init_link();
1015 if (!link)
1016 return -ENOMEM;
1017
1018 /* Only sockets we can redirect into/from in BPF need to hold
1019 * refs to parser/verdict progs and have their sk_data_ready
1020 * and sk_write_space callbacks overridden.
1021 */
1022 if (sock_map_redirect_allowed(sk))
1023 ret = sock_map_link(map, &htab->progs, sk);
1024 else
1025 ret = sock_map_link_no_progs(map, sk);
1026 if (ret < 0)
1027 goto out_free;
1028
1029 psock = sk_psock(sk);
1030 WARN_ON_ONCE(!psock);
1031
1032 hash = sock_hash_bucket_hash(key, key_size);
1033 bucket = sock_hash_select_bucket(htab, hash);
1034
1035 raw_spin_lock_bh(&bucket->lock);
1036 elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
1037 if (elem && flags == BPF_NOEXIST) {
1038 ret = -EEXIST;
1039 goto out_unlock;
1040 } else if (!elem && flags == BPF_EXIST) {
1041 ret = -ENOENT;
1042 goto out_unlock;
1043 }
1044
1045 elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
1046 if (IS_ERR(elem_new)) {
1047 ret = PTR_ERR(elem_new);
1048 goto out_unlock;
1049 }
1050
1051 sock_map_add_link(psock, link, map, elem_new);
1052 /* Add new element to the head of the list, so that
1053 * concurrent search will find it before old elem.
1054 */
1055 hlist_add_head_rcu(&elem_new->node, &bucket->head);
1056 if (elem) {
1057 hlist_del_rcu(&elem->node);
1058 sock_map_unref(elem->sk, elem);
1059 sock_hash_free_elem(htab, elem);
1060 }
1061 raw_spin_unlock_bh(&bucket->lock);
1062 return 0;
1063 out_unlock:
1064 raw_spin_unlock_bh(&bucket->lock);
1065 sk_psock_put(sk, psock);
1066 out_free:
1067 sk_psock_free_link(link);
1068 return ret;
1069 }
1070
static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
1073 {
1074 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1075 struct bpf_shtab_elem *elem, *elem_next;
1076 u32 hash, key_size = map->key_size;
1077 struct hlist_head *head;
1078 int i = 0;
1079
1080 if (!key)
1081 goto find_first_elem;
1082 hash = sock_hash_bucket_hash(key, key_size);
1083 head = &sock_hash_select_bucket(htab, hash)->head;
1084 elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
1085 if (!elem)
1086 goto find_first_elem;
1087
1088 elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
1089 struct bpf_shtab_elem, node);
1090 if (elem_next) {
1091 memcpy(key_next, elem_next->key, key_size);
1092 return 0;
1093 }
1094
1095 i = hash & (htab->buckets_num - 1);
1096 i++;
1097 find_first_elem:
1098 for (; i < htab->buckets_num; i++) {
1099 head = &sock_hash_select_bucket(htab, i)->head;
1100 elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
1101 struct bpf_shtab_elem, node);
1102 if (elem_next) {
1103 memcpy(key_next, elem_next->key, key_size);
1104 return 0;
1105 }
1106 }
1107
1108 return -ENOENT;
1109 }
1110
static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
1112 {
1113 struct bpf_shtab *htab;
1114 int i, err;
1115 u64 cost;
1116
1117 if (!capable(CAP_NET_ADMIN))
1118 return ERR_PTR(-EPERM);
1119 if (attr->max_entries == 0 ||
1120 attr->key_size == 0 ||
1121 (attr->value_size != sizeof(u32) &&
1122 attr->value_size != sizeof(u64)) ||
1123 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1124 return ERR_PTR(-EINVAL);
1125 if (attr->key_size > MAX_BPF_STACK)
1126 return ERR_PTR(-E2BIG);
1127
1128 htab = kzalloc(sizeof(*htab), GFP_USER);
1129 if (!htab)
1130 return ERR_PTR(-ENOMEM);
1131
1132 bpf_map_init_from_attr(&htab->map, attr);
1133
1134 htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
1135 htab->elem_size = sizeof(struct bpf_shtab_elem) +
1136 round_up(htab->map.key_size, 8);
1137 if (htab->buckets_num == 0 ||
1138 htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
1139 err = -EINVAL;
1140 goto free_htab;
1141 }
1142
1143 cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) +
1144 (u64) htab->elem_size * htab->map.max_entries;
1145 if (cost >= U32_MAX - PAGE_SIZE) {
1146 err = -EINVAL;
1147 goto free_htab;
1148 }
1149 err = bpf_map_charge_init(&htab->map.memory, cost);
1150 if (err)
1151 goto free_htab;
1152
1153 htab->buckets = bpf_map_area_alloc(htab->buckets_num *
1154 sizeof(struct bpf_shtab_bucket),
1155 htab->map.numa_node);
1156 if (!htab->buckets) {
1157 bpf_map_charge_finish(&htab->map.memory);
1158 err = -ENOMEM;
1159 goto free_htab;
1160 }
1161
1162 for (i = 0; i < htab->buckets_num; i++) {
1163 INIT_HLIST_HEAD(&htab->buckets[i].head);
1164 raw_spin_lock_init(&htab->buckets[i].lock);
1165 }
1166
1167 return &htab->map;
1168 free_htab:
1169 kfree(htab);
1170 return ERR_PTR(err);
1171 }
1172
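/* Sockhash teardown runs in two steps per bucket: under the bucket lock grab
 * a reference on every socket and move the entries to a private list, then
 * outside the lock take the socket lock and unlink each element.
 */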
static void sock_hash_free(struct bpf_map *map)
1174 {
1175 struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
1176 struct bpf_shtab_bucket *bucket;
1177 struct hlist_head unlink_list;
1178 struct bpf_shtab_elem *elem;
1179 struct hlist_node *node;
1180 int i;
1181
1182 /* After the sync no updates or deletes will be in-flight so it
1183 * is safe to walk map and remove entries without risking a race
1184 * in EEXIST update case.
1185 */
1186 synchronize_rcu();
1187 for (i = 0; i < htab->buckets_num; i++) {
1188 bucket = sock_hash_select_bucket(htab, i);
1189
1190 /* We are racing with sock_hash_delete_from_link to
1191 * enter the spin-lock critical section. Every socket on
1192 * the list is still linked to sockhash. Since link
1193 * exists, psock exists and holds a ref to socket. That
1194 * lets us to grab a socket ref too.
1195 */
1196 raw_spin_lock_bh(&bucket->lock);
1197 hlist_for_each_entry(elem, &bucket->head, node)
1198 sock_hold(elem->sk);
1199 hlist_move_list(&bucket->head, &unlink_list);
1200 raw_spin_unlock_bh(&bucket->lock);
1201
1202 /* Process removed entries out of atomic context to
1203 * block for socket lock before deleting the psock's
1204 * link to sockhash.
1205 */
1206 hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
1207 hlist_del(&elem->node);
1208 lock_sock(elem->sk);
1209 rcu_read_lock();
1210 sock_map_unref(elem->sk, elem);
1211 rcu_read_unlock();
1212 release_sock(elem->sk);
1213 sock_put(elem->sk);
1214 sock_hash_free_elem(htab, elem);
1215 }
1216 cond_resched();
1217 }
1218
1219 /* wait for psock readers accessing its map link */
1220 synchronize_rcu();
1221
1222 bpf_map_area_free(htab->buckets);
1223 kfree(htab);
1224 }
1225
static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
1227 {
1228 struct sock *sk;
1229
1230 if (map->value_size != sizeof(u64))
1231 return ERR_PTR(-ENOSPC);
1232
1233 sk = __sock_hash_lookup_elem(map, key);
1234 if (!sk)
1235 return ERR_PTR(-ENOENT);
1236
1237 __sock_gen_cookie(sk);
1238 return &sk->sk_cookie;
1239 }
1240
static void *sock_hash_lookup(struct bpf_map *map, void *key)
1242 {
1243 struct sock *sk;
1244
1245 sk = __sock_hash_lookup_elem(map, key);
1246 if (!sk)
1247 return NULL;
1248 if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
1249 return NULL;
1250 return sk;
1251 }
1252
static void sock_hash_release_progs(struct bpf_map *map)
1254 {
1255 psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
1256 }
1257
BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
1260 {
1261 WARN_ON_ONCE(!rcu_read_lock_held());
1262
1263 if (likely(sock_map_sk_is_suitable(sops->sk) &&
1264 sock_map_op_okay(sops)))
1265 return sock_hash_update_common(map, key, sops->sk, flags);
1266 return -EOPNOTSUPP;
1267 }
1268
1269 const struct bpf_func_proto bpf_sock_hash_update_proto = {
1270 .func = bpf_sock_hash_update,
1271 .gpl_only = false,
1272 .pkt_access = true,
1273 .ret_type = RET_INTEGER,
1274 .arg1_type = ARG_PTR_TO_CTX,
1275 .arg2_type = ARG_CONST_MAP_PTR,
1276 .arg3_type = ARG_PTR_TO_MAP_KEY,
1277 .arg4_type = ARG_ANYTHING,
1278 };
1279
BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
1282 {
1283 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1284 struct sock *sk;
1285
1286 if (unlikely(flags & ~(BPF_F_INGRESS)))
1287 return SK_DROP;
1288
1289 sk = __sock_hash_lookup_elem(map, key);
1290 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1291 return SK_DROP;
1292
1293 tcb->bpf.flags = flags;
1294 tcb->bpf.sk_redir = sk;
1295 return SK_PASS;
1296 }
1297
1298 const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
1299 .func = bpf_sk_redirect_hash,
1300 .gpl_only = false,
1301 .ret_type = RET_INTEGER,
1302 .arg1_type = ARG_PTR_TO_CTX,
1303 .arg2_type = ARG_CONST_MAP_PTR,
1304 .arg3_type = ARG_PTR_TO_MAP_KEY,
1305 .arg4_type = ARG_ANYTHING,
1306 };
1307
BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
1310 {
1311 struct sock *sk;
1312
1313 if (unlikely(flags & ~(BPF_F_INGRESS)))
1314 return SK_DROP;
1315
1316 sk = __sock_hash_lookup_elem(map, key);
1317 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
1318 return SK_DROP;
1319
1320 msg->flags = flags;
1321 msg->sk_redir = sk;
1322 return SK_PASS;
1323 }
1324
1325 const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
1326 .func = bpf_msg_redirect_hash,
1327 .gpl_only = false,
1328 .ret_type = RET_INTEGER,
1329 .arg1_type = ARG_PTR_TO_CTX,
1330 .arg2_type = ARG_CONST_MAP_PTR,
1331 .arg3_type = ARG_PTR_TO_MAP_KEY,
1332 .arg4_type = ARG_ANYTHING,
1333 };
1334
1335 struct sock_hash_seq_info {
1336 struct bpf_map *map;
1337 struct bpf_shtab *htab;
1338 u32 bucket_id;
1339 };
1340
static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
1343 {
1344 const struct bpf_shtab *htab = info->htab;
1345 struct bpf_shtab_bucket *bucket;
1346 struct bpf_shtab_elem *elem;
1347 struct hlist_node *node;
1348
1349 /* try to find next elem in the same bucket */
1350 if (prev_elem) {
1351 node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
1352 elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1353 if (elem)
1354 return elem;
1355
1356 /* no more elements, continue in the next bucket */
1357 info->bucket_id++;
1358 }
1359
1360 for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
1361 bucket = &htab->buckets[info->bucket_id];
1362 node = rcu_dereference(hlist_first_rcu(&bucket->head));
1363 elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
1364 if (elem)
1365 return elem;
1366 }
1367
1368 return NULL;
1369 }
1370
static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
1373 {
1374 struct sock_hash_seq_info *info = seq->private;
1375
1376 if (*pos == 0)
1377 ++*pos;
1378
1379 /* pairs with sock_hash_seq_stop */
1380 rcu_read_lock();
1381 return sock_hash_seq_find_next(info, NULL);
1382 }
1383
static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
1386 {
1387 struct sock_hash_seq_info *info = seq->private;
1388
1389 ++*pos;
1390 return sock_hash_seq_find_next(info, v);
1391 }
1392
static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
1395 {
1396 struct sock_hash_seq_info *info = seq->private;
1397 struct bpf_iter__sockmap ctx = {};
1398 struct bpf_shtab_elem *elem = v;
1399 struct bpf_iter_meta meta;
1400 struct bpf_prog *prog;
1401
1402 meta.seq = seq;
1403 prog = bpf_iter_get_info(&meta, !elem);
1404 if (!prog)
1405 return 0;
1406
1407 ctx.meta = &meta;
1408 ctx.map = info->map;
1409 if (elem) {
1410 ctx.key = elem->key;
1411 ctx.sk = elem->sk;
1412 }
1413
1414 return bpf_iter_run_prog(prog, &ctx);
1415 }
1416
static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
1419 {
1420 if (!v)
1421 (void)sock_hash_seq_show(seq, NULL);
1422
1423 /* pairs with sock_hash_seq_start */
1424 rcu_read_unlock();
1425 }
1426
1427 static const struct seq_operations sock_hash_seq_ops = {
1428 .start = sock_hash_seq_start,
1429 .next = sock_hash_seq_next,
1430 .stop = sock_hash_seq_stop,
1431 .show = sock_hash_seq_show,
1432 };
1433
static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
1436 {
1437 struct sock_hash_seq_info *info = priv_data;
1438
1439 bpf_map_inc_with_uref(aux->map);
1440 info->map = aux->map;
1441 info->htab = container_of(aux->map, struct bpf_shtab, map);
1442 return 0;
1443 }
1444
static void sock_hash_fini_seq_private(void *priv_data)
1446 {
1447 struct sock_hash_seq_info *info = priv_data;
1448
1449 bpf_map_put_with_uref(info->map);
1450 }
1451
1452 static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
1453 .seq_ops = &sock_hash_seq_ops,
1454 .init_seq_private = sock_hash_init_seq_private,
1455 .fini_seq_private = sock_hash_fini_seq_private,
1456 .seq_priv_size = sizeof(struct sock_hash_seq_info),
1457 };
1458
1459 static int sock_hash_map_btf_id;
1460 const struct bpf_map_ops sock_hash_ops = {
1461 .map_meta_equal = bpf_map_meta_equal,
1462 .map_alloc = sock_hash_alloc,
1463 .map_free = sock_hash_free,
1464 .map_get_next_key = sock_hash_get_next_key,
1465 .map_update_elem = sock_map_update_elem,
1466 .map_delete_elem = sock_hash_delete_elem,
1467 .map_lookup_elem = sock_hash_lookup,
1468 .map_lookup_elem_sys_only = sock_hash_lookup_sys,
1469 .map_release_uref = sock_hash_release_progs,
1470 .map_check_btf = map_check_no_btf,
1471 .map_btf_name = "bpf_shtab",
1472 .map_btf_id = &sock_hash_map_btf_id,
1473 .iter_seq_info = &sock_hash_iter_seq_info,
1474 };
1475
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
1477 {
1478 switch (map->map_type) {
1479 case BPF_MAP_TYPE_SOCKMAP:
1480 return &container_of(map, struct bpf_stab, map)->progs;
1481 case BPF_MAP_TYPE_SOCKHASH:
1482 return &container_of(map, struct bpf_shtab, map)->progs;
1483 default:
1484 break;
1485 }
1486
1487 return NULL;
1488 }
1489
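/* Attach/detach entry point used by BPF_PROG_ATTACH/DETACH on a sockmap or
 * sockhash: map the attach type (msg verdict, stream parser, stream verdict)
 * to the corresponding program slot and install or replace the program.
 */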
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
			 struct bpf_prog *old, u32 which)
1492 {
1493 struct sk_psock_progs *progs = sock_map_progs(map);
1494 struct bpf_prog **pprog;
1495
1496 if (!progs)
1497 return -EOPNOTSUPP;
1498
1499 switch (which) {
1500 case BPF_SK_MSG_VERDICT:
1501 pprog = &progs->msg_parser;
1502 break;
1503 case BPF_SK_SKB_STREAM_PARSER:
1504 pprog = &progs->skb_parser;
1505 break;
1506 case BPF_SK_SKB_STREAM_VERDICT:
1507 pprog = &progs->skb_verdict;
1508 break;
1509 default:
1510 return -EOPNOTSUPP;
1511 }
1512
1513 if (old)
1514 return psock_replace_prog(pprog, prog, old);
1515
1516 psock_set_prog(pprog, prog);
1517 return 0;
1518 }
1519
static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
1521 {
1522 switch (link->map->map_type) {
1523 case BPF_MAP_TYPE_SOCKMAP:
1524 return sock_map_delete_from_link(link->map, sk,
1525 link->link_raw);
1526 case BPF_MAP_TYPE_SOCKHASH:
1527 return sock_hash_delete_from_link(link->map, sk,
1528 link->link_raw);
1529 default:
1530 break;
1531 }
1532 }
1533
static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
1535 {
1536 struct sk_psock_link *link;
1537
1538 while ((link = sk_psock_link_pop(psock))) {
1539 sock_map_unlink(sk, link);
1540 sk_psock_free_link(link);
1541 }
1542 }
1543
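/* unhash/close callbacks installed on sockets owned by a psock: remove all
 * map links first, then fall through to the proto's saved callback.
 */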
void sock_map_unhash(struct sock *sk)
1545 {
1546 void (*saved_unhash)(struct sock *sk);
1547 struct sk_psock *psock;
1548
1549 rcu_read_lock();
1550 psock = sk_psock(sk);
1551 if (unlikely(!psock)) {
1552 rcu_read_unlock();
1553 if (sk->sk_prot->unhash)
1554 sk->sk_prot->unhash(sk);
1555 return;
1556 }
1557
1558 saved_unhash = psock->saved_unhash;
1559 sock_map_remove_links(sk, psock);
1560 rcu_read_unlock();
1561 saved_unhash(sk);
1562 }
1563
void sock_map_close(struct sock *sk, long timeout)
1565 {
1566 void (*saved_close)(struct sock *sk, long timeout);
1567 struct sk_psock *psock;
1568
1569 lock_sock(sk);
1570 rcu_read_lock();
1571 psock = sk_psock(sk);
1572 if (unlikely(!psock)) {
1573 rcu_read_unlock();
1574 release_sock(sk);
1575 return sk->sk_prot->close(sk, timeout);
1576 }
1577
1578 saved_close = psock->saved_close;
1579 sock_map_remove_links(sk, psock);
1580 rcu_read_unlock();
1581 release_sock(sk);
1582 saved_close(sk, timeout);
1583 }
1584
static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
1588 {
1589 struct bpf_map *map;
1590 int err = -EINVAL;
1591
1592 if (!linfo->map.map_fd)
1593 return -EBADF;
1594
1595 map = bpf_map_get_with_uref(linfo->map.map_fd);
1596 if (IS_ERR(map))
1597 return PTR_ERR(map);
1598
1599 if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
1600 map->map_type != BPF_MAP_TYPE_SOCKHASH)
1601 goto put_map;
1602
1603 if (prog->aux->max_rdonly_access > map->key_size) {
1604 err = -EACCES;
1605 goto put_map;
1606 }
1607
1608 aux->map = map;
1609 return 0;
1610
1611 put_map:
1612 bpf_map_put_with_uref(map);
1613 return err;
1614 }
1615
static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
1617 {
1618 bpf_map_put_with_uref(aux->map);
1619 }
1620
1621 static struct bpf_iter_reg sock_map_iter_reg = {
1622 .target = "sockmap",
1623 .attach_target = sock_map_iter_attach_target,
1624 .detach_target = sock_map_iter_detach_target,
1625 .show_fdinfo = bpf_iter_map_show_fdinfo,
1626 .fill_link_info = bpf_iter_map_fill_link_info,
1627 .ctx_arg_info_size = 2,
1628 .ctx_arg_info = {
1629 { offsetof(struct bpf_iter__sockmap, key),
1630 PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
1631 { offsetof(struct bpf_iter__sockmap, sk),
1632 PTR_TO_BTF_ID_OR_NULL },
1633 },
1634 };
1635
static int __init bpf_sockmap_iter_init(void)
1637 {
1638 sock_map_iter_reg.ctx_arg_info[1].btf_id =
1639 btf_sock_ids[BTF_SOCK_TYPE_SOCK];
1640 return bpf_iter_reg_target(&sock_map_iter_reg);
1641 }
1642 late_initcall(bpf_sockmap_iter_init);
1643