// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);

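/* Look up the given map's elem in sk->sk_bpf_storage.  The caller must
 * be in an RCU read-side critical section.  With cacheit_lockit set, a
 * hit is also published to sk_storage->cache[smap->cache_idx] (under
 * sk_storage->lock), so repeated lookups of a hot map stay O(1).
 */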
static struct bpf_local_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

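/* Unlink the elem from both the map's bucket list and the sock's
 * sk_storage list.  bpf_selem_unlink() also frees the
 * bpf_local_storage itself once its elem list becomes empty.
 */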
static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata));

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall can be
	 * modifying the sk_storage->list now.  Thus, no elem can be
	 * added to or deleted from the sk_storage->list by the
	 * bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing only with bpf_local_storage_map_free() when
	 * unlinking elem from the sk_storage->list and the map's
	 * bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
								  selem, true);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap);
}

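/* Each sk-storage map claims a slot in the sk_cache defined above;
 * bpf_local_storage_cache_idx_get() hands out the currently
 * least-used index, so frequently-looked-up maps tend to keep a
 * private cache slot.
 */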
static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
	return &smap->map;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

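/* Syscall-side map operations.  Unlike most map types, the key coming
 * from userspace is not stored in the map: it is a socket fd that is
 * resolved to a struct sock with sockfd_lookup() for the duration of
 * the operation.
 */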
static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = sk_storage_delete(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}

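/* Clone support: when a sk with BPF_F_CLONE maps is cloned (see
 * bpf_sk_storage_clone() below, called from sk_clone_lock()), each
 * cloneable elem is duplicated into the child sk.
 */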
static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_local_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here;
	 * the caller is responsible for calling bpf_sk_storage_free.
	 */

	return ret;
}

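/* bpf-prog side lookup/create.  An illustrative BPF-C usage sketch
 * (assuming a BPF_MAP_TYPE_SK_STORAGE map named sk_stg with a __u64
 * value):
 *
 *	__u64 *val;
 *
 *	val = bpf_sk_storage_get(&sk_stg, sk, NULL,
 *				 BPF_SK_STORAGE_GET_F_CREATE);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 */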
BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	struct bpf_local_storage_data *sdata;

	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going-away sk.
	     * Otherwise, the new elem may become a leak
	     * (and cause other memory issues during map
	     * destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = sk_storage_delete(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

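/* Storage memory is charged to the owning sock's sk_omem_alloc,
 * bounded by the net.core.optmem_max sysctl, mirroring the limit
 * enforced by sock_kmalloc().
 */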
static int sk_storage_charge(struct bpf_local_storage_map *smap,
			     void *owner, u32 size)
{
	int optmem_max = READ_ONCE(sysctl_optmem_max);
	struct sock *sk = (struct sock *)owner;

	/* same check as in sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
				void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = sk_storage_map_alloc,
	.map_free = sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_name = "bpf_local_storage_map",
	.map_btf_id = &sk_storage_map_btf_id,
	.map_local_storage_charge = sk_storage_charge,
	.map_local_storage_uncharge = sk_storage_uncharge,
	.map_owner_storage_ptr = sk_storage_ptr,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func = bpf_sk_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func = bpf_sk_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func = bpf_sk_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(value_size);
}

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN,
	 * matching the check done on the map_alloc_check() side.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			nr_maps++;
	}

	diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
		       GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed the max nlattr payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

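/* Even after the skb runs out of room (-EMSGSIZE), keep walking the
 * storage list so that *res_diag_size still accumulates the total
 * size a complete dump would need; the caller can report that size
 * back to userspace.
 */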
static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified. Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

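/* bpf_iter support: iterate all elems of a sk-storage map.  The
 * (bucket_id, skip_elems) pair records the resume position between
 * reads.  Since elems can be added or removed under RCU between
 * stops, skipping by count is best-effort rather than exact.
 */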
struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned int skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < n_buckets; i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

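/* Context passed to the bpf iterator prog for each elem.  sk and
 * value stay NULL on the final invocation (selem == NULL in
 * __bpf_sk_storage_map_seq_show()), which signals the end of the
 * iteration to the prog.
 */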
struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	seq_info->map = aux->map;
	return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

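	/* The iter prog gets a direct PTR_TO_BUF pointer into the map
	 * value (see ctx_arg_info below), so reject progs whose
	 * verified read/write range exceeds this map's value_size.
	 */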
	if (prog->aux->max_rdwr_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start = bpf_sk_storage_map_seq_start,
	.next = bpf_sk_storage_map_seq_next,
	.stop = bpf_sk_storage_map_seq_stop,
	.show = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_sk_storage_map_seq_ops,
	.init_seq_private = bpf_iter_init_sk_storage_map,
	.fini_seq_private = bpf_iter_fini_sk_storage_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target = "bpf_sk_storage_map",
	.attach_target = bpf_iter_attach_map,
	.detach_target = bpf_iter_detach_map,
	.show_fdinfo = bpf_iter_map_show_fdinfo,
	.fill_link_info = bpf_iter_map_fill_link_info,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
	.seq_info = &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);