// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

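/* Generic local storage: storage elements (selems) keyed by a kernel
 * object (the "owner", e.g. a socket). Each selem is linked twice: to the
 * owner's bpf_local_storage (snode) and to the backing
 * bpf_local_storage_map's hash bucket (map_node), so it can be found and
 * freed from either the owner side or the map side.
 */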
#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

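/* Map buckets are selected by hashing the selem pointer itself, so a
 * given selem always hashes to the same bucket for link and unlink.
 */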
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

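/* Charge/uncharge the owner for 'size' bytes of storage. Both delegate to
 * the concrete map implementation (e.g. sk_storage charges against
 * sk->sk_omem_alloc); maps that do not implement charging simply succeed.
 */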
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

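/* Return the address of the owner's storage pointer
 * (e.g. &sk->sk_bpf_storage for a socket owner).
 */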
static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

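/* A selem is "linked" while it is on local_storage->list (snode) and on a
 * map bucket's list (map_node). The _lockless variants may be used as a
 * racy pre-check before taking the relevant lock.
 */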
static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

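/* Allocate a new selem, optionally charging the owner for it and copying
 * an initial value in. Returns NULL (with nothing left charged) on failure.
 */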
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
				     struct bpf_local_storage_elem *selem,
				     bool uncharge_mem)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference(SDATA(selem)->smap);
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now. local_storage->lock is
		 * still held and raw_spin_unlock_bh(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if kfree_rcu(local_storage, rcu) is done
		 * after the raw_spin_unlock_bh(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller which then calls the kfree_rcu()
		 * after unlock.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	kfree_rcu(selem, rcu);

	return free_local_storage;
}

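/* Unlink selem from its local_storage under local_storage->lock, and free
 * the local_storage itself if this was its last selem.
 */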
static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from sk */
		return;

	local_storage = rcu_dereference(selem->local_storage);
	raw_spin_lock_bh(&local_storage->lock);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true);
	raw_spin_unlock_bh(&local_storage->lock);

	if (free_local_storage)
		kfree_rcu(local_storage, rcu);
}

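/* Link selem into local_storage->list. The caller is responsible for
 * serialization: either local_storage->lock is held, or the storage is
 * not yet published to the owner (as in bpf_local_storage_alloc()).
 */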
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

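/* Unlink selem from its map bucket's list under the bucket lock. Safe to
 * call on an already-unlinked selem.
 */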
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference(SDATA(selem)->smap);
	b = select_bucket(smap, selem);
	raw_spin_lock_bh(&b->lock);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_bh(&b->lock);
}

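/* Record the owning smap and publish selem on the map bucket's list,
 * under the bucket lock.
 */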
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);

	raw_spin_lock_bh(&b->lock);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_bh(&b->lock);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after it has been successfully
	 * unlinked from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	__bpf_selem_unlink_storage(selem);
}

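/* Look up the sdata of 'smap' in 'local_storage'. The per-map cache slot
 * (smap->cache_idx) is tried first; on a miss the storage list is
 * searched and, if cacheit_lockit is set, the result is republished to
 * the cache under local_storage->lock.
 */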
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference(local_storage->cache[smap->cache_idx]);
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode)
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		/* spinlock is needed to avoid racing with the
		 * parallel delete. Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_bh(&local_storage->lock);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_bh(&local_storage->lock);
	}

	return sdata;
}

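/* Validate BPF_EXIST/BPF_NOEXIST against whether an old element exists. */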
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

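/* Allocate the very first bpf_local_storage for an owner and publish it
 * (together with first_selem) via cmpxchg on the owner's storage pointer.
 * Returns -EAGAIN if another CPU published a storage first.
 */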
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = kzalloc(sizeof(*storage), GFP_ATOMIC | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of what
	 * the running context is, bh, irq...etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu() before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *local_storage;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot both be set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(&smap->map)))
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference(*owner_storage(smap, owner));
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem);
		if (err) {
			kfree(selem);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	raw_spin_lock_bh(&local_storage->lock);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away. It has just been checked before, so very
		 * unlikely. Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	/* local_storage->lock is held. Hence, we are sure
	 * we can unlink and uncharge the old_sdata successfully
	 * later. Hence, instead of charging the new selem now
	 * and then uncharging the old selem later (which may cause
	 * a potential but unnecessary charge failure), avoid taking
	 * a charge at all here (the "!old_sdata" check) and the
	 * old_sdata will not be uncharged later during
	 * bpf_selem_unlink_storage_nolock().
	 */
	selem = bpf_selem_alloc(smap, owner, value, !old_sdata);
	if (!selem) {
		err = -ENOMEM;
		goto unlock_err;
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false);
	}

unlock:
	raw_spin_unlock_bh(&local_storage->lock);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_bh(&local_storage->lock);
	return ERR_PTR(err);
}

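/* Each map gets a slot in the owner-side lookup cache. Pick the
 * least-used slot when a map is created and release it when the map is
 * destroyed.
 */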
u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
				      u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

void bpf_local_storage_map_free(struct bpf_local_storage_map *smap)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map_bucket *b;
	unsigned int i;

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now. No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			bpf_selem_unlink(selem);
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	kfree(smap);
}

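/* Common create-time checks: local storage maps must be BPF_F_NO_PREALLOC,
 * have no max_entries, use a 4-byte int key, and carry BTF for both key
 * and value.
 */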
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

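/* Allocate the common bpf_local_storage_map part: roughly one hash bucket
 * per possible CPU (rounded up to a power of two, minimum two), charged
 * via bpf_map_charge_init().
 */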
struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	u64 cost;
	int ret;

	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);
	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);

	ret = bpf_map_charge_init(&smap->map.memory, cost);
	if (ret < 0) {
		kfree(smap);
		return ERR_PTR(ret);
	}

	smap->buckets = kvcalloc(nbuckets, sizeof(*smap->buckets),
				 GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		bpf_map_charge_finish(&smap->map.memory);
		kfree(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size =
		sizeof(struct bpf_local_storage_elem) + attr->value_size;

	return smap;
}

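/* Enforce that the BTF key type is a plain 32-bit integer. */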
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}