// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

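/* Map a selem to one of smap's buckets by hashing the selem pointer
 * itself, so elems of the same map spread across the bucket locks.
 */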
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

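/* Charge/uncharge the owner for a selem's memory.  Owners that track
 * per-owner memory (e.g. a socket's sk_omem_alloc) supply the
 * map_local_storage_charge/uncharge ops; for others, charging is a
 * no-op that always succeeds.
 */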
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

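/* Return the address of the owner's storage pointer
 * (e.g. &sk->sk_bpf_storage when the owner is a socket).
 */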
static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

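/* The _lockless variants below are for unlocked fast-path checks only;
 * the result must be confirmed again under the relevant lock (the
 * local_storage->lock or the bucket lock) before acting on it.
 */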
static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

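/* Allocate a zeroed selem, optionally charging the owner for it and
 * copying in @value.  Returns NULL (with any charge reverted) on
 * allocation failure.
 */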
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				GFP_ATOMIC | __GFP_NOWARN);
	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
				     struct bpf_local_storage_elem *selem,
				     bool uncharge_mem)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference(SDATA(selem)->smap);
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now.  local_storage->lock is
		 * still held and the unlock of local_storage->lock
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if kfree_rcu(local_storage, rcu) is done
		 * after the unlock of local_storage->lock.
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller which then calls the kfree_rcu()
		 * after unlock.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	kfree_rcu(selem, rcu);

	return free_local_storage;
}

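/* Unlink selem from its local_storage under local_storage->lock.  If it
 * was the last elem, the local_storage itself is freed after an RCU
 * grace period.
 */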
static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from its local_storage */
		return;

	local_storage = rcu_dereference(selem->local_storage);
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		kfree_rcu(local_storage, rcu);
}

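/* The caller must hold local_storage->lock, or otherwise own the
 * storage exclusively (e.g. a newly allocated, not yet published
 * storage in bpf_local_storage_alloc()).
 */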
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

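/* Unlink selem from its map bucket's list under the bucket lock */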
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference(SDATA(selem)->smap);
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

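/* Set SDATA(selem)->smap and link selem to @smap's bucket list, both
 * under the bucket lock.
 */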
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after it is successfully unlinked
	 * from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	__bpf_selem_unlink_storage(selem);
}

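/* Find @smap's sdata in @local_storage.  The per-map cache slot
 * (smap->cache_idx) is tried first.  On a miss, the storage's list is
 * walked and, if @cacheit_lockit, the result is published to the cache
 * slot under local_storage->lock.
 */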
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference(local_storage->cache[smap->cache_idx]);
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode)
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}

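/* Standard bpf update-flag semantics: BPF_NOEXIST fails on an existing
 * elem, BPF_EXIST fails on a missing elem, BPF_ANY accepts both.
 */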
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

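/* Allocate a bpf_local_storage for @owner with @first_selem already
 * linked to it, then publish it through a cmpxchg on the owner's
 * storage pointer.  Fails with -EAGAIN if a parallel update has
 * published a storage first.
 */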
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of what
	 * the running context is: bh, irq, etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock.  Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, it can be freed immediately
		 * (instead of via kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu() before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

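/* Illustrative usage sketch (not lifted from any particular caller;
 * "create_if_missing" is a made-up name): an owner-specific "get"
 * helper typically does
 *
 *	sdata = bpf_local_storage_lookup(local_storage, smap, true);
 *	if (!sdata && create_if_missing)
 *		sdata = bpf_local_storage_update(owner, smap, value,
 *						 BPF_NOEXIST);
 *
 * while guaranteeing the owner stays alive across the call (see below).
 */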
/* The owner (e.g. sk) cannot be going away because this function is
 * linking a new elem to the owner's storage (e.g. sk->sk_bpf_storage,
 * i.e. sk->sk_refcnt cannot be 0).  Otherwise, the elem will become a
 * leak (and cause other memory issues during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot both be set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(&smap->map)))
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference(*owner_storage(smap, owner));
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem);
		if (err) {
			kfree(selem);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away.  It has just been checked before, so very
		 * unlikely.  Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	/* local_storage->lock is held.  Hence, we are sure
	 * we can unlink and uncharge the old_sdata successfully
	 * later.  Hence, instead of charging the new selem now
	 * and then uncharging the old selem later (which may cause
	 * a potential but unnecessary charge failure), avoid taking
	 * a charge at all here (the "!old_sdata" check) and the
	 * old_sdata will not be uncharged later during
	 * bpf_selem_unlink_storage_nolock().
	 */
	selem = bpf_selem_alloc(smap, owner, value, !old_sdata);
	if (!selem) {
		err = -ENOMEM;
		goto unlock_err;
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return ERR_PTR(err);
}

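/* Pick the least used cache slot for a new map so lookups of heavily
 * used maps are more likely to stay on the cache fast path.
 */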
u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

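/* Release a cache slot taken by bpf_local_storage_cache_idx_get() */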
void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
				      u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map_bucket *b;
	unsigned int i;

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf progs and userspace can no longer access this map
	 * now.  No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elems of this map can be cleaned up here
	 * or when the storage is freed, e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	kfree(smap);
}

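/* Sanity-check the create attrs: local storage maps must be
 * BPF_F_NO_PREALLOC, keyed by a 4-byte int, without max_entries, and
 * must carry BTF for both key and value.
 */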
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

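/* Allocate the map and its buckets.  The bucket count scales with the
 * number of possible CPUs to reduce contention on the bucket locks.
 */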
struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;

	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap->buckets) {
		kfree(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size =
		sizeof(struct bpf_local_storage_elem) + attr->value_size;

	return smap;
}

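/* The key is only an owner handle (e.g. a socket fd in the syscall
 * path), so its BTF must describe a plain 32-bit integer.
 */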
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}