1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/jhash.h>
8 #include <linux/filter.h>
9 #include <linux/rculist_nulls.h>
10 #include <linux/random.h>
11 #include <uapi/linux/btf.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/btf_ids.h>
14 #include "percpu_freelist.h"
15 #include "bpf_lru_list.h"
16 #include "map_in_map.h"
17 #include <linux/bpf_mem_alloc.h>
18
19 #define HTAB_CREATE_FLAG_MASK \
20 (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
21 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
22
23 #define BATCH_OPS(_name) \
24 .map_lookup_batch = \
25 _name##_map_lookup_batch, \
26 .map_lookup_and_delete_batch = \
27 _name##_map_lookup_and_delete_batch, \
28 .map_update_batch = \
29 generic_map_update_batch, \
30 .map_delete_batch = \
31 generic_map_delete_batch
32
33 /*
34 * The bucket lock has two protection scopes:
35 *
36 * 1) Serializing concurrent operations from BPF programs on different
37 * CPUs
38 *
39 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
40 *
41 * BPF programs can execute in any context including perf, kprobes and
42 * tracing. As there are almost no limits on where perf, kprobes and tracing
43 * can be invoked from, the lock operations need to be protected against
44 * deadlocks. Deadlocks can be caused by recursion and by an invocation in
45 * the lock held section when functions which acquire this lock are invoked
46 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
47 * variable bpf_prog_active, which prevents BPF programs attached to perf
48 * events, kprobes and tracing from being invoked before the prior invocation
49 * from one of these contexts has completed. sys_bpf() uses the same mechanism
50 * by pinning the task to the current CPU and incrementing the recursion
51 * protection across the map operation.
52 *
53 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
54 * operations like memory allocations (even with GFP_ATOMIC) from atomic
55 * contexts. This is required because even with GFP_ATOMIC the memory
56 * allocator calls into code paths which acquire locks with long held lock
57 * sections. To ensure the deterministic behaviour these locks are regular
58 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
59 * true atomic contexts on an RT kernel are the low level hardware
60 * handling, scheduling, low level interrupt handling, NMIs etc. None of
61 * these contexts should ever do memory allocations.
62 *
63 * As regular device interrupt handlers and soft interrupts are forced into
64 * thread context, the existing code which does
65 * spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
66 * just works.
67 *
68 * In theory the BPF locks could be converted to regular spinlocks as well,
69 * but the bucket locks and percpu_freelist locks can be taken from
70 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
71 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
72 * it was only safe to use a raw spinlock for a preallocated hash map on an RT
73 * kernel, because there was no memory allocation within the lock held sections.
74 * However, after the hash map was fully converted to use bpf_mem_alloc, the
75 * memory allocation for a non-preallocated hash map is non-blocking, so it is
76 * safe to always use a raw spinlock for the bucket lock.
77 */
78 struct bucket {
79 struct hlist_nulls_head head;
80 raw_spinlock_t raw_lock;
81 };
82
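/* Per-CPU re-entrancy protection for the bucket locks: each bucket maps to
 * one of HASHTAB_MAP_LOCK_COUNT counters in htab->map_locked. See
 * htab_lock_bucket() below for how these counters turn a potential
 * same-CPU deadlock into -EBUSY.
 */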
83 #define HASHTAB_MAP_LOCK_COUNT 8
84 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
85
86 struct bpf_htab {
87 struct bpf_map map;
88 struct bpf_mem_alloc ma;
89 struct bpf_mem_alloc pcpu_ma;
90 struct bucket *buckets;
91 void *elems;
92 union {
93 struct pcpu_freelist freelist;
94 struct bpf_lru lru;
95 };
96 struct htab_elem *__percpu *extra_elems;
97 /* number of elements in a non-preallocated hashtable is kept
98 * in either pcount or count
99 */
100 struct percpu_counter pcount;
101 atomic_t count;
102 bool use_percpu_counter;
103 u32 n_buckets; /* number of hash buckets */
104 u32 elem_size; /* size of each element in bytes */
105 u32 hashrnd;
106 struct lock_class_key lockdep_key;
107 int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
108 };
109
110 /* each htab element is struct htab_elem + key + value */
111 struct htab_elem {
112 union {
113 struct hlist_nulls_node hash_node;
114 struct {
115 void *padding;
116 union {
117 struct pcpu_freelist_node fnode;
118 struct htab_elem *batch_flink;
119 };
120 };
121 };
122 union {
123 /* pointer to per-cpu pointer */
124 void *ptr_to_pptr;
125 struct bpf_lru_node lru_node;
126 };
127 u32 hash;
128 char key[] __aligned(8);
129 };
130
131 static inline bool htab_is_prealloc(const struct bpf_htab *htab)
132 {
133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
134 }
135
136 static void htab_init_buckets(struct bpf_htab *htab)
137 {
138 unsigned int i;
139
140 for (i = 0; i < htab->n_buckets; i++) {
141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
142 raw_spin_lock_init(&htab->buckets[i].raw_lock);
143 lockdep_set_class(&htab->buckets[i].raw_lock,
144 &htab->lockdep_key);
145 cond_resched();
146 }
147 }
148
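/* Lock the bucket for the given hash. Preemption and interrupts are disabled
 * first, then the per-CPU map_locked counter for the bucket's lock group is
 * incremented. If the counter was already non-zero, this CPU is re-entering
 * the htab code (e.g. a tracing program hit a point inside the locked
 * region), so -EBUSY is returned instead of deadlocking on the raw spinlock.
 */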
149 static inline int htab_lock_bucket(const struct bpf_htab *htab,
150 struct bucket *b, u32 hash,
151 unsigned long *pflags)
152 {
153 unsigned long flags;
154
155 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
156
157 preempt_disable();
158 local_irq_save(flags);
159 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
160 __this_cpu_dec(*(htab->map_locked[hash]));
161 local_irq_restore(flags);
162 preempt_enable();
163 return -EBUSY;
164 }
165
166 raw_spin_lock(&b->raw_lock);
167 *pflags = flags;
168
169 return 0;
170 }
171
172 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
173 struct bucket *b, u32 hash,
174 unsigned long flags)
175 {
176 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
177 raw_spin_unlock(&b->raw_lock);
178 __this_cpu_dec(*(htab->map_locked[hash]));
179 local_irq_restore(flags);
180 preempt_enable();
181 }
182
183 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
184
185 static bool htab_is_lru(const struct bpf_htab *htab)
186 {
187 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
188 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
189 }
190
191 static bool htab_is_percpu(const struct bpf_htab *htab)
192 {
193 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
194 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
195 }
196
197 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
198 void __percpu *pptr)
199 {
200 *(void __percpu **)(l->key + key_size) = pptr;
201 }
202
203 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
204 {
205 return *(void __percpu **)(l->key + key_size);
206 }
207
208 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
209 {
210 return *(void **)(l->key + roundup(map->key_size, 8));
211 }
212
213 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
214 {
215 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
216 }
217
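/* Non-LRU, non-per-cpu maps preallocate one extra element per CPU,
 * see alloc_extra_elems().
 */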
218 static bool htab_has_extra_elems(struct bpf_htab *htab)
219 {
220 return !htab_is_percpu(htab) && !htab_is_lru(htab);
221 }
222
223 static void htab_free_prealloced_timers(struct bpf_htab *htab)
224 {
225 u32 num_entries = htab->map.max_entries;
226 int i;
227
228 if (!map_value_has_timer(&htab->map))
229 return;
230 if (htab_has_extra_elems(htab))
231 num_entries += num_possible_cpus();
232
233 for (i = 0; i < num_entries; i++) {
234 struct htab_elem *elem;
235
236 elem = get_htab_elem(htab, i);
237 bpf_timer_cancel_and_free(elem->key +
238 round_up(htab->map.key_size, 8) +
239 htab->map.timer_off);
240 cond_resched();
241 }
242 }
243
244 static void htab_free_prealloced_kptrs(struct bpf_htab *htab)
245 {
246 u32 num_entries = htab->map.max_entries;
247 int i;
248
249 if (!map_value_has_kptrs(&htab->map))
250 return;
251 if (htab_has_extra_elems(htab))
252 num_entries += num_possible_cpus();
253
254 for (i = 0; i < num_entries; i++) {
255 struct htab_elem *elem;
256
257 elem = get_htab_elem(htab, i);
258 bpf_map_free_kptrs(&htab->map, elem->key + round_up(htab->map.key_size, 8));
259 cond_resched();
260 }
261 }
262
263 static void htab_free_elems(struct bpf_htab *htab)
264 {
265 int i;
266
267 if (!htab_is_percpu(htab))
268 goto free_elems;
269
270 for (i = 0; i < htab->map.max_entries; i++) {
271 void __percpu *pptr;
272
273 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
274 htab->map.key_size);
275 free_percpu(pptr);
276 cond_resched();
277 }
278 free_elems:
279 bpf_map_area_free(htab->elems);
280 }
281
282 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock
283 * (bucket_lock). If both locks need to be acquired together, the lock
284 * order is always lru_lock -> bucket_lock and this only happens in
285 * bpf_lru_list.c logic. For example, certain code path of
286 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(),
287 * will acquire lru_lock first followed by acquiring bucket_lock.
288 *
289 * In hashtab.c, to avoid deadlock, lock acquisition of
290 * bucket_lock followed by lru_lock is not allowed. In such cases,
291 * bucket_lock needs to be released first before acquiring lru_lock.
292 */
293 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
294 u32 hash)
295 {
296 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
297 struct htab_elem *l;
298
299 if (node) {
300 l = container_of(node, struct htab_elem, lru_node);
301 memcpy(l->key, key, htab->map.key_size);
302 return l;
303 }
304
305 return NULL;
306 }
307
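/* Allocate all elements up front and hand them either to the LRU free list
 * or to the per-cpu freelist, depending on the map type.
 */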
308 static int prealloc_init(struct bpf_htab *htab)
309 {
310 u32 num_entries = htab->map.max_entries;
311 int err = -ENOMEM, i;
312
313 if (htab_has_extra_elems(htab))
314 num_entries += num_possible_cpus();
315
316 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
317 htab->map.numa_node);
318 if (!htab->elems)
319 return -ENOMEM;
320
321 if (!htab_is_percpu(htab))
322 goto skip_percpu_elems;
323
324 for (i = 0; i < num_entries; i++) {
325 u32 size = round_up(htab->map.value_size, 8);
326 void __percpu *pptr;
327
328 pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
329 GFP_USER | __GFP_NOWARN);
330 if (!pptr)
331 goto free_elems;
332 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
333 pptr);
334 cond_resched();
335 }
336
337 skip_percpu_elems:
338 if (htab_is_lru(htab))
339 err = bpf_lru_init(&htab->lru,
340 htab->map.map_flags & BPF_F_NO_COMMON_LRU,
341 offsetof(struct htab_elem, hash) -
342 offsetof(struct htab_elem, lru_node),
343 htab_lru_map_delete_node,
344 htab);
345 else
346 err = pcpu_freelist_init(&htab->freelist);
347
348 if (err)
349 goto free_elems;
350
351 if (htab_is_lru(htab))
352 bpf_lru_populate(&htab->lru, htab->elems,
353 offsetof(struct htab_elem, lru_node),
354 htab->elem_size, num_entries);
355 else
356 pcpu_freelist_populate(&htab->freelist,
357 htab->elems + offsetof(struct htab_elem, fnode),
358 htab->elem_size, num_entries);
359
360 return 0;
361
362 free_elems:
363 htab_free_elems(htab);
364 return err;
365 }
366
367 static void prealloc_destroy(struct bpf_htab *htab)
368 {
369 htab_free_elems(htab);
370
371 if (htab_is_lru(htab))
372 bpf_lru_destroy(&htab->lru);
373 else
374 pcpu_freelist_destroy(&htab->freelist);
375 }
376
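/* Stash one preallocated element per CPU. These spare elements let
 * htab_map_update_elem() replace an existing entry without touching the
 * freelist (see alloc_htab_elem()).
 */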
377 static int alloc_extra_elems(struct bpf_htab *htab)
378 {
379 struct htab_elem *__percpu *pptr, *l_new;
380 struct pcpu_freelist_node *l;
381 int cpu;
382
383 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
384 GFP_USER | __GFP_NOWARN);
385 if (!pptr)
386 return -ENOMEM;
387
388 for_each_possible_cpu(cpu) {
389 l = pcpu_freelist_pop(&htab->freelist);
390 /* pop will succeed, since prealloc_init()
391 * preallocated extra num_possible_cpus() elements
392 */
393 l_new = container_of(l, struct htab_elem, fnode);
394 *per_cpu_ptr(pptr, cpu) = l_new;
395 }
396 htab->extra_elems = pptr;
397 return 0;
398 }
399
400 /* Called from syscall */
401 static int htab_map_alloc_check(union bpf_attr *attr)
402 {
403 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
404 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
405 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
406 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
407 /* percpu_lru means each cpu has its own LRU list.
408 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
409 * the map's value itself is percpu. percpu_lru has
410 * nothing to do with the map's value.
411 */
412 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
413 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
414 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
415 int numa_node = bpf_map_attr_numa_node(attr);
416
417 BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
418 offsetof(struct htab_elem, hash_node.pprev));
419
420 if (lru && !bpf_capable())
421 /* LRU implementation is much more complicated than other
422 * maps. Hence, limit to CAP_BPF.
423 */
424 return -EPERM;
425
426 if (zero_seed && !capable(CAP_SYS_ADMIN))
427 /* Guard against local DoS, and discourage production use. */
428 return -EPERM;
429
430 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
431 !bpf_map_flags_access_ok(attr->map_flags))
432 return -EINVAL;
433
434 if (!lru && percpu_lru)
435 return -EINVAL;
436
437 if (lru && !prealloc)
438 return -ENOTSUPP;
439
440 if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
441 return -EINVAL;
442
443 /* check sanity of attributes.
444 * value_size == 0 may be allowed in the future to use map as a set
445 */
446 if (attr->max_entries == 0 || attr->key_size == 0 ||
447 attr->value_size == 0)
448 return -EINVAL;
449
450 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
451 sizeof(struct htab_elem))
452 /* if key_size + value_size is bigger, the user space won't be
453 * able to access the elements via bpf syscall. This check
454 * also makes sure that the elem_size doesn't overflow and it's
455 * kmalloc-able later in htab_map_update_elem()
456 */
457 return -E2BIG;
458
459 return 0;
460 }
461
462 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
463 {
464 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
465 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
466 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
467 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
468 /* percpu_lru means each cpu has its own LRU list.
469 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
470 * the map's value itself is percpu. percpu_lru has
471 * nothing to do with the map's value.
472 */
473 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
474 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
475 struct bpf_htab *htab;
476 int err, i;
477
478 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
479 if (!htab)
480 return ERR_PTR(-ENOMEM);
481
482 lockdep_register_key(&htab->lockdep_key);
483
484 bpf_map_init_from_attr(&htab->map, attr);
485
486 if (percpu_lru) {
487 /* ensure each CPU's lru list has >=1 element.
488 * Since we are at it, make each lru list have the same
489 * number of elements.
490 */
491 htab->map.max_entries = roundup(attr->max_entries,
492 num_possible_cpus());
493 if (htab->map.max_entries < attr->max_entries)
494 htab->map.max_entries = rounddown(attr->max_entries,
495 num_possible_cpus());
496 }
497
498 /* hash table size must be power of 2 */
499 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
500
501 htab->elem_size = sizeof(struct htab_elem) +
502 round_up(htab->map.key_size, 8);
503 if (percpu)
504 htab->elem_size += sizeof(void *);
505 else
506 htab->elem_size += round_up(htab->map.value_size, 8);
507
508 err = -E2BIG;
509 /* prevent zero size kmalloc and check for u32 overflow */
510 if (htab->n_buckets == 0 ||
511 htab->n_buckets > U32_MAX / sizeof(struct bucket))
512 goto free_htab;
513
514 err = -ENOMEM;
515 htab->buckets = bpf_map_area_alloc(htab->n_buckets *
516 sizeof(struct bucket),
517 htab->map.numa_node);
518 if (!htab->buckets)
519 goto free_htab;
520
521 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
522 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
523 sizeof(int),
524 sizeof(int),
525 GFP_USER);
526 if (!htab->map_locked[i])
527 goto free_map_locked;
528 }
529
530 if (htab->map.map_flags & BPF_F_ZERO_SEED)
531 htab->hashrnd = 0;
532 else
533 htab->hashrnd = get_random_u32();
534
535 htab_init_buckets(htab);
536
537 /* compute_batch_value() computes batch value as num_online_cpus() * 2
538 * and __percpu_counter_compare() needs
539 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
540 * for percpu_counter to be faster than atomic_t. In practice the average bpf
541 * hash map size is 10k, which means that a system with 64 cpus will fill
542 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
543 * define our own batch count as 32 then 10k hash map can be filled up to 80%:
544 * 10k - 8k > 32 _batch_ * 64 _cpus_
545 * and __percpu_counter_compare() will still be fast. At that point hash map
546 * collisions will dominate its performance anyway. Assume that hash map filled
547 * to 50+% isn't going to be O(1) and use the following formula to choose
548 * between percpu_counter and atomic_t.
549 */
550 #define PERCPU_COUNTER_BATCH 32
551 if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
552 htab->use_percpu_counter = true;
553
554 if (htab->use_percpu_counter) {
555 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
556 if (err)
557 goto free_map_locked;
558 }
559
560 if (prealloc) {
561 err = prealloc_init(htab);
562 if (err)
563 goto free_map_locked;
564
565 if (!percpu && !lru) {
566 /* lru itself can remove the least used element, so
567 * there is no need for an extra elem during map_update.
568 */
569 err = alloc_extra_elems(htab);
570 if (err)
571 goto free_prealloc;
572 }
573 } else {
574 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
575 if (err)
576 goto free_map_locked;
577 if (percpu) {
578 err = bpf_mem_alloc_init(&htab->pcpu_ma,
579 round_up(htab->map.value_size, 8), true);
580 if (err)
581 goto free_map_locked;
582 }
583 }
584
585 return &htab->map;
586
587 free_prealloc:
588 prealloc_destroy(htab);
589 free_map_locked:
590 if (htab->use_percpu_counter)
591 percpu_counter_destroy(&htab->pcount);
592 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
593 free_percpu(htab->map_locked[i]);
594 bpf_map_area_free(htab->buckets);
595 bpf_mem_alloc_destroy(&htab->pcpu_ma);
596 bpf_mem_alloc_destroy(&htab->ma);
597 free_htab:
598 lockdep_unregister_key(&htab->lockdep_key);
599 bpf_map_area_free(htab);
600 return ERR_PTR(err);
601 }
602
603 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
604 {
605 return jhash(key, key_len, hashrnd);
606 }
607
608 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
609 {
610 return &htab->buckets[hash & (htab->n_buckets - 1)];
611 }
612
613 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
614 {
615 return &__select_bucket(htab, hash)->head;
616 }
617
618 /* this lookup function can only be called with bucket lock taken */
619 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
620 void *key, u32 key_size)
621 {
622 struct hlist_nulls_node *n;
623 struct htab_elem *l;
624
625 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
626 if (l->hash == hash && !memcmp(&l->key, key, key_size))
627 return l;
628
629 return NULL;
630 }
631
632 /* can be called without bucket lock. it will repeat the loop in
633 * the unlikely event when elements moved from one bucket into another
634 * while the linked list is being walked
635 */
636 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
637 u32 hash, void *key,
638 u32 key_size, u32 n_buckets)
639 {
640 struct hlist_nulls_node *n;
641 struct htab_elem *l;
642
643 again:
644 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
645 if (l->hash == hash && !memcmp(&l->key, key, key_size))
646 return l;
647
648 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
649 goto again;
650
651 return NULL;
652 }
653
654 /* Called from syscall or from eBPF program directly, so
655 * arguments have to match bpf_map_lookup_elem() exactly.
656 * The return value is adjusted by BPF instructions
657 * in htab_map_gen_lookup().
658 */
659 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
660 {
661 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
662 struct hlist_nulls_head *head;
663 struct htab_elem *l;
664 u32 hash, key_size;
665
666 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
667 !rcu_read_lock_bh_held());
668
669 key_size = map->key_size;
670
671 hash = htab_map_hash(key, key_size, htab->hashrnd);
672
673 head = select_bucket(htab, hash);
674
675 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
676
677 return l;
678 }
679
680 static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
681 {
682 struct htab_elem *l = __htab_map_lookup_elem(map, key);
683
684 if (l)
685 return l->key + round_up(map->key_size, 8);
686
687 return NULL;
688 }
689
690 /* inline bpf_map_lookup_elem() call.
691 * Instead of:
692 * bpf_prog
693 * bpf_map_lookup_elem
694 * map->ops->map_lookup_elem
695 * htab_map_lookup_elem
696 * __htab_map_lookup_elem
697 * do:
698 * bpf_prog
699 * __htab_map_lookup_elem
700 */
701 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
702 {
703 struct bpf_insn *insn = insn_buf;
704 const int ret = BPF_REG_0;
705
706 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
707 (void *(*)(struct bpf_map *map, void *key))NULL));
708 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
709 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
710 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
711 offsetof(struct htab_elem, key) +
712 round_up(map->key_size, 8));
713 return insn - insn_buf;
714 }
715
716 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
717 void *key, const bool mark)
718 {
719 struct htab_elem *l = __htab_map_lookup_elem(map, key);
720
721 if (l) {
722 if (mark)
723 bpf_lru_node_set_ref(&l->lru_node);
724 return l->key + round_up(map->key_size, 8);
725 }
726
727 return NULL;
728 }
729
730 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
731 {
732 return __htab_lru_map_lookup_elem(map, key, true);
733 }
734
735 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
736 {
737 return __htab_lru_map_lookup_elem(map, key, false);
738 }
739
740 static int htab_lru_map_gen_lookup(struct bpf_map *map,
741 struct bpf_insn *insn_buf)
742 {
743 struct bpf_insn *insn = insn_buf;
744 const int ret = BPF_REG_0;
745 const int ref_reg = BPF_REG_1;
746
747 BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
748 (void *(*)(struct bpf_map *map, void *key))NULL));
749 *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
750 *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
751 *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
752 offsetof(struct htab_elem, lru_node) +
753 offsetof(struct bpf_lru_node, ref));
754 *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
755 *insn++ = BPF_ST_MEM(BPF_B, ret,
756 offsetof(struct htab_elem, lru_node) +
757 offsetof(struct bpf_lru_node, ref),
758 1);
759 *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
760 offsetof(struct htab_elem, key) +
761 round_up(map->key_size, 8));
762 return insn - insn_buf;
763 }
764
765 static void check_and_free_fields(struct bpf_htab *htab,
766 struct htab_elem *elem)
767 {
768 void *map_value = elem->key + round_up(htab->map.key_size, 8);
769
770 if (map_value_has_timer(&htab->map))
771 bpf_timer_cancel_and_free(map_value + htab->map.timer_off);
772 if (map_value_has_kptrs(&htab->map))
773 bpf_map_free_kptrs(&htab->map, map_value);
774 }
775
776 /* It is called from the bpf_lru_list when the LRU needs to delete
777 * older elements from the htab.
778 */
779 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
780 {
781 struct bpf_htab *htab = arg;
782 struct htab_elem *l = NULL, *tgt_l;
783 struct hlist_nulls_head *head;
784 struct hlist_nulls_node *n;
785 unsigned long flags;
786 struct bucket *b;
787 int ret;
788
789 tgt_l = container_of(node, struct htab_elem, lru_node);
790 b = __select_bucket(htab, tgt_l->hash);
791 head = &b->head;
792
793 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
794 if (ret)
795 return false;
796
797 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
798 if (l == tgt_l) {
799 hlist_nulls_del_rcu(&l->hash_node);
800 check_and_free_fields(htab, l);
801 break;
802 }
803
804 htab_unlock_bucket(htab, b, tgt_l->hash, flags);
805
806 return l == tgt_l;
807 }
808
809 /* Called from syscall */
810 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
811 {
812 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
813 struct hlist_nulls_head *head;
814 struct htab_elem *l, *next_l;
815 u32 hash, key_size;
816 int i = 0;
817
818 WARN_ON_ONCE(!rcu_read_lock_held());
819
820 key_size = map->key_size;
821
822 if (!key)
823 goto find_first_elem;
824
825 hash = htab_map_hash(key, key_size, htab->hashrnd);
826
827 head = select_bucket(htab, hash);
828
829 /* lookup the key */
830 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
831
832 if (!l)
833 goto find_first_elem;
834
835 /* key was found, get next key in the same bucket */
836 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
837 struct htab_elem, hash_node);
838
839 if (next_l) {
840 /* if next elem in this hash list is non-zero, just return it */
841 memcpy(next_key, next_l->key, key_size);
842 return 0;
843 }
844
845 /* no more elements in this hash list, go to the next bucket */
846 i = hash & (htab->n_buckets - 1);
847 i++;
848
849 find_first_elem:
850 /* iterate over buckets */
851 for (; i < htab->n_buckets; i++) {
852 head = select_bucket(htab, i);
853
854 /* pick first element in the bucket */
855 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
856 struct htab_elem, hash_node);
857 if (next_l) {
858 /* if it's not empty, just return it */
859 memcpy(next_key, next_l->key, key_size);
860 return 0;
861 }
862 }
863
864 /* iterated over all buckets and all elements */
865 return -ENOENT;
866 }
867
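/* Free one element of a non-preallocated map: release the per-cpu value
 * storage for per-cpu maps, cancel timers / release kptrs and return the
 * element to bpf_mem_alloc.
 */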
868 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
869 {
870 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
871 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
872 check_and_free_fields(htab, l);
873 bpf_mem_cache_free(&htab->ma, l);
874 }
875
876 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
877 {
878 struct bpf_map *map = &htab->map;
879 void *ptr;
880
881 if (map->ops->map_fd_put_ptr) {
882 ptr = fd_htab_map_get_ptr(map, l);
883 map->ops->map_fd_put_ptr(ptr);
884 }
885 }
886
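/* Element accounting: large maps use a percpu_counter, small maps fall back
 * to a plain atomic_t. See the PERCPU_COUNTER_BATCH comment in
 * htab_map_alloc() for the rationale.
 */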
887 static bool is_map_full(struct bpf_htab *htab)
888 {
889 if (htab->use_percpu_counter)
890 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
891 PERCPU_COUNTER_BATCH) >= 0;
892 return atomic_read(&htab->count) >= htab->map.max_entries;
893 }
894
895 static void inc_elem_count(struct bpf_htab *htab)
896 {
897 if (htab->use_percpu_counter)
898 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
899 else
900 atomic_inc(&htab->count);
901 }
902
903 static void dec_elem_count(struct bpf_htab *htab)
904 {
905 if (htab->use_percpu_counter)
906 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
907 else
908 atomic_dec(&htab->count);
909 }
910
911
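/* Unlink-time cleanup of an element: preallocated maps push the element back
 * onto the per-cpu freelist, non-preallocated maps return it to bpf_mem_alloc
 * and drop the element count.
 */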
912 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
913 {
914 htab_put_fd_value(htab, l);
915
916 if (htab_is_prealloc(htab)) {
917 check_and_free_fields(htab, l);
918 __pcpu_freelist_push(&htab->freelist, &l->fnode);
919 } else {
920 dec_elem_count(htab);
921 htab_elem_free(htab, l);
922 }
923 }
924
925 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
926 void *value, bool onallcpus)
927 {
928 if (!onallcpus) {
929 /* copy true value_size bytes */
930 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
931 } else {
932 u32 size = round_up(htab->map.value_size, 8);
933 int off = 0, cpu;
934
935 for_each_possible_cpu(cpu) {
936 bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
937 value + off, size);
938 off += size;
939 }
940 }
941 }
942
943 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
944 void *value, bool onallcpus)
945 {
946 /* When not setting the initial value on all cpus, zero-fill element
947 * values for other cpus. Otherwise, the bpf program has no way to ensure
948 * known initial values for cpus other than the current one
949 * (onallcpus=false is always the case when coming from a bpf prog).
950 */
951 if (!onallcpus) {
952 u32 size = round_up(htab->map.value_size, 8);
953 int current_cpu = raw_smp_processor_id();
954 int cpu;
955
956 for_each_possible_cpu(cpu) {
957 if (cpu == current_cpu)
958 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
959 size);
960 else
961 memset(per_cpu_ptr(pptr, cpu), 0, size);
962 }
963 } else {
964 pcpu_copy_value(htab, pptr, value, onallcpus);
965 }
966 }
967
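/* A hash-of-maps element stores a 'struct bpf_map *' in its value slot. On
 * 64-bit kernels the pointer is 8 bytes while the uapi value_size is 4, so
 * alloc_htab_elem() has to copy the rounded-up size.
 */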
968 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
969 {
970 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
971 BITS_PER_LONG == 64;
972 }
973
974 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
975 void *value, u32 key_size, u32 hash,
976 bool percpu, bool onallcpus,
977 struct htab_elem *old_elem)
978 {
979 u32 size = htab->map.value_size;
980 bool prealloc = htab_is_prealloc(htab);
981 struct htab_elem *l_new, **pl_new;
982 void __percpu *pptr;
983
984 if (prealloc) {
985 if (old_elem) {
986 /* if we're updating the existing element,
987 * use per-cpu extra elems to avoid freelist_pop/push
988 */
989 pl_new = this_cpu_ptr(htab->extra_elems);
990 l_new = *pl_new;
991 htab_put_fd_value(htab, old_elem);
992 *pl_new = old_elem;
993 } else {
994 struct pcpu_freelist_node *l;
995
996 l = __pcpu_freelist_pop(&htab->freelist);
997 if (!l)
998 return ERR_PTR(-E2BIG);
999 l_new = container_of(l, struct htab_elem, fnode);
1000 }
1001 } else {
1002 if (is_map_full(htab))
1003 if (!old_elem)
1004 /* when map is full and update() is replacing
1005 * old element, it's ok to allocate, since
1006 * old element will be freed immediately.
1007 * Otherwise return an error
1008 */
1009 return ERR_PTR(-E2BIG);
1010 inc_elem_count(htab);
1011 l_new = bpf_mem_cache_alloc(&htab->ma);
1012 if (!l_new) {
1013 l_new = ERR_PTR(-ENOMEM);
1014 goto dec_count;
1015 }
1016 }
1017
1018 memcpy(l_new->key, key, key_size);
1019 if (percpu) {
1020 if (prealloc) {
1021 pptr = htab_elem_get_ptr(l_new, key_size);
1022 } else {
1023 /* alloc_percpu zero-fills */
1024 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
1025 if (!pptr) {
1026 bpf_mem_cache_free(&htab->ma, l_new);
1027 l_new = ERR_PTR(-ENOMEM);
1028 goto dec_count;
1029 }
1030 l_new->ptr_to_pptr = pptr;
1031 pptr = *(void **)pptr;
1032 }
1033
1034 pcpu_init_value(htab, pptr, value, onallcpus);
1035
1036 if (!prealloc)
1037 htab_elem_set_ptr(l_new, key_size, pptr);
1038 } else if (fd_htab_map_needs_adjust(htab)) {
1039 size = round_up(size, 8);
1040 memcpy(l_new->key + round_up(key_size, 8), value, size);
1041 } else {
1042 copy_map_value(&htab->map,
1043 l_new->key + round_up(key_size, 8),
1044 value);
1045 }
1046
1047 l_new->hash = hash;
1048 return l_new;
1049 dec_count:
1050 dec_elem_count(htab);
1051 return l_new;
1052 }
1053
1054 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
1055 u64 map_flags)
1056 {
1057 if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
1058 /* elem already exists */
1059 return -EEXIST;
1060
1061 if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
1062 /* elem doesn't exist, cannot update it */
1063 return -ENOENT;
1064
1065 return 0;
1066 }
1067
1068 /* Called from syscall or from eBPF program */
1069 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
1070 u64 map_flags)
1071 {
1072 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1073 struct htab_elem *l_new = NULL, *l_old;
1074 struct hlist_nulls_head *head;
1075 unsigned long flags;
1076 struct bucket *b;
1077 u32 key_size, hash;
1078 int ret;
1079
1080 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
1081 /* unknown flags */
1082 return -EINVAL;
1083
1084 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1085 !rcu_read_lock_bh_held());
1086
1087 key_size = map->key_size;
1088
1089 hash = htab_map_hash(key, key_size, htab->hashrnd);
1090
1091 b = __select_bucket(htab, hash);
1092 head = &b->head;
1093
1094 if (unlikely(map_flags & BPF_F_LOCK)) {
1095 if (unlikely(!map_value_has_spin_lock(map)))
1096 return -EINVAL;
1097 /* find an element without taking the bucket lock */
1098 l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1099 htab->n_buckets);
1100 ret = check_flags(htab, l_old, map_flags);
1101 if (ret)
1102 return ret;
1103 if (l_old) {
1104 /* grab the element lock and update value in place */
1105 copy_map_value_locked(map,
1106 l_old->key + round_up(key_size, 8),
1107 value, false);
1108 return 0;
1109 }
1110 /* fall through, grab the bucket lock and lookup again.
1111 * 99.9% chance that the element won't be found,
1112 * but second lookup under lock has to be done.
1113 */
1114 }
1115
1116 ret = htab_lock_bucket(htab, b, hash, &flags);
1117 if (ret)
1118 return ret;
1119
1120 l_old = lookup_elem_raw(head, hash, key, key_size);
1121
1122 ret = check_flags(htab, l_old, map_flags);
1123 if (ret)
1124 goto err;
1125
1126 if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1127 /* first lookup without the bucket lock didn't find the element,
1128 * but second lookup with the bucket lock found it.
1129 * This case is highly unlikely, but has to be dealt with:
1130 * grab the element lock in addition to the bucket lock
1131 * and update element in place
1132 */
1133 copy_map_value_locked(map,
1134 l_old->key + round_up(key_size, 8),
1135 value, false);
1136 ret = 0;
1137 goto err;
1138 }
1139
1140 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1141 l_old);
1142 if (IS_ERR(l_new)) {
1143 /* all pre-allocated elements are in use or memory exhausted */
1144 ret = PTR_ERR(l_new);
1145 goto err;
1146 }
1147
1148 /* add new element to the head of the list, so that
1149 * concurrent search will find it before old elem
1150 */
1151 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1152 if (l_old) {
1153 hlist_nulls_del_rcu(&l_old->hash_node);
1154 if (!htab_is_prealloc(htab))
1155 free_htab_elem(htab, l_old);
1156 else
1157 check_and_free_fields(htab, l_old);
1158 }
1159 ret = 0;
1160 err:
1161 htab_unlock_bucket(htab, b, hash, flags);
1162 return ret;
1163 }
1164
1165 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1166 {
1167 check_and_free_fields(htab, elem);
1168 bpf_lru_push_free(&htab->lru, &elem->lru_node);
1169 }
1170
1171 static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1172 u64 map_flags)
1173 {
1174 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1175 struct htab_elem *l_new, *l_old = NULL;
1176 struct hlist_nulls_head *head;
1177 unsigned long flags;
1178 struct bucket *b;
1179 u32 key_size, hash;
1180 int ret;
1181
1182 if (unlikely(map_flags > BPF_EXIST))
1183 /* unknown flags */
1184 return -EINVAL;
1185
1186 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1187 !rcu_read_lock_bh_held());
1188
1189 key_size = map->key_size;
1190
1191 hash = htab_map_hash(key, key_size, htab->hashrnd);
1192
1193 b = __select_bucket(htab, hash);
1194 head = &b->head;
1195
1196 /* For LRU, we need to alloc before taking bucket's
1197 * spinlock because getting free nodes from LRU may need
1198 * to remove older elements from htab and this removal
1199 * operation will need a bucket lock.
1200 */
1201 l_new = prealloc_lru_pop(htab, key, hash);
1202 if (!l_new)
1203 return -ENOMEM;
1204 copy_map_value(&htab->map,
1205 l_new->key + round_up(map->key_size, 8), value);
1206
1207 ret = htab_lock_bucket(htab, b, hash, &flags);
1208 if (ret)
1209 goto err_lock_bucket;
1210
1211 l_old = lookup_elem_raw(head, hash, key, key_size);
1212
1213 ret = check_flags(htab, l_old, map_flags);
1214 if (ret)
1215 goto err;
1216
1217 /* add new element to the head of the list, so that
1218 * concurrent search will find it before old elem
1219 */
1220 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1221 if (l_old) {
1222 bpf_lru_node_set_ref(&l_new->lru_node);
1223 hlist_nulls_del_rcu(&l_old->hash_node);
1224 }
1225 ret = 0;
1226
1227 err:
1228 htab_unlock_bucket(htab, b, hash, flags);
1229
1230 err_lock_bucket:
1231 if (ret)
1232 htab_lru_push_free(htab, l_new);
1233 else if (l_old)
1234 htab_lru_push_free(htab, l_old);
1235
1236 return ret;
1237 }
1238
1239 static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1240 void *value, u64 map_flags,
1241 bool onallcpus)
1242 {
1243 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1244 struct htab_elem *l_new = NULL, *l_old;
1245 struct hlist_nulls_head *head;
1246 unsigned long flags;
1247 struct bucket *b;
1248 u32 key_size, hash;
1249 int ret;
1250
1251 if (unlikely(map_flags > BPF_EXIST))
1252 /* unknown flags */
1253 return -EINVAL;
1254
1255 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1256 !rcu_read_lock_bh_held());
1257
1258 key_size = map->key_size;
1259
1260 hash = htab_map_hash(key, key_size, htab->hashrnd);
1261
1262 b = __select_bucket(htab, hash);
1263 head = &b->head;
1264
1265 ret = htab_lock_bucket(htab, b, hash, &flags);
1266 if (ret)
1267 return ret;
1268
1269 l_old = lookup_elem_raw(head, hash, key, key_size);
1270
1271 ret = check_flags(htab, l_old, map_flags);
1272 if (ret)
1273 goto err;
1274
1275 if (l_old) {
1276 /* per-cpu hash map can update value in-place */
1277 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1278 value, onallcpus);
1279 } else {
1280 l_new = alloc_htab_elem(htab, key, value, key_size,
1281 hash, true, onallcpus, NULL);
1282 if (IS_ERR(l_new)) {
1283 ret = PTR_ERR(l_new);
1284 goto err;
1285 }
1286 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1287 }
1288 ret = 0;
1289 err:
1290 htab_unlock_bucket(htab, b, hash, flags);
1291 return ret;
1292 }
1293
1294 static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1295 void *value, u64 map_flags,
1296 bool onallcpus)
1297 {
1298 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1299 struct htab_elem *l_new = NULL, *l_old;
1300 struct hlist_nulls_head *head;
1301 unsigned long flags;
1302 struct bucket *b;
1303 u32 key_size, hash;
1304 int ret;
1305
1306 if (unlikely(map_flags > BPF_EXIST))
1307 /* unknown flags */
1308 return -EINVAL;
1309
1310 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1311 !rcu_read_lock_bh_held());
1312
1313 key_size = map->key_size;
1314
1315 hash = htab_map_hash(key, key_size, htab->hashrnd);
1316
1317 b = __select_bucket(htab, hash);
1318 head = &b->head;
1319
1320 /* For LRU, we need to alloc before taking bucket's
1321 * spinlock because LRU's elem alloc may need
1322 * to remove older elem from htab and this removal
1323 * operation will need a bucket lock.
1324 */
1325 if (map_flags != BPF_EXIST) {
1326 l_new = prealloc_lru_pop(htab, key, hash);
1327 if (!l_new)
1328 return -ENOMEM;
1329 }
1330
1331 ret = htab_lock_bucket(htab, b, hash, &flags);
1332 if (ret)
1333 goto err_lock_bucket;
1334
1335 l_old = lookup_elem_raw(head, hash, key, key_size);
1336
1337 ret = check_flags(htab, l_old, map_flags);
1338 if (ret)
1339 goto err;
1340
1341 if (l_old) {
1342 bpf_lru_node_set_ref(&l_old->lru_node);
1343
1344 /* per-cpu hash map can update value in-place */
1345 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1346 value, onallcpus);
1347 } else {
1348 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1349 value, onallcpus);
1350 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1351 l_new = NULL;
1352 }
1353 ret = 0;
1354 err:
1355 htab_unlock_bucket(htab, b, hash, flags);
1356 err_lock_bucket:
1357 if (l_new)
1358 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1359 return ret;
1360 }
1361
1362 static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1363 void *value, u64 map_flags)
1364 {
1365 return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1366 }
1367
1368 static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1369 void *value, u64 map_flags)
1370 {
1371 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1372 false);
1373 }
1374
1375 /* Called from syscall or from eBPF program */
1376 static int htab_map_delete_elem(struct bpf_map *map, void *key)
1377 {
1378 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1379 struct hlist_nulls_head *head;
1380 struct bucket *b;
1381 struct htab_elem *l;
1382 unsigned long flags;
1383 u32 hash, key_size;
1384 int ret;
1385
1386 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1387 !rcu_read_lock_bh_held());
1388
1389 key_size = map->key_size;
1390
1391 hash = htab_map_hash(key, key_size, htab->hashrnd);
1392 b = __select_bucket(htab, hash);
1393 head = &b->head;
1394
1395 ret = htab_lock_bucket(htab, b, hash, &flags);
1396 if (ret)
1397 return ret;
1398
1399 l = lookup_elem_raw(head, hash, key, key_size);
1400
1401 if (l) {
1402 hlist_nulls_del_rcu(&l->hash_node);
1403 free_htab_elem(htab, l);
1404 } else {
1405 ret = -ENOENT;
1406 }
1407
1408 htab_unlock_bucket(htab, b, hash, flags);
1409 return ret;
1410 }
1411
1412 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1413 {
1414 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1415 struct hlist_nulls_head *head;
1416 struct bucket *b;
1417 struct htab_elem *l;
1418 unsigned long flags;
1419 u32 hash, key_size;
1420 int ret;
1421
1422 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1423 !rcu_read_lock_bh_held());
1424
1425 key_size = map->key_size;
1426
1427 hash = htab_map_hash(key, key_size, htab->hashrnd);
1428 b = __select_bucket(htab, hash);
1429 head = &b->head;
1430
1431 ret = htab_lock_bucket(htab, b, hash, &flags);
1432 if (ret)
1433 return ret;
1434
1435 l = lookup_elem_raw(head, hash, key, key_size);
1436
1437 if (l)
1438 hlist_nulls_del_rcu(&l->hash_node);
1439 else
1440 ret = -ENOENT;
1441
1442 htab_unlock_bucket(htab, b, hash, flags);
1443 if (l)
1444 htab_lru_push_free(htab, l);
1445 return ret;
1446 }
1447
1448 static void delete_all_elements(struct bpf_htab *htab)
1449 {
1450 int i;
1451
1452 /* It's called from a worker thread, so disable migration here,
1453 * since bpf_mem_cache_free() relies on that.
1454 */
1455 migrate_disable();
1456 for (i = 0; i < htab->n_buckets; i++) {
1457 struct hlist_nulls_head *head = select_bucket(htab, i);
1458 struct hlist_nulls_node *n;
1459 struct htab_elem *l;
1460
1461 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1462 hlist_nulls_del_rcu(&l->hash_node);
1463 htab_elem_free(htab, l);
1464 }
1465 }
1466 migrate_enable();
1467 }
1468
1469 static void htab_free_malloced_timers(struct bpf_htab *htab)
1470 {
1471 int i;
1472
1473 rcu_read_lock();
1474 for (i = 0; i < htab->n_buckets; i++) {
1475 struct hlist_nulls_head *head = select_bucket(htab, i);
1476 struct hlist_nulls_node *n;
1477 struct htab_elem *l;
1478
1479 hlist_nulls_for_each_entry(l, n, head, hash_node) {
1480 /* We don't reset or free kptr on uref dropping to zero,
1481 * hence just free timer.
1482 */
1483 bpf_timer_cancel_and_free(l->key +
1484 round_up(htab->map.key_size, 8) +
1485 htab->map.timer_off);
1486 }
1487 cond_resched_rcu();
1488 }
1489 rcu_read_unlock();
1490 }
1491
1492 static void htab_map_free_timers(struct bpf_map *map)
1493 {
1494 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1495
1496 /* We don't reset or free kptr on uref dropping to zero. */
1497 if (!map_value_has_timer(&htab->map))
1498 return;
1499 if (!htab_is_prealloc(htab))
1500 htab_free_malloced_timers(htab);
1501 else
1502 htab_free_prealloced_timers(htab);
1503 }
1504
1505 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1506 static void htab_map_free(struct bpf_map *map)
1507 {
1508 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1509 int i;
1510
1511 /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1512 * bpf_free_used_maps() is called after bpf prog is no longer executing.
1513 * There is no need to synchronize_rcu() here to protect map elements.
1514 */
1515
1516 /* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
1517 * underneath and is responsible for waiting for callbacks to finish
1518 * during bpf_mem_alloc_destroy().
1519 */
1520 if (!htab_is_prealloc(htab)) {
1521 delete_all_elements(htab);
1522 } else {
1523 htab_free_prealloced_kptrs(htab);
1524 prealloc_destroy(htab);
1525 }
1526
1527 bpf_map_free_kptr_off_tab(map);
1528 free_percpu(htab->extra_elems);
1529 bpf_map_area_free(htab->buckets);
1530 bpf_mem_alloc_destroy(&htab->pcpu_ma);
1531 bpf_mem_alloc_destroy(&htab->ma);
1532 if (htab->use_percpu_counter)
1533 percpu_counter_destroy(&htab->pcount);
1534 for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1535 free_percpu(htab->map_locked[i]);
1536 lockdep_unregister_key(&htab->lockdep_key);
1537 bpf_map_area_free(htab);
1538 }
1539
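/* Print one key/value pair in BTF format, used when dumping a map pinned in
 * bpffs through the seq_file interface.
 */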
1540 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1541 struct seq_file *m)
1542 {
1543 void *value;
1544
1545 rcu_read_lock();
1546
1547 value = htab_map_lookup_elem(map, key);
1548 if (!value) {
1549 rcu_read_unlock();
1550 return;
1551 }
1552
1553 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1554 seq_puts(m, ": ");
1555 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1556 seq_puts(m, "\n");
1557
1558 rcu_read_unlock();
1559 }
1560
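/* Common helper for the BPF_MAP_LOOKUP_AND_DELETE_ELEM syscall command: copy
 * the value out under the bucket lock, then unlink the element. For LRU maps
 * the element is returned to the LRU free list only after the bucket lock has
 * been dropped (see the lock ordering comment above prealloc_lru_pop()).
 */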
1561 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1562 void *value, bool is_lru_map,
1563 bool is_percpu, u64 flags)
1564 {
1565 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1566 struct hlist_nulls_head *head;
1567 unsigned long bflags;
1568 struct htab_elem *l;
1569 u32 hash, key_size;
1570 struct bucket *b;
1571 int ret;
1572
1573 key_size = map->key_size;
1574
1575 hash = htab_map_hash(key, key_size, htab->hashrnd);
1576 b = __select_bucket(htab, hash);
1577 head = &b->head;
1578
1579 ret = htab_lock_bucket(htab, b, hash, &bflags);
1580 if (ret)
1581 return ret;
1582
1583 l = lookup_elem_raw(head, hash, key, key_size);
1584 if (!l) {
1585 ret = -ENOENT;
1586 } else {
1587 if (is_percpu) {
1588 u32 roundup_value_size = round_up(map->value_size, 8);
1589 void __percpu *pptr;
1590 int off = 0, cpu;
1591
1592 pptr = htab_elem_get_ptr(l, key_size);
1593 for_each_possible_cpu(cpu) {
1594 bpf_long_memcpy(value + off,
1595 per_cpu_ptr(pptr, cpu),
1596 roundup_value_size);
1597 off += roundup_value_size;
1598 }
1599 } else {
1600 u32 roundup_key_size = round_up(map->key_size, 8);
1601
1602 if (flags & BPF_F_LOCK)
1603 copy_map_value_locked(map, value, l->key +
1604 roundup_key_size,
1605 true);
1606 else
1607 copy_map_value(map, value, l->key +
1608 roundup_key_size);
1609 /* Zeroing special fields in the temp buffer */
1610 check_and_init_map_value(map, value);
1611 }
1612
1613 hlist_nulls_del_rcu(&l->hash_node);
1614 if (!is_lru_map)
1615 free_htab_elem(htab, l);
1616 }
1617
1618 htab_unlock_bucket(htab, b, hash, bflags);
1619
1620 if (is_lru_map && l)
1621 htab_lru_push_free(htab, l);
1622
1623 return ret;
1624 }
1625
1626 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1627 void *value, u64 flags)
1628 {
1629 return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1630 flags);
1631 }
1632
1633 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1634 void *key, void *value,
1635 u64 flags)
1636 {
1637 return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1638 flags);
1639 }
1640
1641 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1642 void *value, u64 flags)
1643 {
1644 return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1645 flags);
1646 }
1647
1648 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1649 void *key, void *value,
1650 u64 flags)
1651 {
1652 return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1653 flags);
1654 }
1655
1656 static int
1657 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1658 const union bpf_attr *attr,
1659 union bpf_attr __user *uattr,
1660 bool do_delete, bool is_lru_map,
1661 bool is_percpu)
1662 {
1663 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1664 u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1665 void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1666 void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1667 void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1668 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1669 u32 batch, max_count, size, bucket_size, map_id;
1670 struct htab_elem *node_to_free = NULL;
1671 u64 elem_map_flags, map_flags;
1672 struct hlist_nulls_head *head;
1673 struct hlist_nulls_node *n;
1674 unsigned long flags = 0;
1675 bool locked = false;
1676 struct htab_elem *l;
1677 struct bucket *b;
1678 int ret = 0;
1679
1680 elem_map_flags = attr->batch.elem_flags;
1681 if ((elem_map_flags & ~BPF_F_LOCK) ||
1682 ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
1683 return -EINVAL;
1684
1685 map_flags = attr->batch.flags;
1686 if (map_flags)
1687 return -EINVAL;
1688
1689 max_count = attr->batch.count;
1690 if (!max_count)
1691 return 0;
1692
1693 if (put_user(0, &uattr->batch.count))
1694 return -EFAULT;
1695
1696 batch = 0;
1697 if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1698 return -EFAULT;
1699
1700 if (batch >= htab->n_buckets)
1701 return -ENOENT;
1702
1703 key_size = htab->map.key_size;
1704 roundup_key_size = round_up(htab->map.key_size, 8);
1705 value_size = htab->map.value_size;
1706 size = round_up(value_size, 8);
1707 if (is_percpu)
1708 value_size = size * num_possible_cpus();
1709 total = 0;
1710 /* while experimenting with hash tables with sizes ranging from 10 to
1711 * 1000, it was observed that a bucket can have up to 5 entries.
1712 */
1713 bucket_size = 5;
1714
1715 alloc:
1716 /* We cannot do copy_from_user or copy_to_user inside
1717 * the rcu_read_lock. Allocate enough space here.
1718 */
1719 keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1720 values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1721 if (!keys || !values) {
1722 ret = -ENOMEM;
1723 goto after_loop;
1724 }
1725
1726 again:
1727 bpf_disable_instrumentation();
1728 rcu_read_lock();
1729 again_nocopy:
1730 dst_key = keys;
1731 dst_val = values;
1732 b = &htab->buckets[batch];
1733 head = &b->head;
1734 /* do not grab the lock unless we need it (bucket_cnt > 0). */
1735 if (locked) {
1736 ret = htab_lock_bucket(htab, b, batch, &flags);
1737 if (ret) {
1738 rcu_read_unlock();
1739 bpf_enable_instrumentation();
1740 goto after_loop;
1741 }
1742 }
1743
1744 bucket_cnt = 0;
1745 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1746 bucket_cnt++;
1747
1748 if (bucket_cnt && !locked) {
1749 locked = true;
1750 goto again_nocopy;
1751 }
1752
1753 if (bucket_cnt > (max_count - total)) {
1754 if (total == 0)
1755 ret = -ENOSPC;
1756 /* Note that since bucket_cnt > 0 here, it is implicit
1757 * that the lock was grabbed, so release it.
1758 */
1759 htab_unlock_bucket(htab, b, batch, flags);
1760 rcu_read_unlock();
1761 bpf_enable_instrumentation();
1762 goto after_loop;
1763 }
1764
1765 if (bucket_cnt > bucket_size) {
1766 bucket_size = bucket_cnt;
1767 /* Note that since bucket_cnt > 0 here, it is implicit
1768 * that the lock was grabbed, so release it.
1769 */
1770 htab_unlock_bucket(htab, b, batch, flags);
1771 rcu_read_unlock();
1772 bpf_enable_instrumentation();
1773 kvfree(keys);
1774 kvfree(values);
1775 goto alloc;
1776 }
1777
1778 /* Next block is only safe to run if you have grabbed the lock */
1779 if (!locked)
1780 goto next_batch;
1781
1782 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1783 memcpy(dst_key, l->key, key_size);
1784
1785 if (is_percpu) {
1786 int off = 0, cpu;
1787 void __percpu *pptr;
1788
1789 pptr = htab_elem_get_ptr(l, map->key_size);
1790 for_each_possible_cpu(cpu) {
1791 bpf_long_memcpy(dst_val + off,
1792 per_cpu_ptr(pptr, cpu), size);
1793 off += size;
1794 }
1795 } else {
1796 value = l->key + roundup_key_size;
1797 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1798 struct bpf_map **inner_map = value;
1799
1800 /* Actual value is the id of the inner map */
1801 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1802 value = &map_id;
1803 }
1804
1805 if (elem_map_flags & BPF_F_LOCK)
1806 copy_map_value_locked(map, dst_val, value,
1807 true);
1808 else
1809 copy_map_value(map, dst_val, value);
1810 /* Zeroing special fields in the temp buffer */
1811 check_and_init_map_value(map, dst_val);
1812 }
1813 if (do_delete) {
1814 hlist_nulls_del_rcu(&l->hash_node);
1815
1816 /* bpf_lru_push_free() will acquire lru_lock, which
1817 * may cause deadlock. See comments in function
1818 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1819 * after releasing the bucket lock.
1820 */
1821 if (is_lru_map) {
1822 l->batch_flink = node_to_free;
1823 node_to_free = l;
1824 } else {
1825 free_htab_elem(htab, l);
1826 }
1827 }
1828 dst_key += key_size;
1829 dst_val += value_size;
1830 }
1831
1832 htab_unlock_bucket(htab, b, batch, flags);
1833 locked = false;
1834
1835 while (node_to_free) {
1836 l = node_to_free;
1837 node_to_free = node_to_free->batch_flink;
1838 htab_lru_push_free(htab, l);
1839 }
1840
1841 next_batch:
1842 /* If we are not copying data, we can go to next bucket and avoid
1843 * unlocking the rcu.
1844 */
1845 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1846 batch++;
1847 goto again_nocopy;
1848 }
1849
1850 rcu_read_unlock();
1851 bpf_enable_instrumentation();
1852 if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1853 key_size * bucket_cnt) ||
1854 copy_to_user(uvalues + total * value_size, values,
1855 value_size * bucket_cnt))) {
1856 ret = -EFAULT;
1857 goto after_loop;
1858 }
1859
1860 total += bucket_cnt;
1861 batch++;
1862 if (batch >= htab->n_buckets) {
1863 ret = -ENOENT;
1864 goto after_loop;
1865 }
1866 goto again;
1867
1868 after_loop:
1869 if (ret == -EFAULT)
1870 goto out;
1871
1872 /* copy # of entries and next batch */
1873 ubatch = u64_to_user_ptr(attr->batch.out_batch);
1874 if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1875 put_user(total, &uattr->batch.count))
1876 ret = -EFAULT;
1877
1878 out:
1879 kvfree(keys);
1880 kvfree(values);
1881 return ret;
1882 }
1883
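/* The following wrappers only differ in the (do_delete, is_lru_map, is_percpu)
 * triple they pass to __htab_map_lookup_and_delete_batch(), selecting whether
 * elements are deleted after being copied out and which hash map flavour
 * (regular, LRU and/or per-CPU) is being walked.
 */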
static int
htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  false, true);
}

static int
htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  false, true);
}

static int
htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
		      union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  false, false);
}

static int
htab_map_lookup_and_delete_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  false, false);
}

static int
htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  true, true);
}

static int
htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					    const union bpf_attr *attr,
					    union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  true, true);
}

static int
htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  true, false);
}

static int
htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
				     const union bpf_attr *attr,
				     union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  true, false);
}

struct bpf_iter_seq_hash_map_info {
	struct bpf_map *map;
	struct bpf_htab *htab;
	void *percpu_value_buf; // non-zero means percpu hash
	u32 bucket_id;
	u32 skip_elems;
};

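/* Return the next element for the hash map iterator. When prev_elem is
 * non-NULL the walk continues inside the same RCU-protected bucket; otherwise
 * buckets are scanned starting at info->bucket_id, skipping the first
 * info->skip_elems entries of that bucket. On success the bucket's RCU read
 * lock is left held for the subsequent show()/stop() callbacks.
 */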
static struct htab_elem *
bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
			   struct htab_elem *prev_elem)
{
	const struct bpf_htab *htab = info->htab;
	u32 skip_elems = info->skip_elems;
	u32 bucket_id = info->bucket_id;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	struct bucket *b;
	u32 i, count;

	if (bucket_id >= htab->n_buckets)
		return NULL;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		/* no update/deletion on this bucket, prev_elem should be still valid
		 * and we won't skip elements.
		 */
		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
		if (elem)
			return elem;

		/* not found, unlock and go to the next bucket */
		b = &htab->buckets[bucket_id++];
		rcu_read_unlock();
		skip_elems = 0;
	}

	for (i = bucket_id; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();

		count = 0;
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			if (count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return elem;
			}
			count++;
		}

		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	struct htab_elem *elem;

	elem = bpf_hash_map_seq_find_next(info, NULL);
	if (!elem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return elem;
}

static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_hash_map_seq_find_next(info, v);
}

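/* Run the iterator program for one element, or for the final NULL element
 * that signals the end of the walk. For per-CPU maps the values of all
 * possible CPUs are gathered into the buffer preallocated by
 * bpf_iter_init_hash_map(), so the program sees one flat value area.
 */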
static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	u32 roundup_key_size, roundup_value_size;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	int ret = 0, off = 0, cpu;
	struct bpf_prog *prog;
	void __percpu *pptr;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, elem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (elem) {
			roundup_key_size = round_up(map->key_size, 8);
			ctx.key = elem->key;
			if (!info->percpu_value_buf) {
				ctx.value = elem->key + roundup_key_size;
			} else {
				roundup_value_size = round_up(map->value_size, 8);
				pptr = htab_elem_get_ptr(elem, map->key_size);
				for_each_possible_cpu(cpu) {
					bpf_long_memcpy(info->percpu_value_buf + off,
							per_cpu_ptr(pptr, cpu),
							roundup_value_size);
					off += roundup_value_size;
				}
				ctx.value = info->percpu_value_buf;
			}
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_hash_map_seq_show(seq, v);
}

static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_hash_map_seq_show(seq, NULL);
	else
		rcu_read_unlock();
}

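/* Set up the per-iterator private data: for per-CPU hash maps a scratch
 * buffer large enough for all possible CPUs is allocated up front, and a
 * reference (with uref) is taken on the map for the lifetime of the iterator.
 */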
static int bpf_iter_init_hash_map(void *priv_data,
				  struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	seq_info->htab = container_of(map, struct bpf_htab, map);
	return 0;
}

static void bpf_iter_fini_hash_map(void *priv_data)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_hash_map_seq_ops = {
	.start = bpf_hash_map_seq_start,
	.next = bpf_hash_map_seq_next,
	.stop = bpf_hash_map_seq_stop,
	.show = bpf_hash_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_hash_map_seq_ops,
	.init_seq_private = bpf_iter_init_hash_map,
	.fini_seq_private = bpf_iter_fini_hash_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info),
};

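/* Implementation of the bpf_for_each_map_elem() callback walk for hash maps:
 * visit every bucket under RCU and invoke
 * callback_fn(map, key, value, callback_ctx) for each element. A non-zero
 * return from the callback stops the walk; the number of elements visited is
 * returned. Migration is disabled for per-CPU maps so the value prepared here
 * matches what bpf_map_lookup_elem() would return on the same CPU.
 */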
static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				  void *callback_ctx, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	u32 roundup_key_size;
	int i, num_elems = 0;
	void __percpu *pptr;
	struct bucket *b;
	void *key, *val;
	bool is_percpu;
	u64 ret = 0;

	if (flags != 0)
		return -EINVAL;

	is_percpu = htab_is_percpu(htab);

	roundup_key_size = round_up(map->key_size, 8);
	/* disable migration so percpu value prepared here will be the
	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
	 */
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			key = elem->key;
			if (is_percpu) {
				/* current cpu value for percpu map */
				pptr = htab_elem_get_ptr(elem, map->key_size);
				val = this_cpu_ptr(pptr);
			} else {
				val = elem->key + roundup_key_size;
			}
			num_elems++;
			ret = callback_fn((u64)(long)map, (u64)(long)key,
					  (u64)(long)val, (u64)(long)callback_ctx, 0);
			/* return value: 0 - continue, 1 - stop and return */
			if (ret) {
				rcu_read_unlock();
				goto out;
			}
		}
		rcu_read_unlock();
	}
out:
	if (is_percpu)
		migrate_enable();
	return num_elems;
}

BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	BATCH_OPS(htab_lru),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l)
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	}

	return NULL;
}

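/* Syscall-side lookup for per-CPU hash maps: copy the value of every possible
 * CPU into one flat user-supplied buffer, each value rounded up to 8 bytes.
 * LRU elements are deliberately not marked as referenced here, so a user
 * space map walk does not distort the eviction heuristics.
 */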
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	/* We do not mark LRU map element here in order to not mess up
	 * eviction heuristics when user space does a map walk.
	 */
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

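/* Syscall-side update for per-CPU hash maps. The trailing 'true' argument
 * tells the update helpers that the supplied buffer carries one value per
 * possible CPU rather than a value for the current CPU only.
 */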
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

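/* Dump one element of a per-CPU hash map through BTF for the bpffs "pretty
 * print" interface: the key is printed once, followed by the value of each
 * possible CPU.
 */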
static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	l = __htab_map_lookup_elem(map, key);
	if (!l) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": {\n");
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	BATCH_OPS(htab_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	BATCH_OPS(htab_lru_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

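/* fd-based hash maps (hash of maps) take a 32-bit file descriptor as the
 * update value from user space, so any other value size is rejected before
 * the common allocation checks run.
 */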
static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

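/* Drop the references held on the objects the stored pointers refer to
 * (the inner maps) via map_fd_put_ptr() before tearing down the hash table
 * itself.
 */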
static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

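/* Allocate a hash-of-maps map: capture the attributes of the inner map given
 * by attr->inner_map_fd as "meta" data first, then allocate the outer hash
 * table and attach the meta data to it so that inner maps inserted later can
 * be checked for compatibility.
 */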
static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

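/* Inline lookup for hash-of-maps: emit BPF instructions that call
 * __htab_map_lookup_elem() and, on a hit, load the inner map pointer stored
 * in the value area (8-byte aligned, right after the key).
 */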
static int htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
};