// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
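
/* Illustrative only (not part of the kernel build): a minimal userspace
 * sketch of attributes that satisfy the checks above, using the raw
 * bpf(2) syscall; the concrete sizes are assumptions chosen for the
 * example:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,		// arrays require a 4-byte key
 *		.value_size  = 64,		// non-zero, <= KMALLOC_MAX_SIZE
 *		.max_entries = 256,		// must be non-zero
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */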

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;
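	/* Example (illustrative): for attr->max_entries == 5,
	 * fls_long(4) == 3, so mask64 == 0b111 and index_mask == 7;
	 * the mitigation path below then rounds max_entries up to
	 * index_mask + 1 == 8 so speculation stays inside the allocation.
	 */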

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
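		/* Layout note: 'data' is the start of the vmalloc'ed region;
		 * shifting 'array' back by offsetof(struct bpf_array, value)
		 * makes array->value land exactly on the page boundary at
		 * data + PAGE_ALIGN(sizeof(struct bpf_array)), which is the
		 * area array_map_mmap() later exposes to userspace.
		 */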
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}
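
/* Illustrative BPF-program-side sketch (map and value names assumed):
 * this path is what the bpf_map_lookup_elem() helper resolves to for
 * array maps:
 *
 *	__u32 key = 0;
 *	struct my_value *v = bpf_map_lookup_elem(&my_array_map, &key);
 *	if (v)
 *		v->counter++;
 */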

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
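
/* Illustrative userspace sketch (libbpf-style, names assumed): a syscall
 * lookup on a per-cpu array must supply one round_up(value_size, 8) slot
 * per possible CPU, which this function fills:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 vals[ncpus];			// value_size == 8 in this example
 *	__u32 key = 0;
 *	bpf_map_lookup_elem(map_fd, &key, vals);	// fills vals[0..ncpus-1]
 */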

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
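
/* Illustrative userspace iteration built on top of this callback (sketch,
 * libbpf-style wrappers assumed):
 *
 *	__u32 key, next_key;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &key);	// first key
 *	while (!err) {
 *		// ... use key ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next_key);
 *		key = next_key;
 *	}
 */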

static void check_and_free_timer_in_array(struct bpf_array *arr, void *val)
{
	if (unlikely(map_value_has_timer(&arr->map)))
		bpf_timer_cancel_and_free(val + arr->map.timer_off);
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		check_and_free_timer_in_array(array, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (likely(!map_value_has_timer(map)))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
					  map->timer_off);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
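
/* Illustrative userspace sketch: a BPF_F_MMAPABLE array can be mapped
 * directly, avoiding lookup/update syscalls (map_length is an assumed
 * variable, normally PAGE_ALIGN(max_entries * round_up(value_size, 8))):
 *
 *	void *base = mmap(NULL, map_length, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, map_fd, 0);
 *	// base now aliases array->value; element i lives at
 *	// base + i * round_up(value_size, 8)
 */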

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
				   void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array->value + array->elem_size * i;
		num_elems++;
		key = i;
		ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
						 (u64)(long)&key, (u64)(long)val,
						 (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}
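
/* Illustrative BPF-program-side sketch of the helper that ends up here,
 * bpf_for_each_map_elem() (callback, map and ctx names assumed):
 *
 *	static long count_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		(*val)++;
 *		return 0;	// 0 = continue, 1 = stop
 *	}
 *	...
 *	long n = bpf_for_each_map_elem(&my_array_map, count_cb, &my_ctx, 0);
 */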

static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}
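
/* Illustrative only: prog arrays are typically filled from userspace with
 * program fds and consumed via tail calls in BPF code (names assumed):
 *
 *	__u32 idx = 1;
 *	bpf_map_update_elem(prog_array_fd, &idx, &prog_fd, BPF_ANY);
 *
 * and on the BPF side:
 *
 *	bpf_tail_call(ctx, &prog_array_map, idx);
 */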

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
				      struct bpf_prog *new, struct bpf_prog *old)
{
	WARN_ON_ONCE(1);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) Also programs reaching refcount of zero while patching
			 *    is in progress is okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			bpf_arch_poke_desc_update(poke, new, old);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);
	spin_lock_init(&aux->owner.lock);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &prog_array_map_btf_id,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}
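
/* Illustrative only: entries created here back the BPF-side
 * bpf_perf_event_output() helper, e.g. (sketch, map and data names assumed):
 *
 *	bpf_perf_event_output(ctx, &events_map, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */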

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &perf_event_array_map_btf_id,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put free cgrp after a rcu grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &cgroup_array_map_btf_id,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
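
/* Illustrative BPF-program-side sketch (map and key names assumed): the
 * outer lookup returns an inner map pointer that must be NULL-checked
 * before the second-level lookup:
 *
 *	void *inner = bpf_map_lookup_elem(&outer_array, &outer_key);
 *	if (inner) {
 *		__u32 *v = bpf_map_lookup_elem(inner, &inner_key);
 *		...
 *	}
 */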

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_of_maps_map_btf_id,
};