1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016,2017 Facebook
4  */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/err.h>
8 #include <linux/slab.h>
9 #include <linux/mm.h>
10 #include <linux/filter.h>
11 #include <linux/perf_event.h>
12 #include <uapi/linux/btf.h>
13 #include <linux/rcupdate_trace.h>
14 
15 #include "map_in_map.h"
16 
17 #define ARRAY_CREATE_FLAG_MASK \
18 	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
19 	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
20 
21 static void bpf_array_free_percpu(struct bpf_array *array)
22 {
23 	int i;
24 
25 	for (i = 0; i < array->map.max_entries; i++) {
26 		free_percpu(array->pptrs[i]);
27 		cond_resched();
28 	}
29 }
30 
31 static int bpf_array_alloc_percpu(struct bpf_array *array)
32 {
33 	void __percpu *ptr;
34 	int i;
35 
36 	for (i = 0; i < array->map.max_entries; i++) {
37 		ptr = __alloc_percpu_gfp(array->elem_size, 8,
38 					 GFP_USER | __GFP_NOWARN);
39 		if (!ptr) {
40 			bpf_array_free_percpu(array);
41 			return -ENOMEM;
42 		}
43 		array->pptrs[i] = ptr;
44 		cond_resched();
45 	}
46 
47 	return 0;
48 }
49 
50 /* Called from syscall */
51 int array_map_alloc_check(union bpf_attr *attr)
52 {
53 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
54 	int numa_node = bpf_map_attr_numa_node(attr);
55 
56 	/* check sanity of attributes */
57 	if (attr->max_entries == 0 || attr->key_size != 4 ||
58 	    attr->value_size == 0 ||
59 	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
60 	    !bpf_map_flags_access_ok(attr->map_flags) ||
61 	    (percpu && numa_node != NUMA_NO_NODE))
62 		return -EINVAL;
63 
64 	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
65 	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
66 		return -EINVAL;
67 
68 	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
69 	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
70 		return -EINVAL;
71 
72 	if (attr->value_size > KMALLOC_MAX_SIZE)
73 		/* if value_size is bigger, the user space won't be able to
74 		 * access the elements.
75 		 */
76 		return -E2BIG;
77 
78 	return 0;
79 }
80 
81 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
82 {
83 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
84 	int ret, numa_node = bpf_map_attr_numa_node(attr);
85 	u32 elem_size, index_mask, max_entries;
86 	bool bypass_spec_v1 = bpf_bypass_spec_v1();
87 	u64 cost, array_size, mask64;
88 	struct bpf_map_memory mem;
89 	struct bpf_array *array;
90 
91 	elem_size = round_up(attr->value_size, 8);
92 
93 	max_entries = attr->max_entries;
94 
95 	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
96 	 * upper most bit set in u32 space is undefined behavior due to
97 	 * resulting 1U << 32, so do it manually here in u64 space.
98 	 */
99 	mask64 = fls_long(max_entries - 1);
100 	mask64 = 1ULL << mask64;
101 	mask64 -= 1;
102 
103 	index_mask = mask64;
104 	if (!bypass_spec_v1) {
105 		/* round up array size to nearest power of 2,
106 		 * since cpu will speculate within index_mask limits
107 		 */
108 		max_entries = index_mask + 1;
109 		/* Check for overflows. */
110 		if (max_entries < attr->max_entries)
111 			return ERR_PTR(-E2BIG);
112 	}
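	/*
	 * Illustrative example (editor's note, values are hypothetical,
	 * not part of the original source): with attr->max_entries == 5,
	 * fls_long(4) == 3, so mask64 == (1ULL << 3) - 1 == 7. index_mask
	 * is then 7 and, when the Spectre v1 bypass is not allowed,
	 * max_entries is rounded up to 8.
	 */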
113 
114 	array_size = sizeof(*array);
115 	if (percpu) {
116 		array_size += (u64) max_entries * sizeof(void *);
117 	} else {
118 		/* rely on vmalloc() to return page-aligned memory and
119 		 * ensure array->value is exactly page-aligned
120 		 */
121 		if (attr->map_flags & BPF_F_MMAPABLE) {
122 			array_size = PAGE_ALIGN(array_size);
123 			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
124 		} else {
125 			array_size += (u64) max_entries * elem_size;
126 		}
127 	}
128 
129 	/* make sure there is no u32 overflow later in round_up() */
130 	cost = array_size;
131 	if (percpu)
132 		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
133 
134 	ret = bpf_map_charge_init(&mem, cost);
135 	if (ret < 0)
136 		return ERR_PTR(ret);
137 
138 	/* allocate all map elements and zero-initialize them */
139 	if (attr->map_flags & BPF_F_MMAPABLE) {
140 		void *data;
141 
142 		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
143 		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
144 		if (!data) {
145 			bpf_map_charge_finish(&mem);
146 			return ERR_PTR(-ENOMEM);
147 		}
148 		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
149 			- offsetof(struct bpf_array, value);
150 	} else {
151 		array = bpf_map_area_alloc(array_size, numa_node);
152 	}
153 	if (!array) {
154 		bpf_map_charge_finish(&mem);
155 		return ERR_PTR(-ENOMEM);
156 	}
157 	array->index_mask = index_mask;
158 	array->map.bypass_spec_v1 = bypass_spec_v1;
159 
160 	/* copy mandatory map attributes */
161 	bpf_map_init_from_attr(&array->map, attr);
162 	bpf_map_charge_move(&array->map.memory, &mem);
163 	array->elem_size = elem_size;
164 
165 	if (percpu && bpf_array_alloc_percpu(array)) {
166 		bpf_map_charge_finish(&array->map.memory);
167 		bpf_map_area_free(array);
168 		return ERR_PTR(-ENOMEM);
169 	}
170 
171 	return &array->map;
172 }
173 
174 /* Called from syscall or from eBPF program */
175 static void *array_map_lookup_elem(struct bpf_map *map, void *key)
176 {
177 	struct bpf_array *array = container_of(map, struct bpf_array, map);
178 	u32 index = *(u32 *)key;
179 
180 	if (unlikely(index >= array->map.max_entries))
181 		return NULL;
182 
183 	return array->value + array->elem_size * (index & array->index_mask);
184 }
185 
186 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
187 				       u32 off)
188 {
189 	struct bpf_array *array = container_of(map, struct bpf_array, map);
190 
191 	if (map->max_entries != 1)
192 		return -ENOTSUPP;
193 	if (off >= map->value_size)
194 		return -EINVAL;
195 
196 	*imm = (unsigned long)array->value;
197 	return 0;
198 }
199 
200 static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
201 				       u32 *off)
202 {
203 	struct bpf_array *array = container_of(map, struct bpf_array, map);
204 	u64 base = (unsigned long)array->value;
205 	u64 range = array->elem_size;
206 
207 	if (map->max_entries != 1)
208 		return -ENOTSUPP;
209 	if (imm < base || imm >= base + range)
210 		return -ENOENT;
211 
212 	*off = imm - base;
213 	return 0;
214 }
215 
216 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
217 static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
218 {
219 	struct bpf_array *array = container_of(map, struct bpf_array, map);
220 	struct bpf_insn *insn = insn_buf;
221 	u32 elem_size = round_up(map->value_size, 8);
222 	const int ret = BPF_REG_0;
223 	const int map_ptr = BPF_REG_1;
224 	const int index = BPF_REG_2;
225 
226 	if (map->map_flags & BPF_F_INNER_MAP)
227 		return -EOPNOTSUPP;
228 
229 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
230 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
231 	if (!map->bypass_spec_v1) {
232 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
233 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
234 	} else {
235 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
236 	}
237 
238 	if (is_power_of_2(elem_size)) {
239 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
240 	} else {
241 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
242 	}
243 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
244 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
245 	*insn++ = BPF_MOV64_IMM(ret, 0);
246 	return insn - insn_buf;
247 }
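/*
 * Editor's sketch (not part of the original source): with r1 holding the map
 * pointer and r2 the pointer to the u32 key, the emitted sequence above is
 * roughly equivalent to:
 *
 *	void *base = (void *)map + offsetof(struct bpf_array, value);
 *	u32 index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	 (only when Spectre v1 mitigation is on)
 *	return base + index * elem_size; (LSH by ilog2(elem_size) if power of 2)
 */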
248 
249 /* Called from eBPF program */
250 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
251 {
252 	struct bpf_array *array = container_of(map, struct bpf_array, map);
253 	u32 index = *(u32 *)key;
254 
255 	if (unlikely(index >= array->map.max_entries))
256 		return NULL;
257 
258 	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
259 }
260 
261 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
262 {
263 	struct bpf_array *array = container_of(map, struct bpf_array, map);
264 	u32 index = *(u32 *)key;
265 	void __percpu *pptr;
266 	int cpu, off = 0;
267 	u32 size;
268 
269 	if (unlikely(index >= array->map.max_entries))
270 		return -ENOENT;
271 
272 	/* per_cpu areas are zero-filled and bpf programs can only
273 	 * access 'value_size' of them, so copying rounded areas
274 	 * will not leak any kernel data
275 	 */
276 	size = round_up(map->value_size, 8);
277 	rcu_read_lock();
278 	pptr = array->pptrs[index & array->index_mask];
279 	for_each_possible_cpu(cpu) {
280 		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
281 		off += size;
282 	}
283 	rcu_read_unlock();
284 	return 0;
285 }
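/*
 * Editor's note: the 'value' buffer supplied by the syscall path is expected
 * to hold round_up(map->value_size, 8) bytes per possible CPU, laid out back
 * to back, i.e. num_possible_cpus() * round_up(value_size, 8) bytes in total
 * (the same sizing used by bpf_iter_init_array_map() below).
 */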
286 
287 /* Called from syscall */
288 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
289 {
290 	struct bpf_array *array = container_of(map, struct bpf_array, map);
291 	u32 index = key ? *(u32 *)key : U32_MAX;
292 	u32 *next = (u32 *)next_key;
293 
294 	if (index >= array->map.max_entries) {
295 		*next = 0;
296 		return 0;
297 	}
298 
299 	if (index == array->map.max_entries - 1)
300 		return -ENOENT;
301 
302 	*next = index + 1;
303 	return 0;
304 }
305 
306 /* Called from syscall or from eBPF program */
307 static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
308 				 u64 map_flags)
309 {
310 	struct bpf_array *array = container_of(map, struct bpf_array, map);
311 	u32 index = *(u32 *)key;
312 	char *val;
313 
314 	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
315 		/* unknown flags */
316 		return -EINVAL;
317 
318 	if (unlikely(index >= array->map.max_entries))
319 		/* all elements were pre-allocated, cannot insert a new one */
320 		return -E2BIG;
321 
322 	if (unlikely(map_flags & BPF_NOEXIST))
323 		/* all elements already exist */
324 		return -EEXIST;
325 
326 	if (unlikely((map_flags & BPF_F_LOCK) &&
327 		     !map_value_has_spin_lock(map)))
328 		return -EINVAL;
329 
330 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
331 		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
332 		       value, map->value_size);
333 	} else {
334 		val = array->value +
335 			array->elem_size * (index & array->index_mask);
336 		if (map_flags & BPF_F_LOCK)
337 			copy_map_value_locked(map, val, value, false);
338 		else
339 			copy_map_value(map, val, value);
340 	}
341 	return 0;
342 }
343 
344 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
345 			    u64 map_flags)
346 {
347 	struct bpf_array *array = container_of(map, struct bpf_array, map);
348 	u32 index = *(u32 *)key;
349 	void __percpu *pptr;
350 	int cpu, off = 0;
351 	u32 size;
352 
353 	if (unlikely(map_flags > BPF_EXIST))
354 		/* unknown flags */
355 		return -EINVAL;
356 
357 	if (unlikely(index >= array->map.max_entries))
358 		/* all elements were pre-allocated, cannot insert a new one */
359 		return -E2BIG;
360 
361 	if (unlikely(map_flags == BPF_NOEXIST))
362 		/* all elements already exist */
363 		return -EEXIST;
364 
365 	/* the user space will provide round_up(value_size, 8) bytes that
366 	 * will be copied into per-cpu area. bpf programs can only access
367 	 * value_size of it. During lookup the same extra bytes will be
368 	 * returned, or zeros that were zero-filled by percpu_alloc,
369 	 * so no kernel data leak is possible
370 	 */
371 	size = round_up(map->value_size, 8);
372 	rcu_read_lock();
373 	pptr = array->pptrs[index & array->index_mask];
374 	for_each_possible_cpu(cpu) {
375 		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
376 		off += size;
377 	}
378 	rcu_read_unlock();
379 	return 0;
380 }
381 
382 /* Called from syscall or from eBPF program */
383 static int array_map_delete_elem(struct bpf_map *map, void *key)
384 {
385 	return -EINVAL;
386 }
387 
388 static void *array_map_vmalloc_addr(struct bpf_array *array)
389 {
390 	return (void *)round_down((unsigned long)array, PAGE_SIZE);
391 }
392 
393 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
394 static void array_map_free(struct bpf_map *map)
395 {
396 	struct bpf_array *array = container_of(map, struct bpf_array, map);
397 
398 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
399 		bpf_array_free_percpu(array);
400 
401 	if (array->map.map_flags & BPF_F_MMAPABLE)
402 		bpf_map_area_free(array_map_vmalloc_addr(array));
403 	else
404 		bpf_map_area_free(array);
405 }
406 
407 static void array_map_seq_show_elem(struct bpf_map *map, void *key,
408 				    struct seq_file *m)
409 {
410 	void *value;
411 
412 	rcu_read_lock();
413 
414 	value = array_map_lookup_elem(map, key);
415 	if (!value) {
416 		rcu_read_unlock();
417 		return;
418 	}
419 
420 	if (map->btf_key_type_id)
421 		seq_printf(m, "%u: ", *(u32 *)key);
422 	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
423 	seq_puts(m, "\n");
424 
425 	rcu_read_unlock();
426 }
427 
428 static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
429 					   struct seq_file *m)
430 {
431 	struct bpf_array *array = container_of(map, struct bpf_array, map);
432 	u32 index = *(u32 *)key;
433 	void __percpu *pptr;
434 	int cpu;
435 
436 	rcu_read_lock();
437 
438 	seq_printf(m, "%u: {\n", *(u32 *)key);
439 	pptr = array->pptrs[index & array->index_mask];
440 	for_each_possible_cpu(cpu) {
441 		seq_printf(m, "\tcpu%d: ", cpu);
442 		btf_type_seq_show(map->btf, map->btf_value_type_id,
443 				  per_cpu_ptr(pptr, cpu), m);
444 		seq_puts(m, "\n");
445 	}
446 	seq_puts(m, "}\n");
447 
448 	rcu_read_unlock();
449 }
450 
451 static int array_map_check_btf(const struct bpf_map *map,
452 			       const struct btf *btf,
453 			       const struct btf_type *key_type,
454 			       const struct btf_type *value_type)
455 {
456 	u32 int_data;
457 
458 	/* One exception for keyless BTF: .bss/.data/.rodata map */
459 	if (btf_type_is_void(key_type)) {
460 		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
461 		    map->max_entries != 1)
462 			return -EINVAL;
463 
464 		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
465 			return -EINVAL;
466 
467 		return 0;
468 	}
469 
470 	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
471 		return -EINVAL;
472 
473 	int_data = *(u32 *)(key_type + 1);
474 	/* bpf array can only take a u32 key. This check makes sure
475 	 * that the btf matches the attr used during map_create.
476 	 */
477 	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
478 		return -EINVAL;
479 
480 	return 0;
481 }
482 
483 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
484 {
485 	struct bpf_array *array = container_of(map, struct bpf_array, map);
486 	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
487 
488 	if (!(map->map_flags & BPF_F_MMAPABLE))
489 		return -EINVAL;
490 
491 	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
492 	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
493 		return -EINVAL;
494 
495 	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
496 				   vma->vm_pgoff + pgoff);
497 }
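/*
 * Editor's note: vma->vm_pgoff is the user-requested page offset into the
 * value area, while pgoff skips the page-aligned struct bpf_array header at
 * the start of the vmalloc'ed region, so remap_vmalloc_range() starts mapping
 * at the first value page.
 */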
498 
499 static bool array_map_meta_equal(const struct bpf_map *meta0,
500 				 const struct bpf_map *meta1)
501 {
502 	if (!bpf_map_meta_equal(meta0, meta1))
503 		return false;
504 	return meta0->map_flags & BPF_F_INNER_MAP ? true :
505 	       meta0->max_entries == meta1->max_entries;
506 }
507 
508 struct bpf_iter_seq_array_map_info {
509 	struct bpf_map *map;
510 	void *percpu_value_buf;
511 	u32 index;
512 };
513 
514 static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
515 {
516 	struct bpf_iter_seq_array_map_info *info = seq->private;
517 	struct bpf_map *map = info->map;
518 	struct bpf_array *array;
519 	u32 index;
520 
521 	if (info->index >= map->max_entries)
522 		return NULL;
523 
524 	if (*pos == 0)
525 		++*pos;
526 	array = container_of(map, struct bpf_array, map);
527 	index = info->index & array->index_mask;
528 	if (info->percpu_value_buf)
529 		return array->pptrs[index];
530 	return array->value + array->elem_size * index;
531 }
532 
533 static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
534 {
535 	struct bpf_iter_seq_array_map_info *info = seq->private;
536 	struct bpf_map *map = info->map;
537 	struct bpf_array *array;
538 	u32 index;
539 
540 	++*pos;
541 	++info->index;
542 	if (info->index >= map->max_entries)
543 		return NULL;
544 
545 	array = container_of(map, struct bpf_array, map);
546 	index = info->index & array->index_mask;
547 	if (info->percpu_value_buf)
548 		return array->pptrs[index];
549 	return array->value + array->elem_size * index;
550 }
551 
552 static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
553 {
554 	struct bpf_iter_seq_array_map_info *info = seq->private;
555 	struct bpf_iter__bpf_map_elem ctx = {};
556 	struct bpf_map *map = info->map;
557 	struct bpf_iter_meta meta;
558 	struct bpf_prog *prog;
559 	int off = 0, cpu = 0;
560 	void __percpu **pptr;
561 	u32 size;
562 
563 	meta.seq = seq;
564 	prog = bpf_iter_get_info(&meta, v == NULL);
565 	if (!prog)
566 		return 0;
567 
568 	ctx.meta = &meta;
569 	ctx.map = info->map;
570 	if (v) {
571 		ctx.key = &info->index;
572 
573 		if (!info->percpu_value_buf) {
574 			ctx.value = v;
575 		} else {
576 			pptr = v;
577 			size = round_up(map->value_size, 8);
578 			for_each_possible_cpu(cpu) {
579 				bpf_long_memcpy(info->percpu_value_buf + off,
580 						per_cpu_ptr(pptr, cpu),
581 						size);
582 				off += size;
583 			}
584 			ctx.value = info->percpu_value_buf;
585 		}
586 	}
587 
588 	return bpf_iter_run_prog(prog, &ctx);
589 }
590 
591 static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
592 {
593 	return __bpf_array_map_seq_show(seq, v);
594 }
595 
596 static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
597 {
598 	if (!v)
599 		(void)__bpf_array_map_seq_show(seq, NULL);
600 }
601 
602 static int bpf_iter_init_array_map(void *priv_data,
603 				   struct bpf_iter_aux_info *aux)
604 {
605 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
606 	struct bpf_map *map = aux->map;
607 	void *value_buf;
608 	u32 buf_size;
609 
610 	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
611 		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
612 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
613 		if (!value_buf)
614 			return -ENOMEM;
615 
616 		seq_info->percpu_value_buf = value_buf;
617 	}
618 
619 	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
620 	 * released before or in the middle of iterating map elements, so
621 	 * acquire an extra map uref for iterator.
622 	 */
623 	bpf_map_inc_with_uref(map);
624 	seq_info->map = map;
625 	return 0;
626 }
627 
628 static void bpf_iter_fini_array_map(void *priv_data)
629 {
630 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
631 
632 	bpf_map_put_with_uref(seq_info->map);
633 	kfree(seq_info->percpu_value_buf);
634 }
635 
636 static const struct seq_operations bpf_array_map_seq_ops = {
637 	.start	= bpf_array_map_seq_start,
638 	.next	= bpf_array_map_seq_next,
639 	.stop	= bpf_array_map_seq_stop,
640 	.show	= bpf_array_map_seq_show,
641 };
642 
643 static const struct bpf_iter_seq_info iter_seq_info = {
644 	.seq_ops		= &bpf_array_map_seq_ops,
645 	.init_seq_private	= bpf_iter_init_array_map,
646 	.fini_seq_private	= bpf_iter_fini_array_map,
647 	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
648 };
649 
650 static int array_map_btf_id;
651 const struct bpf_map_ops array_map_ops = {
652 	.map_meta_equal = array_map_meta_equal,
653 	.map_alloc_check = array_map_alloc_check,
654 	.map_alloc = array_map_alloc,
655 	.map_free = array_map_free,
656 	.map_get_next_key = array_map_get_next_key,
657 	.map_lookup_elem = array_map_lookup_elem,
658 	.map_update_elem = array_map_update_elem,
659 	.map_delete_elem = array_map_delete_elem,
660 	.map_gen_lookup = array_map_gen_lookup,
661 	.map_direct_value_addr = array_map_direct_value_addr,
662 	.map_direct_value_meta = array_map_direct_value_meta,
663 	.map_mmap = array_map_mmap,
664 	.map_seq_show_elem = array_map_seq_show_elem,
665 	.map_check_btf = array_map_check_btf,
666 	.map_lookup_batch = generic_map_lookup_batch,
667 	.map_update_batch = generic_map_update_batch,
668 	.map_btf_name = "bpf_array",
669 	.map_btf_id = &array_map_btf_id,
670 	.iter_seq_info = &iter_seq_info,
671 };
672 
673 static int percpu_array_map_btf_id;
674 const struct bpf_map_ops percpu_array_map_ops = {
675 	.map_meta_equal = bpf_map_meta_equal,
676 	.map_alloc_check = array_map_alloc_check,
677 	.map_alloc = array_map_alloc,
678 	.map_free = array_map_free,
679 	.map_get_next_key = array_map_get_next_key,
680 	.map_lookup_elem = percpu_array_map_lookup_elem,
681 	.map_update_elem = array_map_update_elem,
682 	.map_delete_elem = array_map_delete_elem,
683 	.map_seq_show_elem = percpu_array_map_seq_show_elem,
684 	.map_check_btf = array_map_check_btf,
685 	.map_btf_name = "bpf_array",
686 	.map_btf_id = &percpu_array_map_btf_id,
687 	.iter_seq_info = &iter_seq_info,
688 };
689 
690 static int fd_array_map_alloc_check(union bpf_attr *attr)
691 {
692 	/* only file descriptors can be stored in this type of map */
693 	if (attr->value_size != sizeof(u32))
694 		return -EINVAL;
695 	/* Program read-only/write-only not supported for special maps yet. */
696 	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
697 		return -EINVAL;
698 	return array_map_alloc_check(attr);
699 }
700 
701 static void fd_array_map_free(struct bpf_map *map)
702 {
703 	struct bpf_array *array = container_of(map, struct bpf_array, map);
704 	int i;
705 
706 	/* make sure it's empty */
707 	for (i = 0; i < array->map.max_entries; i++)
708 		BUG_ON(array->ptrs[i] != NULL);
709 
710 	bpf_map_area_free(array);
711 }
712 
713 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
714 {
715 	return ERR_PTR(-EOPNOTSUPP);
716 }
717 
718 /* only called from syscall */
719 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
720 {
721 	void **elem, *ptr;
722 	int ret = 0;
723 
724 	if (!map->ops->map_fd_sys_lookup_elem)
725 		return -ENOTSUPP;
726 
727 	rcu_read_lock();
728 	elem = array_map_lookup_elem(map, key);
729 	if (elem && (ptr = READ_ONCE(*elem)))
730 		*value = map->ops->map_fd_sys_lookup_elem(ptr);
731 	else
732 		ret = -ENOENT;
733 	rcu_read_unlock();
734 
735 	return ret;
736 }
737 
738 /* only called from syscall */
739 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
740 				 void *key, void *value, u64 map_flags)
741 {
742 	struct bpf_array *array = container_of(map, struct bpf_array, map);
743 	void *new_ptr, *old_ptr;
744 	u32 index = *(u32 *)key, ufd;
745 
746 	if (map_flags != BPF_ANY)
747 		return -EINVAL;
748 
749 	if (index >= array->map.max_entries)
750 		return -E2BIG;
751 
752 	ufd = *(u32 *)value;
753 	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
754 	if (IS_ERR(new_ptr))
755 		return PTR_ERR(new_ptr);
756 
757 	if (map->ops->map_poke_run) {
758 		mutex_lock(&array->aux->poke_mutex);
759 		old_ptr = xchg(array->ptrs + index, new_ptr);
760 		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
761 		mutex_unlock(&array->aux->poke_mutex);
762 	} else {
763 		old_ptr = xchg(array->ptrs + index, new_ptr);
764 	}
765 
766 	if (old_ptr)
767 		map->ops->map_fd_put_ptr(old_ptr);
768 	return 0;
769 }
770 
771 static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
772 {
773 	struct bpf_array *array = container_of(map, struct bpf_array, map);
774 	void *old_ptr;
775 	u32 index = *(u32 *)key;
776 
777 	if (index >= array->map.max_entries)
778 		return -E2BIG;
779 
780 	if (map->ops->map_poke_run) {
781 		mutex_lock(&array->aux->poke_mutex);
782 		old_ptr = xchg(array->ptrs + index, NULL);
783 		map->ops->map_poke_run(map, index, old_ptr, NULL);
784 		mutex_unlock(&array->aux->poke_mutex);
785 	} else {
786 		old_ptr = xchg(array->ptrs + index, NULL);
787 	}
788 
789 	if (old_ptr) {
790 		map->ops->map_fd_put_ptr(old_ptr);
791 		return 0;
792 	} else {
793 		return -ENOENT;
794 	}
795 }
796 
797 static void *prog_fd_array_get_ptr(struct bpf_map *map,
798 				   struct file *map_file, int fd)
799 {
800 	struct bpf_array *array = container_of(map, struct bpf_array, map);
801 	struct bpf_prog *prog = bpf_prog_get(fd);
802 
803 	if (IS_ERR(prog))
804 		return prog;
805 
806 	if (!bpf_prog_array_compatible(array, prog)) {
807 		bpf_prog_put(prog);
808 		return ERR_PTR(-EINVAL);
809 	}
810 
811 	return prog;
812 }
813 
814 static void prog_fd_array_put_ptr(void *ptr)
815 {
816 	bpf_prog_put(ptr);
817 }
818 
819 static u32 prog_fd_array_sys_lookup_elem(void *ptr)
820 {
821 	return ((struct bpf_prog *)ptr)->aux->id;
822 }
823 
824 /* decrement refcnt of all bpf_progs that are stored in this map */
825 static void bpf_fd_array_map_clear(struct bpf_map *map)
826 {
827 	struct bpf_array *array = container_of(map, struct bpf_array, map);
828 	int i;
829 
830 	for (i = 0; i < array->map.max_entries; i++)
831 		fd_array_map_delete_elem(map, &i);
832 }
833 
834 static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
835 					 struct seq_file *m)
836 {
837 	void **elem, *ptr;
838 	u32 prog_id;
839 
840 	rcu_read_lock();
841 
842 	elem = array_map_lookup_elem(map, key);
843 	if (elem) {
844 		ptr = READ_ONCE(*elem);
845 		if (ptr) {
846 			seq_printf(m, "%u: ", *(u32 *)key);
847 			prog_id = prog_fd_array_sys_lookup_elem(ptr);
848 			btf_type_seq_show(map->btf, map->btf_value_type_id,
849 					  &prog_id, m);
850 			seq_puts(m, "\n");
851 		}
852 	}
853 
854 	rcu_read_unlock();
855 }
856 
857 struct prog_poke_elem {
858 	struct list_head list;
859 	struct bpf_prog_aux *aux;
860 };
861 
862 static int prog_array_map_poke_track(struct bpf_map *map,
863 				     struct bpf_prog_aux *prog_aux)
864 {
865 	struct prog_poke_elem *elem;
866 	struct bpf_array_aux *aux;
867 	int ret = 0;
868 
869 	aux = container_of(map, struct bpf_array, map)->aux;
870 	mutex_lock(&aux->poke_mutex);
871 	list_for_each_entry(elem, &aux->poke_progs, list) {
872 		if (elem->aux == prog_aux)
873 			goto out;
874 	}
875 
876 	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
877 	if (!elem) {
878 		ret = -ENOMEM;
879 		goto out;
880 	}
881 
882 	INIT_LIST_HEAD(&elem->list);
883 	/* We must track the program's aux info at this point in time
884 	 * since the program pointer itself may not be stable yet, see
885 	 * also comment in prog_array_map_poke_run().
886 	 */
887 	elem->aux = prog_aux;
888 
889 	list_add_tail(&elem->list, &aux->poke_progs);
890 out:
891 	mutex_unlock(&aux->poke_mutex);
892 	return ret;
893 }
894 
895 static void prog_array_map_poke_untrack(struct bpf_map *map,
896 					struct bpf_prog_aux *prog_aux)
897 {
898 	struct prog_poke_elem *elem, *tmp;
899 	struct bpf_array_aux *aux;
900 
901 	aux = container_of(map, struct bpf_array, map)->aux;
902 	mutex_lock(&aux->poke_mutex);
903 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
904 		if (elem->aux == prog_aux) {
905 			list_del_init(&elem->list);
906 			kfree(elem);
907 			break;
908 		}
909 	}
910 	mutex_unlock(&aux->poke_mutex);
911 }
912 
913 static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
914 				    struct bpf_prog *old,
915 				    struct bpf_prog *new)
916 {
917 	u8 *old_addr, *new_addr, *old_bypass_addr;
918 	struct prog_poke_elem *elem;
919 	struct bpf_array_aux *aux;
920 
921 	aux = container_of(map, struct bpf_array, map)->aux;
922 	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
923 
924 	list_for_each_entry(elem, &aux->poke_progs, list) {
925 		struct bpf_jit_poke_descriptor *poke;
926 		int i, ret;
927 
928 		for (i = 0; i < elem->aux->size_poke_tab; i++) {
929 			poke = &elem->aux->poke_tab[i];
930 
931 			/* Few things to be aware of:
932 			 *
933 			 * 1) We can only ever access aux in this context, but
934 			 *    not aux->prog since it might not be stable yet and
935 			 *    there could be danger of use after free otherwise.
936 			 * 2) Initially when we start tracking aux, the program
937 			 *    is not JITed yet and also does not have a kallsyms
938 			 *    entry. We skip these as poke->tailcall_target_stable
939 			 *    is not active yet. The JIT will do the final fixup
940 			 *    before setting it stable. The various
941 			 *    poke->tailcall_target_stable are successively
942 			 *    activated, so tail call updates can arrive from here
943 			 *    while JIT is still finishing its final fixup for
944 			 *    non-activated poke entries.
945 			 * 3) On program teardown, the program's kallsym entry gets
946 			 *    removed out of RCU callback, but we can only untrack
947 			 *    from sleepable context, therefore bpf_arch_text_poke()
948 			 *    might not see that this is in BPF text section and
949 			 *    bails out with -EINVAL. As these are unreachable since
950 			 *    RCU grace period already passed, we simply skip them.
951 			 * 4) Also programs reaching a refcount of zero while patching
952 			 *    is in progress are okay since we're protected under
953 			 *    poke_mutex and untrack the programs before the JIT
954 			 *    buffer is freed. When we're still in the middle of
955 			 *    patching and suddenly kallsyms entry of the program
956 			 *    gets evicted, we just skip the rest which is fine due
957 			 *    to point 3).
958 			 * 5) Any other error happening below from bpf_arch_text_poke()
959 			 *    is an unexpected bug.
960 			 */
961 			if (!READ_ONCE(poke->tailcall_target_stable))
962 				continue;
963 			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
964 				continue;
965 			if (poke->tail_call.map != map ||
966 			    poke->tail_call.key != key)
967 				continue;
968 
969 			old_bypass_addr = old ? NULL : poke->bypass_addr;
970 			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
971 			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
972 
973 			if (new) {
974 				ret = bpf_arch_text_poke(poke->tailcall_target,
975 							 BPF_MOD_JUMP,
976 							 old_addr, new_addr);
977 				BUG_ON(ret < 0 && ret != -EINVAL);
978 				if (!old) {
979 					ret = bpf_arch_text_poke(poke->tailcall_bypass,
980 								 BPF_MOD_JUMP,
981 								 poke->bypass_addr,
982 								 NULL);
983 					BUG_ON(ret < 0 && ret != -EINVAL);
984 				}
985 			} else {
986 				ret = bpf_arch_text_poke(poke->tailcall_bypass,
987 							 BPF_MOD_JUMP,
988 							 old_bypass_addr,
989 							 poke->bypass_addr);
990 				BUG_ON(ret < 0 && ret != -EINVAL);
991 				/* let other CPUs finish the execution of program
992 				 * so that it will not be possible to expose them
993 				 * to invalid nop, stack unwind, nop state
994 				 */
995 				if (!ret)
996 					synchronize_rcu();
997 				ret = bpf_arch_text_poke(poke->tailcall_target,
998 							 BPF_MOD_JUMP,
999 							 old_addr, NULL);
1000 				BUG_ON(ret < 0 && ret != -EINVAL);
1001 			}
1002 		}
1003 	}
1004 }
1005 
1006 static void prog_array_map_clear_deferred(struct work_struct *work)
1007 {
1008 	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1009 					   work)->map;
1010 	bpf_fd_array_map_clear(map);
1011 	bpf_map_put(map);
1012 }
1013 
1014 static void prog_array_map_clear(struct bpf_map *map)
1015 {
1016 	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1017 						 map)->aux;
1018 	bpf_map_inc(map);
1019 	schedule_work(&aux->work);
1020 }
1021 
1022 static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1023 {
1024 	struct bpf_array_aux *aux;
1025 	struct bpf_map *map;
1026 
1027 	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
1028 	if (!aux)
1029 		return ERR_PTR(-ENOMEM);
1030 
1031 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1032 	INIT_LIST_HEAD(&aux->poke_progs);
1033 	mutex_init(&aux->poke_mutex);
1034 	spin_lock_init(&aux->owner.lock);
1035 
1036 	map = array_map_alloc(attr);
1037 	if (IS_ERR(map)) {
1038 		kfree(aux);
1039 		return map;
1040 	}
1041 
1042 	container_of(map, struct bpf_array, map)->aux = aux;
1043 	aux->map = map;
1044 
1045 	return map;
1046 }
1047 
1048 static void prog_array_map_free(struct bpf_map *map)
1049 {
1050 	struct prog_poke_elem *elem, *tmp;
1051 	struct bpf_array_aux *aux;
1052 
1053 	aux = container_of(map, struct bpf_array, map)->aux;
1054 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1055 		list_del_init(&elem->list);
1056 		kfree(elem);
1057 	}
1058 	kfree(aux);
1059 	fd_array_map_free(map);
1060 }
1061 
1062 /* prog_array->aux->{type,jited} is a runtime binding.
1063  * Doing static check alone in the verifier is not enough.
1064  * Thus, prog_array_map cannot be used as an inner_map
1065  * and map_meta_equal is not implemented.
1066  */
1067 static int prog_array_map_btf_id;
1068 const struct bpf_map_ops prog_array_map_ops = {
1069 	.map_alloc_check = fd_array_map_alloc_check,
1070 	.map_alloc = prog_array_map_alloc,
1071 	.map_free = prog_array_map_free,
1072 	.map_poke_track = prog_array_map_poke_track,
1073 	.map_poke_untrack = prog_array_map_poke_untrack,
1074 	.map_poke_run = prog_array_map_poke_run,
1075 	.map_get_next_key = array_map_get_next_key,
1076 	.map_lookup_elem = fd_array_map_lookup_elem,
1077 	.map_delete_elem = fd_array_map_delete_elem,
1078 	.map_fd_get_ptr = prog_fd_array_get_ptr,
1079 	.map_fd_put_ptr = prog_fd_array_put_ptr,
1080 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1081 	.map_release_uref = prog_array_map_clear,
1082 	.map_seq_show_elem = prog_array_map_seq_show_elem,
1083 	.map_btf_name = "bpf_array",
1084 	.map_btf_id = &prog_array_map_btf_id,
1085 };
1086 
1087 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1088 						   struct file *map_file)
1089 {
1090 	struct bpf_event_entry *ee;
1091 
1092 	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
1093 	if (ee) {
1094 		ee->event = perf_file->private_data;
1095 		ee->perf_file = perf_file;
1096 		ee->map_file = map_file;
1097 	}
1098 
1099 	return ee;
1100 }
1101 
1102 static void __bpf_event_entry_free(struct rcu_head *rcu)
1103 {
1104 	struct bpf_event_entry *ee;
1105 
1106 	ee = container_of(rcu, struct bpf_event_entry, rcu);
1107 	fput(ee->perf_file);
1108 	kfree(ee);
1109 }
1110 
1111 static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1112 {
1113 	call_rcu(&ee->rcu, __bpf_event_entry_free);
1114 }
1115 
1116 static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1117 					 struct file *map_file, int fd)
1118 {
1119 	struct bpf_event_entry *ee;
1120 	struct perf_event *event;
1121 	struct file *perf_file;
1122 	u64 value;
1123 
1124 	perf_file = perf_event_get(fd);
1125 	if (IS_ERR(perf_file))
1126 		return perf_file;
1127 
1128 	ee = ERR_PTR(-EOPNOTSUPP);
1129 	event = perf_file->private_data;
1130 	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1131 		goto err_out;
1132 
1133 	ee = bpf_event_entry_gen(perf_file, map_file);
1134 	if (ee)
1135 		return ee;
1136 	ee = ERR_PTR(-ENOMEM);
1137 err_out:
1138 	fput(perf_file);
1139 	return ee;
1140 }
1141 
1142 static void perf_event_fd_array_put_ptr(void *ptr)
1143 {
1144 	bpf_event_entry_free_rcu(ptr);
1145 }
1146 
1147 static void perf_event_fd_array_release(struct bpf_map *map,
1148 					struct file *map_file)
1149 {
1150 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1151 	struct bpf_event_entry *ee;
1152 	int i;
1153 
1154 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1155 		return;
1156 
1157 	rcu_read_lock();
1158 	for (i = 0; i < array->map.max_entries; i++) {
1159 		ee = READ_ONCE(array->ptrs[i]);
1160 		if (ee && ee->map_file == map_file)
1161 			fd_array_map_delete_elem(map, &i);
1162 	}
1163 	rcu_read_unlock();
1164 }
1165 
1166 static void perf_event_fd_array_map_free(struct bpf_map *map)
1167 {
1168 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1169 		bpf_fd_array_map_clear(map);
1170 	fd_array_map_free(map);
1171 }
1172 
1173 static int perf_event_array_map_btf_id;
1174 const struct bpf_map_ops perf_event_array_map_ops = {
1175 	.map_meta_equal = bpf_map_meta_equal,
1176 	.map_alloc_check = fd_array_map_alloc_check,
1177 	.map_alloc = array_map_alloc,
1178 	.map_free = perf_event_fd_array_map_free,
1179 	.map_get_next_key = array_map_get_next_key,
1180 	.map_lookup_elem = fd_array_map_lookup_elem,
1181 	.map_delete_elem = fd_array_map_delete_elem,
1182 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1183 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1184 	.map_release = perf_event_fd_array_release,
1185 	.map_check_btf = map_check_no_btf,
1186 	.map_btf_name = "bpf_array",
1187 	.map_btf_id = &perf_event_array_map_btf_id,
1188 };
1189 
1190 #ifdef CONFIG_CGROUPS
1191 static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1192 				     struct file *map_file /* not used */,
1193 				     int fd)
1194 {
1195 	return cgroup_get_from_fd(fd);
1196 }
1197 
1198 static void cgroup_fd_array_put_ptr(void *ptr)
1199 {
1200 	/* cgroup_put() frees the cgroup after an RCU grace period */
1201 	cgroup_put(ptr);
1202 }
1203 
1204 static void cgroup_fd_array_free(struct bpf_map *map)
1205 {
1206 	bpf_fd_array_map_clear(map);
1207 	fd_array_map_free(map);
1208 }
1209 
1210 static int cgroup_array_map_btf_id;
1211 const struct bpf_map_ops cgroup_array_map_ops = {
1212 	.map_meta_equal = bpf_map_meta_equal,
1213 	.map_alloc_check = fd_array_map_alloc_check,
1214 	.map_alloc = array_map_alloc,
1215 	.map_free = cgroup_fd_array_free,
1216 	.map_get_next_key = array_map_get_next_key,
1217 	.map_lookup_elem = fd_array_map_lookup_elem,
1218 	.map_delete_elem = fd_array_map_delete_elem,
1219 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1220 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1221 	.map_check_btf = map_check_no_btf,
1222 	.map_btf_name = "bpf_array",
1223 	.map_btf_id = &cgroup_array_map_btf_id,
1224 };
1225 #endif
1226 
1227 static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1228 {
1229 	struct bpf_map *map, *inner_map_meta;
1230 
1231 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1232 	if (IS_ERR(inner_map_meta))
1233 		return inner_map_meta;
1234 
1235 	map = array_map_alloc(attr);
1236 	if (IS_ERR(map)) {
1237 		bpf_map_meta_free(inner_map_meta);
1238 		return map;
1239 	}
1240 
1241 	map->inner_map_meta = inner_map_meta;
1242 
1243 	return map;
1244 }
1245 
1246 static void array_of_map_free(struct bpf_map *map)
1247 {
1248 	/* map->inner_map_meta is only accessed by syscall which
1249 	 * is protected by fdget/fdput.
1250 	 */
1251 	bpf_map_meta_free(map->inner_map_meta);
1252 	bpf_fd_array_map_clear(map);
1253 	fd_array_map_free(map);
1254 }
1255 
1256 static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1257 {
1258 	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1259 
1260 	if (!inner_map)
1261 		return NULL;
1262 
1263 	return READ_ONCE(*inner_map);
1264 }
1265 
1266 static int array_of_map_gen_lookup(struct bpf_map *map,
1267 				   struct bpf_insn *insn_buf)
1268 {
1269 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1270 	u32 elem_size = round_up(map->value_size, 8);
1271 	struct bpf_insn *insn = insn_buf;
1272 	const int ret = BPF_REG_0;
1273 	const int map_ptr = BPF_REG_1;
1274 	const int index = BPF_REG_2;
1275 
1276 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1277 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1278 	if (!map->bypass_spec_v1) {
1279 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1280 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1281 	} else {
1282 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1283 	}
1284 	if (is_power_of_2(elem_size))
1285 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1286 	else
1287 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1288 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1289 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1290 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1291 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1292 	*insn++ = BPF_MOV64_IMM(ret, 0);
1293 
1294 	return insn - insn_buf;
1295 }
1296 
1297 static int array_of_maps_map_btf_id;
1298 const struct bpf_map_ops array_of_maps_map_ops = {
1299 	.map_alloc_check = fd_array_map_alloc_check,
1300 	.map_alloc = array_of_map_alloc,
1301 	.map_free = array_of_map_free,
1302 	.map_get_next_key = array_map_get_next_key,
1303 	.map_lookup_elem = array_of_map_lookup_elem,
1304 	.map_delete_elem = fd_array_map_delete_elem,
1305 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1306 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1307 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1308 	.map_gen_lookup = array_of_map_gen_lookup,
1309 	.map_check_btf = map_check_no_btf,
1310 	.map_btf_name = "bpf_array",
1311 	.map_btf_id = &array_of_maps_map_btf_id,
1312 };
1313