// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>

#include <trace/hooks/syscall_check.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};
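
/*
 * For illustration (this expansion is a sketch, not extra source):
 * <linux/bpf_types.h> is an X-macro list, so with the definitions above an
 * entry such as BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) expands to
 *
 *	[BPF_MAP_TYPE_ARRAY] = &array_map_ops,
 *
 * while the BPF_PROG_TYPE() and BPF_LINK_TYPE() entries expand to nothing,
 * leaving a table of ops indexed directly by map type.
 */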

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
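
/*
 * Illustrative scenario for the check above (sizes made up): if this kernel
 * knows a 120-byte attr for some command (expected_size == 120) and a newer
 * user-space passes 128 bytes (actual_size == 128), the call succeeds only
 * when bytes 120..127 are all zero, i.e. user-space is not requesting an
 * extension this kernel cannot honor.
 */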

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

/* Please, do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}
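
/*
 * Concrete numbers for the fallback above, assuming 4 KiB pages and the
 * usual PAGE_ALLOC_COSTLY_ORDER of 3: requests up to 32 KiB try
 * kmalloc_node() with __GFP_NORETRY first and fall back to vmalloc on
 * failure; larger (or mmapable) requests go straight to vmalloc.
 */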

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}


static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	map->memcg = get_mem_cgroup_from_mm(current->mm);
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	mem_cgroup_put(map->memcg);
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void __percpu *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}
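
/*
 * Minimal usage sketch for the helpers above (struct my_elem is
 * hypothetical): allocations backing map elements should be charged to the
 * memcg captured at map creation time, not to whoever triggers the
 * allocation later, e.g.:
 *
 *	struct my_elem *e = bpf_map_kmalloc_node(map, sizeof(*e),
 *						 GFP_ATOMIC | __GFP_NOWARN,
 *						 map->numa_node);
 *
 * The helpers do the set_active_memcg() save/restore so callers don't
 * have to.
 */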

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by the global
	 * map permissions on the syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide backward compatibility and display
 * reasonable "memlock" info.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
	unsigned long size;

	size = round_up(map->key_size + bpf_map_value_size(map), 8);

	return round_up(map->max_entries * size, PAGE_SIZE);
}
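
/*
 * Worked example with illustrative numbers: key_size == 4, value_size == 8,
 * max_entries == 1024 gives size == round_up(4 + 8, 8) == 16 and a reported
 * footprint of round_up(16 * 1024, PAGE_SIZE) == 16384 bytes on 4 KiB pages.
 * Per-element and housekeeping overhead is ignored, hence "approximation".
 */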

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 type = 0, jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		spin_lock(&array->aux->owner.lock);
		type = array->aux->owner.type;
		jited = array->aux->owner.jited;
		spin_unlock(&array->aux->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%lu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   bpf_map_memory_footprint(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped region (except the initial one) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including the initial one) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map) ||
	    map_value_has_timer(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allow user-space to still modify it after
		 * freezing, while the verifier will assume contents do not
		 * change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}


static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
	.mmap = bpf_map_mmap,
	.poll = bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
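
/*
 * For illustration: with BPF_MAP_CREATE_LAST_FIELD defined below as
 * btf_vmlinux_value_type_id, CHECK_ATTR(BPF_MAP_CREATE) scans every byte of
 * *attr past that field and evaluates to true (-EINVAL in the callers) if
 * any is non-zero, so a command rejects attr fields it does not understand
 * instead of silently ignoring them.
 */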

/* dst and src must each have at least "size" bytes.
 * Returns strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found within "size" bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}


int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	map->timer_off = btf_find_timer(btf, value_type);
	if (map_value_has_timer(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY)
			return -EOPNOTSUPP;
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);

	map->spin_lock_off = -EINVAL;
	map->timer_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with in order to figure out the
	     * corresponding kernel counterpart. Thus, attr->btf_fd also
	     * has to be valid.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to userspace, and userspace may have taken a
		 * reference to it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}


/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}
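
/*
 * Minimal user-space sketch of the command above (illustrative; raw
 * syscall instead of libbpf, map_fd/key/value assumed set up by caller):
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *
 * On success the kernel fills "value" via the path above.
 */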


#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_bpfptr(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		maybe_wait_bpf_programs(map);
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);
	return err;
}


int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->batch.map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);
	fdput(f);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

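		/* -ENOENT from bpf_map_copy_value() here typically means the
		 * key returned by map_get_next_key() was deleted before it
		 * could be read, so the walk is retried from the same
		 * prev_key a bounded number of times before bailing out
		 * with -EINTR.
		 */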
		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_dev_bound(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS ||
	    map_value_has_timer(map)) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!bpf_capable()) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
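
/*
 * Summary of the freeze semantics implemented above: once map->frozen is
 * set, map_get_sys_perms() masks out FMODE_CAN_WRITE for every fd, so
 * syscall-side writes fail with -EPERM, while BPF programs may still write
 * unless the map was created with BPF_F_RDONLY_PROG.
 */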

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD] = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (!in_irq() && !irqs_disabled())
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	unsigned long flags;

	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&prog_idr_lock, flags);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&prog_idr_lock, flags);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
	free_uid(aux->user);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	kvfree(prog->aux->jited_linfo);
	kvfree(prog->aux->linfo);
	kfree(prog->aux->kfunc_tab);
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);

	if (deferred) {
		if (prog->aux->sleepable)
			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
		else
			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	} else {
		__bpf_prog_put_rcu(&prog->aux->rcu);
	}
}

static void bpf_prog_put_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	struct bpf_prog *prog;

	aux = container_of(work, struct bpf_prog_aux, work);
	prog = aux->prog;
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
	bpf_prog_free_id(prog, true);
	__bpf_prog_put_noref(prog, true);
}

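/* The final put may run in IRQ context or with IRQs disabled, where the
 * perf/audit notifications and ID freeing done by bpf_prog_put_deferred()
 * are not safe; punt to a workqueue in that case, run inline otherwise.
 */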
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	struct bpf_prog_aux *aux = prog->aux;

	if (atomic64_dec_and_test(&aux->refcnt)) {
		if (in_irq() || irqs_disabled()) {
			INIT_WORK(&aux->work, bpf_prog_put_deferred);
			schedule_work(&aux->work);
		} else {
			bpf_prog_put_deferred(&aux->work);
		}
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

struct bpf_prog_kstats {
	u64 nsecs;
	u64 cnt;
	u64 misses;
};

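/* Sum the per-CPU stats into one struct. The u64_stats_fetch_begin_irq()/
 * u64_stats_fetch_retry_irq() pair makes the 64-bit counters read back
 * consistently even on 32-bit kernels, retrying if a writer updated the
 * stats mid-read.
 */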
bpf_prog_get_stats(const struct bpf_prog * prog,struct bpf_prog_kstats * stats)1839 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1840 struct bpf_prog_kstats *stats)
1841 {
1842 u64 nsecs = 0, cnt = 0, misses = 0;
1843 int cpu;
1844
1845 for_each_possible_cpu(cpu) {
1846 const struct bpf_prog_stats *st;
1847 unsigned int start;
1848 u64 tnsecs, tcnt, tmisses;
1849
1850 st = per_cpu_ptr(prog->stats, cpu);
1851 do {
1852 start = u64_stats_fetch_begin_irq(&st->syncp);
1853 tnsecs = u64_stats_read(&st->nsecs);
1854 tcnt = u64_stats_read(&st->cnt);
1855 tmisses = u64_stats_read(&st->misses);
1856 } while (u64_stats_fetch_retry_irq(&st->syncp, start));
1857 nsecs += tnsecs;
1858 cnt += tcnt;
1859 misses += tmisses;
1860 }
1861 stats->nsecs = nsecs;
1862 stats->cnt = cnt;
1863 stats->misses = misses;
1864 }

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_kstats stats;

	bpf_prog_get_stats(prog, &stats);
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n"
		   "run_time_ns:\t%llu\n"
		   "run_cnt:\t%llu\n"
		   "recursion_misses:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id,
		   stats.nsecs,
		   stats.cnt,
		   stats.misses);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_prog_add(struct bpf_prog *prog, int i)
{
	atomic64_add(i, &prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

void bpf_prog_inc(struct bpf_prog *prog)
{
	atomic64_inc(&prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	return __bpf_prog_get(ufd, &type, attach_drv);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later for some of them specifying expected_attach_type
 * at load time became required so that program could be validated properly.
 * Programs of types that are allowed to be loaded both w/ and w/o (for
 * backward compatibility) expected_attach_type, should have the default attach
 * type assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_SK_REUSEPORT_SELECT;
		break;
	}
}
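
/* For example, a cgroup/sock program loaded with expected_attach_type left
 * at zero behaves as if BPF_CGROUP_INET_SOCK_CREATE had been requested.
 * A user-space sketch of that legacy load (uapi fields only):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
 *	attr.expected_attach_type = 0;	// fixed up to INET_SOCK_CREATE
 *	...				// insns, license, etc.
 *	syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */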

static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
			   enum bpf_attach_type expected_attach_type,
			   struct btf *attach_btf, u32 btf_id,
			   struct bpf_prog *dst_prog)
{
	if (btf_id) {
		if (btf_id > BTF_MAX_TYPE)
			return -EINVAL;

		if (!attach_btf && !dst_prog)
			return -EINVAL;

		switch (prog_type) {
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
		case BPF_PROG_TYPE_STRUCT_OPS:
		case BPF_PROG_TYPE_EXT:
			break;
		default:
			return -EINVAL;
		}
	}

	if (attach_btf && (!btf_id || dst_prog))
		return -EINVAL;

	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
	    prog_type != BPF_PROG_TYPE_EXT)
		return -EINVAL;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET_SOCK_RELEASE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SKB:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		switch (expected_attach_type) {
		case BPF_CGROUP_SETSOCKOPT:
		case BPF_CGROUP_GETSOCKOPT:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_SK_LOOKUP:
		if (expected_attach_type == BPF_SK_LOOKUP)
			return 0;
		return -EINVAL;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		switch (expected_attach_type) {
		case BPF_SK_REUSEPORT_SELECT:
		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_SYSCALL:
	case BPF_PROG_TYPE_EXT:
		if (expected_attach_type)
			return -EINVAL;
		fallthrough;
	default:
		return 0;
	}
}

static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_EXT: /* extends any prog */
		return true;
	case BPF_PROG_TYPE_CGROUP_SKB:
		/* always unpriv */
	case BPF_PROG_TYPE_SK_REUSEPORT:
		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
	default:
		return false;
	}
}

static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
	case BPF_PROG_TYPE_EXT: /* extends any prog */
		return true;
	default:
		return false;
	}
}
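
/* Combined with the checks in bpf_prog_load() below, the permission model
 * is roughly the following illustrative predicate (not a kernel helper):
 *
 *	bool may_load = type == BPF_PROG_TYPE_SOCKET_FILTER ||
 *			type == BPF_PROG_TYPE_CGROUP_SKB ||
 *			bpf_capable();
 *	if (is_net_admin_prog_type(type))
 *		may_load &= capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
 *	if (is_perfmon_prog_type(type))
 *		may_load &= perfmon_capable();
 */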

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD fd_array

static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog, *dst_prog = NULL;
	struct btf *attach_btf = NULL;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
				 BPF_F_ANY_ALIGNMENT |
				 BPF_F_TEST_STATE_FREQ |
				 BPF_F_SLEEPABLE |
				 BPF_F_TEST_RND_HI32))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
	    !bpf_capable())
		return -EPERM;

	/* copy eBPF program license from user space */
	if (strncpy_from_bpfptr(license,
				make_bpfptr(attr->license, uattr.is_kernel),
				sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 ||
	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
		return -E2BIG;
	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !bpf_capable())
		return -EPERM;

	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (is_perfmon_prog_type(type) && !perfmon_capable())
		return -EPERM;

	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
	 * or btf, we need to check which one it is
	 */
	if (attr->attach_prog_fd) {
		dst_prog = bpf_prog_get(attr->attach_prog_fd);
		if (IS_ERR(dst_prog)) {
			dst_prog = NULL;
			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
			if (IS_ERR(attach_btf))
				return -EINVAL;
			if (!btf_is_kernel(attach_btf)) {
				/* attaching through specifying bpf_prog's BTF
				 * objects directly might be supported eventually
				 */
				btf_put(attach_btf);
				return -ENOTSUPP;
			}
		}
	} else if (attr->attach_btf_id) {
		/* fall back to vmlinux BTF, if BTF type ID is specified */
		attach_btf = bpf_get_btf_vmlinux();
		if (IS_ERR(attach_btf))
			return PTR_ERR(attach_btf);
		if (!attach_btf)
			return -EINVAL;
		btf_get(attach_btf);
	}

	bpf_prog_load_fixup_attach_type(attr);
	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
				       attach_btf, attr->attach_btf_id,
				       dst_prog)) {
		if (dst_prog)
			bpf_prog_put(dst_prog);
		if (attach_btf)
			btf_put(attach_btf);
		return -EINVAL;
	}

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog) {
		if (dst_prog)
			bpf_prog_put(dst_prog);
		if (attach_btf)
			btf_put(attach_btf);
		return -ENOMEM;
	}

	prog->expected_attach_type = attr->expected_attach_type;
	prog->aux->attach_btf = attach_btf;
	prog->aux->attach_btf_id = attr->attach_btf_id;
	prog->aux->dst_prog = dst_prog;
	prog->aux->offload_requested = !!attr->prog_ifindex;
	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;

	err = security_bpf_prog_alloc(prog->aux);
	if (err)
		goto free_prog;

	prog->aux->user = get_current_user();
	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_bpfptr(prog->insns,
			     make_bpfptr(attr->insns, uattr.is_kernel),
			     bpf_prog_insn_size(prog)) != 0)
		goto free_prog_sec;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic64_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_init(prog, attr);
		if (err)
			goto free_prog_sec;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog_sec;

	prog->aux->load_time = ktime_get_boottime_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
			       sizeof(attr->prog_name));
	if (err < 0)
		goto free_prog_sec;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr, uattr);
	if (err < 0)
		goto free_used_maps;

	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
	 * effectively publicly exposed. However, retrieving via
	 * bpf_prog_get_fd_by_id() will take another reference,
	 * therefore it cannot be gone underneath us.
	 *
	 * Only for the time /after/ successful bpf_prog_new_fd()
	 * and before returning to userspace, we might just hold
	 * one reference and any parallel close on that fd could
	 * rip everything out. Hence, below notifications must
	 * happen before bpf_prog_new_fd().
	 *
	 * Also, any failure handling from this point onwards must
	 * be using bpf_prog_put() given the program is exposed.
	 */
	bpf_prog_kallsyms_add(prog);
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_LOAD);

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		bpf_prog_put(prog);
	return err;

free_used_maps:
	/* In case we have subprogs, we need to wait for a grace
	 * period before we can tear down JIT memory since symbols
	 * are already exposed under kallsyms.
	 */
	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
	return err;
free_prog_sec:
	free_uid(prog->aux->user);
	security_bpf_prog_free(prog->aux);
free_prog:
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);
	bpf_prog_free(prog);
	return err;
}
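
/* A minimal user-space load, for illustration (insn macros as in
 * tools/include/linux/filter.h; assumes a two-insn "r0 = 0; exit" program):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insn_cnt  = ARRAY_SIZE(insns);
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * On success the returned fd holds the single program reference taken
 * above; a later close(fd) drops it and frees the program through
 * bpf_prog_put() once the refcount reaches zero.
 */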

#define BPF_OBJ_LAST_FIELD file_flags

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
{
	atomic64_set(&link->refcnt, 1);
	link->type = type;
	link->id = 0;
	link->ops = ops;
	link->prog = prog;
}

static void bpf_link_free_id(int id)
{
	if (!id)
		return;

	spin_lock_bh(&link_idr_lock);
	idr_remove(&link_idr, id);
	spin_unlock_bh(&link_idr_lock);
}

/* Clean up bpf_link and corresponding anon_inode file and FD. After
 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
 * anon_inode's release() call. This helper marks bpf_link as
 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
 * is not decremented, it's the responsibility of a calling code that failed
 * to complete bpf_link initialization.
 */
void bpf_link_cleanup(struct bpf_link_primer *primer)
{
	primer->link->prog = NULL;
	bpf_link_free_id(primer->id);
	fput(primer->file);
	put_unused_fd(primer->fd);
}

void bpf_link_inc(struct bpf_link *link)
{
	atomic64_inc(&link->refcnt);
}

/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
	bpf_link_free_id(link->id);
	if (link->prog) {
		/* detach BPF program, clean up used resources */
		link->ops->release(link);
		bpf_prog_put(link->prog);
	}
	/* free bpf_link and its containing memory */
	link->ops->dealloc(link);
}

static void bpf_link_put_deferred(struct work_struct *work)
{
	struct bpf_link *link = container_of(work, struct bpf_link, work);

	bpf_link_free(link);
}

/* bpf_link_put can be called from atomic context, but ensures that resources
 * are freed from process context
 */
void bpf_link_put(struct bpf_link *link)
{
	if (!atomic64_dec_and_test(&link->refcnt))
		return;

	if (in_atomic()) {
		INIT_WORK(&link->work, bpf_link_put_deferred);
		schedule_work(&link->work);
	} else {
		bpf_link_free(link);
	}
}

static int bpf_link_release(struct inode *inode, struct file *filp)
{
	struct bpf_link *link = filp->private_data;

	bpf_link_put(link);
	return 0;
}

#ifdef CONFIG_PROC_FS
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
static const char *bpf_link_type_strs[] = {
	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
#include <linux/bpf_types.h>
};
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_link *link = filp->private_data;
	const struct bpf_prog *prog = link->prog;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "link_type:\t%s\n"
		   "link_id:\t%u\n"
		   "prog_tag:\t%s\n"
		   "prog_id:\t%u\n",
		   bpf_link_type_strs[link->type],
		   link->id,
		   prog_tag,
		   prog->aux->id);
	if (link->ops->show_fdinfo)
		link->ops->show_fdinfo(link, m);
}
#endif

static const struct file_operations bpf_link_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_link_show_fdinfo,
#endif
	.release	= bpf_link_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

static int bpf_link_alloc_id(struct bpf_link *link)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&link_idr_lock);
	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
	spin_unlock_bh(&link_idr_lock);
	idr_preload_end();

	return id;
}

/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
 * reserving unused FD and allocating ID from link_idr. This is to be paired
 * with bpf_link_settle() to install FD and ID and expose bpf_link to
 * user-space, if bpf_link is successfully attached. If not, bpf_link and
 * pre-allocated resources are to be freed with bpf_link_cleanup() call. All
 * the transient state is passed around in struct bpf_link_primer.
 * This is the preferred way to create and initialize bpf_link, especially
 * when there are complicated and expensive operations in between creating
 * bpf_link itself and attaching it to BPF hook. By using bpf_link_prime() and
 * bpf_link_settle() kernel code using bpf_link doesn't have to perform
 * expensive (and potentially failing) roll back operations in a rare case
 * that file, FD, or ID can't be allocated.
 */
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
{
	struct file *file;
	int fd, id;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	id = bpf_link_alloc_id(link);
	if (id < 0) {
		put_unused_fd(fd);
		return id;
	}

	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
	if (IS_ERR(file)) {
		bpf_link_free_id(id);
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	primer->link = link;
	primer->file = file;
	primer->fd = fd;
	primer->id = id;
	return 0;
}

int bpf_link_settle(struct bpf_link_primer *primer)
{
	/* make bpf_link fetchable by ID */
	spin_lock_bh(&link_idr_lock);
	primer->link->id = primer->id;
	spin_unlock_bh(&link_idr_lock);
	/* make bpf_link fetchable by FD */
	fd_install(primer->fd, primer->file);
	/* pass through installed FD */
	return primer->fd;
}
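
/* The canonical caller pattern for the primer API, as used by e.g.
 * bpf_raw_tracepoint_open() later in this file (sketch; attach_to_hook()
 * stands in for whatever hook-specific registration the caller performs):
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = attach_to_hook(link);
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);	// frees FD/ID/file, not prog
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);	// publishes FD and ID
 */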

int bpf_link_new_fd(struct bpf_link *link)
{
	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
}

struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_link *link;

	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_link_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	link = f.file->private_data;
	bpf_link_inc(link);
	fdput(f);

	return link;
}

struct bpf_tracing_link {
	struct bpf_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

static void bpf_tracing_link_release(struct bpf_link *link)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link);

	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
						tr_link->trampoline));

	bpf_trampoline_put(tr_link->trampoline);

	/* tgt_prog is NULL if target is a kernel function */
	if (tr_link->tgt_prog)
		bpf_prog_put(tr_link->tgt_prog);
}

static void bpf_tracing_link_dealloc(struct bpf_link *link)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link);

	kfree(tr_link);
}

static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
					 struct seq_file *seq)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link);

	seq_printf(seq,
		   "attach_type:\t%d\n",
		   tr_link->attach_type);
}

static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
					   struct bpf_link_info *info)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link);

	info->tracing.attach_type = tr_link->attach_type;
	bpf_trampoline_unpack_key(tr_link->trampoline->key,
				  &info->tracing.target_obj_id,
				  &info->tracing.target_btf_id);

	return 0;
}

static const struct bpf_link_ops bpf_tracing_link_lops = {
	.release = bpf_tracing_link_release,
	.dealloc = bpf_tracing_link_dealloc,
	.show_fdinfo = bpf_tracing_link_show_fdinfo,
	.fill_link_info = bpf_tracing_link_fill_link_info,
};

static int bpf_tracing_prog_attach(struct bpf_prog *prog,
				   int tgt_prog_fd,
				   u32 btf_id)
{
	struct bpf_link_primer link_primer;
	struct bpf_prog *tgt_prog = NULL;
	struct bpf_trampoline *tr = NULL;
	struct bpf_tracing_link *link;
	u64 key = 0;
	int err;

	switch (prog->type) {
	case BPF_PROG_TYPE_TRACING:
		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	case BPF_PROG_TYPE_EXT:
		if (prog->expected_attach_type != 0) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	case BPF_PROG_TYPE_LSM:
		if (prog->expected_attach_type != BPF_LSM_MAC) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	default:
		err = -EINVAL;
		goto out_put_prog;
	}

	if (!!tgt_prog_fd != !!btf_id) {
		err = -EINVAL;
		goto out_put_prog;
	}

	if (tgt_prog_fd) {
		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
		if (prog->type != BPF_PROG_TYPE_EXT) {
			err = -EINVAL;
			goto out_put_prog;
		}

		tgt_prog = bpf_prog_get(tgt_prog_fd);
		if (IS_ERR(tgt_prog)) {
			err = PTR_ERR(tgt_prog);
			tgt_prog = NULL;
			goto out_put_prog;
		}

		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_prog;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
		      &bpf_tracing_link_lops, prog);
	link->attach_type = prog->expected_attach_type;

	mutex_lock(&prog->aux->dst_mutex);

	/* There are a few possible cases here:
	 *
	 * - if prog->aux->dst_trampoline is set, the program was just loaded
	 *   and not yet attached to anything, so we can use the values stored
	 *   in prog->aux
	 *
	 * - if prog->aux->dst_trampoline is NULL, the program has already been
	 *   attached to a target and its initial target was cleared (below)
	 *
	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
	 *   target_btf_id using the link_create API.
	 *
	 * - if tgt_prog == NULL, the function was called using the old
	 *   raw_tracepoint_open API, and we need a target from prog->aux
	 *
	 * - if prog->aux->dst_trampoline and tgt_prog are NULL, the program
	 *   was detached and is going for re-attachment.
	 *
	 * - if prog->aux->dst_trampoline is NULL and tgt_prog and
	 *   prog->aux->attach_btf are NULL, then the program was already
	 *   attached and the user did not provide tgt_prog_fd, so we have no
	 *   way to find out or create a trampoline
	 */
	if (!prog->aux->dst_trampoline && !tgt_prog) {
		/*
		 * Allow re-attach for TRACING and LSM programs. If it's
		 * currently linked, bpf_trampoline_link_prog will fail.
		 * EXT programs need to specify tgt_prog_fd, so they
		 * re-attach in a separate code path.
		 */
		if (prog->type != BPF_PROG_TYPE_TRACING &&
		    prog->type != BPF_PROG_TYPE_LSM) {
			err = -EINVAL;
			goto out_unlock;
		}
		/* We can allow re-attach only if we have valid attach_btf. */
		if (!prog->aux->attach_btf) {
			err = -EINVAL;
			goto out_unlock;
		}
		btf_id = prog->aux->attach_btf_id;
		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
	}

	if (!prog->aux->dst_trampoline ||
	    (key && key != prog->aux->dst_trampoline->key)) {
		/* If there is no saved target, or the specified target is
		 * different from the destination specified at load time, we
		 * need a new trampoline and a check for compatibility
		 */
		struct bpf_attach_target_info tgt_info = {};

		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
					      &tgt_info);
		if (err)
			goto out_unlock;

		tr = bpf_trampoline_get(key, &tgt_info);
		if (!tr) {
			err = -ENOMEM;
			goto out_unlock;
		}
	} else {
		/* The caller didn't specify a target, or the target was the
		 * same as the destination supplied during program load. This
		 * means we can reuse the trampoline and reference from program
		 * load time, and there is no need to allocate a new one. This
		 * can only happen once for any program, as the saved values in
		 * prog->aux are cleared below.
		 */
		tr = prog->aux->dst_trampoline;
		tgt_prog = prog->aux->dst_prog;
	}

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto out_unlock;

	err = bpf_trampoline_link_prog(prog, tr);
	if (err) {
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto out_unlock;
	}

	link->tgt_prog = tgt_prog;
	link->trampoline = tr;

	/* Always clear the trampoline and target prog from prog->aux to make
	 * sure the original attach destination is not kept alive after a
	 * program is (re-)attached to another target.
	 */
	if (prog->aux->dst_prog &&
	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
		/* got extra prog ref from syscall, or attaching to different prog */
		bpf_prog_put(prog->aux->dst_prog);
	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
		/* we allocated a new trampoline, so free the old one */
		bpf_trampoline_put(prog->aux->dst_trampoline);

	prog->aux->dst_prog = NULL;
	prog->aux->dst_trampoline = NULL;
	mutex_unlock(&prog->aux->dst_mutex);

	return bpf_link_settle(&link_primer);
out_unlock:
	if (tr && tr != prog->aux->dst_trampoline)
		bpf_trampoline_put(tr);
	mutex_unlock(&prog->aux->dst_mutex);
	kfree(link);
out_put_prog:
	if (tgt_prog_fd && tgt_prog)
		bpf_prog_put(tgt_prog);
	return err;
}

struct bpf_raw_tp_link {
	struct bpf_link link;
	struct bpf_raw_event_map *btp;
};

static void bpf_raw_tp_link_release(struct bpf_link *link)
{
	struct bpf_raw_tp_link *raw_tp =
		container_of(link, struct bpf_raw_tp_link, link);

	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
	bpf_put_raw_tracepoint(raw_tp->btp);
}

static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
{
	struct bpf_raw_tp_link *raw_tp =
		container_of(link, struct bpf_raw_tp_link, link);

	kfree(raw_tp);
}

static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_raw_tp_link *raw_tp_link =
		container_of(link, struct bpf_raw_tp_link, link);

	seq_printf(seq,
		   "tp_name:\t%s\n",
		   raw_tp_link->btp->tp->name);
}

static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_raw_tp_link *raw_tp_link =
		container_of(link, struct bpf_raw_tp_link, link);
	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
	const char *tp_name = raw_tp_link->btp->tp->name;
	u32 ulen = info->raw_tracepoint.tp_name_len;
	size_t tp_len = strlen(tp_name);

	if (!ulen ^ !ubuf)
		return -EINVAL;

	info->raw_tracepoint.tp_name_len = tp_len + 1;

	if (!ubuf)
		return 0;

	if (ulen >= tp_len + 1) {
		if (copy_to_user(ubuf, tp_name, tp_len + 1))
			return -EFAULT;
	} else {
		char zero = '\0';

		if (copy_to_user(ubuf, tp_name, ulen - 1))
			return -EFAULT;
		if (put_user(zero, ubuf + ulen - 1))
			return -EFAULT;
		return -ENOSPC;
	}

	return 0;
}

static const struct bpf_link_ops bpf_raw_tp_link_lops = {
	.release = bpf_raw_tp_link_release,
	.dealloc = bpf_raw_tp_link_dealloc,
	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
	.fill_link_info = bpf_raw_tp_link_fill_link_info,
};

#ifdef CONFIG_PERF_EVENTS
struct bpf_perf_link {
	struct bpf_link link;
	struct file *perf_file;
};

static void bpf_perf_link_release(struct bpf_link *link)
{
	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
	struct perf_event *event = perf_link->perf_file->private_data;

	perf_event_free_bpf_prog(event);
	fput(perf_link->perf_file);
}

static void bpf_perf_link_dealloc(struct bpf_link *link)
{
	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);

	kfree(perf_link);
}

static const struct bpf_link_ops bpf_perf_link_lops = {
	.release = bpf_perf_link_release,
	.dealloc = bpf_perf_link_dealloc,
};

static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_perf_link *link;
	struct perf_event *event;
	struct file *perf_file;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	perf_file = perf_event_get(attr->link_create.target_fd);
	if (IS_ERR(perf_file))
		return PTR_ERR(perf_file);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_file;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
	link->perf_file = perf_file;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_file;
	}

	event = perf_file->private_data;
	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_file;
	}
	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
	bpf_prog_inc(prog);

	return bpf_link_settle(&link_primer);

out_put_file:
	fput(perf_file);
	return err;
}
#endif /* CONFIG_PERF_EVENTS */

#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd

static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
	struct bpf_link_primer link_primer;
	struct bpf_raw_tp_link *link;
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
	const char *tp_name;
	char buf[128];
	int err;

	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
		return -EINVAL;

	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	switch (prog->type) {
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
		if (attr->raw_tracepoint.name) {
			/* The attach point for this category of programs
			 * should be specified via btf_id during program load.
			 */
			err = -EINVAL;
			goto out_put_prog;
		}
		if (prog->type == BPF_PROG_TYPE_TRACING &&
		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
			tp_name = prog->aux->attach_func_name;
			break;
		}
		err = bpf_tracing_prog_attach(prog, 0, 0);
		if (err >= 0)
			return err;
		goto out_put_prog;
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
		if (strncpy_from_user(buf,
				      u64_to_user_ptr(attr->raw_tracepoint.name),
				      sizeof(buf) - 1) < 0) {
			err = -EFAULT;
			goto out_put_prog;
		}
		buf[sizeof(buf) - 1] = 0;
		tp_name = buf;
		break;
	default:
		err = -EINVAL;
		goto out_put_prog;
	}

	btp = bpf_get_raw_tracepoint(tp_name);
	if (!btp) {
		err = -ENOENT;
		goto out_put_prog;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_btp;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
		      &bpf_raw_tp_link_lops, prog);
	link->btp = btp;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_btp;
	}

	err = bpf_probe_register(link->btp, prog);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_btp;
	}

	return bpf_link_settle(&link_primer);

out_put_btp:
	bpf_put_raw_tracepoint(btp);
out_put_prog:
	bpf_prog_put(prog);
	return err;
}
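
/* User-space view of this command, for illustration (uapi fields only):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;	// RAW_TRACEPOINT prog
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
 *			  &attr, sizeof(attr));
 *
 * Closing link_fd drops the last link reference and detaches the program
 * via bpf_raw_tp_link_release().
 */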

static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
					     enum bpf_attach_type attach_type)
{
	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_SK_LOOKUP:
		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
	case BPF_PROG_TYPE_CGROUP_SKB:
		if (!capable(CAP_NET_ADMIN))
			/* cg-skb progs can be loaded by unpriv user.
			 * check permissions at attach time.
			 */
			return -EPERM;
		return prog->enforce_expected_attach_type &&
			prog->expected_attach_type != attach_type ?
			-EINVAL : 0;
	default:
		return 0;
	}
}

static enum bpf_prog_type
attach_type_to_prog_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		return BPF_PROG_TYPE_CGROUP_SKB;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET_SOCK_RELEASE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		return BPF_PROG_TYPE_CGROUP_SOCK;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_INET4_GETPEERNAME:
	case BPF_CGROUP_INET6_GETPEERNAME:
	case BPF_CGROUP_INET4_GETSOCKNAME:
	case BPF_CGROUP_INET6_GETSOCKNAME:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
	case BPF_CGROUP_SOCK_OPS:
		return BPF_PROG_TYPE_SOCK_OPS;
	case BPF_CGROUP_DEVICE:
		return BPF_PROG_TYPE_CGROUP_DEVICE;
	case BPF_SK_MSG_VERDICT:
		return BPF_PROG_TYPE_SK_MSG;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
	case BPF_SK_SKB_VERDICT:
		return BPF_PROG_TYPE_SK_SKB;
	case BPF_LIRC_MODE2:
		return BPF_PROG_TYPE_LIRC_MODE2;
	case BPF_FLOW_DISSECTOR:
		return BPF_PROG_TYPE_FLOW_DISSECTOR;
	case BPF_CGROUP_SYSCTL:
		return BPF_PROG_TYPE_CGROUP_SYSCTL;
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
	case BPF_TRACE_ITER:
		return BPF_PROG_TYPE_TRACING;
	case BPF_SK_LOOKUP:
		return BPF_PROG_TYPE_SK_LOOKUP;
	case BPF_XDP:
		return BPF_PROG_TYPE_XDP;
	default:
		return BPF_PROG_TYPE_UNSPEC;
	}
}

#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);
	if (ptype == BPF_PROG_TYPE_UNSPEC)
		return -EINVAL;

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		ret = sock_map_get_from_fd(attr, prog);
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		ret = lirc_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		ret = netns_bpf_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		bpf_prog_put(prog);
	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);

	switch (ptype) {
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_SK_SKB:
		return sock_map_prog_detach(attr, ptype);
	case BPF_PROG_TYPE_LIRC_MODE2:
		return lirc_prog_detach(attr);
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		return netns_bpf_prog_detach(attr, ptype);
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
		return cgroup_bpf_prog_detach(attr, ptype);
	default:
		return -EINVAL;
	}
}

#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET_SOCK_RELEASE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_INET4_GETPEERNAME:
	case BPF_CGROUP_INET6_GETPEERNAME:
	case BPF_CGROUP_INET4_GETSOCKNAME:
	case BPF_CGROUP_INET6_GETSOCKNAME:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
	case BPF_CGROUP_SYSCTL:
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		return cgroup_bpf_prog_query(attr, uattr);
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);
	case BPF_FLOW_DISSECTOR:
	case BPF_SK_LOOKUP:
		return netns_bpf_prog_query(attr, uattr);
	default:
		return -EINVAL;
	}
}

#define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
	    (!attr->test.ctx_size_in && attr->test.ctx_in))
		return -EINVAL;

	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
	    (!attr->test.ctx_size_out && attr->test.ctx_out))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
{
	struct bpf_map *map;

	spin_lock_bh(&map_idr_lock);
again:
	map = idr_get_next(&map_idr, id);
	if (map) {
		map = __bpf_map_inc_not_zero(map, false);
		if (IS_ERR(map)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&map_idr_lock);

	return map;
}

struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
{
	struct bpf_prog *prog;

	spin_lock_bh(&prog_idr_lock);
again:
	prog = idr_get_next(&prog_idr, id);
	if (prog) {
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&prog_idr_lock);

	return prog;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

struct bpf_prog *bpf_prog_by_id(u32 id)
{
	struct bpf_prog *prog;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);
	return prog;
}

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	prog = bpf_prog_by_id(id);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}
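
/* bpf_obj_get_next_id() and bpf_prog_get_fd_by_id() combine into the usual
 * user-space iteration pattern (sketch; both commands need CAP_SYS_ADMIN):
 *
 *	__u32 id = 0;
 *	union bpf_attr attr = {};
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID,
 *			    &attr, sizeof(attr)))
 *			break;			// -ENOENT: no more programs
 *		id = attr.next_id;
 *		attr.prog_id = id;		// aliases start_id in the union
 *		fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID,
 *			     &attr, sizeof(attr));
 *		...
 *	}
 */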

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = __bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put_with_uref(map);

	return fd;
}

static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr, u32 *off,
					      u32 *type)
{
	const struct bpf_map *map;
	int i;

	mutex_lock(&prog->aux->used_maps_mutex);
	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (map == (void *)addr) {
			*type = BPF_PSEUDO_MAP_FD;
			goto out;
		}
		if (!map->ops->map_direct_value_meta)
			continue;
		if (!map->ops->map_direct_value_meta(map, addr, off)) {
			*type = BPF_PSEUDO_MAP_VALUE;
			goto out;
		}
	}
	map = NULL;

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return map;
}

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
					      const struct cred *f_cred)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u32 off, type;
	u64 imm;
	u8 code;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		code = insns[i].code;

		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (code == (BPF_JMP | BPF_CALL) ||
		    code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok(f_cred))
				insns[i].imm = 0;
			continue;
		}
		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if (code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm, &off, &type);
		if (map) {
			insns[i].src_reg = type;
			insns[i].imm = map->id;
			insns[i + 1].imm = off;
			continue;
		}
	}

	return insns;
}

static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure info.*_rec_size is the same as kernel expected size
	 *
	 * or
	 *
	 * Only allow zero *_rec_size if both _rec_size and _cnt are
	 * zero. In this case, the kernel will set the expected
	 * _rec_size back to the info.
	 */

	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}
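
/* In practice user space either leaves both the _cnt and _rec_size fields
 * zeroed and lets the kernel report the expected record sizes, or passes
 * exactly the sizes it was built against, e.g.:
 *
 *	struct bpf_prog_info info = {};
 *
 *	info.nr_func_info = cnt;
 *	info.func_info_rec_size = sizeof(struct bpf_func_info);
 *	info.func_info = (__u64)(unsigned long)buf;
 *
 * Any other combination is rejected with -EINVAL above.
 */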
3610
bpf_prog_get_info_by_fd(struct file * file,struct bpf_prog * prog,const union bpf_attr * attr,union bpf_attr __user * uattr)3611 static int bpf_prog_get_info_by_fd(struct file *file,
3612 struct bpf_prog *prog,
3613 const union bpf_attr *attr,
3614 union bpf_attr __user *uattr)
3615 {
3616 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3617 struct bpf_prog_info info;
3618 u32 info_len = attr->info.info_len;
3619 struct bpf_prog_kstats stats;
3620 char __user *uinsns;
3621 u32 ulen;
3622 int err;
3623
3624 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3625 if (err)
3626 return err;
3627 info_len = min_t(u32, sizeof(info), info_len);
3628
3629 memset(&info, 0, sizeof(info));
3630 if (copy_from_user(&info, uinfo, info_len))
3631 return -EFAULT;
3632
3633 info.type = prog->type;
3634 info.id = prog->aux->id;
3635 info.load_time = prog->aux->load_time;
3636 info.created_by_uid = from_kuid_munged(current_user_ns(),
3637 prog->aux->user->uid);
3638 info.gpl_compatible = prog->gpl_compatible;
3639
3640 memcpy(info.tag, prog->tag, sizeof(prog->tag));
3641 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3642
3643 mutex_lock(&prog->aux->used_maps_mutex);
3644 ulen = info.nr_map_ids;
3645 info.nr_map_ids = prog->aux->used_map_cnt;
3646 ulen = min_t(u32, info.nr_map_ids, ulen);
3647 if (ulen) {
3648 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3649 u32 i;
3650
3651 for (i = 0; i < ulen; i++)
3652 if (put_user(prog->aux->used_maps[i]->id,
3653 &user_map_ids[i])) {
3654 mutex_unlock(&prog->aux->used_maps_mutex);
3655 return -EFAULT;
3656 }
3657 }
3658 mutex_unlock(&prog->aux->used_maps_mutex);
3659
3660 err = set_info_rec_size(&info);
3661 if (err)
3662 return err;
3663
3664 bpf_prog_get_stats(prog, &stats);
3665 info.run_time_ns = stats.nsecs;
3666 info.run_cnt = stats.cnt;
3667 info.recursion_misses = stats.misses;
3668
3669 if (!bpf_capable()) {
3670 info.jited_prog_len = 0;
3671 info.xlated_prog_len = 0;
3672 info.nr_jited_ksyms = 0;
3673 info.nr_jited_func_lens = 0;
3674 info.nr_func_info = 0;
3675 info.nr_line_info = 0;
3676 info.nr_jited_line_info = 0;
3677 goto done;
3678 }
3679
3680 ulen = info.xlated_prog_len;
3681 info.xlated_prog_len = bpf_prog_insn_size(prog);
3682 if (info.xlated_prog_len && ulen) {
3683 struct bpf_insn *insns_sanitized;
3684 bool fault;
3685
3686 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3687 info.xlated_prog_insns = 0;
3688 goto done;
3689 }
3690 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3691 if (!insns_sanitized)
3692 return -ENOMEM;
3693 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3694 ulen = min_t(u32, info.xlated_prog_len, ulen);
3695 fault = copy_to_user(uinsns, insns_sanitized, ulen);
3696 kfree(insns_sanitized);
3697 if (fault)
3698 return -EFAULT;
3699 }
3700
3701 if (bpf_prog_is_dev_bound(prog->aux)) {
3702 err = bpf_prog_offload_info_fill(&info, prog);
3703 if (err)
3704 return err;
3705 goto done;
3706 }
3707
3708 /* NOTE: the following code is supposed to be skipped for offload.
3709 * bpf_prog_offload_info_fill() is the place to fill similar fields
3710 * for offload.
3711 */
3712 ulen = info.jited_prog_len;
3713 if (prog->aux->func_cnt) {
3714 u32 i;
3715
3716 info.jited_prog_len = 0;
3717 for (i = 0; i < prog->aux->func_cnt; i++)
3718 info.jited_prog_len += prog->aux->func[i]->jited_len;
3719 } else {
3720 info.jited_prog_len = prog->jited_len;
3721 }
3722
3723 if (info.jited_prog_len && ulen) {
3724 if (bpf_dump_raw_ok(file->f_cred)) {
3725 uinsns = u64_to_user_ptr(info.jited_prog_insns);
3726 ulen = min_t(u32, info.jited_prog_len, ulen);
3727
3728 /* for multi-function programs, copy the JITed
3729 * instructions for all the functions
3730 */
3731 if (prog->aux->func_cnt) {
3732 u32 len, free, i;
3733 u8 *img;
3734
3735 free = ulen;
3736 for (i = 0; i < prog->aux->func_cnt; i++) {
3737 len = prog->aux->func[i]->jited_len;
3738 len = min_t(u32, len, free);
3739 img = (u8 *) prog->aux->func[i]->bpf_func;
3740 if (copy_to_user(uinsns, img, len))
3741 return -EFAULT;
3742 uinsns += len;
3743 free -= len;
3744 if (!free)
3745 break;
3746 }
3747 } else {
3748 if (copy_to_user(uinsns, prog->bpf_func, ulen))
3749 return -EFAULT;
3750 }
3751 } else {
3752 info.jited_prog_insns = 0;
3753 }
3754 }
3755
3756 ulen = info.nr_jited_ksyms;
3757 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3758 if (ulen) {
3759 if (bpf_dump_raw_ok(file->f_cred)) {
3760 unsigned long ksym_addr;
3761 u64 __user *user_ksyms;
3762 u32 i;
3763
3764 /* copy the address of the kernel symbol
3765 * corresponding to each function
3766 */
3767 ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3768 user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3769 if (prog->aux->func_cnt) {
3770 for (i = 0; i < ulen; i++) {
3771 ksym_addr = (unsigned long)
3772 prog->aux->func[i]->bpf_func;
3773 if (put_user((u64) ksym_addr,
3774 &user_ksyms[i]))
3775 return -EFAULT;
3776 }
3777 } else {
3778 ksym_addr = (unsigned long) prog->bpf_func;
3779 if (put_user((u64) ksym_addr, &user_ksyms[0]))
3780 return -EFAULT;
3781 }
3782 } else {
3783 info.jited_ksyms = 0;
3784 }
3785 }
3786
3787 ulen = info.nr_jited_func_lens;
3788 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3789 if (ulen) {
3790 if (bpf_dump_raw_ok(file->f_cred)) {
3791 u32 __user *user_lens;
3792 u32 func_len, i;
3793
3794 /* copy the JITed image lengths for each function */
3795 ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3796 user_lens = u64_to_user_ptr(info.jited_func_lens);
3797 if (prog->aux->func_cnt) {
3798 for (i = 0; i < ulen; i++) {
3799 func_len =
3800 prog->aux->func[i]->jited_len;
3801 if (put_user(func_len, &user_lens[i]))
3802 return -EFAULT;
3803 }
3804 } else {
3805 func_len = prog->jited_len;
3806 if (put_user(func_len, &user_lens[0]))
3807 return -EFAULT;
3808 }
3809 } else {
3810 info.jited_func_lens = 0;
3811 }
3812 }
3813
3814 if (prog->aux->btf)
3815 info.btf_id = btf_obj_id(prog->aux->btf);
3816
3817 ulen = info.nr_func_info;
3818 info.nr_func_info = prog->aux->func_info_cnt;
3819 if (info.nr_func_info && ulen) {
3820 char __user *user_finfo;
3821
3822 user_finfo = u64_to_user_ptr(info.func_info);
3823 ulen = min_t(u32, info.nr_func_info, ulen);
3824 if (copy_to_user(user_finfo, prog->aux->func_info,
3825 info.func_info_rec_size * ulen))
3826 return -EFAULT;
3827 }
3828
3829 ulen = info.nr_line_info;
3830 info.nr_line_info = prog->aux->nr_linfo;
3831 if (info.nr_line_info && ulen) {
3832 __u8 __user *user_linfo;
3833
3834 user_linfo = u64_to_user_ptr(info.line_info);
3835 ulen = min_t(u32, info.nr_line_info, ulen);
3836 if (copy_to_user(user_linfo, prog->aux->linfo,
3837 info.line_info_rec_size * ulen))
3838 return -EFAULT;
3839 }
3840
3841 ulen = info.nr_jited_line_info;
3842 if (prog->aux->jited_linfo)
3843 info.nr_jited_line_info = prog->aux->nr_linfo;
3844 else
3845 info.nr_jited_line_info = 0;
3846 if (info.nr_jited_line_info && ulen) {
3847 if (bpf_dump_raw_ok(file->f_cred)) {
3848 __u64 __user *user_linfo;
3849 u32 i;
3850
3851 user_linfo = u64_to_user_ptr(info.jited_line_info);
3852 ulen = min_t(u32, info.nr_jited_line_info, ulen);
3853 for (i = 0; i < ulen; i++) {
3854 if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3855 &user_linfo[i]))
3856 return -EFAULT;
3857 }
3858 } else {
3859 info.jited_line_info = 0;
3860 }
3861 }
3862
3863 ulen = info.nr_prog_tags;
3864 info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3865 if (ulen) {
3866 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3867 u32 i;
3868
3869 user_prog_tags = u64_to_user_ptr(info.prog_tags);
3870 ulen = min_t(u32, info.nr_prog_tags, ulen);
3871 if (prog->aux->func_cnt) {
3872 for (i = 0; i < ulen; i++) {
3873 if (copy_to_user(user_prog_tags[i],
3874 prog->aux->func[i]->tag,
3875 BPF_TAG_SIZE))
3876 return -EFAULT;
3877 }
3878 } else {
3879 if (copy_to_user(user_prog_tags[0],
3880 prog->tag, BPF_TAG_SIZE))
3881 return -EFAULT;
3882 }
3883 }
3884
3885 done:
3886 if (copy_to_user(uinfo, &info, info_len) ||
3887 put_user(info_len, &uattr->info.info_len))
3888 return -EFAULT;
3889
3890 return 0;
3891 }
3892
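/* A minimal user-space sketch of the call the function below services
 * (assumes <linux/bpf.h>, <sys/syscall.h> and a valid map_fd):
 *
 *	struct bpf_map_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd = map_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 * On success the kernel writes the (possibly trimmed) info_len back to
 * attr.info.info_len.
 */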
3893 static int bpf_map_get_info_by_fd(struct file *file,
3894 struct bpf_map *map,
3895 const union bpf_attr *attr,
3896 union bpf_attr __user *uattr)
3897 {
3898 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3899 struct bpf_map_info info;
3900 u32 info_len = attr->info.info_len;
3901 int err;
3902
3903 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3904 if (err)
3905 return err;
3906 info_len = min_t(u32, sizeof(info), info_len);
3907
3908 memset(&info, 0, sizeof(info));
3909 info.type = map->map_type;
3910 info.id = map->id;
3911 info.key_size = map->key_size;
3912 info.value_size = map->value_size;
3913 info.max_entries = map->max_entries;
3914 info.map_flags = map->map_flags;
3915 memcpy(info.name, map->name, sizeof(map->name));
3916
3917 if (map->btf) {
3918 info.btf_id = btf_obj_id(map->btf);
3919 info.btf_key_type_id = map->btf_key_type_id;
3920 info.btf_value_type_id = map->btf_value_type_id;
3921 }
3922 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3923
3924 if (bpf_map_is_dev_bound(map)) {
3925 err = bpf_map_offload_info_fill(&info, map);
3926 if (err)
3927 return err;
3928 }
3929
3930 if (copy_to_user(uinfo, &info, info_len) ||
3931 put_user(info_len, &uattr->info.info_len))
3932 return -EFAULT;
3933
3934 return 0;
3935 }
3936
3937 static int bpf_btf_get_info_by_fd(struct file *file,
3938 struct btf *btf,
3939 const union bpf_attr *attr,
3940 union bpf_attr __user *uattr)
3941 {
3942 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3943 u32 info_len = attr->info.info_len;
3944 int err;
3945
3946 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
3947 if (err)
3948 return err;
3949
3950 return btf_get_info_by_fd(btf, attr, uattr);
3951 }
3952
3953 static int bpf_link_get_info_by_fd(struct file *file,
3954 struct bpf_link *link,
3955 const union bpf_attr *attr,
3956 union bpf_attr __user *uattr)
3957 {
3958 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3959 struct bpf_link_info info;
3960 u32 info_len = attr->info.info_len;
3961 int err;
3962
3963 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3964 if (err)
3965 return err;
3966 info_len = min_t(u32, sizeof(info), info_len);
3967
3968 memset(&info, 0, sizeof(info));
3969 if (copy_from_user(&info, uinfo, info_len))
3970 return -EFAULT;
3971
3972 info.type = link->type;
3973 info.id = link->id;
3974 info.prog_id = link->prog->aux->id;
3975
3976 if (link->ops->fill_link_info) {
3977 err = link->ops->fill_link_info(link, &info);
3978 if (err)
3979 return err;
3980 }
3981
3982 if (copy_to_user(uinfo, &info, info_len) ||
3983 put_user(info_len, &uattr->info.info_len))
3984 return -EFAULT;
3985
3986 return 0;
3987 }
3988
3989
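/* BPF_OBJ_GET_INFO_BY_FD dispatches on the fd's file_operations to tell
 * programs, maps, BTF objects and links apart, then fills the matching
 * bpf_{prog,map,btf,link}_info structure via the helpers above.
 */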
3990 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3991
3992 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3993 union bpf_attr __user *uattr)
3994 {
3995 int ufd = attr->info.bpf_fd;
3996 struct fd f;
3997 int err;
3998
3999 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4000 return -EINVAL;
4001
4002 f = fdget(ufd);
4003 if (!f.file)
4004 return -EBADFD;
4005
4006 if (f.file->f_op == &bpf_prog_fops)
4007 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4008 uattr);
4009 else if (f.file->f_op == &bpf_map_fops)
4010 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4011 uattr);
4012 else if (f.file->f_op == &btf_fops)
4013 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4014 else if (f.file->f_op == &bpf_link_fops)
4015 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
4016 attr, uattr);
4017 else
4018 err = -EINVAL;
4019
4020 fdput(f);
4021 return err;
4022 }
4023
4024 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
4025
4026 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
4027 {
4028 if (CHECK_ATTR(BPF_BTF_LOAD))
4029 return -EINVAL;
4030
4031 if (!bpf_capable())
4032 return -EPERM;
4033
4034 return btf_new_fd(attr, uattr);
4035 }
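
/* A minimal user-space sketch of loading a raw BTF blob through this
 * command (assumes btf_data/btf_size describe valid .BTF bytes):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.btf = (__u64)(unsigned long)btf_data;
 *	attr.btf_size = btf_size;
 *	btf_fd = syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
 *
 * btf_log_buf/btf_log_size/btf_log_level can be set to capture verifier
 * feedback on malformed BTF.
 */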
4036
4037 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
4038
4039 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
4040 {
4041 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
4042 return -EINVAL;
4043
4044 if (!capable(CAP_SYS_ADMIN))
4045 return -EPERM;
4046
4047 return btf_get_fd_by_id(attr->btf_id);
4048 }
4049
4050 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
4051 union bpf_attr __user *uattr,
4052 u32 prog_id, u32 fd_type,
4053 const char *buf, u64 probe_offset,
4054 u64 probe_addr)
4055 {
4056 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
4057 u32 len = buf ? strlen(buf) : 0, input_len;
4058 int err = 0;
4059
4060 if (put_user(len, &uattr->task_fd_query.buf_len))
4061 return -EFAULT;
4062 input_len = attr->task_fd_query.buf_len;
4063 if (input_len && ubuf) {
4064 if (!len) {
4065 /* nothing to copy, just make ubuf NULL terminated */
4066 char zero = '\0';
4067
4068 if (put_user(zero, ubuf))
4069 return -EFAULT;
4070 } else if (input_len >= len + 1) {
4071 /* ubuf can hold the string with NULL terminator */
4072 if (copy_to_user(ubuf, buf, len + 1))
4073 return -EFAULT;
4074 } else {
4075 /* ubuf cannot hold the string with NULL terminator,
4076 * do a partial copy with NULL terminator.
4077 */
4078 char zero = '\0';
4079
4080 err = -ENOSPC;
4081 if (copy_to_user(ubuf, buf, input_len - 1))
4082 return -EFAULT;
4083 if (put_user(zero, ubuf + input_len - 1))
4084 return -EFAULT;
4085 }
4086 }
4087
4088 if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
4089 put_user(fd_type, &uattr->task_fd_query.fd_type) ||
4090 put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
4091 put_user(probe_addr, &uattr->task_fd_query.probe_addr))
4092 return -EFAULT;
4093
4094 return err;
4095 }
4096
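/* A minimal user-space sketch of BPF_TASK_FD_QUERY (assumes pid/target_fd
 * name a perf event or raw tracepoint fd in that task):
 *
 *	char buf[256];
 *	union bpf_attr attr = {};
 *
 *	attr.task_fd_query.pid = pid;
 *	attr.task_fd_query.fd = target_fd;
 *	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *	attr.task_fd_query.buf_len = sizeof(buf);
 *	err = syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 *
 * -ENOSPC means buf received a NUL-terminated partial string; prog_id,
 * fd_type and the probe_* fields are still valid in that case.
 */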
4097 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
4098
4099 static int bpf_task_fd_query(const union bpf_attr *attr,
4100 union bpf_attr __user *uattr)
4101 {
4102 pid_t pid = attr->task_fd_query.pid;
4103 u32 fd = attr->task_fd_query.fd;
4104 const struct perf_event *event;
4105 struct task_struct *task;
4106 struct file *file;
4107 int err;
4108
4109 if (CHECK_ATTR(BPF_TASK_FD_QUERY))
4110 return -EINVAL;
4111
4112 if (!capable(CAP_SYS_ADMIN))
4113 return -EPERM;
4114
4115 if (attr->task_fd_query.flags != 0)
4116 return -EINVAL;
4117
4118 rcu_read_lock();
4119 task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
4120 rcu_read_unlock();
4121 if (!task)
4122 return -ENOENT;
4123
4124 err = 0;
4125 file = fget_task(task, fd);
4126 put_task_struct(task);
4127 if (!file)
4128 return -EBADF;
4129
4130 if (file->f_op == &bpf_link_fops) {
4131 struct bpf_link *link = file->private_data;
4132
4133 if (link->ops == &bpf_raw_tp_link_lops) {
4134 struct bpf_raw_tp_link *raw_tp =
4135 container_of(link, struct bpf_raw_tp_link, link);
4136 struct bpf_raw_event_map *btp = raw_tp->btp;
4137
4138 err = bpf_task_fd_query_copy(attr, uattr,
4139 raw_tp->link.prog->aux->id,
4140 BPF_FD_TYPE_RAW_TRACEPOINT,
4141 btp->tp->name, 0, 0);
4142 goto put_file;
4143 }
4144 goto out_not_supp;
4145 }
4146
4147 event = perf_get_event(file);
4148 if (!IS_ERR(event)) {
4149 u64 probe_offset, probe_addr;
4150 u32 prog_id, fd_type;
4151 const char *buf;
4152
4153 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
4154 &buf, &probe_offset,
4155 &probe_addr);
4156 if (!err)
4157 err = bpf_task_fd_query_copy(attr, uattr, prog_id,
4158 fd_type, buf,
4159 probe_offset,
4160 probe_addr);
4161 goto put_file;
4162 }
4163
4164 out_not_supp:
4165 err = -ENOTSUPP;
4166 put_file:
4167 fput(file);
4168 return err;
4169 }
4170
4171 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
4172
4173 #define BPF_DO_BATCH(fn) \
4174 do { \
4175 if (!fn) { \
4176 err = -ENOTSUPP; \
4177 goto err_put; \
4178 } \
4179 err = fn(map, attr, uattr); \
4180 } while (0)
4181
4182 static int bpf_map_do_batch(const union bpf_attr *attr,
4183 union bpf_attr __user *uattr,
4184 int cmd)
4185 {
4186 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
4187 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
4188 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
4189 struct bpf_map *map;
4190 int err, ufd;
4191 struct fd f;
4192
4193 if (CHECK_ATTR(BPF_MAP_BATCH))
4194 return -EINVAL;
4195
4196 ufd = attr->batch.map_fd;
4197 f = fdget(ufd);
4198 map = __bpf_map_get(f);
4199 if (IS_ERR(map))
4200 return PTR_ERR(map);
4201 if (has_write)
4202 bpf_map_write_active_inc(map);
4203 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4204 err = -EPERM;
4205 goto err_put;
4206 }
4207 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4208 err = -EPERM;
4209 goto err_put;
4210 }
4211
4212 if (cmd == BPF_MAP_LOOKUP_BATCH)
4213 BPF_DO_BATCH(map->ops->map_lookup_batch);
4214 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4215 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
4216 else if (cmd == BPF_MAP_UPDATE_BATCH)
4217 BPF_DO_BATCH(map->ops->map_update_batch);
4218 else
4219 BPF_DO_BATCH(map->ops->map_delete_batch);
4220 err_put:
4221 if (has_write)
4222 bpf_map_write_active_dec(map);
4223 fdput(f);
4224 return err;
4225 }
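
/* A minimal user-space sketch of a batched lookup (assumes keys/values
 * point at buffers sized for 'count' entries of this map's layout):
 *
 *	__u32 out_batch, count = 64;
 *	union bpf_attr attr = {};
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.out_batch = (__u64)(unsigned long)&out_batch;
 *	attr.batch.keys = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)values;
 *	attr.batch.count = count;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *
 * The kernel rewrites attr.batch.count to the number of elements copied;
 * a follow-up call resumes by feeding out_batch back in via
 * attr.batch.in_batch.
 */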
4226
4227 static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
4228 struct bpf_prog *prog)
4229 {
4230 if (attr->link_create.attach_type != prog->expected_attach_type)
4231 return -EINVAL;
4232
4233 if (prog->expected_attach_type == BPF_TRACE_ITER)
4234 return bpf_iter_link_attach(attr, uattr, prog);
4235 else if (prog->type == BPF_PROG_TYPE_EXT)
4236 return bpf_tracing_prog_attach(prog,
4237 attr->link_create.target_fd,
4238 attr->link_create.target_btf_id);
4239 return -EINVAL;
4240 }
4241
4242 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
4243 static int link_create(union bpf_attr *attr, bpfptr_t uattr)
4244 {
4245 enum bpf_prog_type ptype;
4246 struct bpf_prog *prog;
4247 int ret;
4248
4249 if (CHECK_ATTR(BPF_LINK_CREATE))
4250 return -EINVAL;
4251
4252 prog = bpf_prog_get(attr->link_create.prog_fd);
4253 if (IS_ERR(prog))
4254 return PTR_ERR(prog);
4255
4256 ret = bpf_prog_attach_check_attach_type(prog,
4257 attr->link_create.attach_type);
4258 if (ret)
4259 goto out;
4260
4261 switch (prog->type) {
4262 case BPF_PROG_TYPE_EXT:
4263 ret = tracing_bpf_link_attach(attr, uattr, prog);
4264 goto out;
4265 case BPF_PROG_TYPE_PERF_EVENT:
4266 case BPF_PROG_TYPE_KPROBE:
4267 case BPF_PROG_TYPE_TRACEPOINT:
4268 if (attr->link_create.attach_type != BPF_PERF_EVENT) {
4269 ret = -EINVAL;
4270 goto out;
4271 }
4272 ptype = prog->type;
4273 break;
4274 default:
4275 ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4276 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4277 ret = -EINVAL;
4278 goto out;
4279 }
4280 break;
4281 }
4282
4283 switch (ptype) {
4284 case BPF_PROG_TYPE_CGROUP_SKB:
4285 case BPF_PROG_TYPE_CGROUP_SOCK:
4286 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4287 case BPF_PROG_TYPE_SOCK_OPS:
4288 case BPF_PROG_TYPE_CGROUP_DEVICE:
4289 case BPF_PROG_TYPE_CGROUP_SYSCTL:
4290 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4291 ret = cgroup_bpf_link_attach(attr, prog);
4292 break;
4293 case BPF_PROG_TYPE_TRACING:
4294 ret = tracing_bpf_link_attach(attr, uattr, prog);
4295 break;
4296 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4297 case BPF_PROG_TYPE_SK_LOOKUP:
4298 ret = netns_bpf_link_create(attr, prog);
4299 break;
4300 #ifdef CONFIG_NET
4301 case BPF_PROG_TYPE_XDP:
4302 ret = bpf_xdp_link_attach(attr, prog);
4303 break;
4304 #endif
4305 #ifdef CONFIG_PERF_EVENTS
4306 case BPF_PROG_TYPE_PERF_EVENT:
4307 case BPF_PROG_TYPE_TRACEPOINT:
4308 case BPF_PROG_TYPE_KPROBE:
4309 ret = bpf_perf_link_attach(attr, prog);
4310 break;
4311 #endif
4312 default:
4313 ret = -EINVAL;
4314 }
4315
4316 out:
4317 if (ret < 0)
4318 bpf_prog_put(prog);
4319 return ret;
4320 }
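
/* A minimal user-space sketch of creating a cgroup link with this command
 * (assumes prog_fd was loaded with a matching expected_attach_type and
 * cgroup_fd is an open cgroup directory):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.target_fd = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 * The attachment persists until link_fd and any pinned copies of the
 * link are gone.
 */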
4321
4322 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4323
4324 static int link_update(union bpf_attr *attr)
4325 {
4326 struct bpf_prog *old_prog = NULL, *new_prog;
4327 struct bpf_link *link;
4328 u32 flags;
4329 int ret;
4330
4331 if (CHECK_ATTR(BPF_LINK_UPDATE))
4332 return -EINVAL;
4333
4334 flags = attr->link_update.flags;
4335 if (flags & ~BPF_F_REPLACE)
4336 return -EINVAL;
4337
4338 link = bpf_link_get_from_fd(attr->link_update.link_fd);
4339 if (IS_ERR(link))
4340 return PTR_ERR(link);
4341
4342 new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4343 if (IS_ERR(new_prog)) {
4344 ret = PTR_ERR(new_prog);
4345 goto out_put_link;
4346 }
4347
4348 if (flags & BPF_F_REPLACE) {
4349 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4350 if (IS_ERR(old_prog)) {
4351 ret = PTR_ERR(old_prog);
4352 old_prog = NULL;
4353 goto out_put_progs;
4354 }
4355 } else if (attr->link_update.old_prog_fd) {
4356 ret = -EINVAL;
4357 goto out_put_progs;
4358 }
4359
4360 if (link->ops->update_prog)
4361 ret = link->ops->update_prog(link, new_prog, old_prog);
4362 else
4363 ret = -EINVAL;
4364
4365 out_put_progs:
4366 if (old_prog)
4367 bpf_prog_put(old_prog);
4368 if (ret)
4369 bpf_prog_put(new_prog);
4370 out_put_link:
4371 bpf_link_put(link);
4372 return ret;
4373 }
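
/* A minimal user-space sketch of atomically swapping the program behind a
 * link (assumes valid link_fd/old_fd/new_fd):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_fd;
 *	attr.link_update.old_prog_fd = old_fd;
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 *
 * Without BPF_F_REPLACE, old_prog_fd must be zero and the update does not
 * check which program is currently attached.
 */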
4374
4375 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4376
4377 static int link_detach(union bpf_attr *attr)
4378 {
4379 struct bpf_link *link;
4380 int ret;
4381
4382 if (CHECK_ATTR(BPF_LINK_DETACH))
4383 return -EINVAL;
4384
4385 link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4386 if (IS_ERR(link))
4387 return PTR_ERR(link);
4388
4389 if (link->ops->detach)
4390 ret = link->ops->detach(link);
4391 else
4392 ret = -EOPNOTSUPP;
4393
4394 bpf_link_put(link);
4395 return ret;
4396 }
4397
4398 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4399 {
4400 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4401 }
4402
4403 struct bpf_link *bpf_link_by_id(u32 id)
4404 {
4405 struct bpf_link *link;
4406
4407 if (!id)
4408 return ERR_PTR(-ENOENT);
4409
4410 spin_lock_bh(&link_idr_lock);
4411 /* before link is "settled", ID is 0, pretend it doesn't exist yet */
4412 link = idr_find(&link_idr, id);
4413 if (link) {
4414 if (link->id)
4415 link = bpf_link_inc_not_zero(link);
4416 else
4417 link = ERR_PTR(-EAGAIN);
4418 } else {
4419 link = ERR_PTR(-ENOENT);
4420 }
4421 spin_unlock_bh(&link_idr_lock);
4422 return link;
4423 }
4424
4425 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4426
4427 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4428 {
4429 struct bpf_link *link;
4430 u32 id = attr->link_id;
4431 int fd;
4432
4433 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4434 return -EINVAL;
4435
4436 if (!capable(CAP_SYS_ADMIN))
4437 return -EPERM;
4438
4439 link = bpf_link_by_id(id);
4440 if (IS_ERR(link))
4441 return PTR_ERR(link);
4442
4443 fd = bpf_link_new_fd(link);
4444 if (fd < 0)
4445 bpf_link_put(link);
4446
4447 return fd;
4448 }
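
/* A minimal user-space sketch of walking every link in the system
 * (needs CAP_SYS_ADMIN; inspect_and_close() stands in for whatever the
 * caller does with each fd):
 *
 *	union bpf_attr attr = {};
 *
 *	while (!syscall(__NR_bpf, BPF_LINK_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		attr.link_id = attr.next_id;
 *		fd = syscall(__NR_bpf, BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
 *		inspect_and_close(fd);
 *	}
 *
 * link_id aliases start_id inside bpf_attr, so the assignment both picks
 * the link to open and advances the iteration cursor.
 */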
4449
4450 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4451
4452 static int bpf_stats_release(struct inode *inode, struct file *file)
4453 {
4454 mutex_lock(&bpf_stats_enabled_mutex);
4455 static_key_slow_dec(&bpf_stats_enabled_key.key);
4456 mutex_unlock(&bpf_stats_enabled_mutex);
4457 return 0;
4458 }
4459
4460 static const struct file_operations bpf_stats_fops = {
4461 .release = bpf_stats_release,
4462 };
4463
4464 static int bpf_enable_runtime_stats(void)
4465 {
4466 int fd;
4467
4468 mutex_lock(&bpf_stats_enabled_mutex);
4469
4470 /* Set a very high limit to avoid overflow */
4471 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4472 mutex_unlock(&bpf_stats_enabled_mutex);
4473 return -EBUSY;
4474 }
4475
4476 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4477 if (fd >= 0)
4478 static_key_slow_inc(&bpf_stats_enabled_key.key);
4479
4480 mutex_unlock(&bpf_stats_enabled_mutex);
4481 return fd;
4482 }
4483
4484 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4485
4486 static int bpf_enable_stats(union bpf_attr *attr)
4487 {
4488
4489 if (CHECK_ATTR(BPF_ENABLE_STATS))
4490 return -EINVAL;
4491
4492 if (!capable(CAP_SYS_ADMIN))
4493 return -EPERM;
4494
4495 switch (attr->enable_stats.type) {
4496 case BPF_STATS_RUN_TIME:
4497 return bpf_enable_runtime_stats();
4498 default:
4499 break;
4500 }
4501 return -EINVAL;
4502 }
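
/* A minimal user-space sketch: run-time stats stay enabled for as long as
 * the returned fd is held open (needs CAP_SYS_ADMIN):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *
 * close(stats_fd) drops the static-key count again via
 * bpf_stats_release().
 */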
4503
4504 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4505
4506 static int bpf_iter_create(union bpf_attr *attr)
4507 {
4508 struct bpf_link *link;
4509 int err;
4510
4511 if (CHECK_ATTR(BPF_ITER_CREATE))
4512 return -EINVAL;
4513
4514 if (attr->iter_create.flags)
4515 return -EINVAL;
4516
4517 link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4518 if (IS_ERR(link))
4519 return PTR_ERR(link);
4520
4521 err = bpf_iter_new_fd(link);
4522 bpf_link_put(link);
4523
4524 return err;
4525 }
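
/* A minimal user-space sketch of materializing an iterator (assumes
 * iter_link_fd came from BPF_LINK_CREATE of a BPF_TRACE_ITER program;
 * process() stands in for consuming the seq_file output):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.iter_create.link_fd = iter_link_fd;
 *	iter_fd = syscall(__NR_bpf, BPF_ITER_CREATE, &attr, sizeof(attr));
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		process(buf, n);
 */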
4526
4527 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4528
4529 static int bpf_prog_bind_map(union bpf_attr *attr)
4530 {
4531 struct bpf_prog *prog;
4532 struct bpf_map *map;
4533 struct bpf_map **used_maps_old, **used_maps_new;
4534 int i, ret = 0;
4535
4536 if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4537 return -EINVAL;
4538
4539 if (attr->prog_bind_map.flags)
4540 return -EINVAL;
4541
4542 prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4543 if (IS_ERR(prog))
4544 return PTR_ERR(prog);
4545
4546 map = bpf_map_get(attr->prog_bind_map.map_fd);
4547 if (IS_ERR(map)) {
4548 ret = PTR_ERR(map);
4549 goto out_prog_put;
4550 }
4551
4552 mutex_lock(&prog->aux->used_maps_mutex);
4553
4554 used_maps_old = prog->aux->used_maps;
4555
4556 for (i = 0; i < prog->aux->used_map_cnt; i++)
4557 if (used_maps_old[i] == map) {
4558 bpf_map_put(map);
4559 goto out_unlock;
4560 }
4561
4562 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4563 sizeof(used_maps_new[0]),
4564 GFP_KERNEL);
4565 if (!used_maps_new) {
4566 ret = -ENOMEM;
4567 goto out_unlock;
4568 }
4569
4570 memcpy(used_maps_new, used_maps_old,
4571 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4572 used_maps_new[prog->aux->used_map_cnt] = map;
4573
4574 prog->aux->used_map_cnt++;
4575 prog->aux->used_maps = used_maps_new;
4576
4577 kfree(used_maps_old);
4578
4579 out_unlock:
4580 mutex_unlock(&prog->aux->used_maps_mutex);
4581
4582 if (ret)
4583 bpf_map_put(map);
4584 out_prog_put:
4585 bpf_prog_put(prog);
4586 return ret;
4587 }
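
/* A minimal user-space sketch of tying a map's lifetime to a program that
 * never references it in its instructions (assumes valid prog_fd/map_fd):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_bind_map.prog_fd = prog_fd;
 *	attr.prog_bind_map.map_fd = map_fd;
 *	err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 *
 * Loaders use this for maps only reachable indirectly, e.g. metadata
 * maps, so they are freed together with the program.
 */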
4588
4589 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
4590 {
4591 union bpf_attr attr;
4592 int err;
4593
4594 if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4595 return -EPERM;
4596
4597 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4598 if (err)
4599 return err;
4600 size = min_t(u32, size, sizeof(attr));
4601
4602 /* copy attributes from user space, may be less than sizeof(bpf_attr) */
4603 memset(&attr, 0, sizeof(attr));
4604 if (copy_from_bpfptr(&attr, uattr, size) != 0)
4605 return -EFAULT;
4606
4607 trace_android_vh_check_bpf_syscall(cmd, &attr, size);
4608
4609 err = security_bpf(cmd, &attr, size);
4610 if (err < 0)
4611 return err;
4612
4613 switch (cmd) {
4614 case BPF_MAP_CREATE:
4615 err = map_create(&attr);
4616 break;
4617 case BPF_MAP_LOOKUP_ELEM:
4618 err = map_lookup_elem(&attr);
4619 break;
4620 case BPF_MAP_UPDATE_ELEM:
4621 err = map_update_elem(&attr, uattr);
4622 break;
4623 case BPF_MAP_DELETE_ELEM:
4624 err = map_delete_elem(&attr);
4625 break;
4626 case BPF_MAP_GET_NEXT_KEY:
4627 err = map_get_next_key(&attr);
4628 break;
4629 case BPF_MAP_FREEZE:
4630 err = map_freeze(&attr);
4631 break;
4632 case BPF_PROG_LOAD:
4633 err = bpf_prog_load(&attr, uattr);
4634 break;
4635 case BPF_OBJ_PIN:
4636 err = bpf_obj_pin(&attr);
4637 break;
4638 case BPF_OBJ_GET:
4639 err = bpf_obj_get(&attr);
4640 break;
4641 case BPF_PROG_ATTACH:
4642 err = bpf_prog_attach(&attr);
4643 break;
4644 case BPF_PROG_DETACH:
4645 err = bpf_prog_detach(&attr);
4646 break;
4647 case BPF_PROG_QUERY:
4648 err = bpf_prog_query(&attr, uattr.user);
4649 break;
4650 case BPF_PROG_TEST_RUN:
4651 err = bpf_prog_test_run(&attr, uattr.user);
4652 break;
4653 case BPF_PROG_GET_NEXT_ID:
4654 err = bpf_obj_get_next_id(&attr, uattr.user,
4655 &prog_idr, &prog_idr_lock);
4656 break;
4657 case BPF_MAP_GET_NEXT_ID:
4658 err = bpf_obj_get_next_id(&attr, uattr.user,
4659 &map_idr, &map_idr_lock);
4660 break;
4661 case BPF_BTF_GET_NEXT_ID:
4662 err = bpf_obj_get_next_id(&attr, uattr.user,
4663 &btf_idr, &btf_idr_lock);
4664 break;
4665 case BPF_PROG_GET_FD_BY_ID:
4666 err = bpf_prog_get_fd_by_id(&attr);
4667 break;
4668 case BPF_MAP_GET_FD_BY_ID:
4669 err = bpf_map_get_fd_by_id(&attr);
4670 break;
4671 case BPF_OBJ_GET_INFO_BY_FD:
4672 err = bpf_obj_get_info_by_fd(&attr, uattr.user);
4673 break;
4674 case BPF_RAW_TRACEPOINT_OPEN:
4675 err = bpf_raw_tracepoint_open(&attr);
4676 break;
4677 case BPF_BTF_LOAD:
4678 err = bpf_btf_load(&attr, uattr);
4679 break;
4680 case BPF_BTF_GET_FD_BY_ID:
4681 err = bpf_btf_get_fd_by_id(&attr);
4682 break;
4683 case BPF_TASK_FD_QUERY:
4684 err = bpf_task_fd_query(&attr, uattr.user);
4685 break;
4686 case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4687 err = map_lookup_and_delete_elem(&attr);
4688 break;
4689 case BPF_MAP_LOOKUP_BATCH:
4690 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
4691 break;
4692 case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4693 err = bpf_map_do_batch(&attr, uattr.user,
4694 BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4695 break;
4696 case BPF_MAP_UPDATE_BATCH:
4697 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
4698 break;
4699 case BPF_MAP_DELETE_BATCH:
4700 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
4701 break;
4702 case BPF_LINK_CREATE:
4703 err = link_create(&attr, uattr);
4704 break;
4705 case BPF_LINK_UPDATE:
4706 err = link_update(&attr);
4707 break;
4708 case BPF_LINK_GET_FD_BY_ID:
4709 err = bpf_link_get_fd_by_id(&attr);
4710 break;
4711 case BPF_LINK_GET_NEXT_ID:
4712 err = bpf_obj_get_next_id(&attr, uattr.user,
4713 &link_idr, &link_idr_lock);
4714 break;
4715 case BPF_ENABLE_STATS:
4716 err = bpf_enable_stats(&attr);
4717 break;
4718 case BPF_ITER_CREATE:
4719 err = bpf_iter_create(&attr);
4720 break;
4721 case BPF_LINK_DETACH:
4722 err = link_detach(&attr);
4723 break;
4724 case BPF_PROG_BIND_MAP:
4725 err = bpf_prog_bind_map(&attr);
4726 break;
4727 default:
4728 err = -EINVAL;
4729 break;
4730 }
4731
4732 return err;
4733 }
4734
4735 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4736 {
4737 return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
4738 }
4739
4740 static bool syscall_prog_is_valid_access(int off, int size,
4741 enum bpf_access_type type,
4742 const struct bpf_prog *prog,
4743 struct bpf_insn_access_aux *info)
4744 {
4745 if (off < 0 || off >= U16_MAX)
4746 return false;
4747 if (off % size != 0)
4748 return false;
4749 return true;
4750 }
4751
4752 BPF_CALL_3(bpf_sys_bpf, int, cmd, void *, attr, u32, attr_size)
4753 {
4754 switch (cmd) {
4755 case BPF_MAP_CREATE:
4756 case BPF_MAP_UPDATE_ELEM:
4757 case BPF_MAP_FREEZE:
4758 case BPF_PROG_LOAD:
4759 case BPF_BTF_LOAD:
4760 break;
4761 /* case BPF_PROG_TEST_RUN:
4762 * is not part of this list to prevent recursive test_run
4763 */
4764 default:
4765 return -EINVAL;
4766 }
4767 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
4768 }
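
/* A minimal sketch of a BPF_PROG_TYPE_SYSCALL program using this helper
 * (libbpf-style; the map attributes are illustrative only):
 *
 *	SEC("syscall")
 *	int create_array(void *ctx)
 *	{
 *		union bpf_attr attr = {
 *			.map_type = BPF_MAP_TYPE_ARRAY,
 *			.key_size = 4,
 *			.value_size = 8,
 *			.max_entries = 1,
 *		};
 *
 *		return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 *
 * Here attr lives in kernel memory, which is why __sys_bpf() is entered
 * with KERNEL_BPFPTR() above.
 */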
4769
4770 static const struct bpf_func_proto bpf_sys_bpf_proto = {
4771 .func = bpf_sys_bpf,
4772 .gpl_only = false,
4773 .ret_type = RET_INTEGER,
4774 .arg1_type = ARG_ANYTHING,
4775 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
4776 .arg3_type = ARG_CONST_SIZE,
4777 };
4778
4779 const struct bpf_func_proto * __weak
4780 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4781 {
4782 return bpf_base_func_proto(func_id);
4783 }
4784
4785 BPF_CALL_1(bpf_sys_close, u32, fd)
4786 {
4787 /* When bpf program calls this helper there should not be
4788 * an fdget() without matching completed fdput().
4789 * This helper is allowed in the following callchain only:
4790 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
4791 */
4792 return close_fd(fd);
4793 }
4794
4795 static const struct bpf_func_proto bpf_sys_close_proto = {
4796 .func = bpf_sys_close,
4797 .gpl_only = false,
4798 .ret_type = RET_INTEGER,
4799 .arg1_type = ARG_ANYTHING,
4800 };
4801
4802 static const struct bpf_func_proto *
4803 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4804 {
4805 switch (func_id) {
4806 case BPF_FUNC_sys_bpf:
4807 return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
4808 case BPF_FUNC_btf_find_by_name_kind:
4809 return &bpf_btf_find_by_name_kind_proto;
4810 case BPF_FUNC_sys_close:
4811 return &bpf_sys_close_proto;
4812 default:
4813 return tracing_prog_func_proto(func_id, prog);
4814 }
4815 }
4816
4817 const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
4818 .get_func_proto = syscall_prog_func_proto,
4819 .is_valid_access = syscall_prog_is_valid_access,
4820 };
4821
4822 const struct bpf_prog_ops bpf_syscall_prog_ops = {
4823 .test_run = bpf_prog_test_run_syscall,
4824 };
4825