Lines Matching +full:pre +full:- +full:fetchable
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
32 #include <linux/bpf-netns.h>
35 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
36 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
37 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
38 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
39 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
69 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
74 * meant to be a future-proofing of bits.
84 return -E2BIG; in bpf_check_uarg_tail_zero()
89 res = check_zeroed_user(addr, actual_size - expected_size); in bpf_check_uarg_tail_zero()
92 return res ? 0 : -E2BIG; in bpf_check_uarg_tail_zero()
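
The fragments above come from the compatibility helper that lets newer user-space pass a larger bpf_attr: any bytes past the size this kernel understands must be zero. A minimal standalone sketch of that rule (a simplification for illustration, not the kernel helper itself):

	/* Simplified sketch of the tail-zero rule: bytes beyond the size the
	 * kernel knows about must all be zero, otherwise the caller depends
	 * on an extension this kernel does not implement.
	 */
	static int tail_is_zero(const unsigned char *buf, size_t expected, size_t actual)
	{
		size_t i;

		if (actual <= expected)
			return 1;		/* nothing unknown to check */
		for (i = expected; i < actual; i++)
			if (buf[i])
				return 0;	/* unknown extension bit set */
		return 1;
	}
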
105 u32 type = attr->map_type; in find_and_alloc_map()
110 return ERR_PTR(-EINVAL); in find_and_alloc_map()
114 return ERR_PTR(-EINVAL); in find_and_alloc_map()
116 if (ops->map_alloc_check) { in find_and_alloc_map()
117 err = ops->map_alloc_check(attr); in find_and_alloc_map()
121 if (attr->map_ifindex) in find_and_alloc_map()
123 map = ops->map_alloc(attr); in find_and_alloc_map()
126 map->ops = ops; in find_and_alloc_map()
127 map->map_type = type; in find_and_alloc_map()
133 atomic64_inc(&map->writecnt); in bpf_map_write_active_inc()
138 atomic64_dec(&map->writecnt); in bpf_map_write_active_dec()
143 return atomic64_read(&map->writecnt) != 0; in bpf_map_write_active()
148 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_value_size()
149 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in bpf_map_value_size()
150 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in bpf_map_value_size()
151 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in bpf_map_value_size()
152 return round_up(map->value_size, 8) * num_possible_cpus(); in bpf_map_value_size()
156 return map->value_size; in bpf_map_value_size()
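
Note for user-space callers: for the per-CPU map types listed above, a lookup or update buffer must hold one 8-byte-aligned value per *possible* CPU (libbpf derives that count from /sys/devices/system/cpu/possible). A sketch of the sizing rule:

	/* Buffer size for one per-CPU map element as seen from user-space:
	 * round_up(value_size, 8) * number of possible CPUs.
	 */
	static size_t percpu_lookup_buf_size(size_t value_size, unsigned int nr_possible_cpus)
	{
		size_t per_cpu = (value_size + 7) & ~(size_t)7;	/* round_up(value_size, 8) */

		return per_cpu * nr_possible_cpus;
	}
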
165 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || in maybe_wait_bpf_programs()
166 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) in maybe_wait_bpf_programs()
178 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || in bpf_map_update_value()
179 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_update_value()
180 return map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
181 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || in bpf_map_update_value()
182 map->map_type == BPF_MAP_TYPE_SOCKMAP) { in bpf_map_update_value()
190 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_update_value()
191 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_update_value()
193 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_update_value()
195 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_update_value()
203 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in bpf_map_update_value()
208 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_update_value()
212 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_update_value()
213 map->map_type == BPF_MAP_TYPE_STACK) { in bpf_map_update_value()
214 err = map->ops->map_push_elem(map, value, flags); in bpf_map_update_value()
217 err = map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
236 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_copy_value()
237 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_copy_value()
239 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_copy_value()
241 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_copy_value()
243 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in bpf_map_copy_value()
249 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_copy_value()
251 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_copy_value()
252 map->map_type == BPF_MAP_TYPE_STACK) { in bpf_map_copy_value()
253 err = map->ops->map_peek_elem(map, value); in bpf_map_copy_value()
254 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_copy_value()
259 if (map->ops->map_lookup_elem_sys_only) in bpf_map_copy_value()
260 ptr = map->ops->map_lookup_elem_sys_only(map, key); in bpf_map_copy_value()
262 ptr = map->ops->map_lookup_elem(map, key); in bpf_map_copy_value()
266 err = -ENOENT; in bpf_map_copy_value()
352 map->map_type = attr->map_type; in bpf_map_init_from_attr()
353 map->key_size = attr->key_size; in bpf_map_init_from_attr()
354 map->value_size = attr->value_size; in bpf_map_init_from_attr()
355 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
356 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); in bpf_map_init_from_attr()
357 map->numa_node = bpf_map_attr_numa_node(attr); in bpf_map_init_from_attr()
364 if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) { in bpf_charge_memlock()
365 atomic_long_sub(pages, &user->locked_vm); in bpf_charge_memlock()
366 return -EPERM; in bpf_charge_memlock()
374 atomic_long_sub(pages, &user->locked_vm); in bpf_uncharge_memlock()
383 if (size >= U32_MAX - PAGE_SIZE) in bpf_map_charge_init()
384 return -E2BIG; in bpf_map_charge_init()
393 mem->pages = pages; in bpf_map_charge_init()
394 mem->user = user; in bpf_map_charge_init()
401 bpf_uncharge_memlock(mem->user, mem->pages); in bpf_map_charge_finish()
402 free_uid(mem->user); in bpf_map_charge_finish()
418 ret = bpf_charge_memlock(map->memory.user, pages); in bpf_map_charge_memlock()
421 map->memory.pages += pages; in bpf_map_charge_memlock()
427 bpf_uncharge_memlock(map->memory.user, pages); in bpf_map_uncharge_memlock()
428 map->memory.pages -= pages; in bpf_map_uncharge_memlock()
439 map->id = id; in bpf_map_alloc_id()
444 return -ENOSPC; in bpf_map_alloc_id()
454 * disappears - even if someone holds an fd to them they are unusable, in bpf_map_free_id()
458 if (!map->id) in bpf_map_free_id()
466 idr_remove(&map_idr, map->id); in bpf_map_free_id()
467 map->id = 0; in bpf_map_free_id()
481 bpf_map_charge_move(&mem, &map->memory); in bpf_map_free_deferred()
484 map->ops->map_free(map); in bpf_map_free_deferred()
490 if (atomic64_dec_and_test(&map->usercnt)) { in bpf_map_put_uref()
491 if (map->ops->map_release_uref) in bpf_map_put_uref()
492 map->ops->map_release_uref(map); in bpf_map_put_uref()
497 * (underlying map implementation ops->map_free() might sleep)
501 if (atomic64_dec_and_test(&map->refcnt)) { in __bpf_map_put()
504 btf_put(map->btf); in __bpf_map_put()
505 INIT_WORK(&map->work, bpf_map_free_deferred); in __bpf_map_put()
506 schedule_work(&map->work); in __bpf_map_put()
524 struct bpf_map *map = filp->private_data; in bpf_map_release()
526 if (map->ops->map_release) in bpf_map_release()
527 map->ops->map_release(map, filp); in bpf_map_release()
535 fmode_t mode = f.file->f_mode; in map_get_sys_perms()
540 if (READ_ONCE(map->frozen)) in map_get_sys_perms()
548 const struct bpf_map *map = filp->private_data; in bpf_map_show_fdinfo()
552 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { in bpf_map_show_fdinfo()
554 spin_lock(&array->aux->owner.lock); in bpf_map_show_fdinfo()
555 type = array->aux->owner.type; in bpf_map_show_fdinfo()
556 jited = array->aux->owner.jited; in bpf_map_show_fdinfo()
557 spin_unlock(&array->aux->owner.lock); in bpf_map_show_fdinfo()
569 map->map_type, in bpf_map_show_fdinfo()
570 map->key_size, in bpf_map_show_fdinfo()
571 map->value_size, in bpf_map_show_fdinfo()
572 map->max_entries, in bpf_map_show_fdinfo()
573 map->map_flags, in bpf_map_show_fdinfo()
574 map->memory.pages * 1ULL << PAGE_SHIFT, in bpf_map_show_fdinfo()
575 map->id, in bpf_map_show_fdinfo()
576 READ_ONCE(map->frozen)); in bpf_map_show_fdinfo()
590 return -EINVAL; in bpf_dummy_read()
599 return -EINVAL; in bpf_dummy_write()
602 /* called for any extra memory-mapped regions (except initial) */
605 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_open()
607 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_open()
614 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_close()
616 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_close()
627 struct bpf_map *map = filp->private_data; in bpf_map_mmap()
630 if (!map->ops->map_mmap || map_value_has_spin_lock(map)) in bpf_map_mmap()
631 return -ENOTSUPP; in bpf_map_mmap()
633 if (!(vma->vm_flags & VM_SHARED)) in bpf_map_mmap()
634 return -EINVAL; in bpf_map_mmap()
636 mutex_lock(&map->freeze_mutex); in bpf_map_mmap()
638 if (vma->vm_flags & VM_WRITE) { in bpf_map_mmap()
639 if (map->frozen) { in bpf_map_mmap()
640 err = -EPERM; in bpf_map_mmap()
643 /* map is meant to be read-only, so do not allow mapping as in bpf_map_mmap()
645 * reference and allows user-space to still modify it after in bpf_map_mmap()
648 if (map->map_flags & BPF_F_RDONLY_PROG) { in bpf_map_mmap()
649 err = -EACCES; in bpf_map_mmap()
655 vma->vm_ops = &bpf_map_default_vmops; in bpf_map_mmap()
656 vma->vm_private_data = map; in bpf_map_mmap()
657 vma->vm_flags &= ~VM_MAYEXEC; in bpf_map_mmap()
658 if (!(vma->vm_flags & VM_WRITE)) in bpf_map_mmap()
659 /* disallow re-mapping with PROT_WRITE */ in bpf_map_mmap()
660 vma->vm_flags &= ~VM_MAYWRITE; in bpf_map_mmap()
662 err = map->ops->map_mmap(map, vma); in bpf_map_mmap()
666 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap()
669 mutex_unlock(&map->freeze_mutex); in bpf_map_mmap()
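
For reference, a hedged user-space sketch of a mapping that passes the checks above (assumption: map_fd refers to a BPF_MAP_TYPE_ARRAY created with BPF_F_MMAPABLE, and len covers value_size * max_entries rounded up to a page):

	#include <stddef.h>
	#include <sys/mman.h>

	/* MAP_SHARED is mandatory (see the VM_SHARED check). A PROT_READ-only
	 * mapping remains possible after the map is frozen, since only
	 * VM_WRITE mappings are rejected for frozen maps.
	 */
	static void *mmap_bpf_array(int map_fd, size_t len)
	{
		return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
	}
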
675 struct bpf_map *map = filp->private_data; in bpf_map_poll()
677 if (map->ops->map_poll) in bpf_map_poll()
678 return map->ops->map_poll(map, filp, pts); in bpf_map_poll()
702 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, in bpf_map_new_fd()
709 return -EINVAL; in bpf_get_file_flag()
719 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
720 sizeof(attr->CMD##_LAST_FIELD), 0, \
721 sizeof(*attr) - \
722 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
723 sizeof(attr->CMD##_LAST_FIELD)) != NULL
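
CHECK_ATTR verifies that every byte of union bpf_attr past the last field a command consumes is zero, so attrs carrying unknown extensions fail cleanly. A hypothetical use, assuming the CMD##_LAST_FIELD marker has been defined for the command:

	/* Hypothetical: for a command whose last consumed attr field is
	 * map_fd, any non-zero byte after map_fd makes the command fail.
	 */
	#define BPF_MAP_FREEZE_LAST_FIELD map_fd

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;
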
738 return -EINVAL; in bpf_obj_name_cpy()
744 return -EINVAL; in bpf_obj_name_cpy()
746 return src - orig_src; in bpf_obj_name_cpy()
754 return -ENOTSUPP; in map_check_no_btf()
767 if (!key_type || key_size != map->key_size) in map_check_btf()
768 return -EINVAL; in map_check_btf()
771 if (!map->ops->map_check_btf) in map_check_btf()
772 return -EINVAL; in map_check_btf()
776 if (!value_type || value_size != map->value_size) in map_check_btf()
777 return -EINVAL; in map_check_btf()
779 map->spin_lock_off = btf_find_spin_lock(btf, value_type); in map_check_btf()
782 if (map->map_flags & BPF_F_RDONLY_PROG) in map_check_btf()
783 return -EACCES; in map_check_btf()
784 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
785 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
786 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in map_check_btf()
787 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
788 map->map_type != BPF_MAP_TYPE_INODE_STORAGE) in map_check_btf()
789 return -ENOTSUPP; in map_check_btf()
790 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > in map_check_btf()
791 map->value_size) { in map_check_btf()
794 map->spin_lock_off, map->value_size); in map_check_btf()
795 return -EFAULT; in map_check_btf()
799 if (map->ops->map_check_btf) in map_check_btf()
800 ret = map->ops->map_check_btf(map, btf, key_type, value_type); in map_check_btf()
817 return -EINVAL; in map_create()
819 if (attr->btf_vmlinux_value_type_id) { in map_create()
820 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || in map_create()
821 attr->btf_key_type_id || attr->btf_value_type_id) in map_create()
822 return -EINVAL; in map_create()
823 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { in map_create()
824 return -EINVAL; in map_create()
827 f_flags = bpf_get_file_flag(attr->map_flags); in map_create()
834 return -EINVAL; in map_create()
841 err = bpf_obj_name_cpy(map->name, attr->map_name, in map_create()
842 sizeof(attr->map_name)); in map_create()
846 atomic64_set(&map->refcnt, 1); in map_create()
847 atomic64_set(&map->usercnt, 1); in map_create()
848 mutex_init(&map->freeze_mutex); in map_create()
850 map->spin_lock_off = -EINVAL; in map_create()
851 if (attr->btf_key_type_id || attr->btf_value_type_id || in map_create()
855 * counter part. Thus, attr->btf_fd has in map_create()
858 attr->btf_vmlinux_value_type_id) { in map_create()
861 btf = btf_get_by_fd(attr->btf_fd); in map_create()
866 map->btf = btf; in map_create()
868 if (attr->btf_value_type_id) { in map_create()
869 err = map_check_btf(map, btf, attr->btf_key_type_id, in map_create()
870 attr->btf_value_type_id); in map_create()
875 map->btf_key_type_id = attr->btf_key_type_id; in map_create()
876 map->btf_value_type_id = attr->btf_value_type_id; in map_create()
877 map->btf_vmlinux_value_type_id = in map_create()
878 attr->btf_vmlinux_value_type_id; in map_create()
895 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. in map_create()
906 btf_put(map->btf); in map_create()
907 bpf_map_charge_move(&mem, &map->memory); in map_create()
908 map->ops->map_free(map); in map_create()
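
A minimal user-space sketch of driving this map_create path through the bpf(2) syscall (assumption: kernel UAPI headers available; error handling trimmed):

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
	{
		return syscall(__NR_bpf, cmd, attr, size);
	}

	static int create_array_map(void)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = sizeof(__u32);
		attr.value_size = sizeof(__u64);
		attr.max_entries = 16;
		strncpy(attr.map_name, "demo_array", sizeof(attr.map_name) - 1);

		return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));	/* new map fd or -1 */
	}
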
919 return ERR_PTR(-EBADF); in __bpf_map_get()
920 if (f.file->f_op != &bpf_map_fops) { in __bpf_map_get()
922 return ERR_PTR(-EINVAL); in __bpf_map_get()
925 return f.file->private_data; in __bpf_map_get()
930 atomic64_inc(&map->refcnt); in bpf_map_inc()
936 atomic64_inc(&map->refcnt); in bpf_map_inc_with_uref()
937 atomic64_inc(&map->usercnt); in bpf_map_inc_with_uref()
976 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); in __bpf_map_inc_not_zero()
978 return ERR_PTR(-ENOENT); in __bpf_map_inc_not_zero()
980 atomic64_inc(&map->usercnt); in __bpf_map_inc_not_zero()
997 return -ENOTSUPP; in bpf_stackmap_copy()
1006 return ERR_PTR(-EINVAL); in __bpf_copy_key()
1016 void __user *ukey = u64_to_user_ptr(attr->key); in map_lookup_elem()
1017 void __user *uvalue = u64_to_user_ptr(attr->value); in map_lookup_elem()
1018 int ufd = attr->map_fd; in map_lookup_elem()
1026 return -EINVAL; in map_lookup_elem()
1028 if (attr->flags & ~BPF_F_LOCK) in map_lookup_elem()
1029 return -EINVAL; in map_lookup_elem()
1036 err = -EPERM; in map_lookup_elem()
1040 if ((attr->flags & BPF_F_LOCK) && in map_lookup_elem()
1042 err = -EINVAL; in map_lookup_elem()
1046 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_elem()
1054 err = -ENOMEM; in map_lookup_elem()
1059 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1063 err = -EFAULT; in map_lookup_elem()
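
The user-space counterpart of the lookup path above, reusing the sys_bpf() wrapper from the map-creation sketch (assumption: a map with __u32 keys and __u64 values):

	static int lookup_u64(int map_fd, __u32 key, __u64 *value)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = map_fd;
		attr.key = (__u64)(unsigned long)&key;
		attr.value = (__u64)(unsigned long)value;

		return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	}
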
1083 void __user *ukey = u64_to_user_ptr(attr->key); in map_update_elem()
1084 void __user *uvalue = u64_to_user_ptr(attr->value); in map_update_elem()
1085 int ufd = attr->map_fd; in map_update_elem()
1093 return -EINVAL; in map_update_elem()
1101 err = -EPERM; in map_update_elem()
1105 if ((attr->flags & BPF_F_LOCK) && in map_update_elem()
1107 err = -EINVAL; in map_update_elem()
1111 key = __bpf_copy_key(ukey, map->key_size); in map_update_elem()
1117 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_update_elem()
1118 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in map_update_elem()
1119 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in map_update_elem()
1120 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in map_update_elem()
1121 value_size = round_up(map->value_size, 8) * num_possible_cpus(); in map_update_elem()
1123 value_size = map->value_size; in map_update_elem()
1125 err = -ENOMEM; in map_update_elem()
1130 err = -EFAULT; in map_update_elem()
1134 err = bpf_map_update_value(map, f, key, value, attr->flags); in map_update_elem()
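
And the matching update call, under the same wrapper assumption:

	static int update_u64(int map_fd, __u32 key, __u64 value)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = map_fd;
		attr.key = (__u64)(unsigned long)&key;
		attr.value = (__u64)(unsigned long)&value;
		attr.flags = BPF_ANY;	/* create or update */

		return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}
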
1150 void __user *ukey = u64_to_user_ptr(attr->key); in map_delete_elem()
1151 int ufd = attr->map_fd; in map_delete_elem()
1158 return -EINVAL; in map_delete_elem()
1166 err = -EPERM; in map_delete_elem()
1170 key = __bpf_copy_key(ukey, map->key_size); in map_delete_elem()
1180 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in map_delete_elem()
1182 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1188 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1205 void __user *ukey = u64_to_user_ptr(attr->key); in map_get_next_key()
1206 void __user *unext_key = u64_to_user_ptr(attr->next_key); in map_get_next_key()
1207 int ufd = attr->map_fd; in map_get_next_key()
1214 return -EINVAL; in map_get_next_key()
1221 err = -EPERM; in map_get_next_key()
1226 key = __bpf_copy_key(ukey, map->key_size); in map_get_next_key()
1235 err = -ENOMEM; in map_get_next_key()
1236 next_key = kmalloc(map->key_size, GFP_USER); in map_get_next_key()
1246 err = map->ops->map_get_next_key(map, key, next_key); in map_get_next_key()
1252 err = -EFAULT; in map_get_next_key()
1253 if (copy_to_user(unext_key, next_key, map->key_size) != 0) in map_get_next_key()
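
A sketch of full-map iteration built on this command (same wrapper assumption; a NULL key asks for the first key, and ENOENT from the syscall means the last key was reached):

	static void walk_keys(int map_fd)
	{
		union bpf_attr attr;
		__u32 cur, next;
		int first = 1;

		for (;;) {
			memset(&attr, 0, sizeof(attr));
			attr.map_fd = map_fd;
			attr.key = first ? 0 : (__u64)(unsigned long)&cur;	/* NULL -> first key */
			attr.next_key = (__u64)(unsigned long)&next;
			if (sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)))
				break;		/* ENOENT: iteration done */
			cur = next;
			first = 0;
			/* process cur here */
		}
	}
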
1271 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_delete_batch()
1276 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_delete_batch()
1277 return -EINVAL; in generic_map_delete_batch()
1279 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_delete_batch()
1281 return -EINVAL; in generic_map_delete_batch()
1284 max_count = attr->batch.count; in generic_map_delete_batch()
1288 key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_delete_batch()
1290 return -ENOMEM; in generic_map_delete_batch()
1293 err = -EFAULT; in generic_map_delete_batch()
1294 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_delete_batch()
1295 map->key_size)) in generic_map_delete_batch()
1305 err = map->ops->map_delete_elem(map, key); in generic_map_delete_batch()
1313 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) in generic_map_delete_batch()
1314 err = -EFAULT; in generic_map_delete_batch()
1324 void __user *values = u64_to_user_ptr(attr->batch.values); in generic_map_update_batch()
1325 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_update_batch()
1327 int ufd = attr->batch.map_fd; in generic_map_update_batch()
1332 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_update_batch()
1333 return -EINVAL; in generic_map_update_batch()
1335 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_update_batch()
1337 return -EINVAL; in generic_map_update_batch()
1342 max_count = attr->batch.count; in generic_map_update_batch()
1346 key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_update_batch()
1348 return -ENOMEM; in generic_map_update_batch()
1353 return -ENOMEM; in generic_map_update_batch()
1358 err = -EFAULT; in generic_map_update_batch()
1359 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_update_batch()
1360 map->key_size) || in generic_map_update_batch()
1365 attr->batch.elem_flags); in generic_map_update_batch()
1372 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) in generic_map_update_batch()
1373 err = -EFAULT; in generic_map_update_batch()
1387 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); in generic_map_lookup_batch()
1388 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); in generic_map_lookup_batch()
1389 void __user *values = u64_to_user_ptr(attr->batch.values); in generic_map_lookup_batch()
1390 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_lookup_batch()
1395 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_lookup_batch()
1396 return -EINVAL; in generic_map_lookup_batch()
1398 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_lookup_batch()
1400 return -EINVAL; in generic_map_lookup_batch()
1404 max_count = attr->batch.count; in generic_map_lookup_batch()
1408 if (put_user(0, &uattr->batch.count)) in generic_map_lookup_batch()
1409 return -EFAULT; in generic_map_lookup_batch()
1411 buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
1413 return -ENOMEM; in generic_map_lookup_batch()
1415 buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
1418 return -ENOMEM; in generic_map_lookup_batch()
1421 err = -EFAULT; in generic_map_lookup_batch()
1423 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) in generic_map_lookup_batch()
1426 value = key + map->key_size; in generic_map_lookup_batch()
1432 err = map->ops->map_get_next_key(map, prev_key, key); in generic_map_lookup_batch()
1437 attr->batch.elem_flags); in generic_map_lookup_batch()
1439 if (err == -ENOENT) { in generic_map_lookup_batch()
1441 retry--; in generic_map_lookup_batch()
1444 err = -EINTR; in generic_map_lookup_batch()
1451 if (copy_to_user(keys + cp * map->key_size, key, in generic_map_lookup_batch()
1452 map->key_size)) { in generic_map_lookup_batch()
1453 err = -EFAULT; in generic_map_lookup_batch()
1457 err = -EFAULT; in generic_map_lookup_batch()
1470 if (err == -EFAULT) in generic_map_lookup_batch()
1473 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || in generic_map_lookup_batch()
1474 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) in generic_map_lookup_batch()
1475 err = -EFAULT; in generic_map_lookup_batch()
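
The batch path above can be driven from user-space like this (a single-call sketch under the same wrapper assumption, for a map with __u32 keys and __u64 values; out_batch receives the resume cursor to pass back as in_batch on the next call):

	static int lookup_batch_once(int map_fd, __u32 *keys, __u64 *vals,
				     __u32 *count /* in: capacity, out: filled */)
	{
		union bpf_attr attr;
		__u32 out_batch = 0;
		int err;

		memset(&attr, 0, sizeof(attr));
		attr.batch.map_fd = map_fd;
		attr.batch.in_batch = 0;	/* NULL: start from the beginning */
		attr.batch.out_batch = (__u64)(unsigned long)&out_batch;
		attr.batch.keys = (__u64)(unsigned long)keys;
		attr.batch.values = (__u64)(unsigned long)vals;
		attr.batch.count = *count;

		err = sys_bpf(BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
		*count = attr.batch.count;	/* kernel writes back elements copied */
		return err;
	}
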
1487 void __user *ukey = u64_to_user_ptr(attr->key); in map_lookup_and_delete_elem()
1488 void __user *uvalue = u64_to_user_ptr(attr->value); in map_lookup_and_delete_elem()
1489 int ufd = attr->map_fd; in map_lookup_and_delete_elem()
1497 return -EINVAL; in map_lookup_and_delete_elem()
1506 err = -EPERM; in map_lookup_and_delete_elem()
1510 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_and_delete_elem()
1516 value_size = map->value_size; in map_lookup_and_delete_elem()
1518 err = -ENOMEM; in map_lookup_and_delete_elem()
1523 if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
1524 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_and_delete_elem()
1525 err = map->ops->map_pop_elem(map, value); in map_lookup_and_delete_elem()
1527 err = -ENOTSUPP; in map_lookup_and_delete_elem()
1534 err = -EFAULT; in map_lookup_and_delete_elem()
1554 int err = 0, ufd = attr->map_fd; in map_freeze()
1559 return -EINVAL; in map_freeze()
1566 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in map_freeze()
1568 return -ENOTSUPP; in map_freeze()
1571 mutex_lock(&map->freeze_mutex); in map_freeze()
1573 err = -EBUSY; in map_freeze()
1576 if (READ_ONCE(map->frozen)) { in map_freeze()
1577 err = -EBUSY; in map_freeze()
1581 err = -EPERM; in map_freeze()
1585 WRITE_ONCE(map->frozen, true); in map_freeze()
1587 mutex_unlock(&map->freeze_mutex); in map_freeze()
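
Freezing from user-space is a one-field command (same wrapper assumption); afterwards, syscall-side writes and new writable mappings are rejected while the BPF-program side stays as declared:

	static int freeze_map(int map_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = map_fd;

		return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
	}
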
1608 return -EINVAL; in find_prog_type()
1612 return -EINVAL; in find_prog_type()
1614 if (!bpf_prog_is_dev_bound(prog->aux)) in find_prog_type()
1615 prog->aux->ops = ops; in find_prog_type()
1617 prog->aux->ops = &bpf_offload_prog_ops; in find_prog_type()
1618 prog->type = type; in find_prog_type()
1647 audit_log_format(ab, "prog-id=%u op=%s", in bpf_audit_prog()
1648 prog->aux->id, bpf_audit_str[op]); in bpf_audit_prog()
1658 user_bufs = atomic_long_add_return(pages, &user->locked_vm); in __bpf_prog_charge()
1660 atomic_long_sub(pages, &user->locked_vm); in __bpf_prog_charge()
1661 return -EPERM; in __bpf_prog_charge()
1671 atomic_long_sub(pages, &user->locked_vm); in __bpf_prog_uncharge()
1679 ret = __bpf_prog_charge(user, prog->pages); in bpf_prog_charge_memlock()
1685 prog->aux->user = user; in bpf_prog_charge_memlock()
1691 struct user_struct *user = prog->aux->user; in bpf_prog_uncharge_memlock()
1693 __bpf_prog_uncharge(user, prog->pages); in bpf_prog_uncharge_memlock()
1705 prog->aux->id = id; in bpf_prog_alloc_id()
1711 return -ENOSPC; in bpf_prog_alloc_id()
1720 * disappears - even if someone grabs an fd to them they are unusable, in bpf_prog_free_id()
1723 if (!prog->aux->id) in bpf_prog_free_id()
1731 idr_remove(&prog_idr, prog->aux->id); in bpf_prog_free_id()
1732 prog->aux->id = 0; in bpf_prog_free_id()
1744 kvfree(aux->func_info); in __bpf_prog_put_rcu()
1745 kfree(aux->func_info_aux); in __bpf_prog_put_rcu()
1746 bpf_prog_uncharge_memlock(aux->prog); in __bpf_prog_put_rcu()
1748 bpf_prog_free(aux->prog); in __bpf_prog_put_rcu()
1754 btf_put(prog->aux->btf); in __bpf_prog_put_noref()
1758 if (prog->aux->sleepable) in __bpf_prog_put_noref()
1759 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); in __bpf_prog_put_noref()
1761 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); in __bpf_prog_put_noref()
1763 __bpf_prog_put_rcu(&prog->aux->rcu); in __bpf_prog_put_noref()
1769 if (atomic64_dec_and_test(&prog->aux->refcnt)) { in __bpf_prog_put()
1786 struct bpf_prog *prog = filp->private_data; in bpf_prog_release()
1803 st = per_cpu_ptr(prog->aux->stats, cpu); in bpf_prog_get_stats()
1805 start = u64_stats_fetch_begin_irq(&st->syncp); in bpf_prog_get_stats()
1806 tnsecs = st->nsecs; in bpf_prog_get_stats()
1807 tcnt = st->cnt; in bpf_prog_get_stats()
1808 } while (u64_stats_fetch_retry_irq(&st->syncp, start)); in bpf_prog_get_stats()
1812 stats->nsecs = nsecs; in bpf_prog_get_stats()
1813 stats->cnt = cnt; in bpf_prog_get_stats()
1819 const struct bpf_prog *prog = filp->private_data; in bpf_prog_show_fdinfo()
1820 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; in bpf_prog_show_fdinfo()
1824 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); in bpf_prog_show_fdinfo()
1833 prog->type, in bpf_prog_show_fdinfo()
1834 prog->jited, in bpf_prog_show_fdinfo()
1836 prog->pages * 1ULL << PAGE_SHIFT, in bpf_prog_show_fdinfo()
1837 prog->aux->id, in bpf_prog_show_fdinfo()
1860 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, in bpf_prog_new_fd()
1867 return ERR_PTR(-EBADF); in ____bpf_prog_get()
1868 if (f.file->f_op != &bpf_prog_fops) { in ____bpf_prog_get()
1870 return ERR_PTR(-EINVAL); in ____bpf_prog_get()
1873 return f.file->private_data; in ____bpf_prog_get()
1878 atomic64_add(i, &prog->aux->refcnt); in bpf_prog_add()
1889 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); in bpf_prog_sub()
1895 atomic64_inc(&prog->aux->refcnt); in bpf_prog_inc()
1904 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); in bpf_prog_inc_not_zero()
1907 return ERR_PTR(-ENOENT); in bpf_prog_inc_not_zero()
1920 if (prog->type != *attach_type) in bpf_prog_get_ok()
1922 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) in bpf_prog_get_ok()
1938 prog = ERR_PTR(-EINVAL); in __bpf_prog_get()
1974 switch (attr->prog_type) { in bpf_prog_load_fixup_attach_type()
1977 * exist so checking for non-zero is the way to go here. in bpf_prog_load_fixup_attach_type()
1979 if (!attr->expected_attach_type) in bpf_prog_load_fixup_attach_type()
1980 attr->expected_attach_type = in bpf_prog_load_fixup_attach_type()
1993 return -EINVAL; in bpf_prog_load_check_attach()
2002 return -EINVAL; in bpf_prog_load_check_attach()
2008 return -EINVAL; in bpf_prog_load_check_attach()
2019 return -EINVAL; in bpf_prog_load_check_attach()
2037 return -EINVAL; in bpf_prog_load_check_attach()
2045 return -EINVAL; in bpf_prog_load_check_attach()
2053 return -EINVAL; in bpf_prog_load_check_attach()
2058 return -EINVAL; in bpf_prog_load_check_attach()
2061 return -EINVAL; in bpf_prog_load_check_attach()
2122 enum bpf_prog_type type = attr->prog_type; in bpf_prog_load()
2129 return -EINVAL; in bpf_prog_load()
2131 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | in bpf_prog_load()
2136 return -EINVAL; in bpf_prog_load()
2139 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && in bpf_prog_load()
2141 return -EPERM; in bpf_prog_load()
2144 if (strncpy_from_user(license, u64_to_user_ptr(attr->license), in bpf_prog_load()
2145 sizeof(license) - 1) < 0) in bpf_prog_load()
2146 return -EFAULT; in bpf_prog_load()
2147 license[sizeof(license) - 1] = 0; in bpf_prog_load()
2149 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in bpf_prog_load()
2152 if (attr->insn_cnt == 0 || in bpf_prog_load()
2153 attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) in bpf_prog_load()
2154 return -E2BIG; in bpf_prog_load()
2158 return -EPERM; in bpf_prog_load()
2161 return -EPERM; in bpf_prog_load()
2163 return -EPERM; in bpf_prog_load()
2166 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, in bpf_prog_load()
2167 attr->attach_btf_id, in bpf_prog_load()
2168 attr->attach_prog_fd)) in bpf_prog_load()
2169 return -EINVAL; in bpf_prog_load()
2172 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); in bpf_prog_load()
2174 return -ENOMEM; in bpf_prog_load()
2176 prog->expected_attach_type = attr->expected_attach_type; in bpf_prog_load()
2177 prog->aux->attach_btf_id = attr->attach_btf_id; in bpf_prog_load()
2178 if (attr->attach_prog_fd) { in bpf_prog_load()
2181 dst_prog = bpf_prog_get(attr->attach_prog_fd); in bpf_prog_load()
2186 prog->aux->dst_prog = dst_prog; in bpf_prog_load()
2189 prog->aux->offload_requested = !!attr->prog_ifindex; in bpf_prog_load()
2190 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; in bpf_prog_load()
2192 err = security_bpf_prog_alloc(prog->aux); in bpf_prog_load()
2200 prog->len = attr->insn_cnt; in bpf_prog_load()
2202 err = -EFAULT; in bpf_prog_load()
2203 if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), in bpf_prog_load()
2207 prog->orig_prog = NULL; in bpf_prog_load()
2208 prog->jited = 0; in bpf_prog_load()
2210 atomic64_set(&prog->aux->refcnt, 1); in bpf_prog_load()
2211 prog->gpl_compatible = is_gpl ? 1 : 0; in bpf_prog_load()
2213 if (bpf_prog_is_dev_bound(prog->aux)) { in bpf_prog_load()
2224 prog->aux->load_time = ktime_get_boottime_ns(); in bpf_prog_load()
2225 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, in bpf_prog_load()
2226 sizeof(attr->prog_name)); in bpf_prog_load()
2271 __bpf_prog_put_noref(prog, prog->aux->func_cnt); in bpf_prog_load()
2276 security_bpf_prog_free(prog->aux); in bpf_prog_load()
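
A minimal user-space sketch of the load path above (same wrapper assumption): the smallest valid program, r0 = 0; exit, loaded as a socket filter since that type needs no attach-time target:

	static int load_trivial_prog(void)
	{
		struct bpf_insn insns[] = {
			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
			  .dst_reg = BPF_REG_0, .imm = 0 },	/* r0 = 0 */
			{ .code = BPF_JMP | BPF_EXIT },		/* exit   */
		};
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
		attr.insns = (__u64)(unsigned long)insns;
		attr.insn_cnt = 2;
		attr.license = (__u64)(unsigned long)"GPL";

		return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));	/* prog fd or -1 */
	}
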
2286 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) in bpf_obj_pin()
2287 return -EINVAL; in bpf_obj_pin()
2289 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); in bpf_obj_pin()
2294 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || in bpf_obj_get()
2295 attr->file_flags & ~BPF_OBJ_FLAG_MASK) in bpf_obj_get()
2296 return -EINVAL; in bpf_obj_get()
2298 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), in bpf_obj_get()
2299 attr->file_flags); in bpf_obj_get()
2305 atomic64_set(&link->refcnt, 1); in bpf_link_init()
2306 link->type = type; in bpf_link_init()
2307 link->id = 0; in bpf_link_init()
2308 link->ops = ops; in bpf_link_init()
2309 link->prog = prog; in bpf_link_init()
2331 primer->link->prog = NULL; in bpf_link_cleanup()
2332 bpf_link_free_id(primer->id); in bpf_link_cleanup()
2333 fput(primer->file); in bpf_link_cleanup()
2334 put_unused_fd(primer->fd); in bpf_link_cleanup()
2339 atomic64_inc(&link->refcnt); in bpf_link_inc()
2345 bpf_link_free_id(link->id); in bpf_link_free()
2346 if (link->prog) { in bpf_link_free()
2348 link->ops->release(link); in bpf_link_free()
2349 bpf_prog_put(link->prog); in bpf_link_free()
2352 link->ops->dealloc(link); in bpf_link_free()
2367 if (!atomic64_dec_and_test(&link->refcnt)) in bpf_link_put()
2371 INIT_WORK(&link->work, bpf_link_put_deferred); in bpf_link_put()
2372 schedule_work(&link->work); in bpf_link_put()
2380 struct bpf_link *link = filp->private_data; in bpf_link_release()
2400 const struct bpf_link *link = filp->private_data; in bpf_link_show_fdinfo()
2401 const struct bpf_prog *prog = link->prog; in bpf_link_show_fdinfo()
2402 enum bpf_link_type type = link->type; in bpf_link_show_fdinfo()
2403 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; in bpf_link_show_fdinfo()
2405 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); in bpf_link_show_fdinfo()
2416 link->id, in bpf_link_show_fdinfo()
2418 prog->aux->id); in bpf_link_show_fdinfo()
2419 if (link->ops->show_fdinfo) in bpf_link_show_fdinfo()
2420 link->ops->show_fdinfo(link, m); in bpf_link_show_fdinfo()
2446 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2449 * user-space, if bpf_link is successfully attached. If not, bpf_link and
2450 * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the
2482 primer->link = link; in bpf_link_prime()
2483 primer->file = file; in bpf_link_prime()
2484 primer->fd = fd; in bpf_link_prime()
2485 primer->id = id; in bpf_link_prime()
2491 /* make bpf_link fetchable by ID */ in bpf_link_settle()
2493 primer->link->id = primer->id; in bpf_link_settle()
2495 /* make bpf_link fetchable by FD */ in bpf_link_settle()
2496 fd_install(primer->fd, primer->file); in bpf_link_settle()
2498 return primer->fd; in bpf_link_settle()
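
A kernel-side sketch of the usual caller pattern these primitives enable (struct my_link and do_attach() are hypothetical stand-ins; the real pattern is visible in bpf_tracing_prog_attach() and bpf_raw_tracepoint_open() below):

	static int my_link_attach(struct my_link *link)
	{
		struct bpf_link_primer link_primer;
		int err;

		err = bpf_link_prime(&link->link, &link_primer);	/* reserve fd, file, id */
		if (err) {
			kfree(link);
			return err;
		}
		err = do_attach(link);					/* hypothetical attach step */
		if (err) {
			bpf_link_cleanup(&link_primer);			/* undoes prime, frees link */
			return err;
		}
		return bpf_link_settle(&link_primer);			/* publish id, install fd */
	}
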
2503 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); in bpf_link_new_fd()
2512 return ERR_PTR(-EBADF); in bpf_link_get_from_fd()
2513 if (f.file->f_op != &bpf_link_fops) { in bpf_link_get_from_fd()
2515 return ERR_PTR(-EINVAL); in bpf_link_get_from_fd()
2518 link = f.file->private_data; in bpf_link_get_from_fd()
2537 WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog, in bpf_tracing_link_release()
2538 tr_link->trampoline)); in bpf_tracing_link_release()
2540 bpf_trampoline_put(tr_link->trampoline); in bpf_tracing_link_release()
2543 if (tr_link->tgt_prog) in bpf_tracing_link_release()
2544 bpf_prog_put(tr_link->tgt_prog); in bpf_tracing_link_release()
2563 tr_link->attach_type); in bpf_tracing_link_show_fdinfo()
2572 info->tracing.attach_type = tr_link->attach_type; in bpf_tracing_link_fill_link_info()
2595 switch (prog->type) { in bpf_tracing_prog_attach()
2597 if (prog->expected_attach_type != BPF_TRACE_FENTRY && in bpf_tracing_prog_attach()
2598 prog->expected_attach_type != BPF_TRACE_FEXIT && in bpf_tracing_prog_attach()
2599 prog->expected_attach_type != BPF_MODIFY_RETURN) { in bpf_tracing_prog_attach()
2600 err = -EINVAL; in bpf_tracing_prog_attach()
2605 if (prog->expected_attach_type != 0) { in bpf_tracing_prog_attach()
2606 err = -EINVAL; in bpf_tracing_prog_attach()
2611 if (prog->expected_attach_type != BPF_LSM_MAC) { in bpf_tracing_prog_attach()
2612 err = -EINVAL; in bpf_tracing_prog_attach()
2617 err = -EINVAL; in bpf_tracing_prog_attach()
2622 err = -EINVAL; in bpf_tracing_prog_attach()
2628 if (prog->type != BPF_PROG_TYPE_EXT) { in bpf_tracing_prog_attach()
2629 err = -EINVAL; in bpf_tracing_prog_attach()
2645 err = -ENOMEM; in bpf_tracing_prog_attach()
2648 bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING, in bpf_tracing_prog_attach()
2650 link->attach_type = prog->expected_attach_type; in bpf_tracing_prog_attach()
2652 mutex_lock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
2656 * - if prog->aux->dst_trampoline is set, the program was just loaded in bpf_tracing_prog_attach()
2658 * in prog->aux in bpf_tracing_prog_attach()
2660 * - if prog->aux->dst_trampoline is NULL, the program has already been in bpf_tracing_prog_attach()
2663 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + in bpf_tracing_prog_attach()
2666 * - if tgt_prog == NULL when this function was called using the old in bpf_tracing_prog_attach()
2667 * raw_tracepoint_open API, and we need a target from prog->aux in bpf_tracing_prog_attach()
2669 * The combination of no saved target in prog->aux, and no target in bpf_tracing_prog_attach()
2672 if (!prog->aux->dst_trampoline && !tgt_prog) { in bpf_tracing_prog_attach()
2673 err = -ENOENT; in bpf_tracing_prog_attach()
2677 if (!prog->aux->dst_trampoline || in bpf_tracing_prog_attach()
2678 (key && key != prog->aux->dst_trampoline->key)) { in bpf_tracing_prog_attach()
2692 err = -ENOMEM; in bpf_tracing_prog_attach()
2701 * prog->aux are cleared below. in bpf_tracing_prog_attach()
2703 tr = prog->aux->dst_trampoline; in bpf_tracing_prog_attach()
2704 tgt_prog = prog->aux->dst_prog; in bpf_tracing_prog_attach()
2707 err = bpf_link_prime(&link->link, &link_primer); in bpf_tracing_prog_attach()
2718 link->tgt_prog = tgt_prog; in bpf_tracing_prog_attach()
2719 link->trampoline = tr; in bpf_tracing_prog_attach()
2721 /* Always clear the trampoline and target prog from prog->aux to make in bpf_tracing_prog_attach()
2723 * program is (re-)attached to another target. in bpf_tracing_prog_attach()
2725 if (prog->aux->dst_prog && in bpf_tracing_prog_attach()
2726 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) in bpf_tracing_prog_attach()
2728 bpf_prog_put(prog->aux->dst_prog); in bpf_tracing_prog_attach()
2729 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) in bpf_tracing_prog_attach()
2731 bpf_trampoline_put(prog->aux->dst_trampoline); in bpf_tracing_prog_attach()
2733 prog->aux->dst_prog = NULL; in bpf_tracing_prog_attach()
2734 prog->aux->dst_trampoline = NULL; in bpf_tracing_prog_attach()
2735 mutex_unlock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
2739 if (tr && tr != prog->aux->dst_trampoline) in bpf_tracing_prog_attach()
2741 mutex_unlock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
2759 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); in bpf_raw_tp_link_release()
2760 bpf_put_raw_tracepoint(raw_tp->btp); in bpf_raw_tp_link_release()
2779 raw_tp_link->btp->tp->name); in bpf_raw_tp_link_show_fdinfo()
2787 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); in bpf_raw_tp_link_fill_link_info()
2788 const char *tp_name = raw_tp_link->btp->tp->name; in bpf_raw_tp_link_fill_link_info()
2789 u32 ulen = info->raw_tracepoint.tp_name_len; in bpf_raw_tp_link_fill_link_info()
2793 return -EINVAL; in bpf_raw_tp_link_fill_link_info()
2795 info->raw_tracepoint.tp_name_len = tp_len + 1; in bpf_raw_tp_link_fill_link_info()
2802 return -EFAULT; in bpf_raw_tp_link_fill_link_info()
2806 if (copy_to_user(ubuf, tp_name, ulen - 1)) in bpf_raw_tp_link_fill_link_info()
2807 return -EFAULT; in bpf_raw_tp_link_fill_link_info()
2808 if (put_user(zero, ubuf + ulen - 1)) in bpf_raw_tp_link_fill_link_info()
2809 return -EFAULT; in bpf_raw_tp_link_fill_link_info()
2810 return -ENOSPC; in bpf_raw_tp_link_fill_link_info()
2836 return -EINVAL; in bpf_raw_tracepoint_open()
2838 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); in bpf_raw_tracepoint_open()
2842 switch (prog->type) { in bpf_raw_tracepoint_open()
2846 if (attr->raw_tracepoint.name) { in bpf_raw_tracepoint_open()
2850 err = -EINVAL; in bpf_raw_tracepoint_open()
2853 if (prog->type == BPF_PROG_TYPE_TRACING && in bpf_raw_tracepoint_open()
2854 prog->expected_attach_type == BPF_TRACE_RAW_TP) { in bpf_raw_tracepoint_open()
2855 tp_name = prog->aux->attach_func_name; in bpf_raw_tracepoint_open()
2865 u64_to_user_ptr(attr->raw_tracepoint.name), in bpf_raw_tracepoint_open()
2866 sizeof(buf) - 1) < 0) { in bpf_raw_tracepoint_open()
2867 err = -EFAULT; in bpf_raw_tracepoint_open()
2870 buf[sizeof(buf) - 1] = 0; in bpf_raw_tracepoint_open()
2874 err = -EINVAL; in bpf_raw_tracepoint_open()
2880 err = -ENOENT; in bpf_raw_tracepoint_open()
2886 err = -ENOMEM; in bpf_raw_tracepoint_open()
2889 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, in bpf_raw_tracepoint_open()
2891 link->btp = btp; in bpf_raw_tracepoint_open()
2893 err = bpf_link_prime(&link->link, &link_primer); in bpf_raw_tracepoint_open()
2899 err = bpf_probe_register(link->btp, prog); in bpf_raw_tracepoint_open()
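
Driving this command from user-space (same wrapper assumption): attach a BPF_PROG_TYPE_RAW_TRACEPOINT program to a tracepoint by name; the returned fd pins the attachment, and closing it detaches:

	static int open_raw_tp(int prog_fd, const char *tp_name)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.raw_tracepoint.name = (__u64)(unsigned long)tp_name;
		attr.raw_tracepoint.prog_fd = prog_fd;

		return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	}
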
2973 switch (prog->type) { in bpf_prog_attach_check_attach_type()
2978 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; in bpf_prog_attach_check_attach_type()
2981 /* cg-skb progs can be loaded by unpriv user. in bpf_prog_attach_check_attach_type()
2984 return -EPERM; in bpf_prog_attach_check_attach_type()
2987 if (prog->type != ptype) in bpf_prog_attach_check_attach_type()
2988 return -EINVAL; in bpf_prog_attach_check_attach_type()
2990 return prog->enforce_expected_attach_type && in bpf_prog_attach_check_attach_type()
2991 prog->expected_attach_type != attach_type ? in bpf_prog_attach_check_attach_type()
2992 -EINVAL : 0; in bpf_prog_attach_check_attach_type()
3010 return -EINVAL; in bpf_prog_attach()
3012 if (attr->attach_flags & ~BPF_F_ATTACH_MASK) in bpf_prog_attach()
3013 return -EINVAL; in bpf_prog_attach()
3015 ptype = attach_type_to_prog_type(attr->attach_type); in bpf_prog_attach()
3017 return -EINVAL; in bpf_prog_attach()
3019 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); in bpf_prog_attach()
3023 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { in bpf_prog_attach()
3025 return -EINVAL; in bpf_prog_attach()
3049 ret = -EINVAL; in bpf_prog_attach()
3064 return -EINVAL; in bpf_prog_detach()
3066 ptype = attach_type_to_prog_type(attr->attach_type); in bpf_prog_detach()
3085 return -EINVAL; in bpf_prog_detach()
3095 return -EPERM; in bpf_prog_query()
3097 return -EINVAL; in bpf_prog_query()
3098 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) in bpf_prog_query()
3099 return -EINVAL; in bpf_prog_query()
3101 switch (attr->query.attach_type) { in bpf_prog_query()
3132 return -EINVAL; in bpf_prog_query()
3142 int ret = -ENOTSUPP; in bpf_prog_test_run()
3145 return -EINVAL; in bpf_prog_test_run()
3147 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || in bpf_prog_test_run()
3148 (!attr->test.ctx_size_in && attr->test.ctx_in)) in bpf_prog_test_run()
3149 return -EINVAL; in bpf_prog_test_run()
3151 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || in bpf_prog_test_run()
3152 (!attr->test.ctx_size_out && attr->test.ctx_out)) in bpf_prog_test_run()
3153 return -EINVAL; in bpf_prog_test_run()
3155 prog = bpf_prog_get(attr->test.prog_fd); in bpf_prog_test_run()
3159 if (prog->aux->ops->test_run) in bpf_prog_test_run()
3160 ret = prog->aux->ops->test_run(prog, attr, uattr); in bpf_prog_test_run()
3173 u32 next_id = attr->start_id; in bpf_obj_get_next_id()
3177 return -EINVAL; in bpf_obj_get_next_id()
3180 return -EPERM; in bpf_obj_get_next_id()
3185 err = -ENOENT; in bpf_obj_get_next_id()
3189 err = put_user(next_id, &uattr->next_id); in bpf_obj_get_next_id()
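
A sketch of enumerating loaded programs with this command (same wrapper assumption; fetching an fd for each id requires CAP_SYS_ADMIN, as the checks above show):

	static void walk_prog_ids(void)
	{
		union bpf_attr attr;
		__u32 id = 0;

		for (;;) {
			memset(&attr, 0, sizeof(attr));
			attr.start_id = id;
			if (sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
				break;		/* ENOENT: no higher id */
			id = attr.next_id;

			memset(&attr, 0, sizeof(attr));
			attr.prog_id = id;
			/* fd = */ sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
		}
	}
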
3239 return ERR_PTR(-ENOENT); in bpf_prog_by_id()
3246 prog = ERR_PTR(-ENOENT); in bpf_prog_by_id()
3254 u32 id = attr->prog_id; in bpf_prog_get_fd_by_id()
3258 return -EINVAL; in bpf_prog_get_fd_by_id()
3261 return -EPERM; in bpf_prog_get_fd_by_id()
3279 u32 id = attr->map_id; in bpf_map_get_fd_by_id()
3284 attr->open_flags & ~BPF_OBJ_FLAG_MASK) in bpf_map_get_fd_by_id()
3285 return -EINVAL; in bpf_map_get_fd_by_id()
3288 return -EPERM; in bpf_map_get_fd_by_id()
3290 f_flags = bpf_get_file_flag(attr->open_flags); in bpf_map_get_fd_by_id()
3299 map = ERR_PTR(-ENOENT); in bpf_map_get_fd_by_id()
3319 mutex_lock(&prog->aux->used_maps_mutex); in bpf_map_from_imm()
3320 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { in bpf_map_from_imm()
3321 map = prog->aux->used_maps[i]; in bpf_map_from_imm()
3326 if (!map->ops->map_direct_value_meta) in bpf_map_from_imm()
3328 if (!map->ops->map_direct_value_meta(map, addr, off)) { in bpf_map_from_imm()
3336 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_map_from_imm()
3350 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), in bpf_insn_prepare_dump()
3355 for (i = 0; i < prog->len; i++) { in bpf_insn_prepare_dump()
3361 /* fall-through */ in bpf_insn_prepare_dump()
3383 insns[i].imm = map->id; in bpf_insn_prepare_dump()
3404 if ((info->nr_func_info || info->func_info_rec_size) && in set_info_rec_size()
3405 info->func_info_rec_size != sizeof(struct bpf_func_info)) in set_info_rec_size()
3406 return -EINVAL; in set_info_rec_size()
3408 if ((info->nr_line_info || info->line_info_rec_size) && in set_info_rec_size()
3409 info->line_info_rec_size != sizeof(struct bpf_line_info)) in set_info_rec_size()
3410 return -EINVAL; in set_info_rec_size()
3412 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && in set_info_rec_size()
3413 info->jited_line_info_rec_size != sizeof(__u64)) in set_info_rec_size()
3414 return -EINVAL; in set_info_rec_size()
3416 info->func_info_rec_size = sizeof(struct bpf_func_info); in set_info_rec_size()
3417 info->line_info_rec_size = sizeof(struct bpf_line_info); in set_info_rec_size()
3418 info->jited_line_info_rec_size = sizeof(__u64); in set_info_rec_size()
3428 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_prog_get_info_by_fd()
3430 u32 info_len = attr->info.info_len; in bpf_prog_get_info_by_fd()
3443 return -EFAULT; in bpf_prog_get_info_by_fd()
3445 info.type = prog->type; in bpf_prog_get_info_by_fd()
3446 info.id = prog->aux->id; in bpf_prog_get_info_by_fd()
3447 info.load_time = prog->aux->load_time; in bpf_prog_get_info_by_fd()
3449 prog->aux->user->uid); in bpf_prog_get_info_by_fd()
3450 info.gpl_compatible = prog->gpl_compatible; in bpf_prog_get_info_by_fd()
3452 memcpy(info.tag, prog->tag, sizeof(prog->tag)); in bpf_prog_get_info_by_fd()
3453 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); in bpf_prog_get_info_by_fd()
3455 mutex_lock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
3457 info.nr_map_ids = prog->aux->used_map_cnt; in bpf_prog_get_info_by_fd()
3464 if (put_user(prog->aux->used_maps[i]->id, in bpf_prog_get_info_by_fd()
3466 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
3467 return -EFAULT; in bpf_prog_get_info_by_fd()
3470 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
3497 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
3501 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); in bpf_prog_get_info_by_fd()
3503 return -ENOMEM; in bpf_prog_get_info_by_fd()
3509 return -EFAULT; in bpf_prog_get_info_by_fd()
3512 if (bpf_prog_is_dev_bound(prog->aux)) { in bpf_prog_get_info_by_fd()
3524 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
3528 for (i = 0; i < prog->aux->func_cnt; i++) in bpf_prog_get_info_by_fd()
3529 info.jited_prog_len += prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
3531 info.jited_prog_len = prog->jited_len; in bpf_prog_get_info_by_fd()
3535 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
3539 /* for multi-function programs, copy the JITed in bpf_prog_get_info_by_fd()
3542 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
3547 for (i = 0; i < prog->aux->func_cnt; i++) { in bpf_prog_get_info_by_fd()
3548 len = prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
3550 img = (u8 *) prog->aux->func[i]->bpf_func; in bpf_prog_get_info_by_fd()
3552 return -EFAULT; in bpf_prog_get_info_by_fd()
3554 free -= len; in bpf_prog_get_info_by_fd()
3559 if (copy_to_user(uinsns, prog->bpf_func, ulen)) in bpf_prog_get_info_by_fd()
3560 return -EFAULT; in bpf_prog_get_info_by_fd()
3568 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
3570 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
3580 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
3583 prog->aux->func[i]->bpf_func; in bpf_prog_get_info_by_fd()
3586 return -EFAULT; in bpf_prog_get_info_by_fd()
3589 ksym_addr = (unsigned long) prog->bpf_func; in bpf_prog_get_info_by_fd()
3591 return -EFAULT; in bpf_prog_get_info_by_fd()
3599 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
3601 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
3608 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
3611 prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
3613 return -EFAULT; in bpf_prog_get_info_by_fd()
3616 func_len = prog->jited_len; in bpf_prog_get_info_by_fd()
3618 return -EFAULT; in bpf_prog_get_info_by_fd()
3625 if (prog->aux->btf) in bpf_prog_get_info_by_fd()
3626 info.btf_id = btf_id(prog->aux->btf); in bpf_prog_get_info_by_fd()
3629 info.nr_func_info = prog->aux->func_info_cnt; in bpf_prog_get_info_by_fd()
3635 if (copy_to_user(user_finfo, prog->aux->func_info, in bpf_prog_get_info_by_fd()
3637 return -EFAULT; in bpf_prog_get_info_by_fd()
3641 info.nr_line_info = prog->aux->nr_linfo; in bpf_prog_get_info_by_fd()
3647 if (copy_to_user(user_linfo, prog->aux->linfo, in bpf_prog_get_info_by_fd()
3649 return -EFAULT; in bpf_prog_get_info_by_fd()
3653 if (prog->aux->jited_linfo) in bpf_prog_get_info_by_fd()
3654 info.nr_jited_line_info = prog->aux->nr_linfo; in bpf_prog_get_info_by_fd()
3658 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
3665 if (put_user((__u64)(long)prog->aux->jited_linfo[i], in bpf_prog_get_info_by_fd()
3667 return -EFAULT; in bpf_prog_get_info_by_fd()
3675 info.nr_prog_tags = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
3682 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
3685 prog->aux->func[i]->tag, in bpf_prog_get_info_by_fd()
3687 return -EFAULT; in bpf_prog_get_info_by_fd()
3691 prog->tag, BPF_TAG_SIZE)) in bpf_prog_get_info_by_fd()
3692 return -EFAULT; in bpf_prog_get_info_by_fd()
3698 put_user(info_len, &uattr->info.info_len)) in bpf_prog_get_info_by_fd()
3699 return -EFAULT; in bpf_prog_get_info_by_fd()
3709 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_map_get_info_by_fd()
3711 u32 info_len = attr->info.info_len; in bpf_map_get_info_by_fd()
3720 info.type = map->map_type; in bpf_map_get_info_by_fd()
3721 info.id = map->id; in bpf_map_get_info_by_fd()
3722 info.key_size = map->key_size; in bpf_map_get_info_by_fd()
3723 info.value_size = map->value_size; in bpf_map_get_info_by_fd()
3724 info.max_entries = map->max_entries; in bpf_map_get_info_by_fd()
3725 info.map_flags = map->map_flags; in bpf_map_get_info_by_fd()
3726 memcpy(info.name, map->name, sizeof(map->name)); in bpf_map_get_info_by_fd()
3728 if (map->btf) { in bpf_map_get_info_by_fd()
3729 info.btf_id = btf_id(map->btf); in bpf_map_get_info_by_fd()
3730 info.btf_key_type_id = map->btf_key_type_id; in bpf_map_get_info_by_fd()
3731 info.btf_value_type_id = map->btf_value_type_id; in bpf_map_get_info_by_fd()
3733 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; in bpf_map_get_info_by_fd()
3742 put_user(info_len, &uattr->info.info_len)) in bpf_map_get_info_by_fd()
3743 return -EFAULT; in bpf_map_get_info_by_fd()
3753 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_btf_get_info_by_fd()
3754 u32 info_len = attr->info.info_len; in bpf_btf_get_info_by_fd()
3769 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_link_get_info_by_fd()
3771 u32 info_len = attr->info.info_len; in bpf_link_get_info_by_fd()
3781 return -EFAULT; in bpf_link_get_info_by_fd()
3783 info.type = link->type; in bpf_link_get_info_by_fd()
3784 info.id = link->id; in bpf_link_get_info_by_fd()
3785 info.prog_id = link->prog->aux->id; in bpf_link_get_info_by_fd()
3787 if (link->ops->fill_link_info) { in bpf_link_get_info_by_fd()
3788 err = link->ops->fill_link_info(link, &info); in bpf_link_get_info_by_fd()
3794 put_user(info_len, &uattr->info.info_len)) in bpf_link_get_info_by_fd()
3795 return -EFAULT; in bpf_link_get_info_by_fd()
3806 int ufd = attr->info.bpf_fd; in bpf_obj_get_info_by_fd()
3811 return -EINVAL; in bpf_obj_get_info_by_fd()
3815 return -EBADFD; in bpf_obj_get_info_by_fd()
3817 if (f.file->f_op == &bpf_prog_fops) in bpf_obj_get_info_by_fd()
3818 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, in bpf_obj_get_info_by_fd()
3820 else if (f.file->f_op == &bpf_map_fops) in bpf_obj_get_info_by_fd()
3821 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, in bpf_obj_get_info_by_fd()
3823 else if (f.file->f_op == &btf_fops) in bpf_obj_get_info_by_fd()
3824 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); in bpf_obj_get_info_by_fd()
3825 else if (f.file->f_op == &bpf_link_fops) in bpf_obj_get_info_by_fd()
3826 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, in bpf_obj_get_info_by_fd()
3829 err = -EINVAL; in bpf_obj_get_info_by_fd()
3840 return -EINVAL; in bpf_btf_load()
3843 return -EPERM; in bpf_btf_load()
3853 return -EINVAL; in bpf_btf_get_fd_by_id()
3856 return -EPERM; in bpf_btf_get_fd_by_id()
3858 return btf_get_fd_by_id(attr->btf_id); in bpf_btf_get_fd_by_id()
3867 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); in bpf_task_fd_query_copy()
3871 if (put_user(len, &uattr->task_fd_query.buf_len)) in bpf_task_fd_query_copy()
3872 return -EFAULT; in bpf_task_fd_query_copy()
3873 input_len = attr->task_fd_query.buf_len; in bpf_task_fd_query_copy()
3880 return -EFAULT; in bpf_task_fd_query_copy()
3884 return -EFAULT; in bpf_task_fd_query_copy()
3891 err = -ENOSPC; in bpf_task_fd_query_copy()
3892 if (copy_to_user(ubuf, buf, input_len - 1)) in bpf_task_fd_query_copy()
3893 return -EFAULT; in bpf_task_fd_query_copy()
3894 if (put_user(zero, ubuf + input_len - 1)) in bpf_task_fd_query_copy()
3895 return -EFAULT; in bpf_task_fd_query_copy()
3899 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || in bpf_task_fd_query_copy()
3900 put_user(fd_type, &uattr->task_fd_query.fd_type) || in bpf_task_fd_query_copy()
3901 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || in bpf_task_fd_query_copy()
3902 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) in bpf_task_fd_query_copy()
3903 return -EFAULT; in bpf_task_fd_query_copy()
3913 pid_t pid = attr->task_fd_query.pid; in bpf_task_fd_query()
3914 u32 fd = attr->task_fd_query.fd; in bpf_task_fd_query()
3922 return -EINVAL; in bpf_task_fd_query()
3925 return -EPERM; in bpf_task_fd_query()
3927 if (attr->task_fd_query.flags != 0) in bpf_task_fd_query()
3928 return -EINVAL; in bpf_task_fd_query()
3934 return -ENOENT; in bpf_task_fd_query()
3939 return -ENOENT; in bpf_task_fd_query()
3942 spin_lock(&files->file_lock); in bpf_task_fd_query()
3945 err = -EBADF; in bpf_task_fd_query()
3948 spin_unlock(&files->file_lock); in bpf_task_fd_query()
3954 if (file->f_op == &bpf_link_fops) { in bpf_task_fd_query()
3955 struct bpf_link *link = file->private_data; in bpf_task_fd_query()
3957 if (link->ops == &bpf_raw_tp_link_lops) { in bpf_task_fd_query()
3960 struct bpf_raw_event_map *btp = raw_tp->btp; in bpf_task_fd_query()
3963 raw_tp->link.prog->aux->id, in bpf_task_fd_query()
3965 btp->tp->name, 0, 0); in bpf_task_fd_query()
3989 err = -ENOTSUPP; in bpf_task_fd_query()
4001 err = -ENOTSUPP; \
4019 return -EINVAL; in bpf_map_do_batch()
4021 ufd = attr->batch.map_fd; in bpf_map_do_batch()
4029 err = -EPERM; in bpf_map_do_batch()
4033 err = -EPERM; in bpf_map_do_batch()
4038 BPF_DO_BATCH(map->ops->map_lookup_batch); in bpf_map_do_batch()
4040 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); in bpf_map_do_batch()
4042 BPF_DO_BATCH(map->ops->map_update_batch); in bpf_map_do_batch()
4044 BPF_DO_BATCH(map->ops->map_delete_batch); in bpf_map_do_batch()
4054 if (attr->link_create.attach_type != prog->expected_attach_type) in tracing_bpf_link_attach()
4055 return -EINVAL; in tracing_bpf_link_attach()
4057 if (prog->expected_attach_type == BPF_TRACE_ITER) in tracing_bpf_link_attach()
4059 else if (prog->type == BPF_PROG_TYPE_EXT) in tracing_bpf_link_attach()
4061 attr->link_create.target_fd, in tracing_bpf_link_attach()
4062 attr->link_create.target_btf_id); in tracing_bpf_link_attach()
4063 return -EINVAL; in tracing_bpf_link_attach()
4074 return -EINVAL; in link_create()
4076 prog = bpf_prog_get(attr->link_create.prog_fd); in link_create()
4081 attr->link_create.attach_type); in link_create()
4085 if (prog->type == BPF_PROG_TYPE_EXT) { in link_create()
4090 ptype = attach_type_to_prog_type(attr->link_create.attach_type); in link_create()
4091 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) { in link_create()
4092 ret = -EINVAL; in link_create()
4119 ret = -EINVAL; in link_create()
4138 return -EINVAL; in link_update()
4140 flags = attr->link_update.flags; in link_update()
4142 return -EINVAL; in link_update()
4144 link = bpf_link_get_from_fd(attr->link_update.link_fd); in link_update()
4148 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); in link_update()
4155 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); in link_update()
4161 } else if (attr->link_update.old_prog_fd) { in link_update()
4162 ret = -EINVAL; in link_update()
4166 if (link->ops->update_prog) in link_update()
4167 ret = link->ops->update_prog(link, new_prog, old_prog); in link_update()
4169 ret = -EINVAL; in link_update()
4189 return -EINVAL; in link_detach()
4191 link = bpf_link_get_from_fd(attr->link_detach.link_fd); in link_detach()
4195 if (link->ops->detach) in link_detach()
4196 ret = link->ops->detach(link); in link_detach()
4198 ret = -EOPNOTSUPP; in link_detach()
4206 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); in bpf_link_inc_not_zero()
4214 return ERR_PTR(-ENOENT); in bpf_link_by_id()
4220 if (link->id) in bpf_link_by_id()
4223 link = ERR_PTR(-EAGAIN); in bpf_link_by_id()
4225 link = ERR_PTR(-ENOENT); in bpf_link_by_id()
4236 u32 id = attr->link_id; in bpf_link_get_fd_by_id()
4240 return -EINVAL; in bpf_link_get_fd_by_id()
4243 return -EPERM; in bpf_link_get_fd_by_id()
4279 return -EBUSY; in bpf_enable_runtime_stats()
4282 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); in bpf_enable_runtime_stats()
4296 return -EINVAL; in bpf_enable_stats()
4299 return -EPERM; in bpf_enable_stats()
4301 switch (attr->enable_stats.type) { in bpf_enable_stats()
4307 return -EINVAL; in bpf_enable_stats()
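
Enabling run-time accounting from user-space (same wrapper assumption; needs CAP_SYS_ADMIN): stats stay enabled until the returned fd is closed, per the bpf-stats anon inode above:

	static int enable_runtime_stats(void)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.enable_stats.type = BPF_STATS_RUN_TIME;

		return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
	}
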
4318 return -EINVAL; in bpf_iter_create()
4320 if (attr->iter_create.flags) in bpf_iter_create()
4321 return -EINVAL; in bpf_iter_create()
4323 link = bpf_link_get_from_fd(attr->iter_create.link_fd); in bpf_iter_create()
4343 return -EINVAL; in bpf_prog_bind_map()
4345 if (attr->prog_bind_map.flags) in bpf_prog_bind_map()
4346 return -EINVAL; in bpf_prog_bind_map()
4348 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); in bpf_prog_bind_map()
4352 map = bpf_map_get(attr->prog_bind_map.map_fd); in bpf_prog_bind_map()
4358 mutex_lock(&prog->aux->used_maps_mutex); in bpf_prog_bind_map()
4360 used_maps_old = prog->aux->used_maps; in bpf_prog_bind_map()
4362 for (i = 0; i < prog->aux->used_map_cnt; i++) in bpf_prog_bind_map()
4368 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, in bpf_prog_bind_map()
4372 ret = -ENOMEM; in bpf_prog_bind_map()
4377 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); in bpf_prog_bind_map()
4378 used_maps_new[prog->aux->used_map_cnt] = map; in bpf_prog_bind_map()
4380 prog->aux->used_map_cnt++; in bpf_prog_bind_map()
4381 prog->aux->used_maps = used_maps_new; in bpf_prog_bind_map()
4386 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_bind_map()
4401 return -EPERM; in SYSCALL_DEFINE3()
4411 return -EFAULT; in SYSCALL_DEFINE3()
4532 err = -EINVAL; in SYSCALL_DEFINE3()