
Lines Matching +full:pre +full:- +full:fetchable (kernel/bpf/syscall.c)

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
5 #include <linux/bpf-cgroup.h>
34 #include <linux/bpf-netns.h>
42 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
43 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
44 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
45 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
46 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
76 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
81 * meant to be a future-proofing of bits.
90 return -E2BIG; in bpf_check_uarg_tail_zero()
97 actual_size - expected_size) == NULL; in bpf_check_uarg_tail_zero()
100 actual_size - expected_size); in bpf_check_uarg_tail_zero()
103 return res ? 0 : -E2BIG; in bpf_check_uarg_tail_zero()
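
The function above enforces the extensibility rule spelled out in the comment near line 76: userspace may pass a union bpf_attr larger than the kernel knows about, provided every trailing byte is zero. A minimal sketch of the matching userspace convention, using raw bpf(2) via syscall() (the wrapper name is illustrative):

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Illustrative wrapper: pass a fully zeroed attr so that
     * bpf_check_uarg_tail_zero() accepts any size difference. */
    static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
    {
            return syscall(__NR_bpf, cmd, attr, size);
    }

    static int create_array_map(void)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));  /* trailing bytes must be 0 */
            attr.map_type    = BPF_MAP_TYPE_ARRAY;
            attr.key_size    = sizeof(__u32);
            attr.value_size  = sizeof(__u64);
            attr.max_entries = 16;

            return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
    }
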
116 atomic64_inc(&map->writecnt); in bpf_map_write_active_inc()
121 atomic64_dec(&map->writecnt); in bpf_map_write_active_dec()
126 return atomic64_read(&map->writecnt) != 0; in bpf_map_write_active()
131 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_value_size()
132 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in bpf_map_value_size()
133 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in bpf_map_value_size()
134 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in bpf_map_value_size()
135 return round_up(map->value_size, 8) * num_possible_cpus(); in bpf_map_value_size()
139 return map->value_size; in bpf_map_value_size()
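
For the per-CPU map types handled above, a syscall-side lookup copies one slot per possible CPU, each rounded up to 8 bytes, so the userspace buffer must be sized the same way. A sketch using libbpf's libbpf_num_possible_cpus() (map_fd is assumed to be a BPF_MAP_TYPE_PERCPU_ARRAY of __u64 values, which are already 8-byte aligned):

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>
    #include <stdlib.h>

    /* Returns the number of CPUs on success and stores a malloc'ed
     * array of per-CPU values in *out; the caller frees it. */
    static int read_percpu_u64(int map_fd, __u32 key, __u64 **out)
    {
            int ncpus = libbpf_num_possible_cpus();
            __u64 *vals;

            if (ncpus < 0)
                    return ncpus;
            vals = calloc(ncpus, sizeof(*vals));
            if (!vals)
                    return -1;
            if (bpf_map_lookup_elem(map_fd, &key, vals)) {
                    free(vals);
                    return -1;
            }
            *out = vals;
            return ncpus;
    }
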
148 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || in maybe_wait_bpf_programs()
149 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) in maybe_wait_bpf_programs()
161 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || in bpf_map_update_value()
162 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_update_value()
163 return map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
164 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || in bpf_map_update_value()
165 map->map_type == BPF_MAP_TYPE_SOCKMAP) { in bpf_map_update_value()
173 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_update_value()
174 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_update_value()
176 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_update_value()
178 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_update_value()
186 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in bpf_map_update_value()
191 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_update_value()
195 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_update_value()
196 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_update_value()
197 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_update_value()
198 err = map->ops->map_push_elem(map, value, flags); in bpf_map_update_value()
201 err = map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
220 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_copy_value()
221 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_copy_value()
223 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_copy_value()
225 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_copy_value()
227 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in bpf_map_copy_value()
233 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_copy_value()
235 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_copy_value()
236 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_copy_value()
237 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_copy_value()
238 err = map->ops->map_peek_elem(map, value); in bpf_map_copy_value()
239 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_copy_value()
244 if (map->ops->map_lookup_elem_sys_only) in bpf_map_copy_value()
245 ptr = map->ops->map_lookup_elem_sys_only(map, key); in bpf_map_copy_value()
247 ptr = map->ops->map_lookup_elem(map, key); in bpf_map_copy_value()
251 err = -ENOENT; in bpf_map_copy_value()
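
Both dispatchers above special-case BPF_MAP_TYPE_QUEUE, _STACK and _BLOOM_FILTER: an update becomes map_push_elem() and a lookup becomes map_peek_elem(), with no key involved. From userspace the usual element calls are used with a NULL key; a sketch (queue_fd is assumed to be a BPF_MAP_TYPE_QUEUE of __u32 values):

    #include <bpf/bpf.h>

    static int queue_roundtrip(int queue_fd)
    {
            __u32 in = 42, peeked, popped;

            if (bpf_map_update_elem(queue_fd, NULL, &in, BPF_ANY))  /* push */
                    return -1;
            if (bpf_map_lookup_elem(queue_fd, NULL, &peeked))       /* peek */
                    return -1;
            if (bpf_map_lookup_and_delete_elem(queue_fd, NULL, &popped)) /* pop */
                    return -1;
            return peeked == popped ? 0 : -1;
    }
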
341 map->map_type = attr->map_type; in bpf_map_init_from_attr()
342 map->key_size = attr->key_size; in bpf_map_init_from_attr()
343 map->value_size = attr->value_size; in bpf_map_init_from_attr()
344 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
345 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); in bpf_map_init_from_attr()
346 map->numa_node = bpf_map_attr_numa_node(attr); in bpf_map_init_from_attr()
347 map->map_extra = attr->map_extra; in bpf_map_init_from_attr()
358 map->id = id; in bpf_map_alloc_id()
363 return -ENOSPC; in bpf_map_alloc_id()
373 * disappears - even if someone holds an fd to them they are unusable, in bpf_map_free_id()
377 if (!map->id) in bpf_map_free_id()
382 idr_remove(&map_idr, map->id); in bpf_map_free_id()
383 map->id = 0; in bpf_map_free_id()
393 * So we have to check map->objcg for being NULL each time it's in bpf_map_save_memcg()
397 map->objcg = get_obj_cgroup_from_current(); in bpf_map_save_memcg()
402 if (map->objcg) in bpf_map_release_memcg()
403 obj_cgroup_put(map->objcg); in bpf_map_release_memcg()
408 if (map->objcg) in bpf_map_get_memcg()
409 return get_mem_cgroup_from_objcg(map->objcg); in bpf_map_get_memcg()
487 if (f1->offset < f2->offset) in btf_field_cmp()
488 return -1; in btf_field_cmp()
489 else if (f1->offset > f2->offset) in btf_field_cmp()
499 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask)) in btf_record_find()
501 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp); in btf_record_find()
502 if (!field || !(field->type & field_mask)) in btf_record_find()
513 for (i = 0; i < rec->cnt; i++) { in btf_record_free()
514 switch (rec->fields[i].type) { in btf_record_free()
517 if (rec->fields[i].kptr.module) in btf_record_free()
518 module_put(rec->fields[i].kptr.module); in btf_record_free()
519 btf_put(rec->fields[i].kptr.btf); in btf_record_free()
540 btf_record_free(map->record); in bpf_map_free_record()
541 map->record = NULL; in bpf_map_free_record()
552 size = offsetof(struct btf_record, fields[rec->cnt]); in btf_record_dup()
555 return ERR_PTR(-ENOMEM); in btf_record_dup()
557 fields = rec->fields; in btf_record_dup()
558 new_rec->cnt = 0; in btf_record_dup()
559 for (i = 0; i < rec->cnt; i++) { in btf_record_dup()
565 ret = -ENXIO; in btf_record_dup()
579 ret = -EFAULT; in btf_record_dup()
583 new_rec->cnt++; in btf_record_dup()
600 if (rec_a->cnt != rec_b->cnt) in btf_record_equal()
602 size = offsetof(struct btf_record, fields[rec_a->cnt]); in btf_record_equal()
624 bpf_timer_cancel_and_free(obj + rec->timer_off); in bpf_obj_free_timer()
636 fields = rec->fields; in bpf_obj_free_fields()
637 for (i = 0; i < rec->cnt; i++) { in bpf_obj_free_fields()
640 void *field_ptr = obj + field->offset; in bpf_obj_free_fields()
657 if (!btf_is_kernel(field->kptr.btf)) { in bpf_obj_free_fields()
658 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf, in bpf_obj_free_fields()
659 field->kptr.btf_id); in bpf_obj_free_fields()
662 pointee_struct_meta->record : in bpf_obj_free_fields()
666 field->kptr.dtor(xchgd_field); in bpf_obj_free_fields()
670 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) in bpf_obj_free_fields()
672 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off); in bpf_obj_free_fields()
675 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) in bpf_obj_free_fields()
677 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off); in bpf_obj_free_fields()
694 struct btf_record *rec = map->record; in bpf_map_free_deferred()
695 struct btf *btf = map->btf; in bpf_map_free_deferred()
700 map->ops->map_free(map); in bpf_map_free_deferred()
705 * Note that the btf_record stashed in map->inner_map_meta->record was in bpf_map_free_deferred()
719 if (atomic64_dec_and_test(&map->usercnt)) { in bpf_map_put_uref()
720 if (map->ops->map_release_uref) in bpf_map_put_uref()
721 map->ops->map_release_uref(map); in bpf_map_put_uref()
727 INIT_WORK(&map->work, bpf_map_free_deferred); in bpf_map_free_in_work()
731 queue_work(system_unbound_wq, &map->work); in bpf_map_free_in_work()
748 * (underlying map implementation ops->map_free() might sleep)
752 if (atomic64_dec_and_test(&map->refcnt)) { in bpf_map_put()
756 if (READ_ONCE(map->free_after_mult_rcu_gp)) in bpf_map_put()
757 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); in bpf_map_put()
772 struct bpf_map *map = filp->private_data; in bpf_map_release()
774 if (map->ops->map_release) in bpf_map_release()
775 map->ops->map_release(map, filp); in bpf_map_release()
783 fmode_t mode = f.file->f_mode; in map_get_sys_perms()
788 if (READ_ONCE(map->frozen)) in map_get_sys_perms()
797 return map->ops->map_mem_usage(map); in bpf_map_memory_usage()
802 struct bpf_map *map = filp->private_data; in bpf_map_show_fdinfo()
806 spin_lock(&map->owner.lock); in bpf_map_show_fdinfo()
807 type = map->owner.type; in bpf_map_show_fdinfo()
808 jited = map->owner.jited; in bpf_map_show_fdinfo()
809 spin_unlock(&map->owner.lock); in bpf_map_show_fdinfo()
822 map->map_type, in bpf_map_show_fdinfo()
823 map->key_size, in bpf_map_show_fdinfo()
824 map->value_size, in bpf_map_show_fdinfo()
825 map->max_entries, in bpf_map_show_fdinfo()
826 map->map_flags, in bpf_map_show_fdinfo()
827 (unsigned long long)map->map_extra, in bpf_map_show_fdinfo()
829 map->id, in bpf_map_show_fdinfo()
830 READ_ONCE(map->frozen)); in bpf_map_show_fdinfo()
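
The format strings are elided in this view, but the argument list above gives the field order (memlock comes from the elided line 828). Roughly what `cat /proc/<pid>/fdinfo/<map-fd>` then prints, with illustrative values:

    map_type:       1
    key_size:       4
    value_size:     8
    max_entries:    16
    map_flags:      0x0
    map_extra:      0x0
    memlock:        4096
    map_id:         37
    frozen:         0
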
844 return -EINVAL; in bpf_dummy_read()
853 return -EINVAL; in bpf_dummy_write()
856 /* called for any extra memory-mapped regions (except initial) */
859 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_open()
861 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_open()
868 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_close()
870 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_close()
881 struct bpf_map *map = filp->private_data; in bpf_map_mmap()
884 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record)) in bpf_map_mmap()
885 return -ENOTSUPP; in bpf_map_mmap()
887 if (!(vma->vm_flags & VM_SHARED)) in bpf_map_mmap()
888 return -EINVAL; in bpf_map_mmap()
890 mutex_lock(&map->freeze_mutex); in bpf_map_mmap()
892 if (vma->vm_flags & VM_WRITE) { in bpf_map_mmap()
893 if (map->frozen) { in bpf_map_mmap()
894 err = -EPERM; in bpf_map_mmap()
897 /* map is meant to be read-only, so do not allow mapping as in bpf_map_mmap()
899 * reference and allows user-space to still modify it after in bpf_map_mmap()
902 if (map->map_flags & BPF_F_RDONLY_PROG) { in bpf_map_mmap()
903 err = -EACCES; in bpf_map_mmap()
909 vma->vm_ops = &bpf_map_default_vmops; in bpf_map_mmap()
910 vma->vm_private_data = map; in bpf_map_mmap()
912 if (!(vma->vm_flags & VM_WRITE)) in bpf_map_mmap()
913 /* disallow re-mapping with PROT_WRITE */ in bpf_map_mmap()
916 err = map->ops->map_mmap(map, vma); in bpf_map_mmap()
920 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap()
923 mutex_unlock(&map->freeze_mutex); in bpf_map_mmap()
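
Putting the checks above together: mmap needs a map whose ops provide map_mmap (e.g. an array created with BPF_F_MMAPABLE), no BTF-managed special fields, and MAP_SHARED; a writable mapping is refused once the map is frozen or flagged BPF_F_RDONLY_PROG. A read-only mapping sketch (function name and len are illustrative; len must cover the map's value area):

    #include <sys/mman.h>

    static void *map_mmap_ro(int map_fd, size_t len)
    {
            /* PROT_READ without PROT_WRITE keeps VM_WRITE clear, so this
             * also succeeds on frozen maps; MAP_SHARED is mandatory. */
            void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, map_fd, 0);

            return p == MAP_FAILED ? NULL : p;
    }
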
929 struct bpf_map *map = filp->private_data; in bpf_map_poll()
931 if (map->ops->map_poll) in bpf_map_poll()
932 return map->ops->map_poll(map, filp, pts); in bpf_map_poll()
956 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, in bpf_map_new_fd()
963 return -EINVAL; in bpf_get_file_flag()
973 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
974 sizeof(attr->CMD##_LAST_FIELD), 0, \
975 sizeof(*attr) - \
976 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
977 sizeof(attr->CMD##_LAST_FIELD)) != NULL
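
CHECK_ATTR(CMD) evaluates to true (meaning: reject with -EINVAL) when any byte of union bpf_attr past the command's last known field is non-zero; it is the per-command counterpart of bpf_check_uarg_tail_zero(). An illustrative expansion, assuming this kernel defines BPF_MAP_CREATE_LAST_FIELD as map_extra:

    /* CHECK_ATTR(BPF_MAP_CREATE) with BPF_MAP_CREATE_LAST_FIELD == map_extra:
     *
     *   memchr_inv((void *)&attr->map_extra + sizeof(attr->map_extra), 0,
     *              sizeof(*attr)
     *              - offsetof(union bpf_attr, map_extra)
     *              - sizeof(attr->map_extra)) != NULL
     *
     * i.e. "some byte after map_extra is non-zero" -> reject.
     */
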
992 return -EINVAL; in bpf_obj_name_cpy()
998 return -EINVAL; in bpf_obj_name_cpy()
1000 return src - orig_src; in bpf_obj_name_cpy()
1008 return -ENOTSUPP; in map_check_no_btf()
1021 if (!key_type || key_size != map->key_size) in map_check_btf()
1022 return -EINVAL; in map_check_btf()
1025 if (!map->ops->map_check_btf) in map_check_btf()
1026 return -EINVAL; in map_check_btf()
1030 if (!value_type || value_size != map->value_size) in map_check_btf()
1031 return -EINVAL; in map_check_btf()
1033 map->record = btf_parse_fields(btf, value_type, in map_check_btf()
1036 map->value_size); in map_check_btf()
1037 if (!IS_ERR_OR_NULL(map->record)) { in map_check_btf()
1041 ret = -EPERM; in map_check_btf()
1044 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) { in map_check_btf()
1045 ret = -EACCES; in map_check_btf()
1048 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) { in map_check_btf()
1049 switch (map->record->field_mask & (1 << i)) { in map_check_btf()
1053 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1054 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1055 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in map_check_btf()
1056 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1057 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1058 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && in map_check_btf()
1059 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { in map_check_btf()
1060 ret = -EOPNOTSUPP; in map_check_btf()
1065 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1066 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1067 map->map_type != BPF_MAP_TYPE_ARRAY) { in map_check_btf()
1068 ret = -EOPNOTSUPP; in map_check_btf()
1075 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1076 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in map_check_btf()
1077 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1078 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH && in map_check_btf()
1079 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1080 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && in map_check_btf()
1081 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1082 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1083 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && in map_check_btf()
1084 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { in map_check_btf()
1085 ret = -EOPNOTSUPP; in map_check_btf()
1091 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1092 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1093 map->map_type != BPF_MAP_TYPE_ARRAY) { in map_check_btf()
1094 ret = -EOPNOTSUPP; in map_check_btf()
1100 ret = -EOPNOTSUPP; in map_check_btf()
1106 ret = btf_check_and_fixup_fields(btf, map->record); in map_check_btf()
1110 if (map->ops->map_check_btf) { in map_check_btf()
1111 ret = map->ops->map_check_btf(map, btf, key_type, value_type); in map_check_btf()
1128 u32 map_type = attr->map_type; in map_create()
1135 return -EINVAL; in map_create()
1137 if (attr->btf_vmlinux_value_type_id) { in map_create()
1138 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || in map_create()
1139 attr->btf_key_type_id || attr->btf_value_type_id) in map_create()
1140 return -EINVAL; in map_create()
1141 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { in map_create()
1142 return -EINVAL; in map_create()
1145 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER && in map_create()
1146 attr->map_extra != 0) in map_create()
1147 return -EINVAL; in map_create()
1149 f_flags = bpf_get_file_flag(attr->map_flags); in map_create()
1156 return -EINVAL; in map_create()
1159 map_type = attr->map_type; in map_create()
1161 return -EINVAL; in map_create()
1165 return -EINVAL; in map_create()
1167 if (ops->map_alloc_check) { in map_create()
1168 err = ops->map_alloc_check(attr); in map_create()
1172 if (attr->map_ifindex) in map_create()
1174 if (!ops->map_mem_usage) in map_create()
1175 return -EINVAL; in map_create()
1184 return -EPERM; in map_create()
1218 return -EPERM; in map_create()
1226 return -EPERM; in map_create()
1230 return -EPERM; in map_create()
1233 map = ops->map_alloc(attr); in map_create()
1236 map->ops = ops; in map_create()
1237 map->map_type = map_type; in map_create()
1239 err = bpf_obj_name_cpy(map->name, attr->map_name, in map_create()
1240 sizeof(attr->map_name)); in map_create()
1244 atomic64_set(&map->refcnt, 1); in map_create()
1245 atomic64_set(&map->usercnt, 1); in map_create()
1246 mutex_init(&map->freeze_mutex); in map_create()
1247 spin_lock_init(&map->owner.lock); in map_create()
1249 if (attr->btf_key_type_id || attr->btf_value_type_id || in map_create()
1253 * counter part. Thus, attr->btf_fd has in map_create()
1256 attr->btf_vmlinux_value_type_id) { in map_create()
1259 btf = btf_get_by_fd(attr->btf_fd); in map_create()
1266 err = -EACCES; in map_create()
1269 map->btf = btf; in map_create()
1271 if (attr->btf_value_type_id) { in map_create()
1272 err = map_check_btf(map, btf, attr->btf_key_type_id, in map_create()
1273 attr->btf_value_type_id); in map_create()
1278 map->btf_key_type_id = attr->btf_key_type_id; in map_create()
1279 map->btf_value_type_id = attr->btf_value_type_id; in map_create()
1280 map->btf_vmlinux_value_type_id = in map_create()
1281 attr->btf_vmlinux_value_type_id; in map_create()
1300 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. in map_create()
1311 btf_put(map->btf); in map_create()
1312 map->ops->map_free(map); in map_create()
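
Userspace normally reaches map_create() through libbpf rather than raw bpf(2). A minimal sketch with bpf_map_create(), libbpf's wrapper around BPF_MAP_CREATE (the map name and sizes are illustrative):

    #include <bpf/bpf.h>

    static int create_hash(void)
    {
            LIBBPF_OPTS(bpf_map_create_opts, opts);  /* flags, BTF ids, ... */

            return bpf_map_create(BPF_MAP_TYPE_HASH, "demo_hash",
                                  sizeof(__u32), sizeof(__u64), 128, &opts);
    }
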
1322 return ERR_PTR(-EBADF); in __bpf_map_get()
1323 if (f.file->f_op != &bpf_map_fops) { in __bpf_map_get()
1325 return ERR_PTR(-EINVAL); in __bpf_map_get()
1328 return f.file->private_data; in __bpf_map_get()
1333 atomic64_inc(&map->refcnt); in bpf_map_inc()
1339 atomic64_inc(&map->refcnt); in bpf_map_inc_with_uref()
1340 atomic64_inc(&map->usercnt); in bpf_map_inc_with_uref()
1382 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); in __bpf_map_inc_not_zero()
1384 return ERR_PTR(-ENOENT); in __bpf_map_inc_not_zero()
1386 atomic64_inc(&map->usercnt); in __bpf_map_inc_not_zero()
1403 return -ENOTSUPP; in bpf_stackmap_copy()
1412 return ERR_PTR(-EINVAL); in __bpf_copy_key()
1423 return ERR_PTR(-EINVAL); in ___bpf_copy_key()
1433 void __user *ukey = u64_to_user_ptr(attr->key); in map_lookup_elem()
1434 void __user *uvalue = u64_to_user_ptr(attr->value); in map_lookup_elem()
1435 int ufd = attr->map_fd; in map_lookup_elem()
1443 return -EINVAL; in map_lookup_elem()
1445 if (attr->flags & ~BPF_F_LOCK) in map_lookup_elem()
1446 return -EINVAL; in map_lookup_elem()
1453 err = -EPERM; in map_lookup_elem()
1457 if ((attr->flags & BPF_F_LOCK) && in map_lookup_elem()
1458 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_lookup_elem()
1459 err = -EINVAL; in map_lookup_elem()
1463 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_elem()
1471 err = -ENOMEM; in map_lookup_elem()
1476 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in map_lookup_elem()
1478 err = -EFAULT; in map_lookup_elem()
1480 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1484 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1488 err = -EFAULT; in map_lookup_elem()
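
As the btf_record_has_field() check above shows, BPF_F_LOCK is only accepted when the map's value type embeds a struct bpf_spin_lock, which requires BTF at map creation. libbpf exposes the flag through bpf_map_lookup_elem_flags() (the __u32 key type is an assumption here):

    #include <bpf/bpf.h>

    /* Copy the value out under its embedded spin lock; the kernel takes
     * and releases the lock around the copy. */
    static int locked_lookup(int map_fd, __u32 key, void *value)
    {
            return bpf_map_lookup_elem_flags(map_fd, &key, value, BPF_F_LOCK);
    }
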
1508 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); in map_update_elem()
1509 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel); in map_update_elem()
1510 int ufd = attr->map_fd; in map_update_elem()
1518 return -EINVAL; in map_update_elem()
1526 err = -EPERM; in map_update_elem()
1530 if ((attr->flags & BPF_F_LOCK) && in map_update_elem()
1531 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_update_elem()
1532 err = -EINVAL; in map_update_elem()
1536 key = ___bpf_copy_key(ukey, map->key_size); in map_update_elem()
1549 err = bpf_map_update_value(map, f.file, key, value, attr->flags); in map_update_elem()
1564 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); in map_delete_elem()
1565 int ufd = attr->map_fd; in map_delete_elem()
1572 return -EINVAL; in map_delete_elem()
1580 err = -EPERM; in map_delete_elem()
1584 key = ___bpf_copy_key(ukey, map->key_size); in map_delete_elem()
1594 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in map_delete_elem()
1596 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1602 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1619 void __user *ukey = u64_to_user_ptr(attr->key); in map_get_next_key()
1620 void __user *unext_key = u64_to_user_ptr(attr->next_key); in map_get_next_key()
1621 int ufd = attr->map_fd; in map_get_next_key()
1628 return -EINVAL; in map_get_next_key()
1635 err = -EPERM; in map_get_next_key()
1640 key = __bpf_copy_key(ukey, map->key_size); in map_get_next_key()
1649 err = -ENOMEM; in map_get_next_key()
1650 next_key = kvmalloc(map->key_size, GFP_USER); in map_get_next_key()
1660 err = map->ops->map_get_next_key(map, key, next_key); in map_get_next_key()
1666 err = -EFAULT; in map_get_next_key()
1667 if (copy_to_user(unext_key, next_key, map->key_size) != 0) in map_get_next_key()
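
map_get_next_key() is the primitive behind map iteration: a NULL key (or one that no longer exists) yields the first key, and -ENOENT ends the walk. The conventional loop, assuming __u32 keys:

    #include <bpf/bpf.h>

    static int count_entries(int map_fd)
    {
            __u32 cur, next;
            __u32 *prev = NULL;  /* NULL -> start from the first key */
            int n = 0;

            while (bpf_map_get_next_key(map_fd, prev, &next) == 0) {
                    cur = next;
                    prev = &cur;
                    n++;
            }
            return n;  /* the kernel returned -ENOENT: no more keys */
    }
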
1685 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_delete_batch()
1690 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_delete_batch()
1691 return -EINVAL; in generic_map_delete_batch()
1693 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_delete_batch()
1694 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in generic_map_delete_batch()
1695 return -EINVAL; in generic_map_delete_batch()
1698 max_count = attr->batch.count; in generic_map_delete_batch()
1702 if (put_user(0, &uattr->batch.count)) in generic_map_delete_batch()
1703 return -EFAULT; in generic_map_delete_batch()
1705 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_delete_batch()
1707 return -ENOMEM; in generic_map_delete_batch()
1710 err = -EFAULT; in generic_map_delete_batch()
1711 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_delete_batch()
1712 map->key_size)) in generic_map_delete_batch()
1722 err = map->ops->map_delete_elem(map, key); in generic_map_delete_batch()
1729 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) in generic_map_delete_batch()
1730 err = -EFAULT; in generic_map_delete_batch()
1742 void __user *values = u64_to_user_ptr(attr->batch.values); in generic_map_update_batch()
1743 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_update_batch()
1748 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_update_batch()
1749 return -EINVAL; in generic_map_update_batch()
1751 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_update_batch()
1752 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in generic_map_update_batch()
1753 return -EINVAL; in generic_map_update_batch()
1758 max_count = attr->batch.count; in generic_map_update_batch()
1762 if (put_user(0, &uattr->batch.count)) in generic_map_update_batch()
1763 return -EFAULT; in generic_map_update_batch()
1765 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_update_batch()
1767 return -ENOMEM; in generic_map_update_batch()
1772 return -ENOMEM; in generic_map_update_batch()
1776 err = -EFAULT; in generic_map_update_batch()
1777 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_update_batch()
1778 map->key_size) || in generic_map_update_batch()
1783 attr->batch.elem_flags); in generic_map_update_batch()
1790 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) in generic_map_update_batch()
1791 err = -EFAULT; in generic_map_update_batch()
1804 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); in generic_map_lookup_batch()
1805 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); in generic_map_lookup_batch()
1806 void __user *values = u64_to_user_ptr(attr->batch.values); in generic_map_lookup_batch()
1807 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_lookup_batch()
1812 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_lookup_batch()
1813 return -EINVAL; in generic_map_lookup_batch()
1815 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_lookup_batch()
1816 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) in generic_map_lookup_batch()
1817 return -EINVAL; in generic_map_lookup_batch()
1821 max_count = attr->batch.count; in generic_map_lookup_batch()
1825 if (put_user(0, &uattr->batch.count)) in generic_map_lookup_batch()
1826 return -EFAULT; in generic_map_lookup_batch()
1828 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
1830 return -ENOMEM; in generic_map_lookup_batch()
1832 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
1835 return -ENOMEM; in generic_map_lookup_batch()
1838 err = -EFAULT; in generic_map_lookup_batch()
1840 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) in generic_map_lookup_batch()
1843 value = key + map->key_size; in generic_map_lookup_batch()
1849 err = map->ops->map_get_next_key(map, prev_key, key); in generic_map_lookup_batch()
1854 attr->batch.elem_flags); in generic_map_lookup_batch()
1856 if (err == -ENOENT) { in generic_map_lookup_batch()
1858 retry--; in generic_map_lookup_batch()
1861 err = -EINTR; in generic_map_lookup_batch()
1868 if (copy_to_user(keys + cp * map->key_size, key, in generic_map_lookup_batch()
1869 map->key_size)) { in generic_map_lookup_batch()
1870 err = -EFAULT; in generic_map_lookup_batch()
1874 err = -EFAULT; in generic_map_lookup_batch()
1887 if (err == -EFAULT) in generic_map_lookup_batch()
1890 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || in generic_map_lookup_batch()
1891 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) in generic_map_lookup_batch()
1892 err = -EFAULT; in generic_map_lookup_batch()
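
The in_batch/out_batch cookies above let userspace resume where the previous BPF_MAP_LOOKUP_BATCH call stopped. A draining loop via libbpf's bpf_map_lookup_batch() (the key/value types and the batch-cookie width are assumptions for this sketch; -ENOENT marks the end of the map):

    #include <bpf/bpf.h>
    #include <errno.h>

    static int dump_all(int map_fd)
    {
            __u32 in, out, keys[64];
            __u64 vals[64];
            void *in_p = NULL;  /* NULL: start from the beginning */
            int err;

            do {
                    __u32 count = 64;
                    LIBBPF_OPTS(bpf_map_batch_opts, opts);

                    err = bpf_map_lookup_batch(map_fd, in_p, &out, keys,
                                               vals, &count, &opts);
                    if (err && err != -ENOENT)
                            return err;
                    /* ... consume "count" key/value pairs here ... */
                    in = out;
                    in_p = &in;
            } while (!err);
            return 0;
    }
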
1904 void __user *ukey = u64_to_user_ptr(attr->key); in map_lookup_and_delete_elem()
1905 void __user *uvalue = u64_to_user_ptr(attr->value); in map_lookup_and_delete_elem()
1906 int ufd = attr->map_fd; in map_lookup_and_delete_elem()
1914 return -EINVAL; in map_lookup_and_delete_elem()
1916 if (attr->flags & ~BPF_F_LOCK) in map_lookup_and_delete_elem()
1917 return -EINVAL; in map_lookup_and_delete_elem()
1926 err = -EPERM; in map_lookup_and_delete_elem()
1930 if (attr->flags && in map_lookup_and_delete_elem()
1931 (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
1932 map->map_type == BPF_MAP_TYPE_STACK)) { in map_lookup_and_delete_elem()
1933 err = -EINVAL; in map_lookup_and_delete_elem()
1937 if ((attr->flags & BPF_F_LOCK) && in map_lookup_and_delete_elem()
1938 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_lookup_and_delete_elem()
1939 err = -EINVAL; in map_lookup_and_delete_elem()
1943 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_and_delete_elem()
1951 err = -ENOMEM; in map_lookup_and_delete_elem()
1956 err = -ENOTSUPP; in map_lookup_and_delete_elem()
1957 if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
1958 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_and_delete_elem()
1959 err = map->ops->map_pop_elem(map, value); in map_lookup_and_delete_elem()
1960 } else if (map->map_type == BPF_MAP_TYPE_HASH || in map_lookup_and_delete_elem()
1961 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_lookup_and_delete_elem()
1962 map->map_type == BPF_MAP_TYPE_LRU_HASH || in map_lookup_and_delete_elem()
1963 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in map_lookup_and_delete_elem()
1967 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); in map_lookup_and_delete_elem()
1977 err = -EFAULT; in map_lookup_and_delete_elem()
1997 int err = 0, ufd = attr->map_fd; in map_freeze()
2002 return -EINVAL; in map_freeze()
2009 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) { in map_freeze()
2011 return -ENOTSUPP; in map_freeze()
2016 return -EPERM; in map_freeze()
2019 mutex_lock(&map->freeze_mutex); in map_freeze()
2021 err = -EBUSY; in map_freeze()
2024 if (READ_ONCE(map->frozen)) { in map_freeze()
2025 err = -EBUSY; in map_freeze()
2029 WRITE_ONCE(map->frozen, true); in map_freeze()
2031 mutex_unlock(&map->freeze_mutex); in map_freeze()
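
Freezing flips map->frozen under freeze_mutex: afterwards the map is rejected for syscall-side writes and for new writable mmaps (see bpf_map_mmap() above), and the call fails with -EBUSY while writable mappings are still active. In libbpf this is one call:

    #include <bpf/bpf.h>

    /* After this succeeds, userspace can no longer update the map;
     * BPF programs still can unless BPF_F_RDONLY_PROG was also set. */
    static int seal_map(int map_fd)
    {
            return bpf_map_freeze(map_fd);  /* wraps BPF_MAP_FREEZE */
    }
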
2052 return -EINVAL; in find_prog_type()
2056 return -EINVAL; in find_prog_type()
2058 if (!bpf_prog_is_offloaded(prog->aux)) in find_prog_type()
2059 prog->aux->ops = ops; in find_prog_type()
2061 prog->aux->ops = &bpf_offload_prog_ops; in find_prog_type()
2062 prog->type = type; in find_prog_type()
2091 audit_log_format(ab, "prog-id=%u op=%s", in bpf_audit_prog()
2092 prog->aux->id, bpf_audit_str[op]); in bpf_audit_prog()
2104 prog->aux->id = id; in bpf_prog_alloc_id()
2110 return -ENOSPC; in bpf_prog_alloc_id()
2121 * disappears - even if someone grabs an fd to them they are unusable, in bpf_prog_free_id()
2124 if (!prog->aux->id) in bpf_prog_free_id()
2128 idr_remove(&prog_idr, prog->aux->id); in bpf_prog_free_id()
2129 prog->aux->id = 0; in bpf_prog_free_id()
2137 kvfree(aux->func_info); in __bpf_prog_put_rcu()
2138 kfree(aux->func_info_aux); in __bpf_prog_put_rcu()
2139 free_uid(aux->user); in __bpf_prog_put_rcu()
2141 bpf_prog_free(aux->prog); in __bpf_prog_put_rcu()
2147 btf_put(prog->aux->btf); in __bpf_prog_put_noref()
2148 module_put(prog->aux->mod); in __bpf_prog_put_noref()
2149 kvfree(prog->aux->jited_linfo); in __bpf_prog_put_noref()
2150 kvfree(prog->aux->linfo); in __bpf_prog_put_noref()
2151 kfree(prog->aux->kfunc_tab); in __bpf_prog_put_noref()
2152 if (prog->aux->attach_btf) in __bpf_prog_put_noref()
2153 btf_put(prog->aux->attach_btf); in __bpf_prog_put_noref()
2156 if (prog->aux->sleepable) in __bpf_prog_put_noref()
2157 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); in __bpf_prog_put_noref()
2159 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); in __bpf_prog_put_noref()
2161 __bpf_prog_put_rcu(&prog->aux->rcu); in __bpf_prog_put_noref()
2171 prog = aux->prog; in bpf_prog_put_deferred()
2180 struct bpf_prog_aux *aux = prog->aux; in __bpf_prog_put()
2182 if (atomic64_dec_and_test(&aux->refcnt)) { in __bpf_prog_put()
2184 INIT_WORK(&aux->work, bpf_prog_put_deferred); in __bpf_prog_put()
2185 schedule_work(&aux->work); in __bpf_prog_put()
2187 bpf_prog_put_deferred(&aux->work); in __bpf_prog_put()
2200 struct bpf_prog *prog = filp->private_data; in bpf_prog_release()
2217 stats = this_cpu_ptr(prog->stats); in bpf_prog_inc_misses_counter()
2218 flags = u64_stats_update_begin_irqsave(&stats->syncp); in bpf_prog_inc_misses_counter()
2219 u64_stats_inc(&stats->misses); in bpf_prog_inc_misses_counter()
2220 u64_stats_update_end_irqrestore(&stats->syncp, flags); in bpf_prog_inc_misses_counter()
2234 st = per_cpu_ptr(prog->stats, cpu); in bpf_prog_get_stats()
2236 start = u64_stats_fetch_begin(&st->syncp); in bpf_prog_get_stats()
2237 tnsecs = u64_stats_read(&st->nsecs); in bpf_prog_get_stats()
2238 tcnt = u64_stats_read(&st->cnt); in bpf_prog_get_stats()
2239 tmisses = u64_stats_read(&st->misses); in bpf_prog_get_stats()
2240 } while (u64_stats_fetch_retry(&st->syncp, start)); in bpf_prog_get_stats()
2245 stats->nsecs = nsecs; in bpf_prog_get_stats()
2246 stats->cnt = cnt; in bpf_prog_get_stats()
2247 stats->misses = misses; in bpf_prog_get_stats()
2253 const struct bpf_prog *prog = filp->private_data; in bpf_prog_show_fdinfo()
2254 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; in bpf_prog_show_fdinfo()
2258 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); in bpf_prog_show_fdinfo()
2269 prog->type, in bpf_prog_show_fdinfo()
2270 prog->jited, in bpf_prog_show_fdinfo()
2272 prog->pages * 1ULL << PAGE_SHIFT, in bpf_prog_show_fdinfo()
2273 prog->aux->id, in bpf_prog_show_fdinfo()
2277 prog->aux->verified_insns); in bpf_prog_show_fdinfo()
2298 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, in bpf_prog_new_fd()
2305 return ERR_PTR(-EBADF); in ____bpf_prog_get()
2306 if (f.file->f_op != &bpf_prog_fops) { in ____bpf_prog_get()
2308 return ERR_PTR(-EINVAL); in ____bpf_prog_get()
2311 return f.file->private_data; in ____bpf_prog_get()
2316 atomic64_add(i, &prog->aux->refcnt); in bpf_prog_add()
2327 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); in bpf_prog_sub()
2333 atomic64_inc(&prog->aux->refcnt); in bpf_prog_inc()
2342 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); in bpf_prog_inc_not_zero()
2345 return ERR_PTR(-ENOENT); in bpf_prog_inc_not_zero()
2358 if (prog->type != *attach_type) in bpf_prog_get_ok()
2360 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) in bpf_prog_get_ok()
2376 prog = ERR_PTR(-EINVAL); in __bpf_prog_get()
2412 switch (attr->prog_type) { in bpf_prog_load_fixup_attach_type()
2415 * exist so checking for non-zero is the way to go here. in bpf_prog_load_fixup_attach_type()
2417 if (!attr->expected_attach_type) in bpf_prog_load_fixup_attach_type()
2418 attr->expected_attach_type = in bpf_prog_load_fixup_attach_type()
2422 if (!attr->expected_attach_type) in bpf_prog_load_fixup_attach_type()
2423 attr->expected_attach_type = in bpf_prog_load_fixup_attach_type()
2437 return -EINVAL; in bpf_prog_load_check_attach()
2440 return -EINVAL; in bpf_prog_load_check_attach()
2449 return -EINVAL; in bpf_prog_load_check_attach()
2454 return -EINVAL; in bpf_prog_load_check_attach()
2458 return -EINVAL; in bpf_prog_load_check_attach()
2469 return -EINVAL; in bpf_prog_load_check_attach()
2487 return -EINVAL; in bpf_prog_load_check_attach()
2495 return -EINVAL; in bpf_prog_load_check_attach()
2503 return -EINVAL; in bpf_prog_load_check_attach()
2508 return -EINVAL; in bpf_prog_load_check_attach()
2515 return -EINVAL; in bpf_prog_load_check_attach()
2520 return -EINVAL; in bpf_prog_load_check_attach()
2524 return -EINVAL; in bpf_prog_load_check_attach()
2585 enum bpf_prog_type type = attr->prog_type; in bpf_prog_load()
2592 return -EINVAL; in bpf_prog_load()
2594 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | in bpf_prog_load()
2601 return -EINVAL; in bpf_prog_load()
2604 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && in bpf_prog_load()
2606 return -EPERM; in bpf_prog_load()
2616 return -EPERM; in bpf_prog_load()
2618 if (attr->insn_cnt == 0 || in bpf_prog_load()
2619 attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) in bpf_prog_load()
2620 return -E2BIG; in bpf_prog_load()
2624 return -EPERM; in bpf_prog_load()
2627 return -EPERM; in bpf_prog_load()
2629 return -EPERM; in bpf_prog_load()
2634 if (attr->attach_prog_fd) { in bpf_prog_load()
2635 dst_prog = bpf_prog_get(attr->attach_prog_fd); in bpf_prog_load()
2638 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); in bpf_prog_load()
2640 return -EINVAL; in bpf_prog_load()
2646 return -ENOTSUPP; in bpf_prog_load()
2649 } else if (attr->attach_btf_id) { in bpf_prog_load()
2655 return -EINVAL; in bpf_prog_load()
2660 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, in bpf_prog_load()
2661 attach_btf, attr->attach_btf_id, in bpf_prog_load()
2667 return -EINVAL; in bpf_prog_load()
2671 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); in bpf_prog_load()
2677 return -ENOMEM; in bpf_prog_load()
2680 prog->expected_attach_type = attr->expected_attach_type; in bpf_prog_load()
2681 prog->aux->attach_btf = attach_btf; in bpf_prog_load()
2682 prog->aux->attach_btf_id = attr->attach_btf_id; in bpf_prog_load()
2683 prog->aux->dst_prog = dst_prog; in bpf_prog_load()
2684 prog->aux->dev_bound = !!attr->prog_ifindex; in bpf_prog_load()
2685 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; in bpf_prog_load()
2686 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; in bpf_prog_load()
2688 err = security_bpf_prog_alloc(prog->aux); in bpf_prog_load()
2692 prog->aux->user = get_current_user(); in bpf_prog_load()
2693 prog->len = attr->insn_cnt; in bpf_prog_load()
2695 err = -EFAULT; in bpf_prog_load()
2696 if (copy_from_bpfptr(prog->insns, in bpf_prog_load()
2697 make_bpfptr(attr->insns, uattr.is_kernel), in bpf_prog_load()
2702 make_bpfptr(attr->license, uattr.is_kernel), in bpf_prog_load()
2703 sizeof(license) - 1) < 0) in bpf_prog_load()
2705 license[sizeof(license) - 1] = 0; in bpf_prog_load()
2707 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in bpf_prog_load()
2708 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0; in bpf_prog_load()
2710 prog->orig_prog = NULL; in bpf_prog_load()
2711 prog->jited = 0; in bpf_prog_load()
2713 atomic64_set(&prog->aux->refcnt, 1); in bpf_prog_load()
2715 if (bpf_prog_is_dev_bound(prog->aux)) { in bpf_prog_load()
2722 bpf_prog_is_dev_bound(dst_prog->aux)) { in bpf_prog_load()
2733 prog->aux->load_time = ktime_get_boottime_ns(); in bpf_prog_load()
2734 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, in bpf_prog_load()
2735 sizeof(attr->prog_name)); in bpf_prog_load()
2780 __bpf_prog_put_noref(prog, prog->aux->func_cnt); in bpf_prog_load()
2783 free_uid(prog->aux->user); in bpf_prog_load()
2784 security_bpf_prog_free(prog->aux); in bpf_prog_load()
2786 if (prog->aux->attach_btf) in bpf_prog_load()
2787 btf_put(prog->aux->attach_btf); in bpf_prog_load()
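
The userspace mirror of bpf_prog_load() here is libbpf's function of the same name. The smallest valid program, "r0 = 0; exit", loaded under an illustrative name and program type:

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int load_trivial_prog(void)
    {
            const struct bpf_insn insns[] = {
                    /* r0 = 0 */
                    { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
                    /* exit */
                    { .code = BPF_JMP | BPF_EXIT },
            };

            return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "trivial",
                                 "GPL", insns,
                                 sizeof(insns) / sizeof(insns[0]), NULL);
    }
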
2798 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) in bpf_obj_pin()
2799 return -EINVAL; in bpf_obj_pin()
2802 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) in bpf_obj_pin()
2803 return -EINVAL; in bpf_obj_pin()
2805 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; in bpf_obj_pin()
2806 return bpf_obj_pin_user(attr->bpf_fd, path_fd, in bpf_obj_pin()
2807 u64_to_user_ptr(attr->pathname)); in bpf_obj_pin()
2814 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || in bpf_obj_get()
2815 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) in bpf_obj_get()
2816 return -EINVAL; in bpf_obj_get()
2819 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) in bpf_obj_get()
2820 return -EINVAL; in bpf_obj_get()
2822 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; in bpf_obj_get()
2823 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), in bpf_obj_get()
2824 attr->file_flags); in bpf_obj_get()
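
BPF_OBJ_PIN and BPF_OBJ_GET give maps, programs and links a name in bpffs so they can outlive the creating process. A round trip (the pin path is illustrative and must live on a mounted bpffs):

    #include <bpf/bpf.h>

    static int pin_and_reopen(int map_fd)
    {
            if (bpf_obj_pin(map_fd, "/sys/fs/bpf/demo_map"))
                    return -1;
            /* later, possibly from another process: */
            return bpf_obj_get("/sys/fs/bpf/demo_map");  /* new fd, new ref */
    }
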
2830 atomic64_set(&link->refcnt, 1); in bpf_link_init()
2831 link->type = type; in bpf_link_init()
2832 link->id = 0; in bpf_link_init()
2833 link->ops = ops; in bpf_link_init()
2834 link->prog = prog; in bpf_link_init()
2858 primer->link->prog = NULL; in bpf_link_cleanup()
2859 bpf_link_free_id(primer->id); in bpf_link_cleanup()
2860 fput(primer->file); in bpf_link_cleanup()
2861 put_unused_fd(primer->fd); in bpf_link_cleanup()
2866 atomic64_inc(&link->refcnt); in bpf_link_inc()
2872 bpf_link_free_id(link->id); in bpf_link_free()
2873 if (link->prog) { in bpf_link_free()
2875 link->ops->release(link); in bpf_link_free()
2876 bpf_prog_put(link->prog); in bpf_link_free()
2879 link->ops->dealloc(link); in bpf_link_free()
2894 if (!atomic64_dec_and_test(&link->refcnt)) in bpf_link_put()
2897 INIT_WORK(&link->work, bpf_link_put_deferred); in bpf_link_put()
2898 schedule_work(&link->work); in bpf_link_put()
2904 if (!atomic64_dec_and_test(&link->refcnt)) in bpf_link_put_direct()
2911 struct bpf_link *link = filp->private_data; in bpf_link_release()
2931 const struct bpf_link *link = filp->private_data; in bpf_link_show_fdinfo()
2932 const struct bpf_prog *prog = link->prog; in bpf_link_show_fdinfo()
2933 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; in bpf_link_show_fdinfo()
2938 bpf_link_type_strs[link->type], in bpf_link_show_fdinfo()
2939 link->id); in bpf_link_show_fdinfo()
2941 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); in bpf_link_show_fdinfo()
2946 prog->aux->id); in bpf_link_show_fdinfo()
2948 if (link->ops->show_fdinfo) in bpf_link_show_fdinfo()
2949 link->ops->show_fdinfo(link, m); in bpf_link_show_fdinfo()
2975 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2978 * user-space, if bpf_link is successfully attached. If not, bpf_link and
2979 * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the
3011 primer->link = link; in bpf_link_prime()
3012 primer->file = file; in bpf_link_prime()
3013 primer->fd = fd; in bpf_link_prime()
3014 primer->id = id; in bpf_link_prime()
3020 /* make bpf_link fetchable by ID */ in bpf_link_settle()
3022 primer->link->id = primer->id; in bpf_link_settle()
3024 /* make bpf_link fetchable by FD */ in bpf_link_settle()
3025 fd_install(primer->fd, primer->file); in bpf_link_settle()
3027 return primer->fd; in bpf_link_settle()
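
The prime/settle split exists because fd_install() makes the file irrevocably visible to userspace: attach paths therefore prime first, perform the actual attachment, and only then settle, or call bpf_link_cleanup() on failure, as bpf_raw_tp_link_attach() further down does. A condensed kernel-side sketch of the pattern (my_link, my_link_lops and my_attach() are placeholders; prog is the bpf_prog being attached):

    struct bpf_link_primer link_primer;
    struct my_link *link;
    int err;

    link = kzalloc(sizeof(*link), GFP_USER);
    if (!link)
            return -ENOMEM;
    bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
                  &my_link_lops, prog);

    err = bpf_link_prime(&link->link, &link_primer);  /* file + fd + id */
    if (err) {
            kfree(link);
            return err;
    }
    err = my_attach(link);                   /* the actual attachment */
    if (err) {
            bpf_link_cleanup(&link_primer);  /* frees link via fput() */
            return err;
    }
    return bpf_link_settle(&link_primer);    /* expose fd to userspace */
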
3032 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); in bpf_link_new_fd()
3041 return ERR_PTR(-EBADF); in bpf_link_get_from_fd()
3042 if (f.file->f_op != &bpf_link_fops) { in bpf_link_get_from_fd()
3044 return ERR_PTR(-EINVAL); in bpf_link_get_from_fd()
3047 link = f.file->private_data; in bpf_link_get_from_fd()
3060 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, in bpf_tracing_link_release()
3061 tr_link->trampoline)); in bpf_tracing_link_release()
3063 bpf_trampoline_put(tr_link->trampoline); in bpf_tracing_link_release()
3066 if (tr_link->tgt_prog) in bpf_tracing_link_release()
3067 bpf_prog_put(tr_link->tgt_prog); in bpf_tracing_link_release()
3085 bpf_trampoline_unpack_key(tr_link->trampoline->key, in bpf_tracing_link_show_fdinfo()
3091 tr_link->attach_type, in bpf_tracing_link_show_fdinfo()
3102 info->tracing.attach_type = tr_link->attach_type; in bpf_tracing_link_fill_link_info()
3103 bpf_trampoline_unpack_key(tr_link->trampoline->key, in bpf_tracing_link_fill_link_info()
3104 &info->tracing.target_obj_id, in bpf_tracing_link_fill_link_info()
3105 &info->tracing.target_btf_id); in bpf_tracing_link_fill_link_info()
3129 switch (prog->type) { in bpf_tracing_prog_attach()
3131 if (prog->expected_attach_type != BPF_TRACE_FENTRY && in bpf_tracing_prog_attach()
3132 prog->expected_attach_type != BPF_TRACE_FEXIT && in bpf_tracing_prog_attach()
3133 prog->expected_attach_type != BPF_MODIFY_RETURN) { in bpf_tracing_prog_attach()
3134 err = -EINVAL; in bpf_tracing_prog_attach()
3139 if (prog->expected_attach_type != 0) { in bpf_tracing_prog_attach()
3140 err = -EINVAL; in bpf_tracing_prog_attach()
3145 if (prog->expected_attach_type != BPF_LSM_MAC) { in bpf_tracing_prog_attach()
3146 err = -EINVAL; in bpf_tracing_prog_attach()
3151 err = -EINVAL; in bpf_tracing_prog_attach()
3156 err = -EINVAL; in bpf_tracing_prog_attach()
3162 if (prog->type != BPF_PROG_TYPE_EXT) { in bpf_tracing_prog_attach()
3163 err = -EINVAL; in bpf_tracing_prog_attach()
3179 err = -ENOMEM; in bpf_tracing_prog_attach()
3182 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, in bpf_tracing_prog_attach()
3184 link->attach_type = prog->expected_attach_type; in bpf_tracing_prog_attach()
3185 link->link.cookie = bpf_cookie; in bpf_tracing_prog_attach()
3187 mutex_lock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3191 * - if prog->aux->dst_trampoline is set, the program was just loaded in bpf_tracing_prog_attach()
3193 * in prog->aux in bpf_tracing_prog_attach()
3195 * - if prog->aux->dst_trampoline is NULL, the program has already been in bpf_tracing_prog_attach()
3198 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + in bpf_tracing_prog_attach()
3201 * - if tgt_prog == NULL when this function was called using the old in bpf_tracing_prog_attach()
3202 * raw_tracepoint_open API, and we need a target from prog->aux in bpf_tracing_prog_attach()
3204 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program in bpf_tracing_prog_attach()
3205 * was detached and is going for re-attachment. in bpf_tracing_prog_attach()
3207 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf in bpf_tracing_prog_attach()
3211 if (!prog->aux->dst_trampoline && !tgt_prog) { in bpf_tracing_prog_attach()
3213 * Allow re-attach for TRACING and LSM programs. If it's in bpf_tracing_prog_attach()
3216 * re-attach in separate code path. in bpf_tracing_prog_attach()
3218 if (prog->type != BPF_PROG_TYPE_TRACING && in bpf_tracing_prog_attach()
3219 prog->type != BPF_PROG_TYPE_LSM) { in bpf_tracing_prog_attach()
3220 err = -EINVAL; in bpf_tracing_prog_attach()
3223 /* We can allow re-attach only if we have valid attach_btf. */ in bpf_tracing_prog_attach()
3224 if (!prog->aux->attach_btf) { in bpf_tracing_prog_attach()
3225 err = -EINVAL; in bpf_tracing_prog_attach()
3228 btf_id = prog->aux->attach_btf_id; in bpf_tracing_prog_attach()
3229 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); in bpf_tracing_prog_attach()
3232 if (!prog->aux->dst_trampoline || in bpf_tracing_prog_attach()
3233 (key && key != prog->aux->dst_trampoline->key)) { in bpf_tracing_prog_attach()
3246 module_put(prog->aux->mod); in bpf_tracing_prog_attach()
3247 prog->aux->mod = tgt_info.tgt_mod; in bpf_tracing_prog_attach()
3252 err = -ENOMEM; in bpf_tracing_prog_attach()
3261 * prog->aux are cleared below. in bpf_tracing_prog_attach()
3263 tr = prog->aux->dst_trampoline; in bpf_tracing_prog_attach()
3264 tgt_prog = prog->aux->dst_prog; in bpf_tracing_prog_attach()
3267 err = bpf_link_prime(&link->link.link, &link_primer); in bpf_tracing_prog_attach()
3271 err = bpf_trampoline_link_prog(&link->link, tr); in bpf_tracing_prog_attach()
3278 link->tgt_prog = tgt_prog; in bpf_tracing_prog_attach()
3279 link->trampoline = tr; in bpf_tracing_prog_attach()
3281 /* Always clear the trampoline and target prog from prog->aux to make in bpf_tracing_prog_attach()
3283 * program is (re-)attached to another target. in bpf_tracing_prog_attach()
3285 if (prog->aux->dst_prog && in bpf_tracing_prog_attach()
3286 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) in bpf_tracing_prog_attach()
3288 bpf_prog_put(prog->aux->dst_prog); in bpf_tracing_prog_attach()
3289 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) in bpf_tracing_prog_attach()
3291 bpf_trampoline_put(prog->aux->dst_trampoline); in bpf_tracing_prog_attach()
3293 prog->aux->dst_prog = NULL; in bpf_tracing_prog_attach()
3294 prog->aux->dst_trampoline = NULL; in bpf_tracing_prog_attach()
3295 mutex_unlock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3299 if (tr && tr != prog->aux->dst_trampoline) in bpf_tracing_prog_attach()
3301 mutex_unlock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3319 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); in bpf_raw_tp_link_release()
3320 bpf_put_raw_tracepoint(raw_tp->btp); in bpf_raw_tp_link_release()
3339 raw_tp_link->btp->tp->name); in bpf_raw_tp_link_show_fdinfo()
3347 return -EFAULT; in bpf_copy_to_user()
3351 if (copy_to_user(ubuf, buf, ulen - 1)) in bpf_copy_to_user()
3352 return -EFAULT; in bpf_copy_to_user()
3353 if (put_user(zero, ubuf + ulen - 1)) in bpf_copy_to_user()
3354 return -EFAULT; in bpf_copy_to_user()
3355 return -ENOSPC; in bpf_copy_to_user()
3366 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); in bpf_raw_tp_link_fill_link_info()
3367 const char *tp_name = raw_tp_link->btp->tp->name; in bpf_raw_tp_link_fill_link_info()
3368 u32 ulen = info->raw_tracepoint.tp_name_len; in bpf_raw_tp_link_fill_link_info()
3372 return -EINVAL; in bpf_raw_tp_link_fill_link_info()
3374 info->raw_tracepoint.tp_name_len = tp_len + 1; in bpf_raw_tp_link_fill_link_info()
3398 struct perf_event *event = perf_link->perf_file->private_data; in bpf_perf_link_release()
3401 fput(perf_link->perf_file); in bpf_perf_link_release()
3422 return -EINVAL; in bpf_perf_link_fill_common()
3439 return -EFAULT; in bpf_perf_link_fill_common()
3453 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); in bpf_perf_link_fill_kprobe()
3454 ulen = info->perf_event.kprobe.name_len; in bpf_perf_link_fill_kprobe()
3460 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; in bpf_perf_link_fill_kprobe()
3462 info->perf_event.type = BPF_PERF_EVENT_KPROBE; in bpf_perf_link_fill_kprobe()
3464 info->perf_event.kprobe.offset = offset; in bpf_perf_link_fill_kprobe()
3467 info->perf_event.kprobe.addr = addr; in bpf_perf_link_fill_kprobe()
3481 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); in bpf_perf_link_fill_uprobe()
3482 ulen = info->perf_event.uprobe.name_len; in bpf_perf_link_fill_uprobe()
3489 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; in bpf_perf_link_fill_uprobe()
3491 info->perf_event.type = BPF_PERF_EVENT_UPROBE; in bpf_perf_link_fill_uprobe()
3492 info->perf_event.uprobe.offset = offset; in bpf_perf_link_fill_uprobe()
3501 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) in bpf_perf_link_fill_probe()
3505 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) in bpf_perf_link_fill_probe()
3508 return -EOPNOTSUPP; in bpf_perf_link_fill_probe()
3517 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); in bpf_perf_link_fill_tracepoint()
3518 ulen = info->perf_event.tracepoint.name_len; in bpf_perf_link_fill_tracepoint()
3519 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; in bpf_perf_link_fill_tracepoint()
3526 info->perf_event.event.type = event->attr.type; in bpf_perf_link_fill_perf_event()
3527 info->perf_event.event.config = event->attr.config; in bpf_perf_link_fill_perf_event()
3528 info->perf_event.type = BPF_PERF_EVENT_EVENT; in bpf_perf_link_fill_perf_event()
3539 event = perf_get_event(perf_link->perf_file); in bpf_perf_link_fill_link_info()
3543 switch (event->prog->type) { in bpf_perf_link_fill_link_info()
3551 return -EOPNOTSUPP; in bpf_perf_link_fill_link_info()
3569 if (attr->link_create.flags) in bpf_perf_link_attach()
3570 return -EINVAL; in bpf_perf_link_attach()
3572 perf_file = perf_event_get(attr->link_create.target_fd); in bpf_perf_link_attach()
3578 err = -ENOMEM; in bpf_perf_link_attach()
3581 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); in bpf_perf_link_attach()
3582 link->perf_file = perf_file; in bpf_perf_link_attach()
3584 err = bpf_link_prime(&link->link, &link_primer); in bpf_perf_link_attach()
3590 event = perf_file->private_data; in bpf_perf_link_attach()
3591 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); in bpf_perf_link_attach()
3608 return -EOPNOTSUPP; in bpf_perf_link_attach()
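
bpf_perf_link_attach() backs BPF_LINK_CREATE with BPF_PERF_EVENT as the attach type: the target fd is a perf event (kprobe, uprobe, tracepoint or hardware/software event) and an optional cookie rides along. Via libbpf:

    #include <bpf/bpf.h>

    /* perf_fd comes from perf_event_open(); the cookie is readable later
     * from the program through bpf_get_attach_cookie(). */
    static int attach_perf_link(int prog_fd, int perf_fd, __u64 cookie)
    {
            LIBBPF_OPTS(bpf_link_create_opts, opts,
                        .perf_event.bpf_cookie = cookie);

            return bpf_link_create(prog_fd, perf_fd, BPF_PERF_EVENT, &opts);
    }
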
3622 switch (prog->type) { in bpf_raw_tp_link_attach()
3630 return -EINVAL; in bpf_raw_tp_link_attach()
3631 if (prog->type == BPF_PROG_TYPE_TRACING && in bpf_raw_tp_link_attach()
3632 prog->expected_attach_type == BPF_TRACE_RAW_TP) { in bpf_raw_tp_link_attach()
3633 tp_name = prog->aux->attach_func_name; in bpf_raw_tp_link_attach()
3639 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) in bpf_raw_tp_link_attach()
3640 return -EFAULT; in bpf_raw_tp_link_attach()
3641 buf[sizeof(buf) - 1] = 0; in bpf_raw_tp_link_attach()
3645 return -EINVAL; in bpf_raw_tp_link_attach()
3650 return -ENOENT; in bpf_raw_tp_link_attach()
3654 err = -ENOMEM; in bpf_raw_tp_link_attach()
3657 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, in bpf_raw_tp_link_attach()
3659 link->btp = btp; in bpf_raw_tp_link_attach()
3661 err = bpf_link_prime(&link->link, &link_primer); in bpf_raw_tp_link_attach()
3667 err = bpf_probe_register(link->btp, prog); in bpf_raw_tp_link_attach()
3688 return -EINVAL; in bpf_raw_tracepoint_open()
3690 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); in bpf_raw_tracepoint_open()
3694 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); in bpf_raw_tracepoint_open()
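
The legacy entry point above takes the tracepoint name directly instead of going through BPF_LINK_CREATE. With libbpf (the program must be of a raw-tracepoint-compatible type; the tracepoint name is illustrative):

    #include <bpf/bpf.h>

    static int attach_raw_tp(int prog_fd)
    {
            /* returns a link fd; closing it detaches the program */
            return bpf_raw_tracepoint_open("sched_switch", prog_fd);
    }
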
3771 switch (prog->type) { in bpf_prog_attach_check_attach_type()
3776 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; in bpf_prog_attach_check_attach_type()
3779 /* cg-skb progs can be loaded by unpriv user. in bpf_prog_attach_check_attach_type()
3782 return -EPERM; in bpf_prog_attach_check_attach_type()
3783 return prog->enforce_expected_attach_type && in bpf_prog_attach_check_attach_type()
3784 prog->expected_attach_type != attach_type ? in bpf_prog_attach_check_attach_type()
3785 -EINVAL : 0; in bpf_prog_attach_check_attach_type()
3790 return -EINVAL; in bpf_prog_attach_check_attach_type()
3795 return -EINVAL; in bpf_prog_attach_check_attach_type()
3798 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && in bpf_prog_attach_check_attach_type()
3800 return -EINVAL; in bpf_prog_attach_check_attach_type()
3801 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && in bpf_prog_attach_check_attach_type()
3803 return -EINVAL; in bpf_prog_attach_check_attach_type()
3807 return -EINVAL; in bpf_prog_attach_check_attach_type()
3812 return -EINVAL; in bpf_prog_attach_check_attach_type()
3816 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) in bpf_prog_attach_check_attach_type()
3817 return -EINVAL; in bpf_prog_attach_check_attach_type()
3843 return -EINVAL; in bpf_prog_attach()
3845 ptype = attach_type_to_prog_type(attr->attach_type); in bpf_prog_attach()
3847 return -EINVAL; in bpf_prog_attach()
3849 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) in bpf_prog_attach()
3850 return -EINVAL; in bpf_prog_attach()
3852 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) in bpf_prog_attach()
3853 return -EINVAL; in bpf_prog_attach()
3854 if (attr->relative_fd || in bpf_prog_attach()
3855 attr->expected_revision) in bpf_prog_attach()
3856 return -EINVAL; in bpf_prog_attach()
3859 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); in bpf_prog_attach()
3863 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { in bpf_prog_attach()
3865 return -EINVAL; in bpf_prog_attach()
3888 prog->expected_attach_type != BPF_LSM_CGROUP) in bpf_prog_attach()
3889 ret = -EINVAL; in bpf_prog_attach()
3897 ret = -EINVAL; in bpf_prog_attach()
3914 return -EINVAL; in bpf_prog_detach()
3916 ptype = attach_type_to_prog_type(attr->attach_type); in bpf_prog_detach()
3919 return -EINVAL; in bpf_prog_detach()
3920 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) in bpf_prog_detach()
3921 return -EINVAL; in bpf_prog_detach()
3922 if (attr->attach_bpf_fd) { in bpf_prog_detach()
3923 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); in bpf_prog_detach()
3927 } else if (attr->attach_flags || in bpf_prog_detach()
3928 attr->relative_fd || in bpf_prog_detach()
3929 attr->expected_revision) { in bpf_prog_detach()
3930 return -EINVAL; in bpf_prog_detach()
3958 ret = -EINVAL; in bpf_prog_detach()
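
bpf_prog_attach()/bpf_prog_detach() implement the legacy, link-less attach model used by cgroup, sockmap and similar targets. A cgroup example via libbpf (cgroup_fd is an open cgroup v2 directory fd):

    #include <bpf/bpf.h>

    static int attach_cgroup_ingress(int prog_fd, int cgroup_fd)
    {
            return bpf_prog_attach(prog_fd, cgroup_fd,
                                   BPF_CGROUP_INET_INGRESS,
                                   BPF_F_ALLOW_MULTI);
    }
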
3972 return -EPERM; in bpf_prog_query()
3974 return -EINVAL; in bpf_prog_query()
3975 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) in bpf_prog_query()
3976 return -EINVAL; in bpf_prog_query()
3978 switch (attr->query.attach_type) { in bpf_prog_query()
4018 return -EINVAL; in bpf_prog_query()
4028 int ret = -ENOTSUPP; in bpf_prog_test_run()
4031 return -EINVAL; in bpf_prog_test_run()
4033 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || in bpf_prog_test_run()
4034 (!attr->test.ctx_size_in && attr->test.ctx_in)) in bpf_prog_test_run()
4035 return -EINVAL; in bpf_prog_test_run()
4037 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || in bpf_prog_test_run()
4038 (!attr->test.ctx_size_out && attr->test.ctx_out)) in bpf_prog_test_run()
4039 return -EINVAL; in bpf_prog_test_run()
4041 prog = bpf_prog_get(attr->test.prog_fd); in bpf_prog_test_run()
4045 if (prog->aux->ops->test_run) in bpf_prog_test_run()
4046 ret = prog->aux->ops->test_run(prog, attr, uattr); in bpf_prog_test_run()
4059 u32 next_id = attr->start_id; in bpf_obj_get_next_id()
4063 return -EINVAL; in bpf_obj_get_next_id()
4066 return -EPERM; in bpf_obj_get_next_id()
4071 err = -ENOENT; in bpf_obj_get_next_id()
4075 err = put_user(next_id, &uattr->next_id); in bpf_obj_get_next_id()
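
bpf_obj_get_next_id() walks the global IDRs behind prog, map, link and BTF IDs; combined with the *_GET_FD_BY_ID commands it lets privileged tooling (bpftool, for instance) enumerate every loaded object:

    #include <bpf/bpf.h>
    #include <unistd.h>

    /* Both syscalls below require CAP_SYS_ADMIN. */
    static void for_each_prog(void (*cb)(int fd))
    {
            __u32 id = 0;

            while (!bpf_prog_get_next_id(id, &id)) {
                    int fd = bpf_prog_get_fd_by_id(id);

                    if (fd < 0)
                            continue;  /* raced with unload; id advanced */
                    cb(fd);
                    close(fd);
            }
    }
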
4125 return ERR_PTR(-ENOENT); in bpf_prog_by_id()
4132 prog = ERR_PTR(-ENOENT); in bpf_prog_by_id()
4140 u32 id = attr->prog_id; in bpf_prog_get_fd_by_id()
4144 return -EINVAL; in bpf_prog_get_fd_by_id()
4147 return -EPERM; in bpf_prog_get_fd_by_id()
4165 u32 id = attr->map_id; in bpf_map_get_fd_by_id()
4170 attr->open_flags & ~BPF_OBJ_FLAG_MASK) in bpf_map_get_fd_by_id()
4171 return -EINVAL; in bpf_map_get_fd_by_id()
4174 return -EPERM; in bpf_map_get_fd_by_id()
4176 f_flags = bpf_get_file_flag(attr->open_flags); in bpf_map_get_fd_by_id()
4185 map = ERR_PTR(-ENOENT); in bpf_map_get_fd_by_id()
4205 mutex_lock(&prog->aux->used_maps_mutex); in bpf_map_from_imm()
4206 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { in bpf_map_from_imm()
4207 map = prog->aux->used_maps[i]; in bpf_map_from_imm()
4212 if (!map->ops->map_direct_value_meta) in bpf_map_from_imm()
4214 if (!map->ops->map_direct_value_meta(map, addr, off)) { in bpf_map_from_imm()
4222 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_map_from_imm()
4236 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), in bpf_insn_prepare_dump()
4241 for (i = 0; i < prog->len; i++) { in bpf_insn_prepare_dump()
4247 /* fall-through */ in bpf_insn_prepare_dump()
4269 insns[i].imm = map->id; in bpf_insn_prepare_dump()
4290 if ((info->nr_func_info || info->func_info_rec_size) && in set_info_rec_size()
4291 info->func_info_rec_size != sizeof(struct bpf_func_info)) in set_info_rec_size()
4292 return -EINVAL; in set_info_rec_size()
4294 if ((info->nr_line_info || info->line_info_rec_size) && in set_info_rec_size()
4295 info->line_info_rec_size != sizeof(struct bpf_line_info)) in set_info_rec_size()
4296 return -EINVAL; in set_info_rec_size()
4298 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && in set_info_rec_size()
4299 info->jited_line_info_rec_size != sizeof(__u64)) in set_info_rec_size()
4300 return -EINVAL; in set_info_rec_size()
4302 info->func_info_rec_size = sizeof(struct bpf_func_info); in set_info_rec_size()
4303 info->line_info_rec_size = sizeof(struct bpf_line_info); in set_info_rec_size()
4304 info->jited_line_info_rec_size = sizeof(__u64); in set_info_rec_size()
4314 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_prog_get_info_by_fd()
4317 u32 info_len = attr->info.info_len; in bpf_prog_get_info_by_fd()
4330 return -EFAULT; in bpf_prog_get_info_by_fd()
4332 info.type = prog->type; in bpf_prog_get_info_by_fd()
4333 info.id = prog->aux->id; in bpf_prog_get_info_by_fd()
4334 info.load_time = prog->aux->load_time; in bpf_prog_get_info_by_fd()
4336 prog->aux->user->uid); in bpf_prog_get_info_by_fd()
4337 info.gpl_compatible = prog->gpl_compatible; in bpf_prog_get_info_by_fd()
4339 memcpy(info.tag, prog->tag, sizeof(prog->tag)); in bpf_prog_get_info_by_fd()
4340 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); in bpf_prog_get_info_by_fd()
4342 mutex_lock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
4344 info.nr_map_ids = prog->aux->used_map_cnt; in bpf_prog_get_info_by_fd()
4351 if (put_user(prog->aux->used_maps[i]->id, in bpf_prog_get_info_by_fd()
4353 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
4354 return -EFAULT; in bpf_prog_get_info_by_fd()
4357 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_get_info_by_fd()
4368 info.verified_insns = prog->aux->verified_insns; in bpf_prog_get_info_by_fd()
4387 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4391 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); in bpf_prog_get_info_by_fd()
4393 return -ENOMEM; in bpf_prog_get_info_by_fd()
4399 return -EFAULT; in bpf_prog_get_info_by_fd()
4402 if (bpf_prog_is_offloaded(prog->aux)) { in bpf_prog_get_info_by_fd()
4414 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4418 for (i = 0; i < prog->aux->func_cnt; i++) in bpf_prog_get_info_by_fd()
4419 info.jited_prog_len += prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
4421 info.jited_prog_len = prog->jited_len; in bpf_prog_get_info_by_fd()
4425 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4429 /* for multi-function programs, copy the JITed in bpf_prog_get_info_by_fd()
4432 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4437 for (i = 0; i < prog->aux->func_cnt; i++) { in bpf_prog_get_info_by_fd()
4438 len = prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
4440 img = (u8 *) prog->aux->func[i]->bpf_func; in bpf_prog_get_info_by_fd()
4442 return -EFAULT; in bpf_prog_get_info_by_fd()
4444 free -= len; in bpf_prog_get_info_by_fd()
4449 if (copy_to_user(uinsns, prog->bpf_func, ulen)) in bpf_prog_get_info_by_fd()
4450 return -EFAULT; in bpf_prog_get_info_by_fd()
4458 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
4460 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4470 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4473 prog->aux->func[i]->bpf_func; in bpf_prog_get_info_by_fd()
4476 return -EFAULT; in bpf_prog_get_info_by_fd()
4479 ksym_addr = (unsigned long) prog->bpf_func; in bpf_prog_get_info_by_fd()
4481 return -EFAULT; in bpf_prog_get_info_by_fd()
4489 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
4491 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4498 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4501 prog->aux->func[i]->jited_len; in bpf_prog_get_info_by_fd()
4503 return -EFAULT; in bpf_prog_get_info_by_fd()
4506 func_len = prog->jited_len; in bpf_prog_get_info_by_fd()
4508 return -EFAULT; in bpf_prog_get_info_by_fd()
4515 if (prog->aux->btf) in bpf_prog_get_info_by_fd()
4516 info.btf_id = btf_obj_id(prog->aux->btf); in bpf_prog_get_info_by_fd()
4517 info.attach_btf_id = prog->aux->attach_btf_id; in bpf_prog_get_info_by_fd()
4522 info.nr_func_info = prog->aux->func_info_cnt; in bpf_prog_get_info_by_fd()
4528 if (copy_to_user(user_finfo, prog->aux->func_info, in bpf_prog_get_info_by_fd()
4530 return -EFAULT; in bpf_prog_get_info_by_fd()
4534 info.nr_line_info = prog->aux->nr_linfo; in bpf_prog_get_info_by_fd()
4540 if (copy_to_user(user_linfo, prog->aux->linfo, in bpf_prog_get_info_by_fd()
4542 return -EFAULT; in bpf_prog_get_info_by_fd()
4546 if (prog->aux->jited_linfo) in bpf_prog_get_info_by_fd()
4547 info.nr_jited_line_info = prog->aux->nr_linfo; in bpf_prog_get_info_by_fd()
4551 if (bpf_dump_raw_ok(file->f_cred)) { in bpf_prog_get_info_by_fd()
4559 line_addr = (unsigned long)prog->aux->jited_linfo[i]; in bpf_prog_get_info_by_fd()
4561 return -EFAULT; in bpf_prog_get_info_by_fd()
4569 info.nr_prog_tags = prog->aux->func_cnt ? : 1; in bpf_prog_get_info_by_fd()
4576 if (prog->aux->func_cnt) { in bpf_prog_get_info_by_fd()
4579 prog->aux->func[i]->tag, in bpf_prog_get_info_by_fd()
4581 return -EFAULT; in bpf_prog_get_info_by_fd()
4585 prog->tag, BPF_TAG_SIZE)) in bpf_prog_get_info_by_fd()
4586 return -EFAULT; in bpf_prog_get_info_by_fd()
4592 put_user(info_len, &uattr->info.info_len)) in bpf_prog_get_info_by_fd()
4593 return -EFAULT; in bpf_prog_get_info_by_fd()
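/*
 * Editor's sketch: the usual two-call pattern against
 * bpf_prog_get_info_by_fd(). The first call leaves all array pointers
 * NULL so the kernel only reports counts (nr_map_ids etc.); the caller
 * then sizes its buffers and repeats the call. ids/nr are assumed to
 * describe a caller-owned buffer; *nr is its capacity on entry.
 */
static int prog_map_ids(int prog_fd, __u32 *ids, __u32 *nr)
{
        struct bpf_prog_info info;
        union bpf_attr attr;
        int err;

        memset(&info, 0, sizeof(info));
        memset(&attr, 0, sizeof(attr));
        attr.info.bpf_fd = prog_fd;
        attr.info.info_len = sizeof(info);
        attr.info.info = (__u64)(unsigned long)&info;

        /* First call: no arrays, kernel fills in the counts only. */
        err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
        if (err || info.nr_map_ids > *nr)
                return err ? err : -1;          /* error or buffer too small */

        /* Second call: same fd, now with a sized map_ids buffer. */
        info.map_ids = (__u64)(unsigned long)ids;
        err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
        if (!err)
                *nr = info.nr_map_ids;
        return err;
}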
4603 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_map_get_info_by_fd()
4605 u32 info_len = attr->info.info_len; in bpf_map_get_info_by_fd()
4614 info.type = map->map_type; in bpf_map_get_info_by_fd()
4615 info.id = map->id; in bpf_map_get_info_by_fd()
4616 info.key_size = map->key_size; in bpf_map_get_info_by_fd()
4617 info.value_size = map->value_size; in bpf_map_get_info_by_fd()
4618 info.max_entries = map->max_entries; in bpf_map_get_info_by_fd()
4619 info.map_flags = map->map_flags; in bpf_map_get_info_by_fd()
4620 info.map_extra = map->map_extra; in bpf_map_get_info_by_fd()
4621 memcpy(info.name, map->name, sizeof(map->name)); in bpf_map_get_info_by_fd()
4623 if (map->btf) { in bpf_map_get_info_by_fd()
4624 info.btf_id = btf_obj_id(map->btf); in bpf_map_get_info_by_fd()
4625 info.btf_key_type_id = map->btf_key_type_id; in bpf_map_get_info_by_fd()
4626 info.btf_value_type_id = map->btf_value_type_id; in bpf_map_get_info_by_fd()
4628 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; in bpf_map_get_info_by_fd()
4637 put_user(info_len, &uattr->info.info_len)) in bpf_map_get_info_by_fd()
4638 return -EFAULT; in bpf_map_get_info_by_fd()
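/*
 * Editor's sketch: fetching struct bpf_map_info for an fd. Unlike the
 * program case, everything here is fixed-size, so a single call
 * suffices and the kernel simply copies the struct back.
 */
static int map_max_entries(int map_fd, __u32 *max_entries)
{
        struct bpf_map_info info;
        union bpf_attr attr;
        int err;

        memset(&info, 0, sizeof(info));
        memset(&attr, 0, sizeof(attr));
        attr.info.bpf_fd = map_fd;
        attr.info.info_len = sizeof(info);
        attr.info.info = (__u64)(unsigned long)&info;

        err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
        if (!err)
                *max_entries = info.max_entries;
        return err;
}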
4648 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_btf_get_info_by_fd()
4649 u32 info_len = attr->info.info_len; in bpf_btf_get_info_by_fd()
4664 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); in bpf_link_get_info_by_fd()
4666 u32 info_len = attr->info.info_len; in bpf_link_get_info_by_fd()
4676 return -EFAULT; in bpf_link_get_info_by_fd()
4678 info.type = link->type; in bpf_link_get_info_by_fd()
4679 info.id = link->id; in bpf_link_get_info_by_fd()
4680 if (link->prog) in bpf_link_get_info_by_fd()
4681 info.prog_id = link->prog->aux->id; in bpf_link_get_info_by_fd()
4683 if (link->ops->fill_link_info) { in bpf_link_get_info_by_fd()
4684 err = link->ops->fill_link_info(link, &info); in bpf_link_get_info_by_fd()
4690 put_user(info_len, &uattr->info.info_len)) in bpf_link_get_info_by_fd()
4691 return -EFAULT; in bpf_link_get_info_by_fd()
4702 int ufd = attr->info.bpf_fd; in bpf_obj_get_info_by_fd()
4707 return -EINVAL; in bpf_obj_get_info_by_fd()
4711 return -EBADFD; in bpf_obj_get_info_by_fd()
4713 if (f.file->f_op == &bpf_prog_fops) in bpf_obj_get_info_by_fd()
4714 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, in bpf_obj_get_info_by_fd()
4716 else if (f.file->f_op == &bpf_map_fops) in bpf_obj_get_info_by_fd()
4717 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, in bpf_obj_get_info_by_fd()
4719 else if (f.file->f_op == &btf_fops) in bpf_obj_get_info_by_fd()
4720 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); in bpf_obj_get_info_by_fd()
4721 else if (f.file->f_op == &bpf_link_fops) in bpf_obj_get_info_by_fd()
4722 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, in bpf_obj_get_info_by_fd()
4725 err = -EINVAL; in bpf_obj_get_info_by_fd()
4736 return -EINVAL; in bpf_btf_load()
4739 return -EPERM; in bpf_btf_load()
4749 return -EINVAL; in bpf_btf_get_fd_by_id()
4752 return -EPERM; in bpf_btf_get_fd_by_id()
4754 return btf_get_fd_by_id(attr->btf_id); in bpf_btf_get_fd_by_id()
4763 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); in bpf_task_fd_query_copy()
4767 if (put_user(len, &uattr->task_fd_query.buf_len)) in bpf_task_fd_query_copy()
4768 return -EFAULT; in bpf_task_fd_query_copy()
4769 input_len = attr->task_fd_query.buf_len; in bpf_task_fd_query_copy()
4776 return -EFAULT; in bpf_task_fd_query_copy()
4780 return -EFAULT; in bpf_task_fd_query_copy()
4787 err = -ENOSPC; in bpf_task_fd_query_copy()
4788 if (copy_to_user(ubuf, buf, input_len - 1)) in bpf_task_fd_query_copy()
4789 return -EFAULT; in bpf_task_fd_query_copy()
4790 if (put_user(zero, ubuf + input_len - 1)) in bpf_task_fd_query_copy()
4791 return -EFAULT; in bpf_task_fd_query_copy()
4795 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || in bpf_task_fd_query_copy()
4796 put_user(fd_type, &uattr->task_fd_query.fd_type) || in bpf_task_fd_query_copy()
4797 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || in bpf_task_fd_query_copy()
4798 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) in bpf_task_fd_query_copy()
4799 return -EFAULT; in bpf_task_fd_query_copy()
4809 pid_t pid = attr->task_fd_query.pid; in bpf_task_fd_query()
4810 u32 fd = attr->task_fd_query.fd; in bpf_task_fd_query()
4817 return -EINVAL; in bpf_task_fd_query()
4820 return -EPERM; in bpf_task_fd_query()
4822 if (attr->task_fd_query.flags != 0) in bpf_task_fd_query()
4823 return -EINVAL; in bpf_task_fd_query()
4829 return -ENOENT; in bpf_task_fd_query()
4835 return -EBADF; in bpf_task_fd_query()
4837 if (file->f_op == &bpf_link_fops) { in bpf_task_fd_query()
4838 struct bpf_link *link = file->private_data; in bpf_task_fd_query()
4840 if (link->ops == &bpf_raw_tp_link_lops) { in bpf_task_fd_query()
4843 struct bpf_raw_event_map *btp = raw_tp->btp; in bpf_task_fd_query()
4846 raw_tp->link.prog->aux->id, in bpf_task_fd_query()
4848 btp->tp->name, 0, 0); in bpf_task_fd_query()
4872 err = -ENOTSUPP; in bpf_task_fd_query()
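/*
 * Editor's sketch: asking what a perf_event or raw_tracepoint fd in
 * another task is attached to. The buffer round trip matches
 * bpf_task_fd_query_copy() above: the kernel writes the needed length
 * back into buf_len, and errno == ENOSPC indicates the name was
 * truncated to fit buf.
 */
static int query_task_fd(pid_t pid, int fd, char *buf, __u32 buf_len,
                         __u32 *prog_id, __u32 *fd_type)
{
        union bpf_attr attr;
        int err;

        memset(&attr, 0, sizeof(attr));
        attr.task_fd_query.pid = pid;
        attr.task_fd_query.fd = fd;
        attr.task_fd_query.buf = (__u64)(unsigned long)buf;
        attr.task_fd_query.buf_len = buf_len;

        err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
        if (!err) {
                *prog_id = attr.task_fd_query.prog_id;
                *fd_type = attr.task_fd_query.fd_type;
        }
        return err;
}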
4883 err = -ENOTSUPP; \
4901 return -EINVAL; in bpf_map_do_batch()
4903 ufd = attr->batch.map_fd; in bpf_map_do_batch()
4911 err = -EPERM; in bpf_map_do_batch()
4915 err = -EPERM; in bpf_map_do_batch()
4920 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); in bpf_map_do_batch()
4922 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); in bpf_map_do_batch()
4924 BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr); in bpf_map_do_batch()
4926 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); in bpf_map_do_batch()
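/*
 * Editor's sketch: one BPF_MAP_LOOKUP_BATCH round trip against the
 * dispatcher above. Leaving in_batch zeroed starts from the beginning
 * of the map; out_batch returns the cursor for the next call. count is
 * in/out: element capacity on entry, elements copied on exit, and
 * errno == ENOENT signals the map is exhausted. Buffers are assumed to
 * be sized for the map's key/value sizes.
 */
static int lookup_batch(int map_fd, void *out_batch, void *keys,
                        void *values, __u32 *count)
{
        union bpf_attr attr;
        int err;

        memset(&attr, 0, sizeof(attr));
        attr.batch.map_fd = map_fd;
        attr.batch.out_batch = (__u64)(unsigned long)out_batch;
        attr.batch.keys = (__u64)(unsigned long)keys;
        attr.batch.values = (__u64)(unsigned long)values;
        attr.batch.count = *count;

        err = sys_bpf(BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
        *count = attr.batch.count;
        return err;
}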
4941 return -EINVAL; in link_create()
4943 if (attr->link_create.attach_type == BPF_STRUCT_OPS) in link_create()
4946 prog = bpf_prog_get(attr->link_create.prog_fd); in link_create()
4951 attr->link_create.attach_type); in link_create()
4955 switch (prog->type) { in link_create()
4967 attr->link_create.target_fd, in link_create()
4968 attr->link_create.target_btf_id, in link_create()
4969 attr->link_create.tracing.cookie); in link_create()
4973 if (attr->link_create.attach_type != prog->expected_attach_type) { in link_create()
4974 ret = -EINVAL; in link_create()
4977 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) in link_create()
4979 else if (prog->expected_attach_type == BPF_TRACE_ITER) in link_create()
4981 else if (prog->expected_attach_type == BPF_LSM_CGROUP) in link_create()
4985 attr->link_create.target_fd, in link_create()
4986 attr->link_create.target_btf_id, in link_create()
4987 attr->link_create.tracing.cookie); in link_create()
5009 if (attr->link_create.attach_type == BPF_PERF_EVENT) in link_create()
5011 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI) in link_create()
5013 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) in link_create()
5017 ret = -EINVAL; in link_create()
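/*
 * Editor's sketch: creating a cgroup link, one of the prog-type cases
 * dispatched above. The returned fd owns the attachment: the program
 * stays in place until the last reference to the link is dropped.
 */
static int attach_cgroup_link(int cgroup_fd, int prog_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.link_create.prog_fd = prog_fd;
        attr.link_create.target_fd = cgroup_fd;
        attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;

        return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));   /* link fd or -1 */
}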
5031 new_map = bpf_map_get(attr->link_update.new_map_fd); in link_update_map()
5035 if (attr->link_update.flags & BPF_F_REPLACE) { in link_update_map()
5036 old_map = bpf_map_get(attr->link_update.old_map_fd); in link_update_map()
5041 } else if (attr->link_update.old_map_fd) { in link_update_map()
5042 ret = -EINVAL; in link_update_map()
5046 ret = link->ops->update_map(link, new_map, old_map); in link_update_map()
5065 return -EINVAL; in link_update()
5067 flags = attr->link_update.flags; in link_update()
5069 return -EINVAL; in link_update()
5071 link = bpf_link_get_from_fd(attr->link_update.link_fd); in link_update()
5075 if (link->ops->update_map) { in link_update()
5080 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); in link_update()
5087 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); in link_update()
5093 } else if (attr->link_update.old_prog_fd) { in link_update()
5094 ret = -EINVAL; in link_update()
5098 if (link->ops->update_prog) in link_update()
5099 ret = link->ops->update_prog(link, new_prog, old_prog); in link_update()
5101 ret = -EINVAL; in link_update()
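/*
 * Editor's sketch: atomically replacing the program behind a link,
 * following the ->update_prog branch above. With BPF_F_REPLACE the
 * kernel also verifies that old_prog_fd still matches the currently
 * attached program before swapping.
 */
static int swap_link_prog(int link_fd, int new_prog_fd, int old_prog_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.link_update.link_fd = link_fd;
        attr.link_update.new_prog_fd = new_prog_fd;
        attr.link_update.old_prog_fd = old_prog_fd;     /* only checked with BPF_F_REPLACE */
        attr.link_update.flags = BPF_F_REPLACE;

        return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
}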
5121 return -EINVAL; in link_detach()
5123 link = bpf_link_get_from_fd(attr->link_detach.link_fd); in link_detach()
5127 if (link->ops->detach) in link_detach()
5128 ret = link->ops->detach(link); in link_detach()
5130 ret = -EOPNOTSUPP; in link_detach()
5138 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); in bpf_link_inc_not_zero()
5146 return ERR_PTR(-ENOENT); in bpf_link_by_id()
5152 if (link->id) in bpf_link_by_id()
5155 link = ERR_PTR(-EAGAIN); in bpf_link_by_id()
5157 link = ERR_PTR(-ENOENT); in bpf_link_by_id()
5187 u32 id = attr->link_id; in bpf_link_get_fd_by_id()
5191 return -EINVAL; in bpf_link_get_fd_by_id()
5194 return -EPERM; in bpf_link_get_fd_by_id()
5230 return -EBUSY; in bpf_enable_runtime_stats()
5233 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); in bpf_enable_runtime_stats()
5247 return -EINVAL; in bpf_enable_stats()
5250 return -EPERM; in bpf_enable_stats()
5252 switch (attr->enable_stats.type) { in bpf_enable_stats()
5258 return -EINVAL; in bpf_enable_stats()
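/*
 * Editor's sketch: enabling run-time statistics, which needs
 * CAP_SYS_ADMIN per the check above. The returned anonymous-inode fd
 * is the on/off switch: stats stay enabled until every such fd has
 * been closed.
 */
static int enable_runtime_stats(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.enable_stats.type = BPF_STATS_RUN_TIME;

        return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));  /* keep this fd open */
}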
5269 return -EINVAL; in bpf_iter_create()
5271 if (attr->iter_create.flags) in bpf_iter_create()
5272 return -EINVAL; in bpf_iter_create()
5274 link = bpf_link_get_from_fd(attr->iter_create.link_fd); in bpf_iter_create()
5294 return -EINVAL; in bpf_prog_bind_map()
5296 if (attr->prog_bind_map.flags) in bpf_prog_bind_map()
5297 return -EINVAL; in bpf_prog_bind_map()
5299 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); in bpf_prog_bind_map()
5303 map = bpf_map_get(attr->prog_bind_map.map_fd); in bpf_prog_bind_map()
5309 mutex_lock(&prog->aux->used_maps_mutex); in bpf_prog_bind_map()
5311 used_maps_old = prog->aux->used_maps; in bpf_prog_bind_map()
5313 for (i = 0; i < prog->aux->used_map_cnt; i++) in bpf_prog_bind_map()
5319 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, in bpf_prog_bind_map()
5323 ret = -ENOMEM; in bpf_prog_bind_map()
5328 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); in bpf_prog_bind_map()
5329 used_maps_new[prog->aux->used_map_cnt] = map; in bpf_prog_bind_map()
5331 prog->aux->used_map_cnt++; in bpf_prog_bind_map()
5332 prog->aux->used_maps = used_maps_new; in bpf_prog_bind_map()
5337 mutex_unlock(&prog->aux->used_maps_mutex); in bpf_prog_bind_map()
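/*
 * Editor's sketch: binding an extra map into a program's used_maps
 * array, as implemented above. The typical use is tying a map's
 * lifetime to the program even though no instruction references it
 * (e.g. a map only reached through another map's values).
 */
static int bind_map_to_prog(int prog_fd, int map_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_bind_map.prog_fd = prog_fd;
        attr.prog_bind_map.map_fd = map_fd;

        return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}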
5359 return -EFAULT; in __sys_bpf()
5480 err = -EINVAL; in __sys_bpf()
5518 return -EINVAL; in BPF_CALL_3()
5524 /* To shut up -Wmissing-prototypes.
5539 if (attr->test.data_in || attr->test.data_out || in kern_sys_bpf()
5540 attr->test.ctx_out || attr->test.duration || in kern_sys_bpf()
5541 attr->test.repeat || attr->test.flags) in kern_sys_bpf()
5542 return -EINVAL; in kern_sys_bpf()
5544 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); in kern_sys_bpf()
5548 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || in kern_sys_bpf()
5549 attr->test.ctx_size_in > U16_MAX) { in kern_sys_bpf()
5551 return -EINVAL; in kern_sys_bpf()
5559 return -EBUSY; in kern_sys_bpf()
5561 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); in kern_sys_bpf()
5593 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close in BPF_CALL_1()
5608 return -EINVAL; in BPF_CALL_4()
5610 if (name_sz <= 1 || name[name_sz - 1]) in BPF_CALL_4()
5611 return -EINVAL; in BPF_CALL_4()
5614 return -EPERM; in BPF_CALL_4()
5617 return *res ? 0 : -ENOENT; in BPF_CALL_4()
5660 struct static_key *key = (struct static_key *)table->data; in bpf_stats_handler()
5666 .mode = table->mode, in bpf_stats_handler()
5672 return -EPERM; in bpf_stats_handler()
5695 int ret, unpriv_enable = *(int *)table->data; in bpf_unpriv_handler()
5700 return -EPERM; in bpf_unpriv_handler()
5706 return -EPERM; in bpf_unpriv_handler()
5707 *(int *)table->data = unpriv_enable; in bpf_unpriv_handler()