Lines Matching refs:map
(cross-reference hits for the identifier "map" in the Linux kernel's BPF map syscall code, most likely kernel/bpf/syscall.c; each entry shows the source line number, the matching line, and the enclosing function or a note such as "argument" or "local")

37 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \  argument
38 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
39 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
40 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) argument
41 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) argument
42 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ argument
43 IS_FD_HASH(map))
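
These IS_FD_*() macros single out "FD maps": map types whose elements are supplied from user space as file descriptors (perf events, cgroups, programs, or other maps). A minimal sketch of creating one such map with the raw bpf(2) syscall follows; the helper name and the sizes are illustrative, but inner_map_fd is the attribute the kernel requires as a template for map-in-map types.

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative helper: thin wrapper around the bpf(2) syscall. */
static int bpf_map_create_attr(union bpf_attr *attr)
{
        return syscall(__NR_bpf, BPF_MAP_CREATE, attr, sizeof(*attr));
}

int create_array_of_maps(void)
{
        /* Inner map used as the template for the outer map's values. */
        union bpf_attr inner = {
                .map_type    = BPF_MAP_TYPE_ARRAY,
                .key_size    = sizeof(__u32),
                .value_size  = sizeof(__u64),
                .max_entries = 16,
        };
        int inner_fd = bpf_map_create_attr(&inner);

        /* Outer "FD map": values written from user space are map FDs. */
        union bpf_attr outer = {
                .map_type     = BPF_MAP_TYPE_ARRAY_OF_MAPS,
                .key_size     = sizeof(__u32),
                .value_size   = sizeof(__u32),   /* must be 4 bytes: an fd */
                .max_entries  = 8,
                .inner_map_fd = inner_fd,
        };

        return inner_fd < 0 ? inner_fd : bpf_map_create_attr(&outer);
}
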
112 struct bpf_map *map; in find_and_alloc_map() local
129 map = ops->map_alloc(attr); in find_and_alloc_map()
130 if (IS_ERR(map)) in find_and_alloc_map()
131 return map; in find_and_alloc_map()
132 map->ops = ops; in find_and_alloc_map()
133 map->map_type = type; in find_and_alloc_map()
134 return map; in find_and_alloc_map()
137 static void bpf_map_write_active_inc(struct bpf_map *map) in bpf_map_write_active_inc() argument
139 atomic64_inc(&map->writecnt); in bpf_map_write_active_inc()
142 static void bpf_map_write_active_dec(struct bpf_map *map) in bpf_map_write_active_dec() argument
144 atomic64_dec(&map->writecnt); in bpf_map_write_active_dec()
147 bool bpf_map_write_active(const struct bpf_map *map) in bpf_map_write_active() argument
149 return atomic64_read(&map->writecnt) != 0; in bpf_map_write_active()
152 static u32 bpf_map_value_size(const struct bpf_map *map) in bpf_map_value_size() argument
154 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_value_size()
155 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in bpf_map_value_size()
156 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in bpf_map_value_size()
157 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in bpf_map_value_size()
158 return round_up(map->value_size, 8) * num_possible_cpus(); in bpf_map_value_size()
159 else if (IS_FD_MAP(map)) in bpf_map_value_size()
162 return map->value_size; in bpf_map_value_size()
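
bpf_map_value_size() shows that for per-CPU map types the syscall layer copies one value per possible CPU, each slot rounded up to 8 bytes. A minimal user-space sketch, assuming libbpf for the lookup wrapper and the possible-CPU count, sizing the buffer accordingly:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <stdlib.h>

int read_percpu_value(int map_fd, __u32 key, __u32 value_size)
{
        int ncpus = libbpf_num_possible_cpus();
        size_t slot = (value_size + 7) & ~(size_t)7;   /* round_up(value_size, 8) */
        void *values;
        int err;

        if (ncpus < 0)
                return ncpus;
        values = calloc(ncpus, slot);
        if (!values)
                return -1;
        err = bpf_map_lookup_elem(map_fd, &key, values); /* fills one slot per possible CPU */
        /* (char *)values + cpu * slot is the copy for each possible CPU */
        free(values);
        return err;
}
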
165 static void maybe_wait_bpf_programs(struct bpf_map *map) in maybe_wait_bpf_programs() argument
171 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || in maybe_wait_bpf_programs()
172 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) in maybe_wait_bpf_programs()
176 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, in bpf_map_update_value() argument
182 if (bpf_map_is_dev_bound(map)) { in bpf_map_update_value()
183 return bpf_map_offload_update_elem(map, key, value, flags); in bpf_map_update_value()
184 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || in bpf_map_update_value()
185 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_update_value()
186 return map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
187 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || in bpf_map_update_value()
188 map->map_type == BPF_MAP_TYPE_SOCKMAP) { in bpf_map_update_value()
189 return sock_map_update_elem_sys(map, key, value, flags); in bpf_map_update_value()
190 } else if (IS_FD_PROG_ARRAY(map)) { in bpf_map_update_value()
191 return bpf_fd_array_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
196 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_update_value()
197 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_update_value()
198 err = bpf_percpu_hash_update(map, key, value, flags); in bpf_map_update_value()
199 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_update_value()
200 err = bpf_percpu_array_update(map, key, value, flags); in bpf_map_update_value()
201 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_update_value()
202 err = bpf_percpu_cgroup_storage_update(map, key, value, in bpf_map_update_value()
204 } else if (IS_FD_ARRAY(map)) { in bpf_map_update_value()
206 err = bpf_fd_array_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
209 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in bpf_map_update_value()
211 err = bpf_fd_htab_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
214 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_update_value()
216 err = bpf_fd_reuseport_array_update_elem(map, key, value, in bpf_map_update_value()
218 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_update_value()
219 map->map_type == BPF_MAP_TYPE_STACK) { in bpf_map_update_value()
220 err = map->ops->map_push_elem(map, value, flags); in bpf_map_update_value()
223 err = map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
227 maybe_wait_bpf_programs(map); in bpf_map_update_value()
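
bpf_map_update_value() is the kernel side of BPF_MAP_UPDATE_ELEM; whichever branch is taken, the same generic flags apply from user space: BPF_ANY (create or replace), BPF_NOEXIST (create only) and BPF_EXIST (replace only). A minimal raw-syscall sketch:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

int map_update(int map_fd, const void *key, const void *value, __u64 flags)
{
        union bpf_attr attr = {
                .map_fd = map_fd,
                .key    = (__u64)(unsigned long)key,
                .value  = (__u64)(unsigned long)value,
                .flags  = flags,   /* BPF_ANY, BPF_NOEXIST or BPF_EXIST */
        };

        return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
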
232 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, in bpf_map_copy_value() argument
238 if (bpf_map_is_dev_bound(map)) in bpf_map_copy_value()
239 return bpf_map_offload_lookup_elem(map, key, value); in bpf_map_copy_value()
242 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_copy_value()
243 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_copy_value()
244 err = bpf_percpu_hash_copy(map, key, value); in bpf_map_copy_value()
245 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_copy_value()
246 err = bpf_percpu_array_copy(map, key, value); in bpf_map_copy_value()
247 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_copy_value()
248 err = bpf_percpu_cgroup_storage_copy(map, key, value); in bpf_map_copy_value()
249 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in bpf_map_copy_value()
250 err = bpf_stackmap_copy(map, key, value); in bpf_map_copy_value()
251 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { in bpf_map_copy_value()
252 err = bpf_fd_array_map_lookup_elem(map, key, value); in bpf_map_copy_value()
253 } else if (IS_FD_HASH(map)) { in bpf_map_copy_value()
254 err = bpf_fd_htab_map_lookup_elem(map, key, value); in bpf_map_copy_value()
255 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_copy_value()
256 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); in bpf_map_copy_value()
257 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_copy_value()
258 map->map_type == BPF_MAP_TYPE_STACK) { in bpf_map_copy_value()
259 err = map->ops->map_peek_elem(map, value); in bpf_map_copy_value()
260 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_copy_value()
262 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); in bpf_map_copy_value()
265 if (map->ops->map_lookup_elem_sys_only) in bpf_map_copy_value()
266 ptr = map->ops->map_lookup_elem_sys_only(map, key); in bpf_map_copy_value()
268 ptr = map->ops->map_lookup_elem(map, key); in bpf_map_copy_value()
277 copy_map_value_locked(map, value, ptr, true); in bpf_map_copy_value()
279 copy_map_value(map, value, ptr); in bpf_map_copy_value()
281 check_and_init_map_value(map, value); in bpf_map_copy_value()
287 maybe_wait_bpf_programs(map); in bpf_map_copy_value()
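
bpf_map_copy_value() takes the copy_map_value_locked() path when user space passes BPF_F_LOCK and the value type contains a struct bpf_spin_lock; check_and_init_map_value() then zeroes the special fields (lock, timer) in the copy handed back. A minimal sketch, assuming libbpf's bpf_map_lookup_elem_flags() wrapper and a hypothetical value layout:

#include <bpf/bpf.h>
#include <stdint.h>

struct my_val {                          /* must match the BPF-side value layout */
        struct bpf_spin_lock lock;
        uint64_t counter;
};

int read_locked(int map_fd, __u32 key, struct my_val *out)
{
        /* BPF_F_LOCK: the kernel copies the value while holding the element's
         * lock, so user space gets a consistent snapshot; the lock field in
         * the returned copy is zeroed. */
        return bpf_map_lookup_elem_flags(map_fd, &key, out, BPF_F_LOCK);
}
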
360 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) in bpf_map_init_from_attr() argument
362 map->map_type = attr->map_type; in bpf_map_init_from_attr()
363 map->key_size = attr->key_size; in bpf_map_init_from_attr()
364 map->value_size = attr->value_size; in bpf_map_init_from_attr()
365 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
366 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); in bpf_map_init_from_attr()
367 map->numa_node = bpf_map_attr_numa_node(attr); in bpf_map_init_from_attr()
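
bpf_map_init_from_attr() copies the user-supplied creation attributes into the new map. A minimal raw-syscall sketch of the corresponding BPF_MAP_CREATE call (sizes, flags and name are illustrative):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int create_hash_map(void)
{
        union bpf_attr attr = {
                .map_type    = BPF_MAP_TYPE_HASH,
                .key_size    = sizeof(__u32),
                .value_size  = sizeof(__u64),
                .max_entries = 4096,
                /* kept in map->map_flags; access flags like BPF_F_RDONLY are
                 * stripped by bpf_map_flags_retain_permanent() */
                .map_flags   = BPF_F_NO_PREALLOC,
        };

        strncpy(attr.map_name, "example_hash", sizeof(attr.map_name) - 1);
        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); /* new map fd, or -1 */
}
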
370 static int bpf_map_alloc_id(struct bpf_map *map) in bpf_map_alloc_id() argument
376 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); in bpf_map_alloc_id()
378 map->id = id; in bpf_map_alloc_id()
388 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) in bpf_map_free_id() argument
397 if (!map->id) in bpf_map_free_id()
405 idr_remove(&map_idr, map->id); in bpf_map_free_id()
406 map->id = 0; in bpf_map_free_id()
415 static void bpf_map_save_memcg(struct bpf_map *map) in bpf_map_save_memcg() argument
417 map->memcg = get_mem_cgroup_from_mm(current->mm); in bpf_map_save_memcg()
420 static void bpf_map_release_memcg(struct bpf_map *map) in bpf_map_release_memcg() argument
422 mem_cgroup_put(map->memcg); in bpf_map_release_memcg()
425 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, in bpf_map_kmalloc_node() argument
431 old_memcg = set_active_memcg(map->memcg); in bpf_map_kmalloc_node()
438 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) in bpf_map_kzalloc() argument
443 old_memcg = set_active_memcg(map->memcg); in bpf_map_kzalloc()
450 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, in bpf_map_alloc_percpu() argument
456 old_memcg = set_active_memcg(map->memcg); in bpf_map_alloc_percpu()
464 static void bpf_map_save_memcg(struct bpf_map *map) in bpf_map_save_memcg() argument
468 static void bpf_map_release_memcg(struct bpf_map *map) in bpf_map_release_memcg() argument
476 struct bpf_map *map = container_of(work, struct bpf_map, work); in bpf_map_free_deferred() local
478 security_bpf_map_free(map); in bpf_map_free_deferred()
479 bpf_map_release_memcg(map); in bpf_map_free_deferred()
481 map->ops->map_free(map); in bpf_map_free_deferred()
484 static void bpf_map_put_uref(struct bpf_map *map) in bpf_map_put_uref() argument
486 if (atomic64_dec_and_test(&map->usercnt)) { in bpf_map_put_uref()
487 if (map->ops->map_release_uref) in bpf_map_put_uref()
488 map->ops->map_release_uref(map); in bpf_map_put_uref()
495 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) in __bpf_map_put() argument
497 if (atomic64_dec_and_test(&map->refcnt)) { in __bpf_map_put()
499 bpf_map_free_id(map, do_idr_lock); in __bpf_map_put()
500 btf_put(map->btf); in __bpf_map_put()
501 INIT_WORK(&map->work, bpf_map_free_deferred); in __bpf_map_put()
502 schedule_work(&map->work); in __bpf_map_put()
506 void bpf_map_put(struct bpf_map *map) in bpf_map_put() argument
508 __bpf_map_put(map, true); in bpf_map_put()
512 void bpf_map_put_with_uref(struct bpf_map *map) in bpf_map_put_with_uref() argument
514 bpf_map_put_uref(map); in bpf_map_put_with_uref()
515 bpf_map_put(map); in bpf_map_put_with_uref()
520 struct bpf_map *map = filp->private_data; in bpf_map_release() local
522 if (map->ops->map_release) in bpf_map_release()
523 map->ops->map_release(map, filp); in bpf_map_release()
525 bpf_map_put_with_uref(map); in bpf_map_release()
529 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) in map_get_sys_perms() argument
536 if (READ_ONCE(map->frozen)) in map_get_sys_perms()
546 static unsigned long bpf_map_memory_footprint(const struct bpf_map *map) in bpf_map_memory_footprint() argument
550 size = round_up(map->key_size + bpf_map_value_size(map), 8); in bpf_map_memory_footprint()
552 return round_up(map->max_entries * size, PAGE_SIZE); in bpf_map_memory_footprint()
557 const struct bpf_map *map = filp->private_data; in bpf_map_show_fdinfo() local
561 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { in bpf_map_show_fdinfo()
562 array = container_of(map, struct bpf_array, map); in bpf_map_show_fdinfo()
578 map->map_type, in bpf_map_show_fdinfo()
579 map->key_size, in bpf_map_show_fdinfo()
580 map->value_size, in bpf_map_show_fdinfo()
581 map->max_entries, in bpf_map_show_fdinfo()
582 map->map_flags, in bpf_map_show_fdinfo()
583 bpf_map_memory_footprint(map), in bpf_map_show_fdinfo()
584 map->id, in bpf_map_show_fdinfo()
585 READ_ONCE(map->frozen)); in bpf_map_show_fdinfo()
614 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_open() local
617 bpf_map_write_active_inc(map); in bpf_map_mmap_open()
623 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_close() local
626 bpf_map_write_active_dec(map); in bpf_map_mmap_close()
636 struct bpf_map *map = filp->private_data; in bpf_map_mmap() local
639 if (!map->ops->map_mmap || map_value_has_spin_lock(map) || in bpf_map_mmap()
640 map_value_has_timer(map)) in bpf_map_mmap()
646 mutex_lock(&map->freeze_mutex); in bpf_map_mmap()
649 if (map->frozen) { in bpf_map_mmap()
658 if (map->map_flags & BPF_F_RDONLY_PROG) { in bpf_map_mmap()
666 vma->vm_private_data = map; in bpf_map_mmap()
672 err = map->ops->map_mmap(map, vma); in bpf_map_mmap()
677 bpf_map_write_active_inc(map); in bpf_map_mmap()
679 mutex_unlock(&map->freeze_mutex); in bpf_map_mmap()
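
bpf_map_mmap() lets user space map maps that implement map_mmap, refusing a writable mapping once the map is frozen or when it was created with BPF_F_RDONLY_PROG, and counting writable mappings as write-active. A minimal sketch using a BPF_MAP_TYPE_ARRAY created with BPF_F_MMAPABLE:

#include <linux/bpf.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

void *mmap_array_map(int *map_fd_out, __u32 value_size, __u32 max_entries)
{
        union bpf_attr attr = {
                .map_type    = BPF_MAP_TYPE_ARRAY,
                .key_size    = sizeof(__u32),
                .value_size  = value_size,
                .max_entries = max_entries,
                .map_flags   = BPF_F_MMAPABLE,
        };
        int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
        size_t len = (size_t)value_size * max_entries;

        if (fd < 0)
                return NULL;
        *map_fd_out = fd;
        /* A writable mapping also marks the map write-active, which makes a
         * later BPF_MAP_FREEZE fail with -EBUSY while it is in place. */
        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
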
685 struct bpf_map *map = filp->private_data; in bpf_map_poll() local
687 if (map->ops->map_poll) in bpf_map_poll()
688 return map->ops->map_poll(map, filp, pts); in bpf_map_poll()
704 int bpf_map_new_fd(struct bpf_map *map, int flags) in bpf_map_new_fd() argument
708 ret = security_bpf_map(map, OPEN_FMODE(flags)); in bpf_map_new_fd()
712 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, in bpf_map_new_fd()
759 int map_check_no_btf(const struct bpf_map *map, in map_check_no_btf() argument
767 static int map_check_btf(struct bpf_map *map, const struct btf *btf, in map_check_btf() argument
777 if (!key_type || key_size != map->key_size) in map_check_btf()
781 if (!map->ops->map_check_btf) in map_check_btf()
786 if (!value_type || value_size != map->value_size) in map_check_btf()
789 map->spin_lock_off = btf_find_spin_lock(btf, value_type); in map_check_btf()
791 if (map_value_has_spin_lock(map)) { in map_check_btf()
792 if (map->map_flags & BPF_F_RDONLY_PROG) in map_check_btf()
794 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
795 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
796 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in map_check_btf()
797 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
798 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
799 map->map_type != BPF_MAP_TYPE_TASK_STORAGE) in map_check_btf()
801 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > in map_check_btf()
802 map->value_size) { in map_check_btf()
805 map->spin_lock_off, map->value_size); in map_check_btf()
810 map->timer_off = btf_find_timer(btf, value_type); in map_check_btf()
811 if (map_value_has_timer(map)) { in map_check_btf()
812 if (map->map_flags & BPF_F_RDONLY_PROG) in map_check_btf()
814 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
815 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
816 map->map_type != BPF_MAP_TYPE_ARRAY) in map_check_btf()
820 if (map->ops->map_check_btf) in map_check_btf()
821 ret = map->ops->map_check_btf(map, btf, key_type, value_type); in map_check_btf()
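
map_check_btf() only accepts a struct bpf_spin_lock (and, separately, a struct bpf_timer) inside the BTF-described value type for a handful of map types, and rejects combining it with BPF_F_RDONLY_PROG. A minimal BPF-program-side sketch of such a map, assuming libbpf's bpf_helpers.h conventions; the section name, map name and program body are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct val {
        struct bpf_spin_lock lock;       /* what btf_find_spin_lock() locates */
        __u64 counter;
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH); /* one of the map types allowed to hold a lock */
        __uint(max_entries, 1024);
        __type(key, __u32);
        __type(value, struct val);
} counters SEC(".maps");

SEC("tc")
int count_packets(struct __sk_buff *skb)
{
        __u32 key = 0;
        struct val *v = bpf_map_lookup_elem(&counters, &key);

        if (v) {
                bpf_spin_lock(&v->lock);
                v->counter++;
                bpf_spin_unlock(&v->lock);
        }
        return 0;
}
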
831 struct bpf_map *map; in map_create() local
857 map = find_and_alloc_map(attr); in map_create()
858 if (IS_ERR(map)) in map_create()
859 return PTR_ERR(map); in map_create()
861 err = bpf_obj_name_cpy(map->name, attr->map_name, in map_create()
866 atomic64_set(&map->refcnt, 1); in map_create()
867 atomic64_set(&map->usercnt, 1); in map_create()
868 mutex_init(&map->freeze_mutex); in map_create()
870 map->spin_lock_off = -EINVAL; in map_create()
871 map->timer_off = -EINVAL; in map_create()
892 map->btf = btf; in map_create()
895 err = map_check_btf(map, btf, attr->btf_key_type_id, in map_create()
901 map->btf_key_type_id = attr->btf_key_type_id; in map_create()
902 map->btf_value_type_id = attr->btf_value_type_id; in map_create()
903 map->btf_vmlinux_value_type_id = in map_create()
907 err = security_bpf_map_alloc(map); in map_create()
911 err = bpf_map_alloc_id(map); in map_create()
915 bpf_map_save_memcg(map); in map_create()
917 err = bpf_map_new_fd(map, f_flags); in map_create()
925 bpf_map_put_with_uref(map); in map_create()
932 security_bpf_map_free(map); in map_create()
934 btf_put(map->btf); in map_create()
935 map->ops->map_free(map); in map_create()
954 void bpf_map_inc(struct bpf_map *map) in bpf_map_inc() argument
956 atomic64_inc(&map->refcnt); in bpf_map_inc()
960 void bpf_map_inc_with_uref(struct bpf_map *map) in bpf_map_inc_with_uref() argument
962 atomic64_inc(&map->refcnt); in bpf_map_inc_with_uref()
963 atomic64_inc(&map->usercnt); in bpf_map_inc_with_uref()
970 struct bpf_map *map; in bpf_map_get() local
972 map = __bpf_map_get(f); in bpf_map_get()
973 if (IS_ERR(map)) in bpf_map_get()
974 return map; in bpf_map_get()
976 bpf_map_inc(map); in bpf_map_get()
979 return map; in bpf_map_get()
985 struct bpf_map *map; in bpf_map_get_with_uref() local
987 map = __bpf_map_get(f); in bpf_map_get_with_uref()
988 if (IS_ERR(map)) in bpf_map_get_with_uref()
989 return map; in bpf_map_get_with_uref()
991 bpf_map_inc_with_uref(map); in bpf_map_get_with_uref()
994 return map; in bpf_map_get_with_uref()
998 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) in __bpf_map_inc_not_zero() argument
1002 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); in __bpf_map_inc_not_zero()
1006 atomic64_inc(&map->usercnt); in __bpf_map_inc_not_zero()
1008 return map; in __bpf_map_inc_not_zero()
1011 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) in bpf_map_inc_not_zero() argument
1014 map = __bpf_map_inc_not_zero(map, false); in bpf_map_inc_not_zero()
1017 return map; in bpf_map_inc_not_zero()
1021 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) in bpf_stackmap_copy() argument
1056 struct bpf_map *map; in map_lookup_elem() local
1069 map = __bpf_map_get(f); in map_lookup_elem()
1070 if (IS_ERR(map)) in map_lookup_elem()
1071 return PTR_ERR(map); in map_lookup_elem()
1072 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in map_lookup_elem()
1078 !map_value_has_spin_lock(map)) { in map_lookup_elem()
1083 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_elem()
1089 value_size = bpf_map_value_size(map); in map_lookup_elem()
1096 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1123 struct bpf_map *map; in map_update_elem() local
1133 map = __bpf_map_get(f); in map_update_elem()
1134 if (IS_ERR(map)) in map_update_elem()
1135 return PTR_ERR(map); in map_update_elem()
1136 bpf_map_write_active_inc(map); in map_update_elem()
1137 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_update_elem()
1143 !map_value_has_spin_lock(map)) { in map_update_elem()
1148 key = ___bpf_copy_key(ukey, map->key_size); in map_update_elem()
1154 value_size = bpf_map_value_size(map); in map_update_elem()
1165 err = bpf_map_update_value(map, f, key, value, attr->flags); in map_update_elem()
1172 bpf_map_write_active_dec(map); in map_update_elem()
1183 struct bpf_map *map; in map_delete_elem() local
1192 map = __bpf_map_get(f); in map_delete_elem()
1193 if (IS_ERR(map)) in map_delete_elem()
1194 return PTR_ERR(map); in map_delete_elem()
1195 bpf_map_write_active_inc(map); in map_delete_elem()
1196 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_delete_elem()
1201 key = __bpf_copy_key(ukey, map->key_size); in map_delete_elem()
1207 if (bpf_map_is_dev_bound(map)) { in map_delete_elem()
1208 err = bpf_map_offload_delete_elem(map, key); in map_delete_elem()
1210 } else if (IS_FD_PROG_ARRAY(map) || in map_delete_elem()
1211 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in map_delete_elem()
1213 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1219 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1222 maybe_wait_bpf_programs(map); in map_delete_elem()
1226 bpf_map_write_active_dec(map); in map_delete_elem()
1239 struct bpf_map *map; in map_get_next_key() local
1248 map = __bpf_map_get(f); in map_get_next_key()
1249 if (IS_ERR(map)) in map_get_next_key()
1250 return PTR_ERR(map); in map_get_next_key()
1251 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in map_get_next_key()
1257 key = __bpf_copy_key(ukey, map->key_size); in map_get_next_key()
1267 next_key = kvmalloc(map->key_size, GFP_USER); in map_get_next_key()
1271 if (bpf_map_is_dev_bound(map)) { in map_get_next_key()
1272 err = bpf_map_offload_get_next_key(map, key, next_key); in map_get_next_key()
1277 err = map->ops->map_get_next_key(map, key, next_key); in map_get_next_key()
1284 if (copy_to_user(unext_key, next_key, map->key_size) != 0) in map_get_next_key()
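
map_get_next_key() backs BPF_MAP_GET_NEXT_KEY: a NULL key asks for the first key, and ENOENT signals that iteration is done. A minimal raw-syscall sketch, assuming 4-byte keys:

#include <errno.h>
#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

int count_keys(int map_fd)
{
        __u32 cur, next;
        union bpf_attr attr = {
                .map_fd   = map_fd,
                .key      = 0,                             /* NULL: start from the first key */
                .next_key = (__u64)(unsigned long)&next,
        };
        int n = 0;

        while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0) {
                cur = next;
                attr.key = (__u64)(unsigned long)&cur;     /* continue from the last key seen */
                n++;
        }
        return errno == ENOENT ? n : -1;                   /* ENOENT: no more keys */
}
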
1298 int generic_map_delete_batch(struct bpf_map *map, in generic_map_delete_batch() argument
1311 !map_value_has_spin_lock(map)) { in generic_map_delete_batch()
1322 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_delete_batch()
1328 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_delete_batch()
1329 map->key_size)) in generic_map_delete_batch()
1332 if (bpf_map_is_dev_bound(map)) { in generic_map_delete_batch()
1333 err = bpf_map_offload_delete_elem(map, key); in generic_map_delete_batch()
1339 err = map->ops->map_delete_elem(map, key); in generic_map_delete_batch()
1342 maybe_wait_bpf_programs(map); in generic_map_delete_batch()
1354 int generic_map_update_batch(struct bpf_map *map, in generic_map_update_batch() argument
1370 !map_value_has_spin_lock(map)) { in generic_map_update_batch()
1374 value_size = bpf_map_value_size(map); in generic_map_update_batch()
1383 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_update_batch()
1396 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_update_batch()
1397 map->key_size) || in generic_map_update_batch()
1401 err = bpf_map_update_value(map, f, key, value, in generic_map_update_batch()
1420 int generic_map_lookup_batch(struct bpf_map *map, in generic_map_lookup_batch() argument
1436 !map_value_has_spin_lock(map)) in generic_map_lookup_batch()
1439 value_size = bpf_map_value_size(map); in generic_map_lookup_batch()
1448 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
1452 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
1460 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) in generic_map_lookup_batch()
1463 value = key + map->key_size; in generic_map_lookup_batch()
1469 err = map->ops->map_get_next_key(map, prev_key, key); in generic_map_lookup_batch()
1473 err = bpf_map_copy_value(map, key, value, in generic_map_lookup_batch()
1488 if (copy_to_user(keys + cp * map->key_size, key, in generic_map_lookup_batch()
1489 map->key_size)) { in generic_map_lookup_batch()
1511 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) in generic_map_lookup_batch()
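
generic_map_lookup_batch() services BPF_MAP_LOOKUP_BATCH: it copies up to 'count' key/value pairs per call and hands back an opaque batch token to resume from. A minimal sketch, assuming libbpf's bpf_map_lookup_batch() wrapper and 4-byte keys with 8-byte values:

#include <bpf/bpf.h>
#include <errno.h>

#define BATCH_SZ 64

int dump_map(int map_fd)
{
        __u32 keys[BATCH_SZ];
        __u64 values[BATCH_SZ];
        __u32 out_batch, count;
        void *in_batch = NULL;                   /* NULL: start from the beginning */
        int err;

        do {
                count = BATCH_SZ;
                err = bpf_map_lookup_batch(map_fd, in_batch, &out_batch,
                                           keys, values, &count, NULL);
                /* 'count' now holds how many key/value pairs were copied */
                in_batch = &out_batch;           /* resume token for the next call */
        } while (!err);

        /* ENOENT marks the end of the map; anything else is a real error */
        return (err == -ENOENT || errno == ENOENT) ? 0 : err;
}
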
1527 struct bpf_map *map; in map_lookup_and_delete_elem() local
1540 map = __bpf_map_get(f); in map_lookup_and_delete_elem()
1541 if (IS_ERR(map)) in map_lookup_and_delete_elem()
1542 return PTR_ERR(map); in map_lookup_and_delete_elem()
1543 bpf_map_write_active_inc(map); in map_lookup_and_delete_elem()
1544 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || in map_lookup_and_delete_elem()
1545 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_lookup_and_delete_elem()
1551 (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
1552 map->map_type == BPF_MAP_TYPE_STACK)) { in map_lookup_and_delete_elem()
1558 !map_value_has_spin_lock(map)) { in map_lookup_and_delete_elem()
1563 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_and_delete_elem()
1569 value_size = bpf_map_value_size(map); in map_lookup_and_delete_elem()
1577 if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
1578 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_and_delete_elem()
1579 err = map->ops->map_pop_elem(map, value); in map_lookup_and_delete_elem()
1580 } else if (map->map_type == BPF_MAP_TYPE_HASH || in map_lookup_and_delete_elem()
1581 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_lookup_and_delete_elem()
1582 map->map_type == BPF_MAP_TYPE_LRU_HASH || in map_lookup_and_delete_elem()
1583 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in map_lookup_and_delete_elem()
1584 if (!bpf_map_is_dev_bound(map)) { in map_lookup_and_delete_elem()
1587 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); in map_lookup_and_delete_elem()
1608 bpf_map_write_active_dec(map); in map_lookup_and_delete_elem()
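
For queue and stack maps, map_lookup_and_delete_elem() ends up in map->ops->map_pop_elem(); these maps have no key, so the command is issued with a NULL key. A minimal raw-syscall sketch assuming an 8-byte value:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

int queue_pop(int queue_fd, __u64 *value_out)
{
        union bpf_attr attr = {
                .map_fd = queue_fd,
                .key    = 0,                               /* queues/stacks are keyless */
                .value  = (__u64)(unsigned long)value_out,
        };

        return syscall(__NR_bpf, BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}
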
1618 struct bpf_map *map; in map_freeze() local
1625 map = __bpf_map_get(f); in map_freeze()
1626 if (IS_ERR(map)) in map_freeze()
1627 return PTR_ERR(map); in map_freeze()
1629 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || in map_freeze()
1630 map_value_has_timer(map)) { in map_freeze()
1635 mutex_lock(&map->freeze_mutex); in map_freeze()
1636 if (bpf_map_write_active(map)) { in map_freeze()
1640 if (READ_ONCE(map->frozen)) { in map_freeze()
1649 WRITE_ONCE(map->frozen, true); in map_freeze()
1651 mutex_unlock(&map->freeze_mutex); in map_freeze()
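
map_freeze() implements BPF_MAP_FREEZE: once it succeeds, further writes from user space are rejected, and it fails with -EBUSY while a writable mmap or in-flight write keeps bpf_map_write_active() true. A minimal raw-syscall sketch:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

int freeze_map(int map_fd)
{
        union bpf_attr attr = { .map_fd = map_fd };

        return syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
}
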
3384 struct bpf_map *map; in bpf_map_get_curr_or_next() local
3388 map = idr_get_next(&map_idr, id); in bpf_map_get_curr_or_next()
3389 if (map) { in bpf_map_get_curr_or_next()
3390 map = __bpf_map_inc_not_zero(map, false); in bpf_map_get_curr_or_next()
3391 if (IS_ERR(map)) { in bpf_map_get_curr_or_next()
3398 return map; in bpf_map_get_curr_or_next()
3466 struct bpf_map *map; in bpf_map_get_fd_by_id() local
3483 map = idr_find(&map_idr, id); in bpf_map_get_fd_by_id()
3484 if (map) in bpf_map_get_fd_by_id()
3485 map = __bpf_map_inc_not_zero(map, true); in bpf_map_get_fd_by_id()
3487 map = ERR_PTR(-ENOENT); in bpf_map_get_fd_by_id()
3490 if (IS_ERR(map)) in bpf_map_get_fd_by_id()
3491 return PTR_ERR(map); in bpf_map_get_fd_by_id()
3493 fd = bpf_map_new_fd(map, f_flags); in bpf_map_get_fd_by_id()
3495 bpf_map_put_with_uref(map); in bpf_map_get_fd_by_id()
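
bpf_map_get_fd_by_id() pairs with BPF_MAP_GET_NEXT_ID to let privileged user space walk every map in the system by id. A minimal raw-syscall sketch (both commands require CAP_SYS_ADMIN); the callback is illustrative:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

void for_each_map_fd(void (*cb)(int fd))
{
        union bpf_attr attr = { .start_id = 0 };

        while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr))) {
                union bpf_attr get = { .map_id = attr.next_id };
                int fd = syscall(__NR_bpf, BPF_MAP_GET_FD_BY_ID, &get, sizeof(get));

                if (fd >= 0) {
                        cb(fd);
                        close(fd);
                }
                attr.start_id = attr.next_id;   /* continue from the last id seen */
        }
        /* the loop ends once GET_NEXT_ID fails with ENOENT */
}
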
3504 const struct bpf_map *map; in bpf_map_from_imm() local
3509 map = prog->aux->used_maps[i]; in bpf_map_from_imm()
3510 if (map == (void *)addr) { in bpf_map_from_imm()
3514 if (!map->ops->map_direct_value_meta) in bpf_map_from_imm()
3516 if (!map->ops->map_direct_value_meta(map, addr, off)) { in bpf_map_from_imm()
3521 map = NULL; in bpf_map_from_imm()
3525 return map; in bpf_map_from_imm()
3531 const struct bpf_map *map; in bpf_insn_prepare_dump() local
3568 map = bpf_map_from_imm(prog, imm, &off, &type); in bpf_insn_prepare_dump()
3569 if (map) { in bpf_insn_prepare_dump()
3571 insns[i].imm = map->id; in bpf_insn_prepare_dump()
3894 struct bpf_map *map, in bpf_map_get_info_by_fd() argument
3909 info.type = map->map_type; in bpf_map_get_info_by_fd()
3910 info.id = map->id; in bpf_map_get_info_by_fd()
3911 info.key_size = map->key_size; in bpf_map_get_info_by_fd()
3912 info.value_size = map->value_size; in bpf_map_get_info_by_fd()
3913 info.max_entries = map->max_entries; in bpf_map_get_info_by_fd()
3914 info.map_flags = map->map_flags; in bpf_map_get_info_by_fd()
3915 memcpy(info.name, map->name, sizeof(map->name)); in bpf_map_get_info_by_fd()
3917 if (map->btf) { in bpf_map_get_info_by_fd()
3918 info.btf_id = btf_obj_id(map->btf); in bpf_map_get_info_by_fd()
3919 info.btf_key_type_id = map->btf_key_type_id; in bpf_map_get_info_by_fd()
3920 info.btf_value_type_id = map->btf_value_type_id; in bpf_map_get_info_by_fd()
3922 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; in bpf_map_get_info_by_fd()
3924 if (bpf_map_is_dev_bound(map)) { in bpf_map_get_info_by_fd()
3925 err = bpf_map_offload_info_fill(&info, map); in bpf_map_get_info_by_fd()
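
bpf_map_get_info_by_fd() fills the struct bpf_map_info returned by BPF_OBJ_GET_INFO_BY_FD. A minimal sketch, assuming libbpf's bpf_obj_get_info_by_fd() wrapper:

#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>

int print_map_info(int map_fd)
{
        struct bpf_map_info info;
        __u32 len = sizeof(info);
        int err;

        memset(&info, 0, sizeof(info));
        err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
        if (err)
                return err;

        printf("map %u '%s': type %u key %u value %u max_entries %u flags 0x%x\n",
               info.id, info.name, info.type, info.key_size,
               info.value_size, info.max_entries, info.map_flags);
        return 0;
}
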
4179 err = fn(map, attr, uattr); \
4189 struct bpf_map *map; in bpf_map_do_batch() local
4198 map = __bpf_map_get(f); in bpf_map_do_batch()
4199 if (IS_ERR(map)) in bpf_map_do_batch()
4200 return PTR_ERR(map); in bpf_map_do_batch()
4202 bpf_map_write_active_inc(map); in bpf_map_do_batch()
4203 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in bpf_map_do_batch()
4207 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in bpf_map_do_batch()
4213 BPF_DO_BATCH(map->ops->map_lookup_batch); in bpf_map_do_batch()
4215 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); in bpf_map_do_batch()
4217 BPF_DO_BATCH(map->ops->map_update_batch); in bpf_map_do_batch()
4219 BPF_DO_BATCH(map->ops->map_delete_batch); in bpf_map_do_batch()
4222 bpf_map_write_active_dec(map); in bpf_map_do_batch()
4532 struct bpf_map *map; in bpf_prog_bind_map() local
4546 map = bpf_map_get(attr->prog_bind_map.map_fd); in bpf_prog_bind_map()
4547 if (IS_ERR(map)) { in bpf_prog_bind_map()
4548 ret = PTR_ERR(map); in bpf_prog_bind_map()
4557 if (used_maps_old[i] == map) { in bpf_prog_bind_map()
4558 bpf_map_put(map); in bpf_prog_bind_map()
4572 used_maps_new[prog->aux->used_map_cnt] = map; in bpf_prog_bind_map()
4583 bpf_map_put(map); in bpf_prog_bind_map()
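
bpf_prog_bind_map() serves the BPF_PROG_BIND_MAP command, which adds a map to a loaded program's used_maps[] so the map stays alive as long as the program does. A minimal raw-syscall sketch:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

int prog_bind_map(int prog_fd, int map_fd)
{
        union bpf_attr attr = {
                .prog_bind_map = {
                        .prog_fd = prog_fd,
                        .map_fd  = map_fd,
                        .flags   = 0,      /* no flags defined for this command here */
                },
        };

        return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}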