Lines Matching refs:map (references to the identifier `map` in the BPF map syscall code)
41 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ argument
42 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
43 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
44 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) argument
45 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) argument
46 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ argument
47 IS_FD_HASH(map))
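The IS_FD_* macros above classify "fd maps": maps whose syscall-facing values are file descriptors referring to other kernel objects (perf events, cgroups, other maps, or programs). As a hedged illustration, here is a userspace sketch of creating one such map, an array-of-maps, with libbpf's bpf_map_create(); the names, sizes, and entry counts are illustrative assumptions, not taken from the listing.

#include <bpf/bpf.h>

/* Sketch, not the kernel's code: build an ARRAY_OF_MAPS whose values are
 * fds of maps compatible with an inner template map. */
int create_outer_map(void)
{
        int inner_fd, outer_fd;

        /* Inner template: a plain array holding one __u64 per slot. */
        inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
                                  sizeof(__u32), sizeof(__u64), 1, NULL);
        if (inner_fd < 0)
                return inner_fd;

        /* Outer map: IS_FD_ARRAY() in the kernel, so its values are map fds
         * on update and map IDs on lookup. */
        LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
        outer_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
                                  sizeof(__u32), sizeof(__u32), 8, &opts);
        return outer_fd;
}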
116 struct bpf_map *map; in find_and_alloc_map() local
133 map = ops->map_alloc(attr); in find_and_alloc_map()
134 if (IS_ERR(map)) in find_and_alloc_map()
135 return map; in find_and_alloc_map()
136 map->ops = ops; in find_and_alloc_map()
137 map->map_type = type; in find_and_alloc_map()
138 return map; in find_and_alloc_map()
141 static void bpf_map_write_active_inc(struct bpf_map *map) in bpf_map_write_active_inc() argument
143 atomic64_inc(&map->writecnt); in bpf_map_write_active_inc()
146 static void bpf_map_write_active_dec(struct bpf_map *map) in bpf_map_write_active_dec() argument
148 atomic64_dec(&map->writecnt); in bpf_map_write_active_dec()
151 bool bpf_map_write_active(const struct bpf_map *map) in bpf_map_write_active() argument
153 return atomic64_read(&map->writecnt) != 0; in bpf_map_write_active()
156 static u32 bpf_map_value_size(const struct bpf_map *map) in bpf_map_value_size() argument
158 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_value_size()
159 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in bpf_map_value_size()
160 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in bpf_map_value_size()
161 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in bpf_map_value_size()
162 return round_up(map->value_size, 8) * num_possible_cpus(); in bpf_map_value_size()
163 else if (IS_FD_MAP(map)) in bpf_map_value_size()
166 return map->value_size; in bpf_map_value_size()
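bpf_map_value_size() shows that for per-CPU map types the syscall layer expects a value buffer of round_up(value_size, 8) * num_possible_cpus() bytes. A hedged userspace counterpart (alloc_percpu_value() and its parameters are assumptions for illustration):

#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: allocate a lookup/update buffer sized the way the kernel sizes
 * values for BPF_MAP_TYPE_PERCPU_* maps. */
void *alloc_percpu_value(__u32 value_size)
{
        int ncpus = libbpf_num_possible_cpus();
        size_t per_cpu = ((size_t)value_size + 7) & ~(size_t)7; /* round_up(.., 8) */

        if (ncpus < 0)
                return NULL;
        return calloc(ncpus, per_cpu);
}

The returned buffer is what bpf_map_lookup_elem() fills for per-CPU maps: one 8-byte-aligned slot per possible CPU.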
169 static void maybe_wait_bpf_programs(struct bpf_map *map) in maybe_wait_bpf_programs() argument
175 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || in maybe_wait_bpf_programs()
176 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) in maybe_wait_bpf_programs()
180 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, in bpf_map_update_value() argument
186 if (bpf_map_is_dev_bound(map)) { in bpf_map_update_value()
187 return bpf_map_offload_update_elem(map, key, value, flags); in bpf_map_update_value()
188 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || in bpf_map_update_value()
189 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_update_value()
190 return map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
191 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || in bpf_map_update_value()
192 map->map_type == BPF_MAP_TYPE_SOCKMAP) { in bpf_map_update_value()
193 return sock_map_update_elem_sys(map, key, value, flags); in bpf_map_update_value()
194 } else if (IS_FD_PROG_ARRAY(map)) { in bpf_map_update_value()
195 return bpf_fd_array_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
200 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_update_value()
201 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_update_value()
202 err = bpf_percpu_hash_update(map, key, value, flags); in bpf_map_update_value()
203 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_update_value()
204 err = bpf_percpu_array_update(map, key, value, flags); in bpf_map_update_value()
205 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_update_value()
206 err = bpf_percpu_cgroup_storage_update(map, key, value, in bpf_map_update_value()
208 } else if (IS_FD_ARRAY(map)) { in bpf_map_update_value()
210 err = bpf_fd_array_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
213 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in bpf_map_update_value()
215 err = bpf_fd_htab_map_update_elem(map, f.file, key, value, in bpf_map_update_value()
218 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_update_value()
220 err = bpf_fd_reuseport_array_update_elem(map, key, value, in bpf_map_update_value()
222 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_update_value()
223 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_update_value()
224 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_update_value()
225 err = map->ops->map_push_elem(map, value, flags); in bpf_map_update_value()
228 err = map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
232 maybe_wait_bpf_programs(map); in bpf_map_update_value()
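bpf_map_update_value() dispatches on map type and honours the update flags passed through the syscall; queue, stack, and bloom-filter maps route updates to their push operation. A hedged userspace sketch of the flag semantics (update_or_create(), map_fd, key, and val are illustrative assumptions):

#include <bpf/bpf.h>

/* Sketch: BPF_NOEXIST fails if the key is already present, BPF_EXIST fails
 * if it is not, BPF_ANY accepts either case. */
int update_or_create(int map_fd, __u32 key, __u64 val)
{
        int err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);

        if (err)
                err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
        return err;
}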
237 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, in bpf_map_copy_value() argument
243 if (bpf_map_is_dev_bound(map)) in bpf_map_copy_value()
244 return bpf_map_offload_lookup_elem(map, key, value); in bpf_map_copy_value()
247 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_copy_value()
248 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_copy_value()
249 err = bpf_percpu_hash_copy(map, key, value); in bpf_map_copy_value()
250 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_copy_value()
251 err = bpf_percpu_array_copy(map, key, value); in bpf_map_copy_value()
252 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_copy_value()
253 err = bpf_percpu_cgroup_storage_copy(map, key, value); in bpf_map_copy_value()
254 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in bpf_map_copy_value()
255 err = bpf_stackmap_copy(map, key, value); in bpf_map_copy_value()
256 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { in bpf_map_copy_value()
257 err = bpf_fd_array_map_lookup_elem(map, key, value); in bpf_map_copy_value()
258 } else if (IS_FD_HASH(map)) { in bpf_map_copy_value()
259 err = bpf_fd_htab_map_lookup_elem(map, key, value); in bpf_map_copy_value()
260 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_copy_value()
261 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); in bpf_map_copy_value()
262 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_copy_value()
263 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_copy_value()
264 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_copy_value()
265 err = map->ops->map_peek_elem(map, value); in bpf_map_copy_value()
266 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_copy_value()
268 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); in bpf_map_copy_value()
271 if (map->ops->map_lookup_elem_sys_only) in bpf_map_copy_value()
272 ptr = map->ops->map_lookup_elem_sys_only(map, key); in bpf_map_copy_value()
274 ptr = map->ops->map_lookup_elem(map, key); in bpf_map_copy_value()
283 copy_map_value_locked(map, value, ptr, true); in bpf_map_copy_value()
285 copy_map_value(map, value, ptr); in bpf_map_copy_value()
287 check_and_init_map_value(map, value); in bpf_map_copy_value()
293 maybe_wait_bpf_programs(map); in bpf_map_copy_value()
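bpf_map_copy_value() is the lookup-side mirror: per-CPU and fd maps get dedicated copy helpers, queue/stack/bloom maps are peeked, and copy_map_value_locked() is used when the caller requests a locked copy. A hedged sketch of requesting that locked copy from userspace (read_locked() and its arguments are assumptions; the map's BTF value type must contain a struct bpf_spin_lock):

#include <bpf/bpf.h>

/* Sketch: copy the value while holding its bpf_spin_lock in the kernel;
 * the lock field itself is not copied out. */
int read_locked(int map_fd, __u32 key, void *value)
{
        return bpf_map_lookup_elem_flags(map_fd, &key, value, BPF_F_LOCK);
}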
366 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) in bpf_map_init_from_attr() argument
368 map->map_type = attr->map_type; in bpf_map_init_from_attr()
369 map->key_size = attr->key_size; in bpf_map_init_from_attr()
370 map->value_size = attr->value_size; in bpf_map_init_from_attr()
371 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
372 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); in bpf_map_init_from_attr()
373 map->numa_node = bpf_map_attr_numa_node(attr); in bpf_map_init_from_attr()
374 map->map_extra = attr->map_extra; in bpf_map_init_from_attr()
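bpf_map_init_from_attr() copies these fields verbatim from the BPF_MAP_CREATE attributes supplied by userspace. A hedged raw-syscall sketch showing where they originate (raw_map_create() and the concrete sizes/flags are illustrative assumptions):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Sketch: the attr fields below are the ones bpf_map_init_from_attr()
 * later copies into struct bpf_map. */
int raw_map_create(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_HASH;
        attr.key_size    = sizeof(__u32);
        attr.value_size  = sizeof(__u64);
        attr.max_entries = 1024;
        attr.map_flags   = BPF_F_NO_PREALLOC;
        /* attr.numa_node and attr.map_extra are read here as well when set. */

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}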
377 static int bpf_map_alloc_id(struct bpf_map *map) in bpf_map_alloc_id() argument
383 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); in bpf_map_alloc_id()
385 map->id = id; in bpf_map_alloc_id()
395 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) in bpf_map_free_id() argument
404 if (!map->id) in bpf_map_free_id()
412 idr_remove(&map_idr, map->id); in bpf_map_free_id()
413 map->id = 0; in bpf_map_free_id()
422 static void bpf_map_save_memcg(struct bpf_map *map) in bpf_map_save_memcg() argument
429 map->objcg = get_obj_cgroup_from_current(); in bpf_map_save_memcg()
432 static void bpf_map_release_memcg(struct bpf_map *map) in bpf_map_release_memcg() argument
434 if (map->objcg) in bpf_map_release_memcg()
435 obj_cgroup_put(map->objcg); in bpf_map_release_memcg()
438 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map) in bpf_map_get_memcg() argument
440 if (map->objcg) in bpf_map_get_memcg()
441 return get_mem_cgroup_from_objcg(map->objcg); in bpf_map_get_memcg()
446 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, in bpf_map_kmalloc_node() argument
452 memcg = bpf_map_get_memcg(map); in bpf_map_kmalloc_node()
461 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) in bpf_map_kzalloc() argument
466 memcg = bpf_map_get_memcg(map); in bpf_map_kzalloc()
475 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, in bpf_map_alloc_percpu() argument
481 memcg = bpf_map_get_memcg(map); in bpf_map_alloc_percpu()
491 static void bpf_map_save_memcg(struct bpf_map *map) in bpf_map_save_memcg() argument
495 static void bpf_map_release_memcg(struct bpf_map *map) in bpf_map_release_memcg() argument
511 struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset) in bpf_map_kptr_off_contains() argument
519 if (!map_value_has_kptrs(map)) in bpf_map_kptr_off_contains()
521 tab = map->kptr_off_tab; in bpf_map_kptr_off_contains()
525 void bpf_map_free_kptr_off_tab(struct bpf_map *map) in bpf_map_free_kptr_off_tab() argument
527 struct bpf_map_value_off *tab = map->kptr_off_tab; in bpf_map_free_kptr_off_tab()
530 if (!map_value_has_kptrs(map)) in bpf_map_free_kptr_off_tab()
538 map->kptr_off_tab = NULL; in bpf_map_free_kptr_off_tab()
541 struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map) in bpf_map_copy_kptr_off_tab() argument
543 struct bpf_map_value_off *tab = map->kptr_off_tab, *new_tab; in bpf_map_copy_kptr_off_tab()
546 if (!map_value_has_kptrs(map)) in bpf_map_copy_kptr_off_tab()
589 void bpf_map_free_kptrs(struct bpf_map *map, void *map_value) in bpf_map_free_kptrs() argument
591 struct bpf_map_value_off *tab = map->kptr_off_tab; in bpf_map_free_kptrs()
614 struct bpf_map *map = container_of(work, struct bpf_map, work); in bpf_map_free_deferred() local
616 security_bpf_map_free(map); in bpf_map_free_deferred()
617 kfree(map->off_arr); in bpf_map_free_deferred()
618 bpf_map_release_memcg(map); in bpf_map_free_deferred()
622 map->ops->map_free(map); in bpf_map_free_deferred()
625 static void bpf_map_put_uref(struct bpf_map *map) in bpf_map_put_uref() argument
627 if (atomic64_dec_and_test(&map->usercnt)) { in bpf_map_put_uref()
628 if (map->ops->map_release_uref) in bpf_map_put_uref()
629 map->ops->map_release_uref(map); in bpf_map_put_uref()
636 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) in __bpf_map_put() argument
638 if (atomic64_dec_and_test(&map->refcnt)) { in __bpf_map_put()
640 bpf_map_free_id(map, do_idr_lock); in __bpf_map_put()
641 btf_put(map->btf); in __bpf_map_put()
642 INIT_WORK(&map->work, bpf_map_free_deferred); in __bpf_map_put()
646 queue_work(system_unbound_wq, &map->work); in __bpf_map_put()
650 void bpf_map_put(struct bpf_map *map) in bpf_map_put() argument
652 __bpf_map_put(map, true); in bpf_map_put()
656 void bpf_map_put_with_uref(struct bpf_map *map) in bpf_map_put_with_uref() argument
658 bpf_map_put_uref(map); in bpf_map_put_with_uref()
659 bpf_map_put(map); in bpf_map_put_with_uref()
664 struct bpf_map *map = filp->private_data; in bpf_map_release() local
666 if (map->ops->map_release) in bpf_map_release()
667 map->ops->map_release(map, filp); in bpf_map_release()
669 bpf_map_put_with_uref(map); in bpf_map_release()
673 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) in map_get_sys_perms() argument
680 if (READ_ONCE(map->frozen)) in map_get_sys_perms()
690 static unsigned long bpf_map_memory_footprint(const struct bpf_map *map) in bpf_map_memory_footprint() argument
694 size = round_up(map->key_size + bpf_map_value_size(map), 8); in bpf_map_memory_footprint()
696 return round_up(map->max_entries * size, PAGE_SIZE); in bpf_map_memory_footprint()
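bpf_map_memory_footprint() is only an estimate: one 8-byte-rounded key+value element per max_entries slot, rounded up to a page. A hedged userspace replica of the arithmetic (footprint() is an assumption; for per-CPU maps value_size should be the expanded per-CPU size):

#include <unistd.h>

/* Sketch: same arithmetic as bpf_map_memory_footprint(), done in userspace. */
static unsigned long footprint(unsigned long key_size, unsigned long value_size,
                               unsigned long max_entries)
{
        unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);
        unsigned long elem = (key_size + value_size + 7) & ~7UL;

        return (max_entries * elem + page - 1) & ~(page - 1);
}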
701 struct bpf_map *map = filp->private_data; in bpf_map_show_fdinfo() local
704 if (map_type_contains_progs(map)) { in bpf_map_show_fdinfo()
705 spin_lock(&map->owner.lock); in bpf_map_show_fdinfo()
706 type = map->owner.type; in bpf_map_show_fdinfo()
707 jited = map->owner.jited; in bpf_map_show_fdinfo()
708 spin_unlock(&map->owner.lock); in bpf_map_show_fdinfo()
721 map->map_type, in bpf_map_show_fdinfo()
722 map->key_size, in bpf_map_show_fdinfo()
723 map->value_size, in bpf_map_show_fdinfo()
724 map->max_entries, in bpf_map_show_fdinfo()
725 map->map_flags, in bpf_map_show_fdinfo()
726 (unsigned long long)map->map_extra, in bpf_map_show_fdinfo()
727 bpf_map_memory_footprint(map), in bpf_map_show_fdinfo()
728 map->id, in bpf_map_show_fdinfo()
729 READ_ONCE(map->frozen)); in bpf_map_show_fdinfo()
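bpf_map_show_fdinfo() backs /proc/<pid>/fdinfo/<map fd>, so the same fields can be read back as text. A hedged sketch (dump_map_fdinfo() is an assumption; map_fd must be an open BPF map fd in the current process):

#include <stdio.h>

/* Sketch: print the fdinfo text produced by bpf_map_show_fdinfo(). */
void dump_map_fdinfo(int map_fd)
{
        char path[64], line[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", map_fd);
        f = fopen(path, "r");
        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* map_type, key_size, ..., frozen */
        fclose(f);
}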
758 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_open() local
761 bpf_map_write_active_inc(map); in bpf_map_mmap_open()
767 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_close() local
770 bpf_map_write_active_dec(map); in bpf_map_mmap_close()
780 struct bpf_map *map = filp->private_data; in bpf_map_mmap() local
783 if (!map->ops->map_mmap || map_value_has_spin_lock(map) || in bpf_map_mmap()
784 map_value_has_timer(map) || map_value_has_kptrs(map)) in bpf_map_mmap()
790 mutex_lock(&map->freeze_mutex); in bpf_map_mmap()
793 if (map->frozen) { in bpf_map_mmap()
802 if (map->map_flags & BPF_F_RDONLY_PROG) { in bpf_map_mmap()
810 vma->vm_private_data = map; in bpf_map_mmap()
816 err = map->ops->map_mmap(map, vma); in bpf_map_mmap()
821 bpf_map_write_active_inc(map); in bpf_map_mmap()
823 mutex_unlock(&map->freeze_mutex); in bpf_map_mmap()
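bpf_map_mmap() refuses maps whose values carry spin locks, timers, or kptrs, rejects writable mappings of frozen or BPF_F_RDONLY_PROG maps, and counts writable mappings in the write-active counter. A hedged sketch of the userspace side for a BPF_F_MMAPABLE array (map_array() and its sizes are assumptions):

#include <sys/mman.h>
#include <bpf/bpf.h>

/* Sketch: create an mmapable array and map it read-write; the writable
 * mapping holds the map's write-active count until it is unmapped. */
__u64 *map_array(int *out_fd, __u32 nr_entries)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
        int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmapable",
                                sizeof(__u32), sizeof(__u64), nr_entries, &opts);
        void *mem;

        if (fd < 0)
                return NULL;
        *out_fd = fd;
        mem = mmap(NULL, nr_entries * sizeof(__u64), PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, 0);
        return mem == MAP_FAILED ? NULL : mem;
}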
829 struct bpf_map *map = filp->private_data; in bpf_map_poll() local
831 if (map->ops->map_poll) in bpf_map_poll()
832 return map->ops->map_poll(map, filp, pts); in bpf_map_poll()
848 int bpf_map_new_fd(struct bpf_map *map, int flags) in bpf_map_new_fd() argument
852 ret = security_bpf_map(map, OPEN_FMODE(flags)); in bpf_map_new_fd()
856 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, in bpf_map_new_fd()
903 int map_check_no_btf(const struct bpf_map *map, in map_check_no_btf() argument
925 struct bpf_map *map = (struct bpf_map *)priv; in map_off_arr_swap() local
926 u32 *off_base = map->off_arr->field_off; in map_off_arr_swap()
930 sz_a = map->off_arr->field_sz + (a - off_base); in map_off_arr_swap()
931 sz_b = map->off_arr->field_sz + (b - off_base); in map_off_arr_swap()
937 static int bpf_map_alloc_off_arr(struct bpf_map *map) in bpf_map_alloc_off_arr() argument
939 bool has_spin_lock = map_value_has_spin_lock(map); in bpf_map_alloc_off_arr()
940 bool has_timer = map_value_has_timer(map); in bpf_map_alloc_off_arr()
941 bool has_kptrs = map_value_has_kptrs(map); in bpf_map_alloc_off_arr()
946 map->off_arr = NULL; in bpf_map_alloc_off_arr()
950 off_arr = kmalloc(sizeof(*map->off_arr), GFP_KERNEL | __GFP_NOWARN); in bpf_map_alloc_off_arr()
953 map->off_arr = off_arr; in bpf_map_alloc_off_arr()
959 off_arr->field_off[i] = map->spin_lock_off; in bpf_map_alloc_off_arr()
966 off_arr->field_off[i] = map->timer_off; in bpf_map_alloc_off_arr()
971 struct bpf_map_value_off *tab = map->kptr_off_tab; in bpf_map_alloc_off_arr()
985 map_off_arr_cmp, map_off_arr_swap, map); in bpf_map_alloc_off_arr()
989 static int map_check_btf(struct bpf_map *map, const struct btf *btf, in map_check_btf() argument
999 if (!key_type || key_size != map->key_size) in map_check_btf()
1003 if (!map->ops->map_check_btf) in map_check_btf()
1008 if (!value_type || value_size != map->value_size) in map_check_btf()
1011 map->spin_lock_off = btf_find_spin_lock(btf, value_type); in map_check_btf()
1013 if (map_value_has_spin_lock(map)) { in map_check_btf()
1014 if (map->map_flags & BPF_F_RDONLY_PROG) in map_check_btf()
1016 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1017 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1018 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in map_check_btf()
1019 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1020 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1021 map->map_type != BPF_MAP_TYPE_TASK_STORAGE) in map_check_btf()
1023 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > in map_check_btf()
1024 map->value_size) { in map_check_btf()
1027 map->spin_lock_off, map->value_size); in map_check_btf()
1032 map->timer_off = btf_find_timer(btf, value_type); in map_check_btf()
1033 if (map_value_has_timer(map)) { in map_check_btf()
1034 if (map->map_flags & BPF_F_RDONLY_PROG) in map_check_btf()
1036 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1037 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1038 map->map_type != BPF_MAP_TYPE_ARRAY) in map_check_btf()
1042 map->kptr_off_tab = btf_parse_kptrs(btf, value_type); in map_check_btf()
1043 if (map_value_has_kptrs(map)) { in map_check_btf()
1048 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) { in map_check_btf()
1052 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1053 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1054 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1055 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY) { in map_check_btf()
1061 if (map->ops->map_check_btf) { in map_check_btf()
1062 ret = map->ops->map_check_btf(map, btf, key_type, value_type); in map_check_btf()
1069 bpf_map_free_kptr_off_tab(map); in map_check_btf()
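map_check_btf() locates bpf_spin_lock, bpf_timer, and kptr fields in the BTF of the value type and restricts which map types and flags may carry them. A hedged BPF-program-side sketch of a value layout those checks accept for a hash map (the struct, field names, and map definition are illustrative assumptions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Sketch: both special fields must lie inside value_size, and the map must
 * not use BPF_F_RDONLY_PROG. */
struct val {
        struct bpf_spin_lock lock;      /* found by btf_find_spin_lock() */
        struct bpf_timer timer;         /* found by btf_find_timer() */
        __u64 counter;
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 128);
        __type(key, __u32);
        __type(value, struct val);
} vals SEC(".maps");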
1078 struct bpf_map *map; in map_create() local
1108 map = find_and_alloc_map(attr); in map_create()
1109 if (IS_ERR(map)) in map_create()
1110 return PTR_ERR(map); in map_create()
1112 err = bpf_obj_name_cpy(map->name, attr->map_name, in map_create()
1117 atomic64_set(&map->refcnt, 1); in map_create()
1118 atomic64_set(&map->usercnt, 1); in map_create()
1119 mutex_init(&map->freeze_mutex); in map_create()
1120 spin_lock_init(&map->owner.lock); in map_create()
1122 map->spin_lock_off = -EINVAL; in map_create()
1123 map->timer_off = -EINVAL; in map_create()
1144 map->btf = btf; in map_create()
1147 err = map_check_btf(map, btf, attr->btf_key_type_id, in map_create()
1153 map->btf_key_type_id = attr->btf_key_type_id; in map_create()
1154 map->btf_value_type_id = attr->btf_value_type_id; in map_create()
1155 map->btf_vmlinux_value_type_id = in map_create()
1159 err = bpf_map_alloc_off_arr(map); in map_create()
1163 err = security_bpf_map_alloc(map); in map_create()
1167 err = bpf_map_alloc_id(map); in map_create()
1171 bpf_map_save_memcg(map); in map_create()
1173 err = bpf_map_new_fd(map, f_flags); in map_create()
1181 bpf_map_put_with_uref(map); in map_create()
1188 security_bpf_map_free(map); in map_create()
1190 kfree(map->off_arr); in map_create()
1192 btf_put(map->btf); in map_create()
1193 map->ops->map_free(map); in map_create()
1212 void bpf_map_inc(struct bpf_map *map) in bpf_map_inc() argument
1214 atomic64_inc(&map->refcnt); in bpf_map_inc()
1218 void bpf_map_inc_with_uref(struct bpf_map *map) in bpf_map_inc_with_uref() argument
1220 atomic64_inc(&map->refcnt); in bpf_map_inc_with_uref()
1221 atomic64_inc(&map->usercnt); in bpf_map_inc_with_uref()
1228 struct bpf_map *map; in bpf_map_get() local
1230 map = __bpf_map_get(f); in bpf_map_get()
1231 if (IS_ERR(map)) in bpf_map_get()
1232 return map; in bpf_map_get()
1234 bpf_map_inc(map); in bpf_map_get()
1237 return map; in bpf_map_get()
1244 struct bpf_map *map; in bpf_map_get_with_uref() local
1246 map = __bpf_map_get(f); in bpf_map_get_with_uref()
1247 if (IS_ERR(map)) in bpf_map_get_with_uref()
1248 return map; in bpf_map_get_with_uref()
1250 bpf_map_inc_with_uref(map); in bpf_map_get_with_uref()
1253 return map; in bpf_map_get_with_uref()
1257 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) in __bpf_map_inc_not_zero() argument
1261 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); in __bpf_map_inc_not_zero()
1265 atomic64_inc(&map->usercnt); in __bpf_map_inc_not_zero()
1267 return map; in __bpf_map_inc_not_zero()
1270 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) in bpf_map_inc_not_zero() argument
1273 map = __bpf_map_inc_not_zero(map, false); in bpf_map_inc_not_zero()
1276 return map; in bpf_map_inc_not_zero()
1280 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) in bpf_stackmap_copy() argument
1315 struct bpf_map *map; in map_lookup_elem() local
1328 map = __bpf_map_get(f); in map_lookup_elem()
1329 if (IS_ERR(map)) in map_lookup_elem()
1330 return PTR_ERR(map); in map_lookup_elem()
1331 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in map_lookup_elem()
1337 !map_value_has_spin_lock(map)) { in map_lookup_elem()
1342 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_elem()
1348 value_size = bpf_map_value_size(map); in map_lookup_elem()
1355 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in map_lookup_elem()
1359 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1363 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1390 struct bpf_map *map; in map_update_elem() local
1400 map = __bpf_map_get(f); in map_update_elem()
1401 if (IS_ERR(map)) in map_update_elem()
1402 return PTR_ERR(map); in map_update_elem()
1403 bpf_map_write_active_inc(map); in map_update_elem()
1404 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_update_elem()
1410 !map_value_has_spin_lock(map)) { in map_update_elem()
1415 key = ___bpf_copy_key(ukey, map->key_size); in map_update_elem()
1421 value_size = bpf_map_value_size(map); in map_update_elem()
1428 err = bpf_map_update_value(map, f, key, value, attr->flags); in map_update_elem()
1434 bpf_map_write_active_dec(map); in map_update_elem()
1445 struct bpf_map *map; in map_delete_elem() local
1454 map = __bpf_map_get(f); in map_delete_elem()
1455 if (IS_ERR(map)) in map_delete_elem()
1456 return PTR_ERR(map); in map_delete_elem()
1457 bpf_map_write_active_inc(map); in map_delete_elem()
1458 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_delete_elem()
1463 key = ___bpf_copy_key(ukey, map->key_size); in map_delete_elem()
1469 if (bpf_map_is_dev_bound(map)) { in map_delete_elem()
1470 err = bpf_map_offload_delete_elem(map, key); in map_delete_elem()
1472 } else if (IS_FD_PROG_ARRAY(map) || in map_delete_elem()
1473 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in map_delete_elem()
1475 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1481 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1484 maybe_wait_bpf_programs(map); in map_delete_elem()
1488 bpf_map_write_active_dec(map); in map_delete_elem()
1501 struct bpf_map *map; in map_get_next_key() local
1510 map = __bpf_map_get(f); in map_get_next_key()
1511 if (IS_ERR(map)) in map_get_next_key()
1512 return PTR_ERR(map); in map_get_next_key()
1513 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in map_get_next_key()
1519 key = __bpf_copy_key(ukey, map->key_size); in map_get_next_key()
1529 next_key = kvmalloc(map->key_size, GFP_USER); in map_get_next_key()
1533 if (bpf_map_is_dev_bound(map)) { in map_get_next_key()
1534 err = bpf_map_offload_get_next_key(map, key, next_key); in map_get_next_key()
1539 err = map->ops->map_get_next_key(map, key, next_key); in map_get_next_key()
1546 if (copy_to_user(unext_key, next_key, map->key_size) != 0) in map_get_next_key()
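map_get_next_key() is the building block for iterating a map from userspace: a NULL key yields the first key, and the last key yields -ENOENT. A hedged sketch of the usual loop (for_each_key() and the __u32 key type are assumptions):

#include <bpf/bpf.h>

/* Sketch: visit every key currently in the map. */
int for_each_key(int map_fd, void (*cb)(__u32 key))
{
        __u32 cur, next;
        void *prev = NULL;              /* NULL: ask for the first key */

        while (!bpf_map_get_next_key(map_fd, prev, &next)) {
                cb(next);
                cur = next;
                prev = &cur;
        }
        return 0;
}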
1560 int generic_map_delete_batch(struct bpf_map *map, in generic_map_delete_batch() argument
1573 !map_value_has_spin_lock(map)) { in generic_map_delete_batch()
1584 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_delete_batch()
1590 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_delete_batch()
1591 map->key_size)) in generic_map_delete_batch()
1594 if (bpf_map_is_dev_bound(map)) { in generic_map_delete_batch()
1595 err = bpf_map_offload_delete_elem(map, key); in generic_map_delete_batch()
1601 err = map->ops->map_delete_elem(map, key); in generic_map_delete_batch()
1613 maybe_wait_bpf_programs(map); in generic_map_delete_batch()
1617 int generic_map_update_batch(struct bpf_map *map, in generic_map_update_batch() argument
1633 !map_value_has_spin_lock(map)) { in generic_map_update_batch()
1637 value_size = bpf_map_value_size(map); in generic_map_update_batch()
1646 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_update_batch()
1659 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_update_batch()
1660 map->key_size) || in generic_map_update_batch()
1664 err = bpf_map_update_value(map, f, key, value, in generic_map_update_batch()
1683 int generic_map_lookup_batch(struct bpf_map *map, in generic_map_lookup_batch() argument
1699 !map_value_has_spin_lock(map)) in generic_map_lookup_batch()
1702 value_size = bpf_map_value_size(map); in generic_map_lookup_batch()
1711 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
1715 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
1723 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) in generic_map_lookup_batch()
1726 value = key + map->key_size; in generic_map_lookup_batch()
1732 err = map->ops->map_get_next_key(map, prev_key, key); in generic_map_lookup_batch()
1736 err = bpf_map_copy_value(map, key, value, in generic_map_lookup_batch()
1751 if (copy_to_user(keys + cp * map->key_size, key, in generic_map_lookup_batch()
1752 map->key_size)) { in generic_map_lookup_batch()
1774 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) in generic_map_lookup_batch()
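generic_map_lookup_batch() serves the BPF_MAP_LOOKUP_BATCH command: userspace passes key and value arrays plus an opaque batch token, and iteration ends with ENOENT. A hedged sketch of driving it through libbpf (dump_batch() and the __u32/__u64 element types are assumptions):

#include <errno.h>
#include <bpf/bpf.h>

/* Sketch: read the whole map in batches of up to `max` elements. */
int dump_batch(int map_fd, __u32 *keys, __u64 *vals, __u32 max)
{
        __u32 in_batch, out_batch, count;
        void *in = NULL;                /* NULL: start from the beginning */
        int err;

        do {
                count = max;
                err = bpf_map_lookup_batch(map_fd, in, &out_batch,
                                           keys, vals, &count, NULL);
                /* ... process `count` key/value pairs here ... */
                in_batch = out_batch;
                in = &in_batch;
        } while (!err);

        return errno == ENOENT ? 0 : err;       /* ENOENT: iteration finished */
}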
1790 struct bpf_map *map; in map_lookup_and_delete_elem() local
1803 map = __bpf_map_get(f); in map_lookup_and_delete_elem()
1804 if (IS_ERR(map)) in map_lookup_and_delete_elem()
1805 return PTR_ERR(map); in map_lookup_and_delete_elem()
1806 bpf_map_write_active_inc(map); in map_lookup_and_delete_elem()
1807 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || in map_lookup_and_delete_elem()
1808 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_lookup_and_delete_elem()
1814 (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
1815 map->map_type == BPF_MAP_TYPE_STACK)) { in map_lookup_and_delete_elem()
1821 !map_value_has_spin_lock(map)) { in map_lookup_and_delete_elem()
1826 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_and_delete_elem()
1832 value_size = bpf_map_value_size(map); in map_lookup_and_delete_elem()
1840 if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
1841 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_and_delete_elem()
1842 err = map->ops->map_pop_elem(map, value); in map_lookup_and_delete_elem()
1843 } else if (map->map_type == BPF_MAP_TYPE_HASH || in map_lookup_and_delete_elem()
1844 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_lookup_and_delete_elem()
1845 map->map_type == BPF_MAP_TYPE_LRU_HASH || in map_lookup_and_delete_elem()
1846 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in map_lookup_and_delete_elem()
1847 if (!bpf_map_is_dev_bound(map)) { in map_lookup_and_delete_elem()
1850 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); in map_lookup_and_delete_elem()
1871 bpf_map_write_active_dec(map); in map_lookup_and_delete_elem()
1881 struct bpf_map *map; in map_freeze() local
1888 map = __bpf_map_get(f); in map_freeze()
1889 if (IS_ERR(map)) in map_freeze()
1890 return PTR_ERR(map); in map_freeze()
1892 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || in map_freeze()
1893 map_value_has_timer(map) || map_value_has_kptrs(map)) { in map_freeze()
1898 mutex_lock(&map->freeze_mutex); in map_freeze()
1899 if (bpf_map_write_active(map)) { in map_freeze()
1903 if (READ_ONCE(map->frozen)) { in map_freeze()
1912 WRITE_ONCE(map->frozen, true); in map_freeze()
1914 mutex_unlock(&map->freeze_mutex); in map_freeze()
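map_freeze() makes further syscall-side writes fail while the map stays usable by programs (unless BPF_F_RDONLY_PROG was also set); it refuses struct_ops maps, values with timers or kptrs, and maps with an active writer. A hedged userspace sketch (make_frozen() is an assumption):

#include <bpf/bpf.h>

/* Sketch: issue BPF_MAP_FREEZE; the kernel rejects it with EBUSY while a
 * writable mapping or in-flight write keeps the write-active count nonzero. */
int make_frozen(int map_fd)
{
        return bpf_map_freeze(map_fd);
}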
3686 struct bpf_map *map; in bpf_map_get_curr_or_next() local
3690 map = idr_get_next(&map_idr, id); in bpf_map_get_curr_or_next()
3691 if (map) { in bpf_map_get_curr_or_next()
3692 map = __bpf_map_inc_not_zero(map, false); in bpf_map_get_curr_or_next()
3693 if (IS_ERR(map)) { in bpf_map_get_curr_or_next()
3700 return map; in bpf_map_get_curr_or_next()
3768 struct bpf_map *map; in bpf_map_get_fd_by_id() local
3785 map = idr_find(&map_idr, id); in bpf_map_get_fd_by_id()
3786 if (map) in bpf_map_get_fd_by_id()
3787 map = __bpf_map_inc_not_zero(map, true); in bpf_map_get_fd_by_id()
3789 map = ERR_PTR(-ENOENT); in bpf_map_get_fd_by_id()
3792 if (IS_ERR(map)) in bpf_map_get_fd_by_id()
3793 return PTR_ERR(map); in bpf_map_get_fd_by_id()
3795 fd = bpf_map_new_fd(map, f_flags); in bpf_map_get_fd_by_id()
3797 bpf_map_put_with_uref(map); in bpf_map_get_fd_by_id()
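bpf_map_get_fd_by_id() pairs with BPF_MAP_GET_NEXT_ID to let privileged userspace walk every map in the system by id. A hedged sketch (for_each_map_fd() is an assumption; both commands typically require CAP_SYS_ADMIN):

#include <unistd.h>
#include <bpf/bpf.h>

/* Sketch: iterate the global map id space and open an fd for each map. */
void for_each_map_fd(void (*cb)(int fd))
{
        __u32 id = 0;

        while (!bpf_map_get_next_id(id, &id)) {
                int fd = bpf_map_get_fd_by_id(id);

                if (fd < 0)
                        continue;       /* the map may have gone away already */
                cb(fd);
                close(fd);
        }
}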
3806 const struct bpf_map *map; in bpf_map_from_imm() local
3811 map = prog->aux->used_maps[i]; in bpf_map_from_imm()
3812 if (map == (void *)addr) { in bpf_map_from_imm()
3816 if (!map->ops->map_direct_value_meta) in bpf_map_from_imm()
3818 if (!map->ops->map_direct_value_meta(map, addr, off)) { in bpf_map_from_imm()
3823 map = NULL; in bpf_map_from_imm()
3827 return map; in bpf_map_from_imm()
3833 const struct bpf_map *map; in bpf_insn_prepare_dump() local
3870 map = bpf_map_from_imm(prog, imm, &off, &type); in bpf_insn_prepare_dump()
3871 if (map) { in bpf_insn_prepare_dump()
3873 insns[i].imm = map->id; in bpf_insn_prepare_dump()
4203 struct bpf_map *map, in bpf_map_get_info_by_fd() argument
4218 info.type = map->map_type; in bpf_map_get_info_by_fd()
4219 info.id = map->id; in bpf_map_get_info_by_fd()
4220 info.key_size = map->key_size; in bpf_map_get_info_by_fd()
4221 info.value_size = map->value_size; in bpf_map_get_info_by_fd()
4222 info.max_entries = map->max_entries; in bpf_map_get_info_by_fd()
4223 info.map_flags = map->map_flags; in bpf_map_get_info_by_fd()
4224 info.map_extra = map->map_extra; in bpf_map_get_info_by_fd()
4225 memcpy(info.name, map->name, sizeof(map->name)); in bpf_map_get_info_by_fd()
4227 if (map->btf) { in bpf_map_get_info_by_fd()
4228 info.btf_id = btf_obj_id(map->btf); in bpf_map_get_info_by_fd()
4229 info.btf_key_type_id = map->btf_key_type_id; in bpf_map_get_info_by_fd()
4230 info.btf_value_type_id = map->btf_value_type_id; in bpf_map_get_info_by_fd()
4232 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; in bpf_map_get_info_by_fd()
4234 if (bpf_map_is_dev_bound(map)) { in bpf_map_get_info_by_fd()
4235 err = bpf_map_offload_info_fill(&info, map); in bpf_map_get_info_by_fd()
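bpf_map_get_info_by_fd() fills struct bpf_map_info for the BPF_OBJ_GET_INFO_BY_FD command. A hedged sketch of querying it (print_map_info() is an assumption):

#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

/* Sketch: fetch and print the map attributes the kernel fills in above. */
int print_map_info(int map_fd)
{
        struct bpf_map_info info;
        __u32 len = sizeof(info);
        int err;

        memset(&info, 0, sizeof(info));
        err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
        if (err)
                return err;

        printf("%s: type %u key %u value %u max_entries %u flags 0x%x\n",
               info.name, info.type, info.key_size, info.value_size,
               info.max_entries, info.map_flags);
        return 0;
}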
4489 err = fn(map, attr, uattr); \
4499 struct bpf_map *map; in bpf_map_do_batch() local
4508 map = __bpf_map_get(f); in bpf_map_do_batch()
4509 if (IS_ERR(map)) in bpf_map_do_batch()
4510 return PTR_ERR(map); in bpf_map_do_batch()
4512 bpf_map_write_active_inc(map); in bpf_map_do_batch()
4513 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in bpf_map_do_batch()
4517 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in bpf_map_do_batch()
4523 BPF_DO_BATCH(map->ops->map_lookup_batch); in bpf_map_do_batch()
4525 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); in bpf_map_do_batch()
4527 BPF_DO_BATCH(map->ops->map_update_batch); in bpf_map_do_batch()
4529 BPF_DO_BATCH(map->ops->map_delete_batch); in bpf_map_do_batch()
4532 bpf_map_write_active_dec(map); in bpf_map_do_batch()
4874 struct bpf_map *map; in bpf_prog_bind_map() local
4888 map = bpf_map_get(attr->prog_bind_map.map_fd); in bpf_prog_bind_map()
4889 if (IS_ERR(map)) { in bpf_prog_bind_map()
4890 ret = PTR_ERR(map); in bpf_prog_bind_map()
4899 if (used_maps_old[i] == map) { in bpf_prog_bind_map()
4900 bpf_map_put(map); in bpf_prog_bind_map()
4914 used_maps_new[prog->aux->used_map_cnt] = map; in bpf_prog_bind_map()
4925 bpf_map_put(map); in bpf_prog_bind_map()
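bpf_prog_bind_map() implements BPF_PROG_BIND_MAP, which adds a map to a loaded program's used_maps so the map lives at least as long as the program. A hedged userspace sketch (bind_extra_map() is an assumption):

#include <bpf/bpf.h>

/* Sketch: tie the map's lifetime to the program's by adding it to the
 * program's used_maps array. */
int bind_extra_map(int prog_fd, int map_fd)
{
        return bpf_prog_bind_map(prog_fd, map_fd, NULL);
}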