Lines Matching refs:map (uses of the identifier map in kernel/bpf/syscall.c)

27 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \  argument
28 (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
29 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
30 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
31 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) argument
32 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map)) argument
104 struct bpf_map *map; in find_and_alloc_map() local
121 map = ops->map_alloc(attr); in find_and_alloc_map()
122 if (IS_ERR(map)) in find_and_alloc_map()
123 return map; in find_and_alloc_map()
124 map->ops = ops; in find_and_alloc_map()
125 map->map_type = type; in find_and_alloc_map()
126 return map; in find_and_alloc_map()
176 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) in bpf_map_init_from_attr() argument
178 map->map_type = attr->map_type; in bpf_map_init_from_attr()
179 map->key_size = attr->key_size; in bpf_map_init_from_attr()
180 map->value_size = attr->value_size; in bpf_map_init_from_attr()
181 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
182 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); in bpf_map_init_from_attr()
183 map->numa_node = bpf_map_attr_numa_node(attr); in bpf_map_init_from_attr()
240 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) in bpf_map_charge_memlock() argument
244 ret = bpf_charge_memlock(map->memory.user, pages); in bpf_map_charge_memlock()
247 map->memory.pages += pages; in bpf_map_charge_memlock()
251 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) in bpf_map_uncharge_memlock() argument
253 bpf_uncharge_memlock(map->memory.user, pages); in bpf_map_uncharge_memlock()
254 map->memory.pages -= pages; in bpf_map_uncharge_memlock()
257 static int bpf_map_alloc_id(struct bpf_map *map) in bpf_map_alloc_id() argument
263 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); in bpf_map_alloc_id()
265 map->id = id; in bpf_map_alloc_id()
275 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) in bpf_map_free_id() argument
284 if (!map->id) in bpf_map_free_id()
292 idr_remove(&map_idr, map->id); in bpf_map_free_id()
293 map->id = 0; in bpf_map_free_id()
304 struct bpf_map *map = container_of(work, struct bpf_map, work); in bpf_map_free_deferred() local
307 bpf_map_charge_move(&mem, &map->memory); in bpf_map_free_deferred()
308 security_bpf_map_free(map); in bpf_map_free_deferred()
310 map->ops->map_free(map); in bpf_map_free_deferred()
314 static void bpf_map_put_uref(struct bpf_map *map) in bpf_map_put_uref() argument
316 if (atomic_dec_and_test(&map->usercnt)) { in bpf_map_put_uref()
317 if (map->ops->map_release_uref) in bpf_map_put_uref()
318 map->ops->map_release_uref(map); in bpf_map_put_uref()
325 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) in __bpf_map_put() argument
327 if (atomic_dec_and_test(&map->refcnt)) { in __bpf_map_put()
329 bpf_map_free_id(map, do_idr_lock); in __bpf_map_put()
330 btf_put(map->btf); in __bpf_map_put()
331 INIT_WORK(&map->work, bpf_map_free_deferred); in __bpf_map_put()
332 schedule_work(&map->work); in __bpf_map_put()
336 void bpf_map_put(struct bpf_map *map) in bpf_map_put() argument
338 __bpf_map_put(map, true); in bpf_map_put()
342 void bpf_map_put_with_uref(struct bpf_map *map) in bpf_map_put_with_uref() argument
344 bpf_map_put_uref(map); in bpf_map_put_with_uref()
345 bpf_map_put(map); in bpf_map_put_with_uref()
350 struct bpf_map *map = filp->private_data; in bpf_map_release() local
352 if (map->ops->map_release) in bpf_map_release()
353 map->ops->map_release(map, filp); in bpf_map_release()
355 bpf_map_put_with_uref(map); in bpf_map_release()
359 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) in map_get_sys_perms() argument
366 if (READ_ONCE(map->frozen)) in map_get_sys_perms()
374 const struct bpf_map *map = filp->private_data; in bpf_map_show_fdinfo() local
379 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { in bpf_map_show_fdinfo()
380 array = container_of(map, struct bpf_array, map); in bpf_map_show_fdinfo()
394 map->map_type, in bpf_map_show_fdinfo()
395 map->key_size, in bpf_map_show_fdinfo()
396 map->value_size, in bpf_map_show_fdinfo()
397 map->max_entries, in bpf_map_show_fdinfo()
398 map->map_flags, in bpf_map_show_fdinfo()
399 map->memory.pages * 1ULL << PAGE_SHIFT, in bpf_map_show_fdinfo()
400 map->id, in bpf_map_show_fdinfo()
401 READ_ONCE(map->frozen)); in bpf_map_show_fdinfo()
439 int bpf_map_new_fd(struct bpf_map *map, int flags) in bpf_map_new_fd() argument
443 ret = security_bpf_map(map, OPEN_FMODE(flags)); in bpf_map_new_fd()
447 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, in bpf_map_new_fd()
493 int map_check_no_btf(const struct bpf_map *map, in map_check_no_btf() argument
501 static int map_check_btf(struct bpf_map *map, const struct btf *btf, in map_check_btf() argument
511 if (!key_type || key_size != map->key_size) in map_check_btf()
515 if (!map->ops->map_check_btf) in map_check_btf()
520 if (!value_type || value_size != map->value_size) in map_check_btf()
523 map->spin_lock_off = btf_find_spin_lock(btf, value_type); in map_check_btf()
525 if (map_value_has_spin_lock(map)) { in map_check_btf()
526 if (map->map_flags & BPF_F_RDONLY_PROG) in map_check_btf()
528 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
529 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
530 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in map_check_btf()
531 map->map_type != BPF_MAP_TYPE_SK_STORAGE) in map_check_btf()
533 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > in map_check_btf()
534 map->value_size) { in map_check_btf()
537 map->spin_lock_off, map->value_size); in map_check_btf()
542 if (map->ops->map_check_btf) in map_check_btf()
543 ret = map->ops->map_check_btf(map, btf, key_type, value_type); in map_check_btf()
554 struct bpf_map *map; in map_create() local
572 map = find_and_alloc_map(attr); in map_create()
573 if (IS_ERR(map)) in map_create()
574 return PTR_ERR(map); in map_create()
576 err = bpf_obj_name_cpy(map->name, attr->map_name); in map_create()
580 atomic_set(&map->refcnt, 1); in map_create()
581 atomic_set(&map->usercnt, 1); in map_create()
597 err = map_check_btf(map, btf, attr->btf_key_type_id, in map_create()
604 map->btf = btf; in map_create()
605 map->btf_key_type_id = attr->btf_key_type_id; in map_create()
606 map->btf_value_type_id = attr->btf_value_type_id; in map_create()
608 map->spin_lock_off = -EINVAL; in map_create()
611 err = security_bpf_map_alloc(map); in map_create()
615 err = bpf_map_alloc_id(map); in map_create()
619 err = bpf_map_new_fd(map, f_flags); in map_create()
627 bpf_map_put_with_uref(map); in map_create()
634 security_bpf_map_free(map); in map_create()
636 btf_put(map->btf); in map_create()
637 bpf_map_charge_move(&mem, &map->memory); in map_create()
638 map->ops->map_free(map); in map_create()
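
The map_create() lines above are the kernel side of the BPF_MAP_CREATE command of the bpf(2) syscall. For orientation, here is a minimal userspace sketch that exercises that path; the sys_bpf() wrapper, the chosen map parameters and the name "demo_map" are illustrative assumptions, not code from this file.

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical thin wrapper around the raw bpf(2) syscall. */
static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
    return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
    union bpf_attr attr;
    int map_fd;

    memset(&attr, 0, sizeof(attr));
    attr.map_type    = BPF_MAP_TYPE_HASH;   /* resolved by find_and_alloc_map() */
    attr.key_size    = sizeof(__u32);       /* copied by bpf_map_init_from_attr() */
    attr.value_size  = sizeof(__u64);
    attr.max_entries = 64;
    strncpy(attr.map_name, "demo_map", sizeof(attr.map_name) - 1);

    map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
    if (map_fd < 0) {
        perror("BPF_MAP_CREATE");
        return 1;
    }
    printf("created map, fd=%d\n", map_fd);
    close(map_fd);
    return 0;
}

On success the descriptor comes from bpf_map_new_fd(); the call may fail with EPERM or ENOMEM if the caller lacks privileges or RLIMIT_MEMLOCK headroom (see bpf_map_charge_memlock() above).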
661 struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) in bpf_map_inc() argument
663 if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) { in bpf_map_inc()
664 atomic_dec(&map->refcnt); in bpf_map_inc()
668 atomic_inc(&map->usercnt); in bpf_map_inc()
669 return map; in bpf_map_inc()
676 struct bpf_map *map; in bpf_map_get_with_uref() local
678 map = __bpf_map_get(f); in bpf_map_get_with_uref()
679 if (IS_ERR(map)) in bpf_map_get_with_uref()
680 return map; in bpf_map_get_with_uref()
682 map = bpf_map_inc(map, true); in bpf_map_get_with_uref()
685 return map; in bpf_map_get_with_uref()
689 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, in __bpf_map_inc_not_zero() argument
694 refold = atomic_fetch_add_unless(&map->refcnt, 1, 0); in __bpf_map_inc_not_zero()
697 __bpf_map_put(map, false); in __bpf_map_inc_not_zero()
705 atomic_inc(&map->usercnt); in __bpf_map_inc_not_zero()
707 return map; in __bpf_map_inc_not_zero()
710 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref) in bpf_map_inc_not_zero() argument
713 map = __bpf_map_inc_not_zero(map, uref); in bpf_map_inc_not_zero()
716 return map; in bpf_map_inc_not_zero()
720 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) in bpf_stackmap_copy() argument
744 struct bpf_map *map; in map_lookup_elem() local
757 map = __bpf_map_get(f); in map_lookup_elem()
758 if (IS_ERR(map)) in map_lookup_elem()
759 return PTR_ERR(map); in map_lookup_elem()
760 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in map_lookup_elem()
766 !map_value_has_spin_lock(map)) { in map_lookup_elem()
771 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_elem()
777 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_lookup_elem()
778 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in map_lookup_elem()
779 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in map_lookup_elem()
780 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in map_lookup_elem()
781 value_size = round_up(map->value_size, 8) * num_possible_cpus(); in map_lookup_elem()
782 else if (IS_FD_MAP(map)) in map_lookup_elem()
785 value_size = map->value_size; in map_lookup_elem()
792 if (bpf_map_is_dev_bound(map)) { in map_lookup_elem()
793 err = bpf_map_offload_lookup_elem(map, key, value); in map_lookup_elem()
799 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_lookup_elem()
800 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in map_lookup_elem()
801 err = bpf_percpu_hash_copy(map, key, value); in map_lookup_elem()
802 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in map_lookup_elem()
803 err = bpf_percpu_array_copy(map, key, value); in map_lookup_elem()
804 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in map_lookup_elem()
805 err = bpf_percpu_cgroup_storage_copy(map, key, value); in map_lookup_elem()
806 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in map_lookup_elem()
807 err = bpf_stackmap_copy(map, key, value); in map_lookup_elem()
808 } else if (IS_FD_ARRAY(map)) { in map_lookup_elem()
809 err = bpf_fd_array_map_lookup_elem(map, key, value); in map_lookup_elem()
810 } else if (IS_FD_HASH(map)) { in map_lookup_elem()
811 err = bpf_fd_htab_map_lookup_elem(map, key, value); in map_lookup_elem()
812 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in map_lookup_elem()
813 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); in map_lookup_elem()
814 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_elem()
815 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_elem()
816 err = map->ops->map_peek_elem(map, value); in map_lookup_elem()
819 if (map->ops->map_lookup_elem_sys_only) in map_lookup_elem()
820 ptr = map->ops->map_lookup_elem_sys_only(map, key); in map_lookup_elem()
822 ptr = map->ops->map_lookup_elem(map, key); in map_lookup_elem()
831 copy_map_value_locked(map, value, ptr, true); in map_lookup_elem()
833 copy_map_value(map, value, ptr); in map_lookup_elem()
835 check_and_init_map_lock(map, value); in map_lookup_elem()
861 static void maybe_wait_bpf_programs(struct bpf_map *map) in maybe_wait_bpf_programs() argument
867 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || in maybe_wait_bpf_programs()
868 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) in maybe_wait_bpf_programs()
879 struct bpf_map *map; in map_update_elem() local
889 map = __bpf_map_get(f); in map_update_elem()
890 if (IS_ERR(map)) in map_update_elem()
891 return PTR_ERR(map); in map_update_elem()
892 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_update_elem()
898 !map_value_has_spin_lock(map)) { in map_update_elem()
903 key = __bpf_copy_key(ukey, map->key_size); in map_update_elem()
909 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_update_elem()
910 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in map_update_elem()
911 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in map_update_elem()
912 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in map_update_elem()
913 value_size = round_up(map->value_size, 8) * num_possible_cpus(); in map_update_elem()
915 value_size = map->value_size; in map_update_elem()
927 if (bpf_map_is_dev_bound(map)) { in map_update_elem()
928 err = bpf_map_offload_update_elem(map, key, value, attr->flags); in map_update_elem()
930 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || in map_update_elem()
931 map->map_type == BPF_MAP_TYPE_SOCKHASH || in map_update_elem()
932 map->map_type == BPF_MAP_TYPE_SOCKMAP) { in map_update_elem()
933 err = map->ops->map_update_elem(map, key, value, attr->flags); in map_update_elem()
942 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_update_elem()
943 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in map_update_elem()
944 err = bpf_percpu_hash_update(map, key, value, attr->flags); in map_update_elem()
945 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in map_update_elem()
946 err = bpf_percpu_array_update(map, key, value, attr->flags); in map_update_elem()
947 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in map_update_elem()
948 err = bpf_percpu_cgroup_storage_update(map, key, value, in map_update_elem()
950 } else if (IS_FD_ARRAY(map)) { in map_update_elem()
952 err = bpf_fd_array_map_update_elem(map, f.file, key, value, in map_update_elem()
955 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in map_update_elem()
957 err = bpf_fd_htab_map_update_elem(map, f.file, key, value, in map_update_elem()
960 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in map_update_elem()
962 err = bpf_fd_reuseport_array_update_elem(map, key, value, in map_update_elem()
964 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_update_elem()
965 map->map_type == BPF_MAP_TYPE_STACK) { in map_update_elem()
966 err = map->ops->map_push_elem(map, value, attr->flags); in map_update_elem()
969 err = map->ops->map_update_elem(map, key, value, attr->flags); in map_update_elem()
974 maybe_wait_bpf_programs(map); in map_update_elem()
991 struct bpf_map *map; in map_delete_elem() local
1000 map = __bpf_map_get(f); in map_delete_elem()
1001 if (IS_ERR(map)) in map_delete_elem()
1002 return PTR_ERR(map); in map_delete_elem()
1003 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_delete_elem()
1008 key = __bpf_copy_key(ukey, map->key_size); in map_delete_elem()
1014 if (bpf_map_is_dev_bound(map)) { in map_delete_elem()
1015 err = bpf_map_offload_delete_elem(map, key); in map_delete_elem()
1022 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1026 maybe_wait_bpf_programs(map); in map_delete_elem()
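
map_update_elem(), map_lookup_elem() and map_delete_elem() above service the BPF_MAP_UPDATE_ELEM, BPF_MAP_LOOKUP_ELEM and BPF_MAP_DELETE_ELEM commands. A hedged userspace sketch of that round trip, assuming a map fd created as in the earlier example with __u32 keys and __u64 values (helper names are again illustrative):

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
    return syscall(__NR_bpf, cmd, attr, size);
}

/* Update, read back and delete one element of an existing map. */
static int demo_elem_ops(int map_fd)
{
    union bpf_attr attr;
    __u32 key = 1;
    __u64 val = 42, out = 0;

    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd;
    attr.key    = (__u64)(unsigned long)&key;
    attr.value  = (__u64)(unsigned long)&val;
    attr.flags  = BPF_ANY;                  /* create or overwrite */
    if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
        return -1;

    attr.value = (__u64)(unsigned long)&out;
    if (sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)))
        return -1;
    printf("key %u -> value %llu\n", key, (unsigned long long)out);

    /* map_delete_elem() only takes map_fd and key. */
    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd;
    attr.key    = (__u64)(unsigned long)&key;
    return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

Note that attr fields past a command's last expected field must stay zero, which is why the delete re-clears the attribute before reusing it.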
1042 struct bpf_map *map; in map_get_next_key() local
1051 map = __bpf_map_get(f); in map_get_next_key()
1052 if (IS_ERR(map)) in map_get_next_key()
1053 return PTR_ERR(map); in map_get_next_key()
1054 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in map_get_next_key()
1060 key = __bpf_copy_key(ukey, map->key_size); in map_get_next_key()
1070 next_key = kmalloc(map->key_size, GFP_USER); in map_get_next_key()
1074 if (bpf_map_is_dev_bound(map)) { in map_get_next_key()
1075 err = bpf_map_offload_get_next_key(map, key, next_key); in map_get_next_key()
1080 err = map->ops->map_get_next_key(map, key, next_key); in map_get_next_key()
1087 if (copy_to_user(unext_key, next_key, map->key_size) != 0) in map_get_next_key()
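
map_get_next_key() is what userspace iteration is built on: a NULL key (attr.key left at 0) asks for the first key, and each further call returns the key following the one supplied, until the syscall fails with ENOENT. A minimal sketch under the same assumptions as above:

#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
    return syscall(__NR_bpf, cmd, attr, size);
}

/* Walk every key of an existing map; keys are assumed to be __u32 wide. */
static void demo_iterate(int map_fd)
{
    union bpf_attr attr;
    __u32 cur, next;

    memset(&attr, 0, sizeof(attr));
    attr.map_fd   = map_fd;
    attr.key      = 0;  /* NULL key: start from the first element */
    attr.next_key = (__u64)(unsigned long)&next;

    while (!sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
        printf("key: %u\n", next);
        cur = next;
        attr.key = (__u64)(unsigned long)&cur;
    }
    if (errno != ENOENT)
        perror("BPF_MAP_GET_NEXT_KEY");
}

For hash-type maps the walk order is unspecified and can be perturbed by concurrent updates.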
1108 struct bpf_map *map; in map_lookup_and_delete_elem() local
1118 map = __bpf_map_get(f); in map_lookup_and_delete_elem()
1119 if (IS_ERR(map)) in map_lookup_and_delete_elem()
1120 return PTR_ERR(map); in map_lookup_and_delete_elem()
1121 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_lookup_and_delete_elem()
1126 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_and_delete_elem()
1132 value_size = map->value_size; in map_lookup_and_delete_elem()
1139 if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
1140 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_and_delete_elem()
1141 err = map->ops->map_pop_elem(map, value); in map_lookup_and_delete_elem()
1168 struct bpf_map *map; in map_freeze() local
1175 map = __bpf_map_get(f); in map_freeze()
1176 if (IS_ERR(map)) in map_freeze()
1177 return PTR_ERR(map); in map_freeze()
1178 if (READ_ONCE(map->frozen)) { in map_freeze()
1187 WRITE_ONCE(map->frozen, true); in map_freeze()
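
map_freeze() sets map->frozen, and map_get_sys_perms() (the frozen check at line 366 above) then strips FMODE_CAN_WRITE, so later write commands from userspace are rejected (typically with EPERM) while BPF programs can still modify the map. A short sketch, assuming a kernel that supports BPF_MAP_FREEZE and the same illustrative helpers:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
    return syscall(__NR_bpf, cmd, attr, size);
}

/* Freeze an existing map; syscall-side writes are rejected afterwards. */
static int demo_freeze(int map_fd)
{
    union bpf_attr attr;
    __u32 key = 1;
    __u64 val = 7;

    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd;
    if (sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr))) {
        perror("BPF_MAP_FREEZE");
        return -1;
    }

    /* This update is expected to fail now (EPERM). */
    memset(&attr, 0, sizeof(attr));
    attr.map_fd = map_fd;
    attr.key    = (__u64)(unsigned long)&key;
    attr.value  = (__u64)(unsigned long)&val;
    attr.flags  = BPF_ANY;
    if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
        perror("BPF_MAP_UPDATE_ELEM after freeze");
    return 0;
}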
2190 struct bpf_map *map; in bpf_map_get_fd_by_id() local
2207 map = idr_find(&map_idr, id); in bpf_map_get_fd_by_id()
2208 if (map) in bpf_map_get_fd_by_id()
2209 map = __bpf_map_inc_not_zero(map, true); in bpf_map_get_fd_by_id()
2211 map = ERR_PTR(-ENOENT); in bpf_map_get_fd_by_id()
2214 if (IS_ERR(map)) in bpf_map_get_fd_by_id()
2215 return PTR_ERR(map); in bpf_map_get_fd_by_id()
2217 fd = bpf_map_new_fd(map, f_flags); in bpf_map_get_fd_by_id()
2219 bpf_map_put_with_uref(map); in bpf_map_get_fd_by_id()
2228 const struct bpf_map *map; in bpf_map_from_imm() local
2232 map = prog->aux->used_maps[i]; in bpf_map_from_imm()
2233 if (map == (void *)addr) { in bpf_map_from_imm()
2235 return map; in bpf_map_from_imm()
2237 if (!map->ops->map_direct_value_meta) in bpf_map_from_imm()
2239 if (!map->ops->map_direct_value_meta(map, addr, off)) { in bpf_map_from_imm()
2241 return map; in bpf_map_from_imm()
2250 const struct bpf_map *map; in bpf_insn_prepare_dump() local
2280 map = bpf_map_from_imm(prog, imm, &off, &type); in bpf_insn_prepare_dump()
2281 if (map) { in bpf_insn_prepare_dump()
2283 insns[i].imm = map->id; in bpf_insn_prepare_dump()
2598 static int bpf_map_get_info_by_fd(struct bpf_map *map, in bpf_map_get_info_by_fd() argument
2612 info.type = map->map_type; in bpf_map_get_info_by_fd()
2613 info.id = map->id; in bpf_map_get_info_by_fd()
2614 info.key_size = map->key_size; in bpf_map_get_info_by_fd()
2615 info.value_size = map->value_size; in bpf_map_get_info_by_fd()
2616 info.max_entries = map->max_entries; in bpf_map_get_info_by_fd()
2617 info.map_flags = map->map_flags; in bpf_map_get_info_by_fd()
2618 memcpy(info.name, map->name, sizeof(map->name)); in bpf_map_get_info_by_fd()
2620 if (map->btf) { in bpf_map_get_info_by_fd()
2621 info.btf_id = btf_id(map->btf); in bpf_map_get_info_by_fd()
2622 info.btf_key_type_id = map->btf_key_type_id; in bpf_map_get_info_by_fd()
2623 info.btf_value_type_id = map->btf_value_type_id; in bpf_map_get_info_by_fd()
2626 if (bpf_map_is_dev_bound(map)) { in bpf_map_get_info_by_fd()
2627 err = bpf_map_offload_info_fill(&info, map); in bpf_map_get_info_by_fd()