Lines matching refs: map (references to the map identifier in include/linux/bpf.h)
67 void (*map_release)(struct bpf_map *map, struct file *map_file);
68 void (*map_free)(struct bpf_map *map);
69 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
70 void (*map_release_uref)(struct bpf_map *map);
71 void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
72 int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
74 int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
76 int (*map_lookup_and_delete_batch)(struct bpf_map *map,
79 int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
81 int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
85 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
86 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
87 int (*map_delete_elem)(struct bpf_map *map, void *key);
88 int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
89 int (*map_pop_elem)(struct bpf_map *map, void *value);
90 int (*map_peek_elem)(struct bpf_map *map, void *value);
93 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
96 int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
98 void (*map_seq_show_elem)(struct bpf_map *map, void *key,
100 int (*map_check_btf)(const struct bpf_map *map,
106 int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
107 void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
108 void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
112 int (*map_direct_value_addr)(const struct bpf_map *map,
114 int (*map_direct_value_meta)(const struct bpf_map *map,
116 int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
117 __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
128 int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
146 int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
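Lines 67-146 above are callback members of struct bpf_map_ops, the per-map-type operations table in include/linux/bpf.h. Below is a minimal sketch of how a hypothetical map type would wire into it; the demo_* names are invented, and only the callback signatures come from the lines listed above.

/* Hypothetical sketch: a toy map type filling in a few bpf_map_ops
 * callbacks; demo_* identifiers are invented for illustration. */
#include <linux/bpf.h>
#include <linux/errno.h>

static void *demo_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;			/* no element stored */
}

static int demo_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 flags)
{
	return -EOPNOTSUPP;		/* updates not supported in this sketch */
}

static int demo_map_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOENT;
}

static void demo_map_free(struct bpf_map *map)
{
	/* release per-map resources here */
}

static const struct bpf_map_ops demo_map_ops = {
	.map_free	 = demo_map_free,
	.map_lookup_elem = demo_map_lookup_elem,
	.map_update_elem = demo_map_update_elem,
	.map_delete_elem = demo_map_delete_elem,
};

A real map type is additionally wired into the map-type registry and would at least provide allocation and key-iteration callbacks; the sketch covers only callbacks visible in the matching lines.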
200 static inline bool map_value_has_spin_lock(const struct bpf_map *map) in map_value_has_spin_lock() argument
202 return map->spin_lock_off >= 0; in map_value_has_spin_lock()
205 static inline bool map_value_has_timer(const struct bpf_map *map) in map_value_has_timer() argument
207 return map->timer_off >= 0; in map_value_has_timer()
210 static inline void check_and_init_map_value(struct bpf_map *map, void *dst) in check_and_init_map_value() argument
212 if (unlikely(map_value_has_spin_lock(map))) in check_and_init_map_value()
213 memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock)); in check_and_init_map_value()
214 if (unlikely(map_value_has_timer(map))) in check_and_init_map_value()
215 memset(dst + map->timer_off, 0, sizeof(struct bpf_timer)); in check_and_init_map_value()
219 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) in copy_map_value() argument
223 if (unlikely(map_value_has_spin_lock(map))) { in copy_map_value()
224 s_off = map->spin_lock_off; in copy_map_value()
227 if (unlikely(map_value_has_timer(map))) { in copy_map_value()
228 t_off = map->timer_off; in copy_map_value()
243 map->value_size - s_off - s_sz); in copy_map_value()
245 memcpy(dst, src, map->value_size); in copy_map_value()
248 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
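The static inline helpers at lines 200-245 (plus copy_map_value_locked at 248) exist because a map value may embed a struct bpf_spin_lock and/or struct bpf_timer at recorded offsets, and those regions must not be blindly memcpy'd. A minimal usage sketch, with an invented demo_copy_elem() wrapper:

/* Sketch: fill a destination value without clobbering the embedded
 * bpf_spin_lock / bpf_timer regions. */
static void demo_copy_elem(struct bpf_map *map, void *dst, void *src)
{
	check_and_init_map_value(map, dst);	/* zero lock/timer fields in dst */
	copy_map_value(map, dst, src);		/* copy around those offsets */
}

When the element's spin lock must actually be taken for a consistent copy, copy_map_value_locked() (line 248) is the locking variant declared for that purpose.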
257 int (*map_get_next_key)(struct bpf_offloaded_map *map,
259 int (*map_lookup_elem)(struct bpf_offloaded_map *map,
261 int (*map_update_elem)(struct bpf_offloaded_map *map,
263 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
269 struct bpf_map map; member
276 static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) in map_to_offmap() argument
278 return container_of(map, struct bpf_offloaded_map, map); in map_to_offmap()
281 static inline bool bpf_map_offload_neutral(const struct bpf_map *map) in bpf_map_offload_neutral() argument
283 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; in bpf_map_offload_neutral()
286 static inline bool bpf_map_support_seq_show(const struct bpf_map *map) in bpf_map_support_seq_show() argument
288 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && in bpf_map_support_seq_show()
289 map->ops->map_seq_show_elem; in bpf_map_support_seq_show()
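map_to_offmap() at line 276 is the container_of() accessor that gets from a generic struct bpf_map back to the enclosing struct bpf_offloaded_map, whose device-side callbacks (lines 257-263) take the offloaded type directly. A hedged sketch of the forwarding pattern; the dev_ops member used here is an assumption, as it does not appear in the matching lines:

/* Sketch: forward a delete to the device-side ops of an offloaded map.
 * offmap->dev_ops is assumed; only map_to_offmap() and the callback
 * signature are taken from the listing above. */
static int demo_offmap_delete(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	return offmap->dev_ops->map_delete_elem(offmap, key);
}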
292 int map_check_no_btf(const struct bpf_map *map,
885 struct bpf_map *map; member
1008 struct bpf_map *map; member
1070 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
1103 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, in bpf_struct_ops_map_sys_lookup_elem() argument
1112 struct bpf_map map; member
1134 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) in bpf_map_flags_to_cap() argument
1136 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); in bpf_map_flags_to_cap()
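bpf_map_flags_to_cap() at line 1134 collapses the BPF_F_RDONLY_PROG / BPF_F_WRONLY_PROG creation flags into read/write capability bits. A sketch of a typical check, assuming the BPF_MAP_CAN_WRITE bit that the helper is expected to return:

/* Sketch: refuse program-side writes to a map created read-only for programs.
 * BPF_MAP_CAN_WRITE is assumed to be one of the returned capability bits. */
static int demo_check_map_writable(struct bpf_map *map)
{
	if (!(bpf_map_flags_to_cap(map) & BPF_MAP_CAN_WRITE))
		return -EACCES;
	return 0;
}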
1175 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1436 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
1441 void bpf_map_inc(struct bpf_map *map);
1442 void bpf_map_inc_with_uref(struct bpf_map *map);
1443 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
1444 void bpf_map_put_with_uref(struct bpf_map *map);
1445 void bpf_map_put(struct bpf_map *map);
1449 bool bpf_map_write_active(const struct bpf_map *map);
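Lines 1441-1445 are the map reference-counting API. bpf_map_inc_not_zero() is __must_check and yields an error pointer when the map's refcount has already dropped to zero, so callers pair it with IS_ERR() and a later bpf_map_put():

/* Sketch: take a reference only if the map is still live
 * (demo_use_map is an invented caller). */
static int demo_use_map(struct bpf_map *map)
{
	struct bpf_map *m;

	m = bpf_map_inc_not_zero(map);
	if (IS_ERR(m))
		return PTR_ERR(m);

	/* ... use m ... */

	bpf_map_put(m);
	return 0;
}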
1450 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
1451 int generic_map_lookup_batch(struct bpf_map *map,
1454 int generic_map_update_batch(struct bpf_map *map,
1457 int generic_map_delete_batch(struct bpf_map *map,
1464 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1466 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
1467 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
1471 bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, in bpf_map_kmalloc_node() argument
1478 bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) in bpf_map_kzalloc() argument
1484 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, in bpf_map_alloc_percpu() argument
1518 int bpf_map_new_fd(struct bpf_map *map, int flags);
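bpf_map_kmalloc_node(), bpf_map_kzalloc() and bpf_map_alloc_percpu() (lines 1464-1467, with stubs at 1471-1484) are the map-scoped allocation wrappers that map implementations use for per-element storage, so the memory can be accounted against the map. A sketch with an invented element type:

/* Sketch: allocate one element's storage against a map; struct demo_elem
 * and demo_alloc_elem() are invented for illustration. */
struct demo_elem {
	struct hlist_node node;
	char value[];
};

static struct demo_elem *demo_alloc_elem(struct bpf_map *map)
{
	return bpf_map_kzalloc(map, sizeof(struct demo_elem) + map->value_size,
			       GFP_ATOMIC | __GFP_NOWARN);
}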
1541 struct bpf_map *map; member
1582 __bpf_md_ptr(struct bpf_map *, map);
1606 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
1607 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
1608 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1610 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
1613 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
1615 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
1617 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1618 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1620 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
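bpf_percpu_hash_copy(), bpf_percpu_array_copy() and their update counterparts (lines 1606-1612) flatten a per-CPU value into a single caller-supplied buffer. The buffer-size convention assumed in this sketch, one round_up(value_size, 8) slot per possible CPU, is not visible in the matching lines:

/* Sketch: read a per-CPU hash element into a flat buffer, one
 * 8-byte-aligned slot per possible CPU (layout assumed). */
static int demo_read_percpu(struct bpf_map *map, void *key)
{
	u32 size = round_up(map->value_size, 8) * num_possible_cpus();
	void *value = kvmalloc(size, GFP_KERNEL);
	int err;

	if (!value)
		return -ENOMEM;

	err = bpf_percpu_hash_copy(map, key, value);

	kvfree(value);
	return err;
}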
1663 struct bpf_map *map, bool exclude_ingress);
1667 struct bpf_prog *xdp_prog, struct bpf_map *map,
1815 static inline bool dev_map_can_have_prog(struct bpf_map *map) in dev_map_can_have_prog() argument
1844 struct bpf_map *map, bool exclude_ingress) in dev_map_enqueue_multi() argument
1860 struct bpf_prog *xdp_prog, struct bpf_map *map, in dev_map_redirect_multi() argument
1883 static inline bool cpu_map_prog_allowed(struct bpf_map *map) in cpu_map_prog_allowed() argument
1934 static inline void bpf_map_put(struct bpf_map *map) in bpf_map_put() argument
1991 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
1993 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
1994 int bpf_map_offload_update_elem(struct bpf_map *map,
1996 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
1997 int bpf_map_offload_get_next_key(struct bpf_map *map,
2000 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
2022 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) in bpf_map_is_dev_bound() argument
2024 return unlikely(map->ops == &bpf_map_offload_ops); in bpf_map_is_dev_bound()
2028 void bpf_map_offload_map_free(struct bpf_map *map);
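bpf_map_is_dev_bound() at line 2022 is just an ops-pointer comparison against bpf_map_offload_ops, and it is what steers element operations toward the bpf_map_offload_*() entry points (lines 1993-1999). Roughly, as a sketch with an invented wrapper:

/* Sketch of the dispatch pattern: device-bound maps go through the
 * offload helpers, everything else through its own ops table. */
static int demo_delete_elem(struct bpf_map *map, void *key)
{
	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_delete_elem(map, key);

	return map->ops->map_delete_elem(map, key);
}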
2035 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
2051 static inline bool bpf_map_is_dev_bound(struct bpf_map *map) in bpf_map_is_dev_bound() argument
2061 static inline void bpf_map_offload_map_free(struct bpf_map *map) in bpf_map_offload_map_free() argument
2085 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, in sock_map_update_elem_sys() argument
2095 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
2097 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
2105 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, in bpf_fd_reuseport_array_lookup_elem() argument
2111 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, in bpf_fd_reuseport_array_update_elem() argument
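Finally, the bpf_fd_reuseport_array_*() pair (lines 2095-2097, with stubs at 2105-2111) is the syscall-side entry for reuseport socket-array maps. A short sketch of the map-type dispatch implied by those stubs, again with an invented wrapper:

/* Sketch: only BPF_MAP_TYPE_REUSEPORT_SOCKARRAY elements go through
 * the reuseport array helpers. */
static int demo_sockarray_lookup(struct bpf_map *map, void *key, void *value)
{
	if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
		return -EINVAL;

	return bpf_fd_reuseport_array_lookup_elem(map, key, value);
}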