Lines Matching refs:map
86 void (*map_release)(struct bpf_map *map, struct file *map_file);
87 void (*map_free)(struct bpf_map *map);
88 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
89 void (*map_release_uref)(struct bpf_map *map);
90 void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
91 int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
93 int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
95 int (*map_lookup_and_delete_batch)(struct bpf_map *map,
98 int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
101 int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
105 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
106 long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
107 long (*map_delete_elem)(struct bpf_map *map, void *key);
108 long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
109 long (*map_pop_elem)(struct bpf_map *map, void *value);
110 long (*map_peek_elem)(struct bpf_map *map, void *value);
111 void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
114 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
120 void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
121 int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
123 void (*map_seq_show_elem)(struct bpf_map *map, void *key,
125 int (*map_check_btf)(const struct bpf_map *map,
131 int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
132 void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
133 void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
137 int (*map_direct_value_addr)(const struct bpf_map *map,
139 int (*map_direct_value_meta)(const struct bpf_map *map,
141 int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
142 __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
156 long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
174 long (*map_for_each_callback)(struct bpf_map *map,
178 u64 (*map_mem_usage)(const struct bpf_map *map);
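Taken together, the callbacks above form the per-map-type vtable, struct bpf_map_ops. As a rough sketch of how they fit together (the mymap_* names are hypothetical; only the callback signatures come from the lines above), a map type wires its handlers into a const ops table:

static void *mymap_lookup_elem(struct bpf_map *map, void *key)
{
	/* return a kernel pointer to the value for @key, or NULL if absent */
	return NULL;
}

static long mymap_update_elem(struct bpf_map *map, void *key, void *value,
			      u64 flags)
{
	/* honour BPF_ANY / BPF_NOEXIST / BPF_EXIST passed in @flags */
	return 0;
}

static long mymap_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOENT;
}

const struct bpf_map_ops mymap_ops = {
	.map_lookup_elem	= mymap_lookup_elem,
	.map_update_elem	= mymap_update_elem,
	.map_delete_elem	= mymap_delete_elem,
	/* a real map type also provides map_alloc, map_free,
	 * map_get_next_key and friends from the list above */
};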
457 static inline void check_and_init_map_value(struct bpf_map *map, void *dst) in check_and_init_map_value() argument
459 bpf_obj_init(map->record, dst); in check_and_init_map_value()
504 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) in copy_map_value() argument
506 bpf_obj_memcpy(map->record, dst, src, map->value_size, false); in copy_map_value()
509 static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src) in copy_map_value_long() argument
511 bpf_obj_memcpy(map->record, dst, src, map->value_size, true); in copy_map_value_long()
534 static inline void zero_map_value(struct bpf_map *map, void *dst) in zero_map_value() argument
536 bpf_obj_memzero(map->record, dst, map->value_size); in zero_map_value()
539 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
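The helpers above copy or clear map values while honouring the special fields described by map->record (spin locks, timers, kptrs, ...). A minimal sketch of a value-copy-out path, assuming btf_record_has_field() and the BPF_SPIN_LOCK field type (neither shown in this listing) and a hypothetical mymap_copy_value_out():

static void mymap_copy_value_out(struct bpf_map *map, void *dst, void *src)
{
	if (btf_record_has_field(map->record, BPF_SPIN_LOCK))
		/* take the spin lock embedded in @src while copying */
		copy_map_value_locked(map, dst, src, true);
	else
		copy_map_value(map, dst, src);
	/* re-initialize the special fields in the copy so no kernel
	 * state (lock words, kptr addresses, ...) leaks out */
	check_and_init_map_value(map, dst);
}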
555 int (*map_get_next_key)(struct bpf_offloaded_map *map,
557 int (*map_lookup_elem)(struct bpf_offloaded_map *map,
559 int (*map_update_elem)(struct bpf_offloaded_map *map,
561 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
567 struct bpf_map map; member
574 static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) in map_to_offmap() argument
576 return container_of(map, struct bpf_offloaded_map, map); in map_to_offmap()
579 static inline bool bpf_map_offload_neutral(const struct bpf_map *map) in bpf_map_offload_neutral() argument
581 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; in bpf_map_offload_neutral()
584 static inline bool bpf_map_support_seq_show(const struct bpf_map *map) in bpf_map_support_seq_show() argument
586 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && in bpf_map_support_seq_show()
587 map->ops->map_seq_show_elem; in bpf_map_support_seq_show()
590 int map_check_no_btf(const struct bpf_map *map,
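For offloaded maps, the core wraps the generic struct bpf_map in a struct bpf_offloaded_map (hence the map_to_offmap() container_of helper), and the device driver supplies a struct bpf_map_dev_ops. A hypothetical driver-side sketch, with the mydrv_* names made up and only the callback signature taken from the listing:

static int mydrv_map_lookup(struct bpf_offloaded_map *offmap,
			    void *key, void *value)
{
	/* a real driver would read the element from device memory here,
	 * typically via private state it hung off the offloaded map */
	return -EOPNOTSUPP;
}

static const struct bpf_map_dev_ops mydrv_map_dev_ops = {
	.map_lookup_elem = mydrv_map_lookup,
};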
1454 struct bpf_map *map; member
1621 struct bpf_map *map; member
1846 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
1890 void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
1906 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, in bpf_struct_ops_map_sys_lookup_elem() argument
1916 static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) in bpf_map_struct_ops_info_fill() argument
1942 struct bpf_map map; member
1976 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) in bpf_map_flags_to_cap() argument
1978 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); in bpf_map_flags_to_cap()
2004 static inline bool map_type_contains_progs(struct bpf_map *map) in map_type_contains_progs() argument
2006 return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || in map_type_contains_progs()
2007 map->map_type == BPF_MAP_TYPE_DEVMAP || in map_type_contains_progs()
2008 map->map_type == BPF_MAP_TYPE_CPUMAP; in map_type_contains_progs()
2011 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
2025 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2264 void bpf_map_free_id(struct bpf_map *map);
2269 void bpf_map_free_record(struct bpf_map *map);
2289 void bpf_map_inc(struct bpf_map *map);
2290 void bpf_map_inc_with_uref(struct bpf_map *map);
2291 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
2292 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
2293 void bpf_map_put_with_uref(struct bpf_map *map);
2294 void bpf_map_put(struct bpf_map *map);
2298 bool bpf_map_write_active(const struct bpf_map *map);
2299 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
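bpf_map_inc(), bpf_map_inc_not_zero() and bpf_map_put() implement the map's reference counting. A sketch of the contract, assuming the caller guarantees @map itself cannot be freed while this runs (e.g. under RCU or the map IDR lock); the surrounding use_map_if_alive() is hypothetical:

static int use_map_if_alive(struct bpf_map *map)
{
	map = bpf_map_inc_not_zero(map);
	if (IS_ERR(map))
		return PTR_ERR(map);	/* refcount had already dropped to zero */

	/* ... the map is pinned and safe to use here ... */

	bpf_map_put(map);		/* release our reference */
	return 0;
}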
2300 int generic_map_lookup_batch(struct bpf_map *map,
2303 int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
2306 int generic_map_delete_batch(struct bpf_map *map,
2312 int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
2315 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
2317 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
2318 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
2320 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
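The bpf_map_k*alloc()/bpf_map_alloc_percpu() wrappers exist so per-map allocations are charged to the map's memory cgroup. A short sketch, with the bucket structure and mymap_alloc_buckets() invented for illustration:

struct mymap_bucket {
	struct hlist_head head;
};

static struct mymap_bucket *mymap_alloc_buckets(struct bpf_map *map,
						u32 n_buckets)
{
	/* charged against the map's memcg, unlike a plain kvcalloc() */
	return bpf_map_kvcalloc(map, n_buckets, sizeof(struct mymap_bucket),
				GFP_USER | __GFP_NOWARN);
}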
2338 bpf_map_init_elem_count(struct bpf_map *map) in bpf_map_init_elem_count() argument
2340 size_t size = sizeof(*map->elem_count), align = size; in bpf_map_init_elem_count()
2343 map->elem_count = bpf_map_alloc_percpu(map, size, align, flags); in bpf_map_init_elem_count()
2344 if (!map->elem_count) in bpf_map_init_elem_count()
2351 bpf_map_free_elem_count(struct bpf_map *map) in bpf_map_free_elem_count() argument
2353 free_percpu(map->elem_count); in bpf_map_free_elem_count()
2356 static inline void bpf_map_inc_elem_count(struct bpf_map *map) in bpf_map_inc_elem_count() argument
2358 this_cpu_inc(*map->elem_count); in bpf_map_inc_elem_count()
2361 static inline void bpf_map_dec_elem_count(struct bpf_map *map) in bpf_map_dec_elem_count() argument
2363 this_cpu_dec(*map->elem_count); in bpf_map_dec_elem_count()
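The per-CPU element counter helpers above are meant to span a map type's lifecycle: allocate the counter at map creation, bump and drop it on insert/delete, and free it on teardown. A hedged sketch with hypothetical mymap_* callbacks:

static long mymap_insert(struct bpf_map *map, void *key, void *value, u64 flags)
{
	/* ... link the new element in ... */
	bpf_map_inc_elem_count(map);
	return 0;
}

static long mymap_remove(struct bpf_map *map, void *key)
{
	/* ... unlink the element ... */
	bpf_map_dec_elem_count(map);
	return 0;
}

static void mymap_free(struct bpf_map *map)
{
	bpf_map_free_elem_count(map);
	/* bpf_map_init_elem_count(map) was called in the map's alloc path */
}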
2390 int bpf_map_new_fd(struct bpf_map *map, int flags);
2449 struct bpf_map *map; member
2500 __bpf_md_ptr(struct bpf_map *, map);
2524 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
2525 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
2526 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2528 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
2531 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
2533 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
2535 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2536 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2538 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2565 struct bpf_map *map, bool exclude_ingress);
2569 struct bpf_prog *xdp_prog, struct bpf_map *map,
2825 struct bpf_map *map, bool exclude_ingress) in dev_map_enqueue_multi() argument
2841 struct bpf_prog *xdp_prog, struct bpf_map *map, in dev_map_redirect_multi() argument
2905 static inline void bpf_map_put(struct bpf_map *map) in bpf_map_put() argument
3014 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
3016 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
3017 int bpf_map_offload_update_elem(struct bpf_map *map,
3019 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
3020 int bpf_map_offload_get_next_key(struct bpf_map *map,
3023 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
3057 static inline bool bpf_map_is_offloaded(struct bpf_map *map) in bpf_map_is_offloaded() argument
3059 return unlikely(map->ops == &bpf_map_offload_ops); in bpf_map_is_offloaded()
3063 void bpf_map_offload_map_free(struct bpf_map *map);
3064 u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
3071 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
3123 static inline bool bpf_map_is_offloaded(struct bpf_map *map) in bpf_map_is_offloaded() argument
3133 static inline void bpf_map_offload_map_free(struct bpf_map *map) in bpf_map_offload_map_free() argument
3137 static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map) in bpf_map_offload_map_mem_usage() argument
3162 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, in sock_map_update_elem_sys() argument
3199 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
3201 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
3209 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, in bpf_fd_reuseport_array_lookup_elem() argument
3215 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, in bpf_fd_reuseport_array_update_elem() argument