
Searched refs:map (Results 1 – 25 of 47) sorted by relevance

/kernel/bpf/
arraymap.c
26 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
37 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_alloc_percpu()
38 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, in bpf_array_alloc_percpu()
143 array->map.bypass_spec_v1 = bypass_spec_v1; in array_map_alloc()
146 bpf_map_init_from_attr(&array->map, attr); in array_map_alloc()
154 return &array->map; in array_map_alloc()
163 static void *array_map_lookup_elem(struct bpf_map *map, void *key) in array_map_lookup_elem() argument
165 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_lookup_elem()
168 if (unlikely(index >= array->map.max_entries)) in array_map_lookup_elem()
174 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, in array_map_direct_value_addr() argument
[all …]
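
The arraymap hits above all turn on one idiom: struct bpf_array embeds struct bpf_map as its map member, and array_map_lookup_elem() recovers the outer struct from the inner pointer via container_of(), then bounds-checks the index against map.max_entries. A minimal userspace sketch of that idiom follows; the struct layouts are invented for illustration, not the kernel's real definitions.

#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel macro: subtract the member's offset. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct bpf_map { unsigned int max_entries; };

struct bpf_array {
        struct bpf_map map;     /* embedded, as in arraymap.c */
        size_t elem_size;
        char values[64];
};

/* Mirrors array_map_lookup_elem(): bounds-check the index, then index
 * directly into the flat value area. */
static void *array_lookup(struct bpf_map *map, unsigned int index)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        if (index >= array->map.max_entries)
                return NULL;
        return array->values + (size_t)index * array->elem_size;
}

int main(void)
{
        static struct bpf_array a = {
                .map = { .max_entries = 8 }, .elem_size = 8,
        };

        printf("index 1 -> %p\n", array_lookup(&a.map, 1));
        printf("index 9 -> %p\n", array_lookup(&a.map, 9)); /* NULL */
        return 0;
}
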
local_storage.c
22 struct bpf_map map; member
29 static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map) in map_to_storage() argument
31 return container_of(map, struct bpf_cgroup_storage_map, map); in map_to_storage()
34 static bool attach_type_isolated(const struct bpf_map *map) in attach_type_isolated() argument
36 return map->key_size == sizeof(struct bpf_cgroup_storage_key); in attach_type_isolated()
39 static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map, in bpf_cgroup_storage_key_cmp() argument
42 if (attach_type_isolated(&map->map)) { in bpf_cgroup_storage_key_cmp()
67 cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, in cgroup_storage_lookup() argument
70 struct rb_root *root = &map->root; in cgroup_storage_lookup()
74 spin_lock_bh(&map->lock); in cgroup_storage_lookup()
[all …]
syscall.c
41 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ argument
42 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
43 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
44 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) argument
45 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) argument
46 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ argument
47 IS_FD_HASH(map))
116 struct bpf_map *map; in find_and_alloc_map() local
133 map = ops->map_alloc(attr); in find_and_alloc_map()
134 if (IS_ERR(map)) in find_and_alloc_map()
[all …]
hashtab.c
87 struct bpf_map map; member
133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
187 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
188 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
193 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
194 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
208 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) in fd_htab_map_get_ptr() argument
210 return *(void **)(l->key + roundup(map->key_size, 8)); in fd_htab_map_get_ptr()
225 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers()
228 if (!map_value_has_timer(&htab->map)) in htab_free_prealloced_timers()
[all …]
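
The fd_htab_map_get_ptr() hit shows where a hash-of-maps element keeps its inner-map pointer: directly after the key, with the key size rounded up to 8 bytes so the pointer stays naturally aligned. A standalone sketch of that layout, with made-up sizes:

#include <stdio.h>
#include <string.h>

#define ROUND_UP_8(x) (((x) + 7u) & ~7u)

/* An element laid out as the hashtab snippet implies: key bytes first,
 * then, at the next 8-byte boundary, a stored pointer. */
struct elem {
        char key[13];                   /* odd size on purpose */
        char pad[ROUND_UP_8(13) - 13];  /* alignment gap */
        void *ptr;                      /* what fd_htab_map_get_ptr() reads */
};

static void *get_ptr(struct elem *l, unsigned int key_size)
{
        /* Mirrors: *(void **)(l->key + roundup(map->key_size, 8)) */
        return *(void **)((char *)l->key + ROUND_UP_8(key_size));
}

int main(void)
{
        struct elem e;
        int target = 42;

        memset(&e, 0, sizeof(e));
        e.ptr = &target;
        printf("recovered %p, stored %p\n",
               get_ptr(&e, sizeof(e.key)), (void *)&target);
        return 0;
}
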
map_iter.c
16 struct bpf_map *map; in bpf_map_seq_start() local
18 map = bpf_map_get_curr_or_next(&info->map_id); in bpf_map_seq_start()
19 if (!map) in bpf_map_seq_start()
24 return map; in bpf_map_seq_start()
39 __bpf_md_ptr(struct bpf_map *, map);
42 DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map) in DEFINE_BPF_ITER_FUNC() argument
52 ctx.map = v; in DEFINE_BPF_ITER_FUNC()
95 { offsetof(struct bpf_iter__bpf_map, map),
106 struct bpf_map *map; in bpf_iter_attach_map() local
110 if (!linfo->map.map_fd) in bpf_iter_attach_map()
[all …]
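
bpf_map_seq_start() resumes iteration from a saved map id: it fetches the map at that id or, if it has since gone away, the next higher one, and a NULL result ends the walk. A toy sketch of that resumable, id-keyed iteration; the registry and helper here are stand-ins, not the kernel's idr-backed bpf_map_get_curr_or_next():

#include <stdio.h>

/* A toy id-indexed registry standing in for the global map id table. */
static const unsigned int live_ids[] = { 3, 7, 8, 12 };
#define N_IDS (sizeof(live_ids) / sizeof(live_ids[0]))

/* Return the first live id >= *id, or 0 when the walk is done; a deleted
 * id simply skips ahead to the next survivor. */
static unsigned int get_curr_or_next(unsigned int *id)
{
        for (unsigned int i = 0; i < N_IDS; i++) {
                if (live_ids[i] >= *id) {
                        *id = live_ids[i];
                        return live_ids[i];
                }
        }
        return 0;
}

int main(void)
{
        unsigned int id = 0, cur;

        /* Each step restarts from the saved id, which is what lets a
         * seq_file drop state between output buffers and still resume. */
        while ((cur = get_curr_or_next(&id))) {
                printf("visit map id %u\n", cur);
                id = cur + 1;
        }
        return 0;
}
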
queue_stack_maps.c
18 struct bpf_map map; member
26 static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map) in bpf_queue_stack() argument
28 return container_of(map, struct bpf_queue_stack, map); in bpf_queue_stack()
81 bpf_map_init_from_attr(&qs->map, attr); in queue_stack_map_alloc()
87 return &qs->map; in queue_stack_map_alloc()
91 static void queue_stack_map_free(struct bpf_map *map) in queue_stack_map_free() argument
93 struct bpf_queue_stack *qs = bpf_queue_stack(map); in queue_stack_map_free()
98 static int __queue_map_get(struct bpf_map *map, void *value, bool delete) in __queue_map_get() argument
100 struct bpf_queue_stack *qs = bpf_queue_stack(map); in __queue_map_get()
113 memset(value, 0, qs->map.value_size); in __queue_map_get()
[all …]
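
__queue_map_get() serves both peek and pop from one body, selected by a delete flag. A toy FIFO showing the same single-helper shape; the real map stores value_size-byte slots in a circular buffer, which this does not reproduce:

#include <errno.h>
#include <stdio.h>

struct queue {
        int slots[4];
        unsigned int head, tail;   /* pop at head, push at tail */
};

/* One helper for both peek and pop, selected by `delete`, mirroring
 * __queue_map_get() in queue_stack_maps.c. */
static int __queue_get(struct queue *q, int *value, int delete)
{
        if (q->head == q->tail)
                return -ENOENT;

        *value = q->slots[q->head % 4];
        if (delete)
                q->head++;
        return 0;
}

static int queue_peek(struct queue *q, int *v) { return __queue_get(q, v, 0); }
static int queue_pop(struct queue *q, int *v)  { return __queue_get(q, v, 1); }

int main(void)
{
        struct queue q = { .slots = { 7 }, .head = 0, .tail = 1 };
        int v;

        queue_peek(&q, &v); printf("peek %d\n", v);  /* 7, still queued */
        queue_pop(&q, &v);  printf("pop  %d\n", v);  /* 7, now removed */
        printf("empty: %d\n", queue_pop(&q, &v));    /* -ENOENT */
        return 0;
}
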
devmap.c
76 struct bpf_map map; member
131 bpf_map_init_from_attr(&dtab->map, attr); in dev_map_init_map()
134 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); in dev_map_init_map()
142 dtab->map.numa_node); in dev_map_init_map()
148 dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries * in dev_map_init_map()
150 dtab->map.numa_node); in dev_map_init_map()
180 return &dtab->map; in dev_map_alloc()
183 static void dev_map_free(struct bpf_map *map) in dev_map_free() argument
185 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_free()
202 bpf_clear_redirect_map(map); in dev_map_free()
[all …]
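
dev_map_init_map() rounds max_entries up to a power of two for n_buckets, so a bucket can be chosen by masking the hash instead of taking a modulo. A portable stand-in; the kernel's roundup_pow_of_two() uses ilog2 rather than a loop:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned long max_entries = 100;
        unsigned long n_buckets = roundup_pow_of_two(max_entries);
        unsigned long hash = 0xdeadbeef;

        /* With a power-of-two bucket count, hash % n_buckets is a mask. */
        printf("%lu buckets, bucket %lu\n", n_buckets, hash & (n_buckets - 1));
        return 0;
}
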
bloom_filter.c
16 struct bpf_map map; member
44 static int bloom_map_peek_elem(struct bpf_map *map, void *value) in bloom_map_peek_elem() argument
47 container_of(map, struct bpf_bloom_filter, map); in bloom_map_peek_elem()
51 h = hash(bloom, value, map->value_size, i); in bloom_map_peek_elem()
59 static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags) in bloom_map_push_elem() argument
62 container_of(map, struct bpf_bloom_filter, map); in bloom_map_push_elem()
69 h = hash(bloom, value, map->value_size, i); in bloom_map_push_elem()
76 static int bloom_map_pop_elem(struct bpf_map *map, void *value) in bloom_map_pop_elem() argument
81 static int bloom_map_delete_elem(struct bpf_map *map, void *value) in bloom_map_delete_elem() argument
86 static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in bloom_map_get_next_key() argument
[all …]
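
The bloom_filter.c hits sketch the whole map: push sets nr_hashes bit positions derived from the value, peek tests them, and pop/delete/get_next_key simply fail because a Bloom filter cannot support them. A self-contained miniature with a stand-in hash; the kernel uses jhash with a per-filter random seed:

#include <stdio.h>

#define NR_BITS   1024
#define NR_HASHES 3

static unsigned char bitset[NR_BITS / 8];

/* Stand-in hash mixing in the round index i (FNV-1a style). */
static unsigned int hash(const void *value, unsigned int size, unsigned int i)
{
        const unsigned char *p = value;
        unsigned int h = 2166136261u + i;

        while (size--)
                h = (h ^ *p++) * 16777619u;
        return h % NR_BITS;
}

/* Mirrors bloom_map_push_elem(): set one bit per hash round. */
static void bloom_push(const void *value, unsigned int size)
{
        for (unsigned int i = 0; i < NR_HASHES; i++) {
                unsigned int h = hash(value, size, i);
                bitset[h / 8] |= 1u << (h % 8);
        }
}

/* Mirrors bloom_map_peek_elem(): 0 if possibly present, nonzero if
 * definitely absent. */
static int bloom_peek(const void *value, unsigned int size)
{
        for (unsigned int i = 0; i < NR_HASHES; i++) {
                unsigned int h = hash(value, size, i);
                if (!(bitset[h / 8] & (1u << (h % 8))))
                        return 1;
        }
        return 0;
}

int main(void)
{
        int a = 1, b = 2;

        bloom_push(&a, sizeof(a));
        printf("a absent? %d  b absent? %d\n",
               bloom_peek(&a, sizeof(a)), bloom_peek(&b, sizeof(b)));
        return 0;
}
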
reuseport_array.c
12 struct bpf_map map; member
16 static struct reuseport_array *reuseport_array(struct bpf_map *map) in reuseport_array() argument
18 return (struct reuseport_array *)map; in reuseport_array()
50 static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key) in reuseport_array_lookup_elem() argument
52 struct reuseport_array *array = reuseport_array(map); in reuseport_array_lookup_elem()
55 if (unlikely(index >= array->map.max_entries)) in reuseport_array_lookup_elem()
62 static int reuseport_array_delete_elem(struct bpf_map *map, void *key) in reuseport_array_delete_elem() argument
64 struct reuseport_array *array = reuseport_array(map); in reuseport_array_delete_elem()
69 if (index >= map->max_entries) in reuseport_array_delete_elem()
94 static void reuseport_array_free(struct bpf_map *map) in reuseport_array_free() argument
[all …]
cpumap.c
79 struct bpf_map map; member
106 bpf_map_init_from_attr(&cmap->map, attr); in cpu_map_alloc()
109 if (cmap->map.max_entries > NR_CPUS) { in cpu_map_alloc()
115 cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * in cpu_map_alloc()
117 cmap->map.numa_node); in cpu_map_alloc()
121 return &cmap->map; in cpu_map_alloc()
410 struct bpf_map *map, int fd) in __cpu_map_load_bpf_program() argument
419 !bpf_prog_map_compatible(map, prog)) { in __cpu_map_load_bpf_program()
431 __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, in __cpu_map_entry_alloc() argument
442 rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa); in __cpu_map_entry_alloc()
[all …]
bpf_inode_storage.c
37 struct bpf_map *map, in inode_storage_lookup() argument
53 smap = (struct bpf_local_storage_map *)map; in inode_storage_lookup()
105 static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key) in bpf_fd_inode_storage_lookup_elem() argument
116 sdata = inode_storage_lookup(f->f_inode, map, true); in bpf_fd_inode_storage_lookup_elem()
121 static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key, in bpf_fd_inode_storage_update_elem() argument
138 (struct bpf_local_storage_map *)map, in bpf_fd_inode_storage_update_elem()
144 static int inode_storage_delete(struct inode *inode, struct bpf_map *map) in inode_storage_delete() argument
148 sdata = inode_storage_lookup(inode, map, false); in inode_storage_delete()
157 static int bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key) in bpf_fd_inode_storage_delete_elem() argument
167 err = inode_storage_delete(f->f_inode, map); in bpf_fd_inode_storage_delete_elem()
[all …]
ringbuf.c
85 struct bpf_map map; member
205 bpf_map_init_from_attr(&rb_map->map, attr); in ringbuf_map_alloc()
207 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); in ringbuf_map_alloc()
213 return &rb_map->map; in ringbuf_map_alloc()
230 static void ringbuf_map_free(struct bpf_map *map) in ringbuf_map_free() argument
234 rb_map = container_of(map, struct bpf_ringbuf_map, map); in ringbuf_map_free()
239 static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key) in ringbuf_map_lookup_elem() argument
244 static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value, in ringbuf_map_update_elem() argument
250 static int ringbuf_map_delete_elem(struct bpf_map *map, void *key) in ringbuf_map_delete_elem() argument
255 static int ringbuf_map_get_next_key(struct bpf_map *map, void *key, in ringbuf_map_get_next_key() argument
[all …]
stackmap.c
27 struct bpf_map map; member
34 static inline bool stack_map_use_build_id(struct bpf_map *map) in stack_map_use_build_id() argument
36 return (map->map_flags & BPF_F_STACK_BUILD_ID); in stack_map_use_build_id()
39 static inline int stack_map_data_size(struct bpf_map *map) in stack_map_data_size() argument
41 return stack_map_use_build_id(map) ? in stack_map_data_size()
48 (u64)smap->map.value_size; in prealloc_elems_and_freelist()
51 smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries, in prealloc_elems_and_freelist()
52 smap->map.numa_node); in prealloc_elems_and_freelist()
61 smap->map.max_entries); in prealloc_elems_and_freelist()
107 bpf_map_init_from_attr(&smap->map, attr); in stack_map_alloc()
[all …]
bpf_task_storage.c
57 task_storage_lookup(struct task_struct *task, struct bpf_map *map, in task_storage_lookup() argument
68 smap = (struct bpf_local_storage_map *)map; in task_storage_lookup()
118 static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key) in bpf_pid_task_storage_lookup_elem() argument
142 sdata = task_storage_lookup(task, map, true); in bpf_pid_task_storage_lookup_elem()
151 static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key, in bpf_pid_task_storage_update_elem() argument
177 task, (struct bpf_local_storage_map *)map, value, map_flags, in bpf_pid_task_storage_update_elem()
187 static int task_storage_delete(struct task_struct *task, struct bpf_map *map) in task_storage_delete() argument
191 sdata = task_storage_lookup(task, map, false); in task_storage_delete()
200 static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key) in bpf_pid_task_storage_delete_elem() argument
223 err = task_storage_delete(task, map); in bpf_pid_task_storage_delete_elem()
[all …]
bpf_struct_ops.c
31 struct bpf_map map; member
236 static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key, in bpf_struct_ops_map_get_next_key() argument
246 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, in bpf_struct_ops_map_sys_lookup_elem() argument
249 struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; in bpf_struct_ops_map_sys_lookup_elem()
260 memset(value, 0, map->value_size); in bpf_struct_ops_map_sys_lookup_elem()
268 memcpy(uvalue, st_map->uvalue, map->value_size); in bpf_struct_ops_map_sys_lookup_elem()
275 static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key) in bpf_struct_ops_map_lookup_elem() argument
352 static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, in bpf_struct_ops_map_update_elem() argument
355 struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; in bpf_struct_ops_map_update_elem()
398 memcpy(uvalue, value, map->value_size); in bpf_struct_ops_map_update_elem()
[all …]
lpm_trie.c
33 struct bpf_map map; member
228 static void *trie_lookup_elem(struct bpf_map *map, void *_key) in trie_lookup_elem() argument
230 struct lpm_trie *trie = container_of(map, struct lpm_trie, map); in trie_lookup_elem()
289 size += trie->map.value_size; in lpm_trie_node_alloc()
291 node = bpf_map_kmalloc_node(&trie->map, size, GFP_NOWAIT | __GFP_NOWARN, in lpm_trie_node_alloc()
292 trie->map.numa_node); in lpm_trie_node_alloc()
300 trie->map.value_size); in lpm_trie_node_alloc()
306 static int trie_update_elem(struct bpf_map *map, in trie_update_elem() argument
309 struct lpm_trie *trie = container_of(map, struct lpm_trie, map); in trie_update_elem()
328 if (trie->n_entries == trie->map.max_entries) { in trie_update_elem()
[all …]
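
trie_lookup_elem() walks the LPM trie comparing prefix bits at each node. The core step is counting how many leading bits of the key match a node's prefix; a byte-at-a-time sketch of that comparison (the kernel's equivalent, longest_prefix_match(), works a word at a time):

#include <stdio.h>

/* Count how many leading bits of `key` match `prefix`, capped at
 * `prefixlen` bits. The caller guarantees both buffers cover prefixlen. */
static unsigned int match_bits(const unsigned char *prefix,
                               const unsigned char *key,
                               unsigned int prefixlen)
{
        unsigned int matched = 0;

        for (unsigned int i = 0; matched < prefixlen; i++) {
                unsigned char diff = prefix[i] ^ key[i];

                if (!diff) {
                        matched += 8;
                        continue;
                }
                /* Count equal bits up to the first difference, MSB first. */
                while (!(diff & 0x80)) {
                        diff <<= 1;
                        matched++;
                }
                break;
        }
        return matched < prefixlen ? matched : prefixlen;
}

int main(void)
{
        unsigned char p[] = { 192, 168, 0, 0 };   /* 192.168.0.0/16 */
        unsigned char k[] = { 192, 168, 1, 7 };   /* 192.168.1.7 */

        printf("matched %u of 16 prefix bits\n", match_bits(p, k, 16));
        return 0;
}
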
helpers.c
34 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) in BPF_CALL_2() argument
38 return (unsigned long) map->ops->map_lookup_elem(map, key); in BPF_CALL_2()
50 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, in BPF_CALL_4() argument
55 return map->ops->map_update_elem(map, key, value, flags); in BPF_CALL_4()
69 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) in BPF_CALL_2() argument
73 return map->ops->map_delete_elem(map, key); in BPF_CALL_2()
85 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) in BPF_CALL_3() argument
87 return map->ops->map_push_elem(map, value, flags); in BPF_CALL_3()
100 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) in BPF_CALL_2() argument
102 return map->ops->map_pop_elem(map, value); in BPF_CALL_2()
[all …]
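
The helpers.c hits are all the same shape: bpf_map_lookup_elem() and friends are thin wrappers that dispatch through map->ops, so one helper body serves every map type. A cut-down sketch of that vtable dispatch, with an invented ops struct rather than the kernel's full bpf_map_ops:

#include <stdio.h>

struct map;

/* Each map type supplies its own callbacks; the generic helper only
 * forwards through the table. */
struct map_ops {
        void *(*lookup_elem)(struct map *map, void *key);
};

struct map {
        const struct map_ops *ops;
        int storage[16];
};

/* Generic helper, shaped like bpf_map_lookup_elem() in helpers.c. */
static void *map_lookup_elem(struct map *map, void *key)
{
        return map->ops->lookup_elem(map, key);
}

/* One concrete "array map" implementation. */
static void *array_lookup(struct map *map, void *key)
{
        unsigned int index = *(unsigned int *)key;

        return index < 16 ? &map->storage[index] : NULL;
}

static const struct map_ops array_ops = { .lookup_elem = array_lookup };

int main(void)
{
        struct map m = { .ops = &array_ops };
        unsigned int key = 3;

        m.storage[3] = 99;
        printf("value: %d\n", *(int *)map_lookup_elem(&m, &key));
        return 0;
}
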
bpf_local_storage.c
29 struct bpf_map *map = &smap->map; in mem_charge() local
31 if (!map->ops->map_local_storage_charge) in mem_charge()
34 return map->ops->map_local_storage_charge(smap, owner, size); in mem_charge()
40 struct bpf_map *map = &smap->map; in mem_uncharge() local
42 if (map->ops->map_local_storage_uncharge) in mem_uncharge()
43 map->ops->map_local_storage_uncharge(smap, owner, size); in mem_uncharge()
49 struct bpf_map *map = &smap->map; in owner_storage() local
51 return map->ops->map_owner_storage_ptr(owner); in owner_storage()
83 selem = bpf_map_kzalloc(&smap->map, smap->elem_size, in bpf_selem_alloc()
87 copy_map_value(&smap->map, SDATA(selem)->data, value); in bpf_selem_alloc()
[all …]
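
mem_charge() and mem_uncharge() treat their callbacks as optional: a missing map_local_storage_charge means charging always succeeds, and a missing uncharge is simply skipped. A small sketch of that optional-callback convention, with hypothetical op names:

#include <stdio.h>

struct storage_map;

struct storage_ops {
        /* Both callbacks are optional; a map type that does no memory
         * accounting leaves them NULL. */
        int  (*charge)(struct storage_map *smap, unsigned int size);
        void (*uncharge)(struct storage_map *smap, unsigned int size);
};

struct storage_map { const struct storage_ops *ops; };

/* Mirrors mem_charge(): a missing callback means "always succeeds". */
static int mem_charge(struct storage_map *smap, unsigned int size)
{
        if (!smap->ops->charge)
                return 0;
        return smap->ops->charge(smap, size);
}

/* Mirrors mem_uncharge(): a missing callback means "nothing to undo". */
static void mem_uncharge(struct storage_map *smap, unsigned int size)
{
        if (smap->ops->uncharge)
                smap->ops->uncharge(smap, size);
}

int main(void)
{
        static const struct storage_ops no_accounting = { 0 };
        struct storage_map m = { .ops = &no_accounting };

        printf("charge: %d\n", mem_charge(&m, 64)); /* 0: succeeds */
        mem_uncharge(&m, 64);                       /* no-op */
        return 0;
}
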
offload.c
376 bpf_map_init_from_attr(&offmap->map, attr); in bpf_map_offload_map_alloc()
399 return &offmap->map; in bpf_map_offload_map_alloc()
412 bpf_map_free_id(&offmap->map, true); in __bpf_map_offload_destroy()
417 void bpf_map_offload_map_free(struct bpf_map *map) in bpf_map_offload_map_free() argument
419 struct bpf_offloaded_map *offmap = map_to_offmap(map); in bpf_map_offload_map_free()
431 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value) in bpf_map_offload_lookup_elem() argument
433 struct bpf_offloaded_map *offmap = map_to_offmap(map); in bpf_map_offload_lookup_elem()
444 int bpf_map_offload_update_elem(struct bpf_map *map, in bpf_map_offload_update_elem() argument
447 struct bpf_offloaded_map *offmap = map_to_offmap(map); in bpf_map_offload_update_elem()
462 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key) in bpf_map_offload_delete_elem() argument
[all …]
/kernel/dma/
map_benchmark.c
38 struct map_benchmark_data *map = data; in map_benchmark_thread() local
39 int npages = map->bparam.granule; in map_benchmark_thread()
58 if (map->dir != DMA_FROM_DEVICE) in map_benchmark_thread()
62 dma_addr = dma_map_single(map->dev, buf, size, map->dir); in map_benchmark_thread()
63 if (unlikely(dma_mapping_error(map->dev, dma_addr))) { in map_benchmark_thread()
65 dev_name(map->dev)); in map_benchmark_thread()
73 ndelay(map->bparam.dma_trans_ns); in map_benchmark_thread()
76 dma_unmap_single(map->dev, dma_addr, size, map->dir); in map_benchmark_thread()
87 atomic64_add(map_100ns, &map->sum_map_100ns); in map_benchmark_thread()
88 atomic64_add(unmap_100ns, &map->sum_unmap_100ns); in map_benchmark_thread()
[all …]
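
map_benchmark_thread() brackets each dma_map_single()/dma_unmap_single() pair with timestamps and accumulates the cost in 100ns units into shared atomics. A userspace sketch of the same measurement loop, timing a dummy operation with clock_gettime(); the 100ns scaling presumably keeps the 64-bit sums far from overflow across many threads and iterations:

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_llong sum_op_100ns;

static long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Stand-in for the dma map/unmap pair being measured. */
static void op_under_test(void)
{
        volatile int x = 0;

        for (int i = 0; i < 1000; i++)
                x += i;
}

int main(void)
{
        for (int i = 0; i < 100; i++) {
                long long t0 = now_ns();

                op_under_test();
                /* Accumulate in 100ns units, as the benchmark does. */
                atomic_fetch_add(&sum_op_100ns, (now_ns() - t0) / 100);
        }
        printf("avg: %lld x 100ns\n", atomic_load(&sum_op_100ns) / 100);
        return 0;
}
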
direct.c
485 enum pci_p2pdma_map_type map; in dma_direct_map_sg() local
491 map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg); in dma_direct_map_sg()
492 switch (map) { in dma_direct_map_sg()
641 struct bus_dma_region *map; in dma_direct_set_offset() local
652 map = kcalloc(2, sizeof(*map), GFP_KERNEL); in dma_direct_set_offset()
653 if (!map) in dma_direct_set_offset()
655 map[0].cpu_start = cpu_start; in dma_direct_set_offset()
656 map[0].dma_start = dma_start; in dma_direct_set_offset()
657 map[0].offset = offset; in dma_direct_set_offset()
658 map[0].size = size; in dma_direct_set_offset()
[all …]
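
dma_direct_set_offset() allocates two bus_dma_region entries, one real window plus an all-zero terminator, filling cpu_start/dma_start/offset/size. A sketch of translating an address through such a zero-terminated table; the field semantics here follow the snippet's names, not necessarily the kernel's exact conventions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct bus_dma_region {
        uint64_t cpu_start;
        uint64_t dma_start;
        uint64_t offset;   /* here: dma_start - cpu_start, mod 2^64 */
        uint64_t size;     /* 0 terminates the table */
};

static uint64_t translate(const struct bus_dma_region *map, uint64_t cpu_addr)
{
        for (const struct bus_dma_region *r = map; r->size; r++)
                if (cpu_addr - r->cpu_start < r->size)
                        return cpu_addr + r->offset;
        return cpu_addr;   /* no window applies: identity mapping */
}

int main(void)
{
        /* kcalloc(2, ...) in the snippet: one real entry plus an all-zero
         * terminator, mirrored here with calloc. */
        struct bus_dma_region *map = calloc(2, sizeof(*map));

        if (!map)
                return 1;
        map[0] = (struct bus_dma_region){
                .cpu_start = 0x80000000, .dma_start = 0,
                .offset = (uint64_t)0 - 0x80000000, .size = 0x10000000,
        };
        printf("0x%llx\n", (unsigned long long)translate(map, 0x80001000));
        free(map);
        return 0;
}
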
/kernel/trace/
tracing_map.c
202 static int tracing_map_add_field(struct tracing_map *map, in tracing_map_add_field() argument
207 if (map->n_fields < TRACING_MAP_FIELDS_MAX) { in tracing_map_add_field()
208 ret = map->n_fields; in tracing_map_add_field()
209 map->fields[map->n_fields++].cmp_fn = cmp_fn; in tracing_map_add_field()
227 int tracing_map_add_sum_field(struct tracing_map *map) in tracing_map_add_sum_field() argument
229 return tracing_map_add_field(map, tracing_map_cmp_atomic64); in tracing_map_add_sum_field()
244 int tracing_map_add_var(struct tracing_map *map) in tracing_map_add_var() argument
248 if (map->n_vars < TRACING_MAP_VARS_MAX) in tracing_map_add_var()
249 ret = map->n_vars++; in tracing_map_add_var()
270 int tracing_map_add_key_field(struct tracing_map *map, in tracing_map_add_key_field() argument
[all …]
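
tracing_map_add_field() and tracing_map_add_var() share a registration pattern: append into a fixed-size array and hand the caller the slot index as its handle, failing once the cap is hit. A minimal sketch of that index-as-handle registration:

#include <stdio.h>

#define FIELDS_MAX 4

struct field { int cmp_fn; };

struct tmap {
        unsigned int n_fields;
        struct field fields[FIELDS_MAX];
};

/* Same shape as tracing_map_add_field(): on success the return value is
 * the slot index the caller uses from then on; past the cap it fails
 * (the kernel returns -EINVAL there). */
static int tmap_add_field(struct tmap *map)
{
        if (map->n_fields >= FIELDS_MAX)
                return -1;
        return map->n_fields++;
}

int main(void)
{
        struct tmap map = { 0 };

        for (int i = 0; i < 6; i++)
                printf("add_field -> %d\n", tmap_add_field(&map));
        return 0;
}
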
tracing_map.h
139 struct tracing_map *map; member
190 struct tracing_map_array *map; member
245 extern int tracing_map_init(struct tracing_map *map);
247 extern int tracing_map_add_sum_field(struct tracing_map *map);
248 extern int tracing_map_add_var(struct tracing_map *map);
249 extern int tracing_map_add_key_field(struct tracing_map *map,
253 extern void tracing_map_destroy(struct tracing_map *map);
254 extern void tracing_map_clear(struct tracing_map *map);
257 tracing_map_insert(struct tracing_map *map, void *key);
259 tracing_map_lookup(struct tracing_map *map, void *key);
[all …]
/kernel/
user_namespace.c
30 struct uid_gid_map *map);
276 map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down_max() argument
284 return bsearch(&key, map->forward, extents, in map_id_range_down_max()
294 map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down_base() argument
303 first = map->extent[idx].first; in map_id_range_down_base()
304 last = first + map->extent[idx].count - 1; in map_id_range_down_base()
307 return &map->extent[idx]; in map_id_range_down_base()
312 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down() argument
315 unsigned extents = map->nr_extents; in map_id_range_down()
319 extent = map_id_range_down_base(extents, map, id, count); in map_id_range_down()
[all …]
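
user_namespace.c keeps two lookups: map_id_range_down_base() walks the extents linearly, while map_id_range_down_max() switches to bsearch() over the sorted forward array once there are many extents. A single-id sketch of the bsearch variant; the kernel's comparator also handles an (id, count) range rather than one id:

#include <stdio.h>
#include <stdlib.h>

struct uid_gid_extent {
        unsigned int first;       /* range start inside the namespace */
        unsigned int lower_first; /* range start in the parent */
        unsigned int count;
};

/* bsearch() comparator: does this extent contain `id`? Requires the
 * table to be sorted by `first`. */
static int cmp_map_id(const void *k, const void *e)
{
        unsigned int id = *(const unsigned int *)k;
        const struct uid_gid_extent *el = e;

        if (id < el->first)
                return -1;
        if (id >= el->first + el->count)
                return 1;
        return 0;
}

int main(void)
{
        struct uid_gid_extent map[] = {
                { .first = 0,    .lower_first = 100000, .count = 1000 },
                { .first = 1000, .lower_first = 500000, .count = 1000 },
        };
        unsigned int id = 1234;
        struct uid_gid_extent *ext =
                bsearch(&id, map, 2, sizeof(map[0]), cmp_map_id);

        if (ext)
                printf("uid %u -> %u\n", id,
                       ext->lower_first + (id - ext->first));
        return 0;
}
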
/kernel/bpf/preload/iterators/
iterators.bpf.c
23 struct bpf_map *map; member
81 struct bpf_map *map = ctx->map; in dump_bpf_map() local
83 if (!map) in dump_bpf_map()
89 BPF_SEQ_PRINTF(seq, "%4u %-16s%6d\n", map->id, map->name, map->max_entries); in dump_bpf_map()
