
Searched refs:map (Results 1 – 17 of 17) sorted by relevance

/kernel/bpf/
syscall.c
28 struct bpf_map *map; in find_and_alloc_map() local
32 map = tl->ops->map_alloc(attr); in find_and_alloc_map()
33 if (IS_ERR(map)) in find_and_alloc_map()
34 return map; in find_and_alloc_map()
35 map->ops = tl->ops; in find_and_alloc_map()
36 map->map_type = attr->map_type; in find_and_alloc_map()
37 return map; in find_and_alloc_map()
49 static int bpf_map_charge_memlock(struct bpf_map *map) in bpf_map_charge_memlock() argument
56 atomic_long_add(map->pages, &user->locked_vm); in bpf_map_charge_memlock()
59 atomic_long_sub(map->pages, &user->locked_vm); in bpf_map_charge_memlock()
[all …]
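
The syscall.c hits above show the ops-table dispatch at the heart of map creation: find_and_alloc_map() looks up the requested map type, calls that type's map_alloc() callback, then stamps the ops pointer and map_type into the new map. Below is a minimal user-space sketch of the same pattern; the demo_* names are illustrative, not kernel API.

/* Ops-table dispatch, modeled on find_and_alloc_map(). All demo_* names
 * are hypothetical; only the shape of the pattern mirrors syscall.c. */
#include <stdio.h>
#include <stdlib.h>

struct demo_map_attr { unsigned int map_type; unsigned int max_entries; };

struct demo_map;
struct demo_map_ops {
    struct demo_map *(*map_alloc)(const struct demo_map_attr *attr);
};

struct demo_map {
    const struct demo_map_ops *ops;   /* filled in by the core, not the type */
    unsigned int map_type;
    unsigned int max_entries;
};

static struct demo_map *array_alloc(const struct demo_map_attr *attr)
{
    struct demo_map *map = calloc(1, sizeof(*map));
    if (map)
        map->max_entries = attr->max_entries;
    return map;
}

static const struct demo_map_ops array_ops = { .map_alloc = array_alloc };

/* map_type -> ops registry, standing in for the bpf map type list */
static const struct demo_map_ops *registry[] = { &array_ops };

static struct demo_map *find_and_alloc(const struct demo_map_attr *attr)
{
    const struct demo_map_ops *ops;
    struct demo_map *map;

    if (attr->map_type >= sizeof(registry) / sizeof(registry[0]))
        return NULL;                           /* unknown map type */
    ops = registry[attr->map_type];
    map = ops->map_alloc(attr);                /* type-specific allocation */
    if (!map)
        return NULL;
    map->ops = ops;                            /* stamp ops and type, as the hits show */
    map->map_type = attr->map_type;
    return map;
}

int main(void)
{
    struct demo_map_attr attr = { .map_type = 0, .max_entries = 4 };
    struct demo_map *map = find_and_alloc(&attr);

    if (map)
        printf("allocated map type %u with %u entries\n",
               map->map_type, map->max_entries);
    free(map);
    return 0;
}
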
arraymap.c
77 array->map.unpriv_array = unpriv; in array_map_alloc()
80 array->map.key_size = attr->key_size; in array_map_alloc()
81 array->map.value_size = attr->value_size; in array_map_alloc()
82 array->map.max_entries = attr->max_entries; in array_map_alloc()
83 array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT; in array_map_alloc()
86 return &array->map; in array_map_alloc()
90 static void *array_map_lookup_elem(struct bpf_map *map, void *key) in array_map_lookup_elem() argument
92 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_lookup_elem()
95 if (index >= array->map.max_entries) in array_map_lookup_elem()
102 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in array_map_get_next_key() argument
[all …]
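
The arraymap.c hits rely on the generic struct bpf_map being embedded inside the type-specific struct bpf_array, with container_of() recovering the outer structure inside the lookup callback before the index-vs-max_entries bound check. A self-contained sketch of that embedding pattern, using illustrative demo_* types:

/* container_of(): recover the outer, type-specific structure from a pointer
 * to its embedded generic member. demo_* types are illustrative only. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_map { unsigned int max_entries; };

struct demo_array {
    struct demo_map map;              /* generic part, embedded */
    long values[4];                   /* type-specific storage */
};

/* receives only the generic pointer, like array_map_lookup_elem() */
static long *lookup(struct demo_map *map, unsigned int index)
{
    struct demo_array *array = container_of(map, struct demo_array, map);

    if (index >= array->map.max_entries)      /* same bound check as the hits */
        return NULL;
    return &array->values[index];
}

int main(void)
{
    struct demo_array a = {
        .map = { .max_entries = 4 },
        .values = { 10, 20, 30, 40 },
    };

    long *v = lookup(&a.map, 2);
    printf("index 2 -> %ld\n", v ? *v : -1L);
    printf("index 9 -> %s\n", lookup(&a.map, 9) ? "found" : "rejected");
    return 0;
}
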
hashtab.c
18 struct bpf_map map; member
45 htab->map.key_size = attr->key_size; in htab_map_alloc()
46 htab->map.value_size = attr->value_size; in htab_map_alloc()
47 htab->map.max_entries = attr->max_entries; in htab_map_alloc()
53 if (htab->map.max_entries == 0 || htab->map.key_size == 0 || in htab_map_alloc()
54 htab->map.value_size == 0) in htab_map_alloc()
58 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
61 if (htab->map.key_size > MAX_BPF_STACK) in htab_map_alloc()
67 if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) - in htab_map_alloc()
77 round_up(htab->map.key_size, 8) + in htab_map_alloc()
[all …]
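
The hashtab.c hits validate the attributes and size the table: the bucket count is rounded up to a power of two and the key size is rounded up to an 8-byte boundary before the per-element footprint is computed. A simplified user-space version of that arithmetic (the real element size also includes list and hash overhead):

/* Sizing arithmetic modeled on htab_map_alloc(); the helpers below are plain
 * reimplementations for illustration, not the kernel's. */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
    unsigned int p = 1;
    while (p < n)
        p <<= 1;
    return p;
}

#define round_up(x, a)  ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
    unsigned int max_entries = 1000, key_size = 12, value_size = 40;

    if (!max_entries || !key_size || !value_size) {
        fprintf(stderr, "invalid map attributes\n");
        return 1;
    }

    unsigned int n_buckets = roundup_pow_of_two(max_entries);    /* 1000 -> 1024 */
    unsigned int elem_size = round_up(key_size, 8) + value_size; /* 12 -> 16, +40 */

    printf("n_buckets=%u elem_size=%u\n", n_buckets, elem_size);
    return 0;
}
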
helpers.c
35 struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; in bpf_map_lookup_elem() local
41 value = map->ops->map_lookup_elem(map, key); in bpf_map_lookup_elem()
59 struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; in bpf_map_update_elem() local
65 return map->ops->map_update_elem(map, key, value, r4); in bpf_map_update_elem()
80 struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; in bpf_map_delete_elem() local
85 return map->ops->map_delete_elem(map, key); in bpf_map_delete_elem()
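
The helpers.c hits all follow one convention: helper arguments arrive as raw 64-bit register values (r1, r2, ...) and are cast back to typed pointers before the call is forwarded through the map's ops. A sketch of that cast with a hypothetical demo_map in place of a real BPF map; in the kernel it is the verifier that guarantees the cast is safe:

/* Register-to-pointer convention modeled on bpf_map_lookup_elem(). */
#include <stdio.h>
#include <stdint.h>

struct demo_map { int values[4]; };

static uint64_t helper_lookup_elem(uint64_t r1, uint64_t r2)
{
    struct demo_map *map = (struct demo_map *)(unsigned long)r1;  /* r1: map */
    int *key = (int *)(unsigned long)r2;                          /* r2: key */

    if (*key < 0 || *key >= 4)
        return 0;                                   /* NULL on miss */
    return (uint64_t)(unsigned long)&map->values[*key];
}

int main(void)
{
    struct demo_map m = { .values = { 1, 2, 3, 4 } };
    int key = 2;
    int *v = (int *)(unsigned long)helper_lookup_elem(
            (uint64_t)(unsigned long)&m, (uint64_t)(unsigned long)&key);

    printf("value: %d\n", v ? *v : -1);
    return 0;
}
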
verifier.c
674 struct bpf_map *map = env->cur_state.regs[regno].map_ptr; in check_map_access() local
676 if (off < 0 || off + size > map->value_size) { in check_map_access()
678 map->value_size, off, size); in check_map_access()
953 static int check_map_func_compatibility(struct bpf_map *map, int func_id) in check_map_func_compatibility() argument
955 if (!map) in check_map_func_compatibility()
959 switch (map->map_type) { in check_map_func_compatibility()
976 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) in check_map_func_compatibility()
981 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) in check_map_func_compatibility()
991 map->map_type, func_id); in check_map_func_compatibility()
1000 struct bpf_map *map = NULL; in check_call() local
[all …]
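
check_map_access() in the verifier.c hits accepts an access only if the whole off/size window lies inside the map's value_size. A standalone sketch of that check (integer-overflow handling and register tracking are omitted):

/* Bounds check modeled on check_map_access(). */
#include <stdio.h>
#include <stdbool.h>

static bool check_map_access(int off, int size, int value_size)
{
    if (off < 0 || off + size > value_size) {
        fprintf(stderr,
                "invalid access to map value, value_size=%d off=%d size=%d\n",
                value_size, off, size);
        return false;
    }
    return true;
}

int main(void)
{
    printf("%d\n", check_map_access(0, 8, 64));   /* 1: fits */
    printf("%d\n", check_map_access(60, 8, 64));  /* 0: runs past the value */
    printf("%d\n", check_map_access(-4, 8, 64));  /* 0: negative offset */
    return 0;
}
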
core.c
518 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; in __bpf_prog_run() local
519 struct bpf_array *array = container_of(map, struct bpf_array, map); in __bpf_prog_run()
523 if (unlikely(index >= array->map.max_entries)) in __bpf_prog_run()
759 struct bpf_map *map = aux->used_maps[i]; in bpf_check_tail_call() local
762 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) in bpf_check_tail_call()
765 array = container_of(map, struct bpf_array, map); in bpf_check_tail_call()
/kernel/
pid.c
56 struct pidmap *map, int off) in mk_pid() argument
58 return (map - pid_ns->pidmap)*BITS_PER_PAGE + off; in mk_pid()
61 #define find_next_offset(map, off) \ argument
62 find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
106 struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE; in free_pidmap() local
109 clear_bit(offset, map->page); in free_pidmap()
110 atomic_inc(&map->nr_free); in free_pidmap()
155 struct pidmap *map; in alloc_pidmap() local
161 map = &pid_ns->pidmap[pid/BITS_PER_PAGE]; in alloc_pidmap()
169 if (unlikely(!map->page)) { in alloc_pidmap()
[all …]
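
The pid.c hits allocate pids from an array of bitmap pages: find_next_zero_bit() picks a free bit inside one page, and mk_pid() turns the page index and bit offset back into a number with (map - pid_ns->pidmap) * BITS_PER_PAGE + off. A shrunken user-space model of that arithmetic (a "page" here is 32 bits instead of PAGE_SIZE * 8):

/* Bitmap-page id allocator modeled on alloc_pidmap()/free_pidmap()/mk_pid(). */
#include <stdio.h>

#define BITS_PER_PAGE 32                 /* kernel: PAGE_SIZE * 8 */
#define NPAGES        2

struct pidmap { unsigned int page; };    /* one tiny "page" of bits */

static struct pidmap pidmap[NPAGES];

static int mk_pid(struct pidmap *map, int off)
{
    return (int)(map - pidmap) * BITS_PER_PAGE + off;   /* same formula as pid.c */
}

static int alloc_pid(void)
{
    for (int i = 0; i < NPAGES; i++) {
        struct pidmap *map = &pidmap[i];

        for (int off = 0; off < BITS_PER_PAGE; off++) {  /* find_next_zero_bit */
            unsigned int bit = 1u << off;

            if (!(map->page & bit)) {
                map->page |= bit;                        /* test_and_set_bit */
                return mk_pid(map, off);
            }
        }
    }
    return -1;                                           /* all pages full */
}

static void free_pid(int pid)
{
    struct pidmap *map = &pidmap[pid / BITS_PER_PAGE];

    map->page &= ~(1u << (pid % BITS_PER_PAGE));         /* clear_bit */
}

int main(void)
{
    int a = alloc_pid(), b = alloc_pid();

    printf("allocated %d and %d\n", a, b);
    free_pid(a);
    printf("after freeing %d, next allocation reuses %d\n", a, alloc_pid());
    return 0;
}
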
user_namespace.c
31 struct uid_gid_map *map);
154 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down() argument
162 extents = map->nr_extents; in map_id_range_down()
165 first = map->extent[idx].first; in map_id_range_down()
166 last = first + map->extent[idx].count - 1; in map_id_range_down()
173 id = (id - first) + map->extent[idx].lower_first; in map_id_range_down()
180 static u32 map_id_down(struct uid_gid_map *map, u32 id) in map_id_down() argument
186 extents = map->nr_extents; in map_id_down()
189 first = map->extent[idx].first; in map_id_down()
190 last = first + map->extent[idx].count - 1; in map_id_down()
[all …]
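
The user_namespace.c hits walk a small array of extents: an id that falls inside [first, first + count) is translated by its offset into the extent plus lower_first. A sketch of map_id_down() as a plain function; the example numbers are made up, and returning (u32)-1 for an unmapped id follows the kernel's convention:

/* Extent walk modeled on map_id_down(); example values are illustrative. */
#include <stdio.h>
#include <stdint.h>

struct extent { uint32_t first, lower_first, count; };

struct uid_gid_map {
    unsigned int nr_extents;
    struct extent extent[5];
};

static uint32_t map_id_down(const struct uid_gid_map *map, uint32_t id)
{
    for (unsigned int idx = 0; idx < map->nr_extents; idx++) {
        uint32_t first = map->extent[idx].first;
        uint32_t last  = first + map->extent[idx].count - 1;

        if (id >= first && id <= last)
            return (id - first) + map->extent[idx].lower_first;
    }
    return (uint32_t)-1;                   /* no extent covers this id */
}

int main(void)
{
    /* ids 0..999 inside the namespace backed by 100000..100999 outside */
    struct uid_gid_map map = {
        .nr_extents = 1,
        .extent = { { .first = 0, .lower_first = 100000, .count = 1000 } },
    };

    printf("id 0    -> %u\n", map_id_down(&map, 0));      /* 100000 */
    printf("id 500  -> %u\n", map_id_down(&map, 500));    /* 100500 */
    printf("id 5000 -> %u\n", map_id_down(&map, 5000));   /* unmapped */
    return 0;
}
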
/kernel/trace/
bpf_trace.c
193 struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; in bpf_perf_event_read() local
194 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_perf_event_read()
197 if (unlikely(index >= array->map.max_entries)) in bpf_perf_event_read()
232 struct bpf_map *map = (struct bpf_map *) (long) r2; in bpf_perf_event_output() local
233 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_perf_event_output()
242 if (unlikely(index >= array->map.max_entries)) in bpf_perf_event_output()
trace_entries.h
288 __field_struct( struct mmiotrace_map, map )
289 __field_desc( resource_size_t, map, phys )
290 __field_desc( unsigned long, map, virt )
291 __field_desc( unsigned long, map, len )
292 __field_desc( int, map, map_id )
293 __field_desc( unsigned char, map, opcode )
trace_mmiotrace.c
226 m = &field->map; in mmio_print_map()
329 struct mmiotrace_map *map) in __trace_mmiotrace_map() argument
344 entry->map = *map; in __trace_mmiotrace_map()
350 void mmio_trace_mapping(struct mmiotrace_map *map) in mmio_trace_mapping() argument
357 __trace_mmiotrace_map(tr, data, map); in mmio_trace_mapping()
trace.c
155 struct trace_enum_map map; member
1599 unsigned map; in __trace_find_cmdline() local
1613 map = savedcmd->map_pid_to_cmdline[tpid]; in __trace_find_cmdline()
1614 if (map != NO_CMDLINE_MAP) { in __trace_find_cmdline()
1615 tpid = savedcmd->map_cmdline_to_pid[map]; in __trace_find_cmdline()
1617 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); in __trace_find_cmdline()
1637 unsigned map; in trace_find_tgid() local
1642 map = savedcmd->map_pid_to_cmdline[pid]; in trace_find_tgid()
1643 if (map != NO_CMDLINE_MAP) in trace_find_tgid()
1644 tgid = saved_tgids[map]; in trace_find_tgid()
[all …]
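
The trace.c hits use two parallel tables: map_pid_to_cmdline[] turns a pid into a saved-cmdline slot (with NO_CMDLINE_MAP meaning no entry), and that slot indexes both the saved comm strings and map_cmdline_to_pid[] to confirm the slot still belongs to the pid. A shrunken sketch of that lookup; the sizes and the slot-reuse policy here are invented for the demo:

/* pid -> saved cmdline lookup modeled on __trace_find_cmdline(). */
#include <stdio.h>
#include <string.h>

#define PID_MAX         64
#define SAVED_CMDLINES   8
#define NO_CMDLINE_MAP  0xffffu
#define TASK_COMM_LEN   16

static unsigned short map_pid_to_cmdline[PID_MAX];        /* pid -> slot */
static int            map_cmdline_to_pid[SAVED_CMDLINES]; /* slot -> pid */
static char           saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static unsigned int   cmdline_idx;

static void save_cmdline(int pid, const char *comm)
{
    unsigned int map = cmdline_idx++ % SAVED_CMDLINES;    /* reuse slots in a ring */

    map_pid_to_cmdline[pid] = (unsigned short)map;
    map_cmdline_to_pid[map] = pid;
    snprintf(saved_cmdlines[map], TASK_COMM_LEN, "%s", comm);
}

static void find_cmdline(int pid, char *comm)
{
    unsigned int map = map_pid_to_cmdline[pid];

    if (map != NO_CMDLINE_MAP && map_cmdline_to_pid[map] == pid)
        snprintf(comm, TASK_COMM_LEN, "%s", saved_cmdlines[map]);
    else
        snprintf(comm, TASK_COMM_LEN, "<...>");            /* evicted or never saved */
}

int main(void)
{
    char comm[TASK_COMM_LEN];

    memset(map_pid_to_cmdline, 0xff, sizeof(map_pid_to_cmdline)); /* all NO_CMDLINE_MAP */
    save_cmdline(42, "bash");

    find_cmdline(42, comm);
    printf("pid 42 -> %s\n", comm);
    find_cmdline(7, comm);
    printf("pid 7  -> %s\n", comm);
    return 0;
}
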
trace_events.c
2201 static char *enum_replace(char *ptr, struct trace_enum_map *map, int len) in enum_replace() argument
2207 elen = snprintf(ptr, 0, "%ld", map->enum_value); in enum_replace()
2212 snprintf(ptr, elen + 1, "%ld", map->enum_value); in enum_replace()
2224 struct trace_enum_map *map) in update_event_printk() argument
2228 int len = strlen(map->enum_string); in update_event_printk()
2259 if (strncmp(map->enum_string, ptr, len) == 0 && in update_event_printk()
2261 ptr = enum_replace(ptr, map, len); in update_event_printk()
2299 void trace_event_enum_update(struct trace_enum_map **map, int len) in trace_event_enum_update() argument
2327 if (call->class->system == map[i]->system) { in trace_event_enum_update()
2333 update_event_printk(call, map[i]); in trace_event_enum_update()
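
enum_replace() in the trace_events.c hits calls snprintf() twice: a zero-size call only measures how many characters the numeric value needs, and a second call writes it in place of the enum name. A standalone sketch of that replace-in-place trick; the string and value below are made up, and the format-string scan done by update_event_printk() is omitted:

/* In-place symbol -> number substitution modeled on enum_replace(). */
#include <stdio.h>
#include <string.h>

/* replace `len` characters at ptr with the decimal value, in place */
static char *replace_with_value(char *ptr, long value, int len)
{
    int elen = snprintf(NULL, 0, "%ld", value);    /* pass 1: measure only */
    int rlen;

    if (len < elen)
        return NULL;                               /* digits would not fit */

    snprintf(ptr, (size_t)elen + 1, "%ld", value); /* pass 2: write the digits */

    /* pull the tail of the string up behind the digits and re-terminate */
    rlen = (int)strlen(ptr + len);
    memmove(ptr + elen, ptr + len, (size_t)rlen);
    ptr[elen + rlen] = '\0';

    return ptr + elen;
}

int main(void)
{
    char fmt[64] = "offset is HARDIRQ_OFFSET here";
    char *ptr = strstr(fmt, "HARDIRQ_OFFSET");

    replace_with_value(ptr, 65536, (int)strlen("HARDIRQ_OFFSET"));
    printf("%s\n", fmt);                           /* offset is 65536 here */
    return 0;
}
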
trace.h
1376 void trace_event_enum_update(struct trace_enum_map **map, int len);
1379 static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { } in trace_event_enum_update() argument
/kernel/power/
swap.c
86 struct swap_map_page *map; member
951 if (handle->maps->map) in release_swap_reader()
952 free_page((unsigned long)handle->maps->map); in release_swap_reader()
988 tmp->map = (struct swap_map_page *) in get_swap_reader()
990 if (!tmp->map) { in get_swap_reader()
995 error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL); in get_swap_reader()
1000 offset = tmp->map->next_swap; in get_swap_reader()
1003 handle->cur = handle->maps->map; in get_swap_reader()
1024 free_page((unsigned long)handle->maps->map); in swap_read_page()
1031 handle->cur = handle->maps->map; in swap_read_page()
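
The swap.c hits read the hibernation image through a chain of swap_map_page structures: each map page holds a block of swap offsets plus next_swap, the offset of the next map page, so get_swap_reader() loads them one at a time and follows the chain. A toy in-memory model of that walk; the "disk" array and sizes are invented for the demo:

/* Chained index pages modeled on struct swap_map_page / get_swap_reader(). */
#include <stdio.h>

#define MAP_PAGE_ENTRIES 4                    /* a full page's worth in the kernel */

struct swap_map_page {
    unsigned long entries[MAP_PAGE_ENTRIES];  /* where the data pages live */
    unsigned long next_swap;                  /* next map page; 0 ends the chain */
};

/* stand-in for reading a page with hib_submit_io() */
static const struct swap_map_page disk[] = {
    [1] = { .entries = { 10, 11, 12, 13 }, .next_swap = 2 },
    [2] = { .entries = { 14, 15, 16, 17 }, .next_swap = 0 },
};

int main(void)
{
    unsigned long offset = 1;                 /* first map page, from the image header */

    while (offset) {
        const struct swap_map_page *map = &disk[offset];

        for (int i = 0; i < MAP_PAGE_ENTRIES; i++)
            printf("data page at swap offset %lu\n", map->entries[i]);
        offset = map->next_swap;              /* follow the chain */
    }
    return 0;
}
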
/kernel/irq/
irqdomain.c
354 if (domain->ops->map) { in irq_domain_associate()
355 ret = domain->ops->map(domain, virq, hwirq); in irq_domain_associate()
generic-chip.c
435 .map = irq_map_generic_chip,
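
The two irq hits show the halves of one pattern: irq_domain_associate() calls domain->ops->map() only when the domain provides it, and generic-chip.c plugs irq_map_generic_chip into that slot. A sketch of the optional-callback idiom with hypothetical demo_* names:

/* Optional ops callback, modeled on irq_domain_associate(). */
#include <stdio.h>

struct demo_domain;
struct demo_domain_ops {
    int (*map)(struct demo_domain *d, unsigned int virq, unsigned long hwirq);
};
struct demo_domain {
    const char *name;
    const struct demo_domain_ops *ops;
};

static int demo_map(struct demo_domain *d, unsigned int virq, unsigned long hwirq)
{
    printf("%s: mapped virq %u to hwirq %lu\n", d->name, virq, hwirq);
    return 0;
}

static int associate(struct demo_domain *d, unsigned int virq, unsigned long hwirq)
{
    if (d->ops->map)                      /* the callback is optional */
        return d->ops->map(d, virq, hwirq);
    return 0;                             /* nothing type-specific to do */
}

static const struct demo_domain_ops with_map    = { .map = demo_map };
static const struct demo_domain_ops without_map = { .map = NULL };

int main(void)
{
    struct demo_domain a = { "generic-chip", &with_map };
    struct demo_domain b = { "simple", &without_map };

    associate(&a, 17, 3);
    associate(&b, 18, 4);                 /* silently succeeds: no map callback */
    return 0;
}
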