/kernel/bpf/
D | arraymap.c
     19  static void bpf_array_free_percpu(struct bpf_array *array)  in bpf_array_free_percpu() argument
     23  for (i = 0; i < array->map.max_entries; i++) {  in bpf_array_free_percpu()
     24  free_percpu(array->pptrs[i]);  in bpf_array_free_percpu()
     29  static int bpf_array_alloc_percpu(struct bpf_array *array)  in bpf_array_alloc_percpu() argument
     34  for (i = 0; i < array->map.max_entries; i++) {  in bpf_array_alloc_percpu()
     35  ptr = __alloc_percpu_gfp(array->elem_size, 8,  in bpf_array_alloc_percpu()
     38  bpf_array_free_percpu(array);  in bpf_array_alloc_percpu()
     41  array->pptrs[i] = ptr;  in bpf_array_alloc_percpu()
     79  struct bpf_array *array;  in array_map_alloc() local
    104  array_size = sizeof(*array);  in array_map_alloc()
    [all …]
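The pair of functions at lines 19-41 is an all-or-nothing allocator: one per-CPU region per map slot, with full unwinding on the first failure. A minimal sketch of the same pattern, assuming a hypothetical `struct demo_array` that is zero-allocated at creation, so `free_percpu(NULL)` on untouched slots is a harmless no-op:

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct demo_array {
	u32 max_entries;
	u32 elem_size;
	void __percpu *pptrs[];	/* one per-CPU region per slot */
};

static void demo_free_percpu(struct demo_array *a)
{
	u32 i;

	/* free_percpu(NULL) is a no-op, so untouched slots are safe */
	for (i = 0; i < a->max_entries; i++)
		free_percpu(a->pptrs[i]);
}

static int demo_alloc_percpu(struct demo_array *a)
{
	void __percpu *ptr;
	u32 i;

	for (i = 0; i < a->max_entries; i++) {
		ptr = __alloc_percpu_gfp(a->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			demo_free_percpu(a);	/* unwind partial work */
			return -ENOMEM;
		}
		a->pptrs[i] = ptr;
	}
	return 0;
}
```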
D | reuseport_array.c
     51  struct reuseport_array *array = reuseport_array(map);  in reuseport_array_lookup_elem() local
     54  if (unlikely(index >= array->map.max_entries))  in reuseport_array_lookup_elem()
     57  return rcu_dereference(array->ptrs[index]);  in reuseport_array_lookup_elem()
     63  struct reuseport_array *array = reuseport_array(map);  in reuseport_array_delete_elem() local
     71  if (!rcu_access_pointer(array->ptrs[index]))  in reuseport_array_delete_elem()
     76  sk = rcu_dereference_protected(array->ptrs[index],  in reuseport_array_delete_elem()
     81  RCU_INIT_POINTER(array->ptrs[index], NULL);  in reuseport_array_delete_elem()
     95  struct reuseport_array *array = reuseport_array(map);  in reuseport_array_free() local
    128  sk = rcu_dereference(array->ptrs[i]);  in reuseport_array_free()
    138  RCU_INIT_POINTER(array->ptrs[i], NULL);  in reuseport_array_free()
    [all …]
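These excerpts trace the standard RCU discipline for a pointer array: readers bounds-check and then use rcu_dereference(), while the deleter re-reads the slot under its own lock with rcu_dereference_protected() and clears it with RCU_INIT_POINTER(). A condensed sketch, assuming a hypothetical `struct slot_table` and `table_lock`; the real code additionally manages socket refcounts and grace periods:

```c
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct slot_table {
	u32 max_entries;
	void __rcu *ptrs[];
};

static DEFINE_SPINLOCK(table_lock);

/* Reader side: caller must hold rcu_read_lock(). */
static void *slot_lookup(struct slot_table *t, u32 index)
{
	if (unlikely(index >= t->max_entries))
		return NULL;
	return rcu_dereference(t->ptrs[index]);
}

/* Writer side: clear a slot under the lock; the old object may only
 * be freed after a grace period (e.g. via call_rcu()). */
static int slot_delete(struct slot_table *t, u32 index)
{
	void *old;

	if (index >= t->max_entries)
		return -E2BIG;

	spin_lock_bh(&table_lock);
	old = rcu_dereference_protected(t->ptrs[index],
					lockdep_is_held(&table_lock));
	if (!old) {
		spin_unlock_bh(&table_lock);
		return -ENOENT;
	}
	RCU_INIT_POINTER(t->ptrs[index], NULL);
	spin_unlock_bh(&table_lock);
	return 0;
}
```

RCU_INIT_POINTER() is sufficient when storing NULL because no reader can follow a cleared pointer into half-initialized memory; publishing a new object would need rcu_assign_pointer() instead.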
D | core.c
   1469  struct bpf_array *array = container_of(map, struct bpf_array, map);  in ___bpf_prog_run() local
   1473  if (unlikely(index >= array->map.max_entries))  in ___bpf_prog_run()
   1480  prog = READ_ONCE(array->ptrs[index]);  in ___bpf_prog_run()
   1658  bool bpf_prog_array_compatible(struct bpf_array *array,  in bpf_prog_array_compatible() argument
   1664  if (!array->owner_prog_type) {  in bpf_prog_array_compatible()
   1668  array->owner_prog_type = fp->type;  in bpf_prog_array_compatible()
   1669  array->owner_jited = fp->jited;  in bpf_prog_array_compatible()
   1674  return array->owner_prog_type == fp->type &&  in bpf_prog_array_compatible()
   1675  array->owner_jited == fp->jited;  in bpf_prog_array_compatible()
   1685  struct bpf_array *array;  in bpf_check_tail_call() local
   [all …]
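bpf_prog_array_compatible() (lines 1658-1675) uses a "first user stamps ownership" scheme: the first program stored in a tail-call map records its type and JIT status, and every later insertion must match, so the interpreter never jumps between incompatible program kinds. A sketch of just that check, with hypothetical `demo_` types; the real kernel serializes callers externally, so no locking appears here either:

```c
#include <linux/types.h>

struct demo_prog {
	u32 type;
	bool jited;
};

struct demo_prog_array {
	u32 owner_prog_type;	/* 0 == not yet owned */
	bool owner_jited;
};

static bool demo_array_compatible(struct demo_prog_array *array,
				  const struct demo_prog *fp)
{
	if (!array->owner_prog_type) {
		/* first program stored in this array: stamp ownership */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;
		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}
```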
D | btf.c
   1875  const struct btf_array *array = btf_type_array(t);  in btf_array_check_meta() local
   1876  u32 meta_needed = sizeof(*array);  in btf_array_check_meta()
   1909  if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {  in btf_array_check_meta()
   1914  if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {  in btf_array_check_meta()
   1927  const struct btf_array *array = btf_type_array(v->t);  in btf_array_resolve() local
   1934  index_type_id = array->index_type;  in btf_array_resolve()
   1954  elem_type_id = array->type;  in btf_array_resolve()
   1978  if (array->nelems && elem_size > U32_MAX / array->nelems) {  in btf_array_resolve()
   1984  env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);  in btf_array_resolve()
   1992  const struct btf_array *array = btf_type_array(t);  in btf_array_log() local
   [all …]
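Line 1978 is a division-based overflow guard: before computing `elem_size * array->nelems` (line 1984), the code proves the product fits in a u32 without ever performing the multiplication that could wrap. A sketch of the same check, under a hypothetical helper name:

```c
#include <linux/errno.h>
#include <linux/kernel.h>	/* U32_MAX */
#include <linux/types.h>

static int demo_array_total_size(u32 elem_size, u32 nelems, u32 *total)
{
	/* elem_size * nelems wraps iff elem_size > U32_MAX / nelems;
	 * nelems == 0 is allowed and yields a zero-sized array */
	if (nelems && elem_size > U32_MAX / nelems)
		return -EINVAL;

	*total = elem_size * nelems;
	return 0;
}
```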
D | cgroup.c
    140  struct bpf_prog_array **array)  in compute_effective_progs() argument
    178  *array = progs;  in compute_effective_progs()
D | syscall.c
    375  const struct bpf_array *array;  in bpf_map_show_fdinfo() local
    380  array = container_of(map, struct bpf_array, map);  in bpf_map_show_fdinfo()
    381  owner_prog_type = array->owner_prog_type;  in bpf_map_show_fdinfo()
    382  owner_jited = array->owner_jited;  in bpf_map_show_fdinfo()
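bpf_map_show_fdinfo() recovers the containing bpf_array from a generic bpf_map pointer with container_of(), which works because the map is embedded inside the array. A tiny sketch of the idiom with hypothetical `demo_` types:

```c
#include <linux/kernel.h>	/* container_of() */
#include <linux/types.h>

struct demo_map {
	u32 max_entries;
};

struct demo_array {
	struct demo_map map;	/* embedded generic part */
	u32 elem_size;
};

static u32 demo_elem_size(struct demo_map *map)
{
	/* back out from the embedded member to its container */
	struct demo_array *array =
		container_of(map, struct demo_array, map);

	return array->elem_size;
}
```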
/kernel/trace/
D | tracing_map.h
    173  #define TRACING_MAP_ARRAY_ELT(array, idx) \  argument
    174  (array->pages[idx >> array->entry_shift] + \
    175  ((idx & array->entry_mask) << array->entry_size_shift))
    177  #define TRACING_MAP_ENTRY(array, idx) \  argument
    178  ((struct tracing_map_entry *)TRACING_MAP_ARRAY_ELT(array, idx))
    180  #define TRACING_MAP_ELT(array, idx) \  argument
    181  ((struct tracing_map_elt **)TRACING_MAP_ARRAY_ELT(array, idx))
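These macros implement a two-level paged array: the high bits of an index select a page, the low bits select a slot within it, so the map never needs one huge contiguous allocation. A function-style sketch of the same arithmetic, with a hypothetical `struct paged_array` whose shift/mask fields mirror the macros':

```c
#include <linux/types.h>

struct paged_array {
	void **pages;		/* one allocation per page */
	u32 entry_shift;	/* log2(entries per page) */
	u32 entry_mask;		/* entries-per-page - 1 */
	u32 entry_size_shift;	/* log2(bytes per entry) */
};

static void *paged_array_elt(const struct paged_array *a, u32 idx)
{
	/* high bits pick the page, low bits the slot within it;
	 * void-pointer arithmetic is the usual kernel GCC extension */
	return a->pages[idx >> a->entry_shift] +
	       ((idx & a->entry_mask) << a->entry_size_shift);
}
```

Requiring power-of-two entries-per-page and entry sizes turns the whole address computation into shifts and masks, and growing the map only ever allocates another page.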
D | trace_probe_tmpl.h
    105  goto array;  in process_fetch_insn_bottom()
    109  goto array;  in process_fetch_insn_bottom()
    143  array:  in process_fetch_insn_bottom()
D | bpf_trace.c
    350  struct bpf_array *array = container_of(map, struct bpf_array, map);  in get_map_perf_counter() local
    359  if (unlikely(index >= array->map.max_entries))  in get_map_perf_counter()
    362  ee = READ_ONCE(array->ptrs[index]);  in get_map_perf_counter()
    423  struct bpf_array *array = container_of(map, struct bpf_array, map);  in __bpf_perf_event_output() local
    431  if (unlikely(index >= array->map.max_entries))  in __bpf_perf_event_output()
    434  ee = READ_ONCE(array->ptrs[index]);  in __bpf_perf_event_output()
    565  struct bpf_array *array = container_of(map, struct bpf_array, map);  in BPF_CALL_2() local
    568  if (unlikely(idx >= array->map.max_entries))  in BPF_CALL_2()
    571  cgrp = READ_ONCE(array->ptrs[idx]);  in BPF_CALL_2()
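All three helpers repeat one defensive pattern: reject out-of-range indices with an `unlikely()` bounds check, then snapshot the slot with `READ_ONCE()` so the pointer is loaded exactly once even if another CPU updates the array concurrently. A generic sketch with hypothetical struct names:

```c
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_ptr_array {
	u32 max_entries;
	void *ptrs[];
};

static void *demo_lookup(struct demo_ptr_array *array, u64 index)
{
	if (unlikely(index >= array->max_entries))
		return NULL;

	/* a single load: a concurrent writer can't make the caller
	 * observe two different values for one logical read */
	return READ_ONCE(array->ptrs[index]);
}
```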
D | ring_buffer_benchmark.c
    139  inc = event->array[0] + 4;  in read_page()
    151  if (!event->array[0]) {  in read_page()
    155  inc = event->array[0] + 4;  in read_page()
D | ring_buffer.c
    128  #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
    168  length = event->array[0];  in rb_event_data_length()
    185  return event->array[0] + RB_EVNT_HDR_SIZE;  in rb_event_length()
    240  if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))  in ring_buffer_event_length()
    241  length -= sizeof(event->array[0]);  in ring_buffer_event_length()
    255  return (void *)&event->array[0];  in rb_event_data()
    257  return (void *)&event->array[1];  in rb_event_data()
    291  ts = event->array[0];  in ring_buffer_event_time_stamp()
   2188  event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;  in rb_reset_tail()
   2318  event->array[0] = delta >> TS_SHIFT;  in rb_add_time_stamp()
   [all …]
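The excerpts show the trick behind `struct ring_buffer_event`: its flexible `array[]` member doubles as a length word and the payload, and the header size falls out of `offsetof()`. A minimal sketch under that reading, with hypothetical `demo_` names and a simplified header (the real event packs `type_len` and `time_delta` into bitfields):

```c
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_event {
	u32 type_len;	/* simplified; the real header is two bitfields */
	u32 array[];	/* length word and/or payload */
};

/* header size = offset of the flexible member (compare line 128) */
#define DEMO_EVNT_HDR_SIZE	offsetof(struct demo_event, array)

/* Oversized events keep the payload length in array[0] and start the
 * data at array[1]; small events start data right at array[0]
 * (compare lines 255 and 257). */
static void *demo_event_data(struct demo_event *event, bool extended_len)
{
	return extended_len ? (void *)&event->array[1]
			    : (void *)&event->array[0];
}

static u32 demo_event_length(const struct demo_event *event)
{
	/* mirrors line 185: stored payload length plus fixed header */
	return event->array[0] + DEMO_EVNT_HDR_SIZE;
}
```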
D | trace_events_hist.c
   1013  const char *prefix = NULL, *field_type = argv[0], *field_name, *array;  in parse_synth_field() local
   1036  array = strchr(field_name, '[');  in parse_synth_field()
   1037  if (array)  in parse_synth_field()
   1038  len -= strlen(array);  in parse_synth_field()
   1051  if (array)  in parse_synth_field()
   1052  len += strlen(array);  in parse_synth_field()
   1064  if (array) {  in parse_synth_field()
   1065  strcat(field->type, array);  in parse_synth_field()
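parse_synth_field() splits a declaration like "int counts[8]" by locating the '[' in the name, trimming the suffix from the name's length, and gluing the suffix back onto the type string (lines 1036-1065). A self-contained sketch of that splitting, with hypothetical buffers assumed large enough and no error handling:

```c
#include <linux/string.h>
#include <linux/types.h>

/* "counts[8]" with type "int" -> name "counts", type "int[8]" */
static void demo_split_field(const char *field_name, char *name_buf,
			     char *type_buf)
{
	const char *array = strchr(field_name, '[');
	size_t len = strlen(field_name);

	if (array)
		len -= strlen(array);	/* the name stops at '[' */

	memcpy(name_buf, field_name, len);
	name_buf[len] = '\0';

	if (array)
		strcat(type_buf, array);	/* move "[8]" onto the type */
}
```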
D | trace.c
    820  ring_buffer_write(buffer, event->array[0], &event->array[1]);  in __buffer_unlock_commit()
   2500  entry->array[0] = len;  in trace_event_buffer_lock_reserve()
/kernel/sched/
D | rt.c
     78  struct rt_prio_array *array;  in init_rt_rq() local
     81  array = &rt_rq->active;  in init_rt_rq()
     83  INIT_LIST_HEAD(array->queue + i);  in init_rt_rq()
     84  __clear_bit(i, array->bitmap);  in init_rt_rq()
     87  __set_bit(MAX_RT_PRIO, array->bitmap);  in init_rt_rq()
   1142  struct rt_prio_array *array = &rt_rq->active;  in dec_rt_prio() local
   1145  sched_find_first_bit(array->bitmap);  in dec_rt_prio()
   1260  static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)  in __delist_rt_entity() argument
   1264  if (list_empty(array->queue + rt_se_prio(rt_se)))  in __delist_rt_entity()
   1265  __clear_bit(rt_se_prio(rt_se), array->bitmap);  in __delist_rt_entity()
   [all …]
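rt_prio_array is the classic O(1) priority queue: one list head per priority plus a bitmap of non-empty queues, with a sentinel bit set past the last priority (line 87) so a find-first-bit scan always terminates. A sketch with hypothetical `demo_` names; the real code uses the specialized sched_find_first_bit() where this uses the generic find_first_bit():

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/types.h>

#define DEMO_MAX_PRIO	100

struct demo_prio_array {
	DECLARE_BITMAP(bitmap, DEMO_MAX_PRIO + 1);	/* +1 sentinel bit */
	struct list_head queue[DEMO_MAX_PRIO];
};

static void demo_prio_array_init(struct demo_prio_array *array)
{
	int i;

	for (i = 0; i < DEMO_MAX_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* sentinel: guarantees the scan below always finds a set bit */
	__set_bit(DEMO_MAX_PRIO, array->bitmap);
}

static int demo_highest_prio(const struct demo_prio_array *array)
{
	/* returns DEMO_MAX_PRIO (the sentinel) when all queues are empty */
	return find_first_bit(array->bitmap, DEMO_MAX_PRIO + 1);
}
```

Dequeueing clears a priority's bit only when its list becomes empty (lines 1264-1265), so the bitmap stays an exact summary of which queues hold tasks.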
/kernel/cgroup/
D | cgroup-v1.c
    333  pid_t *array;  in pidlist_array_load() local
    349  array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);  in pidlist_array_load()
    350  if (!array)  in pidlist_array_load()
    363  array[n++] = pid;  in pidlist_array_load()
    368  sort(array, length, sizeof(pid_t), cmppid, NULL);  in pidlist_array_load()
    370  length = pidlist_uniq(array, length);  in pidlist_array_load()
    374  kvfree(array);  in pidlist_array_load()
    380  l->list = array;  in pidlist_array_load()
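pidlist_array_load() is a fill-sort-uniq pipeline: allocate with kvmalloc_array() (overflow-checked multiply, kmalloc with a vmalloc fallback, hence the paired kvfree()), fill, sort() with a comparator, then compact duplicates in place. A sketch of the comparator and a uniq pass equivalent to what pidlist_uniq() does; `demo_uniq` is a hypothetical name:

```c
#include <linux/sort.h>
#include <linux/types.h>

static int demo_cmppid(const void *a, const void *b)
{
	return *(const pid_t *)a - *(const pid_t *)b;
}

/* In-place duplicate removal over a sorted list; returns the new
 * length. Duplicates are adjacent after sorting, so one pass with a
 * write cursor suffices. */
static int demo_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	if (length <= 1)
		return length;
	for (src = 1; src < length; src++)
		if (list[src] != list[src - 1])
			list[dest++] = list[src];
	return dest;
}

/* usage, mirroring lines 368-370 above:
 *	sort(array, length, sizeof(pid_t), demo_cmppid, NULL);
 *	length = demo_uniq(array, length);
 */
```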
/kernel/
D | relay.c
     82  static void relay_free_page_array(struct page **array)  in relay_free_page_array() argument
     84  kvfree(array);  in relay_free_page_array()