/kernel/bpf/ |
D | arraymap.c |
    22  static void bpf_array_free_percpu(struct bpf_array *array)  in bpf_array_free_percpu() argument
    26  for (i = 0; i < array->map.max_entries; i++) {  in bpf_array_free_percpu()
    27  free_percpu(array->pptrs[i]);  in bpf_array_free_percpu()
    32  static int bpf_array_alloc_percpu(struct bpf_array *array)  in bpf_array_alloc_percpu() argument
    37  for (i = 0; i < array->map.max_entries; i++) {  in bpf_array_alloc_percpu()
    38  ptr = __alloc_percpu_gfp(array->elem_size, 8,  in bpf_array_alloc_percpu()
    41  bpf_array_free_percpu(array);  in bpf_array_alloc_percpu()
    44  array->pptrs[i] = ptr;  in bpf_array_alloc_percpu()
    58  struct bpf_array *array;  in array_map_alloc() local
    96  array_size = sizeof(*array);  in array_map_alloc()
    [all …]
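The arraymap.c hits above show an allocate-everything-or-unwind loop over map.max_entries. Below is a minimal userspace sketch of that pattern, assuming malloc()/free() in place of __alloc_percpu_gfp()/free_percpu(); the struct and function names are invented for illustration.

/*
 * Sketch only, not kernel code.  ptrs[] is assumed to start out zeroed, so
 * the error path can free the whole range (free(NULL) is a no-op, as is
 * free_percpu(NULL) in the kernel).
 */
#include <stdlib.h>

struct elem_array {
        unsigned int max_entries;
        size_t elem_size;
        void **ptrs;                    /* one allocation per entry */
};

static void elem_array_free_all(struct elem_array *a)
{
        for (unsigned int i = 0; i < a->max_entries; i++)
                free(a->ptrs[i]);
}

static int elem_array_alloc_all(struct elem_array *a)
{
        for (unsigned int i = 0; i < a->max_entries; i++) {
                void *p = malloc(a->elem_size);

                if (!p) {
                        /* unwind everything allocated so far */
                        elem_array_free_all(a);
                        return -1;
                }
                a->ptrs[i] = p;
        }
        return 0;
}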
|
D | core.c |
    717  struct bpf_array *array = container_of(map, struct bpf_array, map);  in __bpf_prog_run() local
    721  if (unlikely(index >= array->map.max_entries))  in __bpf_prog_run()
    728  prog = READ_ONCE(array->ptrs[index]);  in __bpf_prog_run()
    934  bool bpf_prog_array_compatible(struct bpf_array *array,  in bpf_prog_array_compatible() argument
    937  if (!array->owner_prog_type) {  in bpf_prog_array_compatible()
    941  array->owner_prog_type = fp->type;  in bpf_prog_array_compatible()
    942  array->owner_jited = fp->jited;  in bpf_prog_array_compatible()
    947  return array->owner_prog_type == fp->type &&  in bpf_prog_array_compatible()
    948  array->owner_jited == fp->jited;  in bpf_prog_array_compatible()
    958  struct bpf_array *array;  in bpf_check_tail_call() local
    [all …]
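Both the core.c tail-call path above and the bpf_trace.c lookups below follow the same shape: bounds-check the index against map.max_entries, then read the slot exactly once. A hedged userspace sketch of that lookup, with a volatile access standing in for the kernel's READ_ONCE() and illustrative type names:

#include <stddef.h>

struct ptr_array {
        unsigned int max_entries;
        void **ptrs;
};

static void *ptr_array_lookup(struct ptr_array *a, unsigned int index)
{
        if (index >= a->max_entries)
                return NULL;

        /* single read of the slot: a concurrent updater may swap the
         * pointer, but this caller observes one consistent value
         * (READ_ONCE() analogue) */
        return *(void * volatile *)&a->ptrs[index];
}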
|
/kernel/trace/ |
D | tracing_map.h |
    169  #define TRACING_MAP_ARRAY_ELT(array, idx) \  argument
    170  (array->pages[idx >> array->entry_shift] + \
    171  ((idx & array->entry_mask) << array->entry_size_shift))
    173  #define TRACING_MAP_ENTRY(array, idx) \  argument
    174  ((struct tracing_map_entry *)TRACING_MAP_ARRAY_ELT(array, idx))
    176  #define TRACING_MAP_ELT(array, idx) \  argument
    177  ((struct tracing_map_elt **)TRACING_MAP_ARRAY_ELT(array, idx))
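The tracing_map.h macros above address a two-level array: the high bits of idx select a page, the low bits select a slot within it, and the slot index is scaled by a power-of-two element size. A short sketch restating that arithmetic as a function; the struct and helper names here are illustrative, not the tracing_map API.

#include <stddef.h>

struct chunked_array {
        void **pages;                   /* one allocation per chunk */
        unsigned int entry_shift;       /* log2(entries per page) */
        unsigned int entry_mask;        /* (1 << entry_shift) - 1 */
        unsigned int entry_size_shift;  /* log2(element size in bytes) */
};

static void *chunked_array_elt(const struct chunked_array *a, unsigned int idx)
{
        void *page = a->pages[idx >> a->entry_shift];
        size_t offset = (size_t)(idx & a->entry_mask) << a->entry_size_shift;

        return (char *)page + offset;
}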
|
D | bpf_trace.c |
    259  struct bpf_array *array = container_of(map, struct bpf_array, map);  in BPF_CALL_2() local
    269  if (unlikely(index >= array->map.max_entries))  in BPF_CALL_2()
    272  ee = READ_ONCE(array->ptrs[index]);  in BPF_CALL_2()
    305  struct bpf_array *array = container_of(map, struct bpf_array, map);  in __bpf_perf_event_output() local
    314  if (unlikely(index >= array->map.max_entries))  in __bpf_perf_event_output()
    317  ee = READ_ONCE(array->ptrs[index]);  in __bpf_perf_event_output()
    401  struct bpf_array *array = container_of(map, struct bpf_array, map);  in BPF_CALL_2() local
    406  if (unlikely(idx >= array->map.max_entries))  in BPF_CALL_2()
    409  cgrp = READ_ONCE(array->ptrs[idx]);  in BPF_CALL_2()
|
D | ring_buffer_benchmark.c |
    137  inc = event->array[0] + 4;  in read_page()
    149  if (!event->array[0]) {  in read_page()
    153  inc = event->array[0] + 4;  in read_page()
|
D | ring_buffer.c |
     123  #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
     169  length = event->array[0];  in rb_event_data_length()
     186  return event->array[0] + RB_EVNT_HDR_SIZE;  in rb_event_length()
     241  if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))  in ring_buffer_event_length()
     242  length -= sizeof(event->array[0]);  in ring_buffer_event_length()
     256  return (void *)&event->array[0];  in rb_event_data()
     258  return (void *)&event->array[1];  in rb_event_data()
    2126  event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;  in rb_reset_tail()
    2253  event->array[0] = delta >> TS_SHIFT;  in rb_add_time_stamp()
    2257  event->array[0] = 0;  in rb_add_time_stamp()
    [all …]
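The ring_buffer.c hits suggest an event header followed by a flexible array of 32-bit words: small payload lengths are encoded in the header itself, while larger ones spill into array[0], pushing the payload to array[1]. The simplified sketch below illustrates that length/payload split only; the real struct ring_buffer_event uses bit-fields and additional event types.

#include <stddef.h>
#include <stdint.h>

struct rb_event_sketch {
        uint32_t type_len;              /* 0: byte length lives in array[0] */
        uint32_t array[];               /* optional length word + payload */
};

#define RB_HDR_SIZE_SKETCH offsetof(struct rb_event_sketch, array)

static size_t rb_event_total_size(const struct rb_event_sketch *e)
{
        if (e->type_len)        /* small event: length in 4-byte words */
                return RB_HDR_SIZE_SKETCH + e->type_len * sizeof(e->array[0]);
        /* large event: byte length stored in array[0] */
        return RB_HDR_SIZE_SKETCH + e->array[0];
}

static const void *rb_event_payload(const struct rb_event_sketch *e)
{
        return e->type_len ? (const void *)&e->array[0]
                           : (const void *)&e->array[1];
}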
|
D | ftrace.c |
    4381  static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
    4694  ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)  in ftrace_set_func() argument
    4728  if (array[i] == rec->ip) {  in ftrace_set_func()
    4737  array[(*idx)++] = rec->ip;  in ftrace_set_func()
    4743  array[i] = array[--(*idx)];  in ftrace_set_func()
    4744  array[*idx] = 0;  in ftrace_set_func()
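The ftrace.c hits show a compact address array managed through *idx: lookups scan the used prefix, additions append, and removals copy the last entry into the hole and zero the vacated slot, so no shifting is needed (order is not preserved). A hedged sketch of that swap-with-last idiom; the toggle helper and its name are invented for illustration, not ftrace's interface.

static int addr_array_toggle(unsigned long *array, int *idx, int size,
                             unsigned long ip)
{
        int i;

        /* already present: remove by moving the last entry into its slot */
        for (i = 0; i < *idx; i++) {
                if (array[i] == ip) {
                        array[i] = array[--(*idx)];
                        array[*idx] = 0;
                        return 0;
                }
        }

        /* not present: append if there is room */
        if (*idx >= size)
                return -1;
        array[(*idx)++] = ip;
        return 0;
}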
|
D | trace.c |
    2080  ring_buffer_write(buffer, event->array[0], &event->array[1]);  in __buffer_unlock_commit()
    2107  entry->array[0] = len;  in trace_event_buffer_lock_reserve()
|
/kernel/sched/ |
D | rt.c |
     79  struct rt_prio_array *array;  in init_rt_rq() local
     82  array = &rt_rq->active;  in init_rt_rq()
     84  INIT_LIST_HEAD(array->queue + i);  in init_rt_rq()
     85  __clear_bit(i, array->bitmap);  in init_rt_rq()
     88  __set_bit(MAX_RT_PRIO, array->bitmap);  in init_rt_rq()
    893  struct rt_prio_array *array = &rt_rq->active;  in dump_throttled_rt_tasks() local
    904  if (bitmap_empty(array->bitmap, MAX_RT_PRIO))  in dump_throttled_rt_tasks()
    908  idx = sched_find_first_bit(array->bitmap);  in dump_throttled_rt_tasks()
    910  list_for_each_entry(rt_se, array->queue + idx, run_list) {  in dump_throttled_rt_tasks()
    921  idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);  in dump_throttled_rt_tasks()
    [all …]
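The rt.c hits show a priority array being set up: one list head per priority, a bitmap with one bit per non-empty queue, and a sentinel bit at MAX_RT_PRIO so the "find first bit" scan always terminates. A userspace sketch of the bitmap half of that structure; MAX_PRIO and the helpers are illustrative, and the per-priority list heads are omitted.

#include <limits.h>

#define MAX_PRIO        100
#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS    ((MAX_PRIO + 1 + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct prio_array_sketch {
        unsigned long bitmap[BITMAP_LONGS];     /* bit set => queue non-empty */
};

static void prio_array_init(struct prio_array_sketch *a)
{
        for (unsigned int i = 0; i < BITMAP_LONGS; i++)
                a->bitmap[i] = 0;
        /* sentinel, like __set_bit(MAX_RT_PRIO, array->bitmap) above */
        a->bitmap[MAX_PRIO / BITS_PER_LONG] |= 1UL << (MAX_PRIO % BITS_PER_LONG);
}

static int prio_array_first(const struct prio_array_sketch *a)
{
        for (unsigned int i = 0; i < BITMAP_LONGS; i++)
                if (a->bitmap[i])
                        return i * BITS_PER_LONG + __builtin_ctzl(a->bitmap[i]);
        return MAX_PRIO;        /* unreachable while the sentinel bit is set */
}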
|
/kernel/ |
D | cgroup.c |
    4635  pid_t *array;  in pidlist_array_load() local
    4651  array = pidlist_allocate(length);  in pidlist_array_load()
    4652  if (!array)  in pidlist_array_load()
    4665  array[n++] = pid;  in pidlist_array_load()
    4671  sort(array, length, sizeof(pid_t), fried_cmppid, NULL);  in pidlist_array_load()
    4673  sort(array, length, sizeof(pid_t), cmppid, NULL);  in pidlist_array_load()
    4675  length = pidlist_uniq(array, length);  in pidlist_array_load()
    4679  pidlist_free(array);  in pidlist_array_load()
    4685  l->list = array;  in pidlist_array_load()
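pidlist_array_load() collects pids into a flat array, sorts it (with cmppid() or fried_cmppid()), then drops duplicates with pidlist_uniq(). A hedged userspace sketch of the sort-then-uniq step, with qsort() in place of the kernel's sort() and invented *_sketch names:

#include <stdlib.h>
#include <sys/types.h>

static int cmppid_sketch(const void *a, const void *b)
{
        return *(const pid_t *)a - *(const pid_t *)b;
}

/* squeeze adjacent duplicates out of a sorted array; returns the new length */
static int pidlist_uniq_sketch(pid_t *array, int length)
{
        int src, dest = 1;

        if (length == 0)
                return 0;
        for (src = 1; src < length; src++)
                if (array[src] != array[dest - 1])
                        array[dest++] = array[src];
        return dest;
}

static int pidlist_sort_uniq_sketch(pid_t *array, int length)
{
        qsort(array, length, sizeof(pid_t), cmppid_sketch);
        return pidlist_uniq_sketch(array, length);
}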
|
D | relay.c |
    82  static void relay_free_page_array(struct page **array)  in relay_free_page_array() argument
    84  kvfree(array);  in relay_free_page_array()
|