/kernel/events/ |
D | callchain.c |
    50    struct callchain_cpus_entries *entries;  in release_callchain_buffers_rcu() local
    53    entries = container_of(head, struct callchain_cpus_entries, rcu_head);  in release_callchain_buffers_rcu()
    56    kfree(entries->cpu_entries[cpu]);  in release_callchain_buffers_rcu()
    58    kfree(entries);  in release_callchain_buffers_rcu()
    63    struct callchain_cpus_entries *entries;  in release_callchain_buffers() local
    65    entries = callchain_cpus_entries;  in release_callchain_buffers()
    67    call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);  in release_callchain_buffers()
    74    struct callchain_cpus_entries *entries;  in alloc_callchain_buffers() local
    83    entries = kzalloc(size, GFP_KERNEL);  in alloc_callchain_buffers()
    84    if (!entries)  in alloc_callchain_buffers()
    [all …]
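
The callchain.c hits above follow the common kernel idiom of freeing an object after an RCU grace period: the object embeds a struct rcu_head, call_rcu() queues a callback, and the callback recovers the outer object with container_of() before kfree(). A minimal sketch of that idiom (the demo_* names are hypothetical, not the perf callchain code itself):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entries {
	struct rcu_head rcu_head;	/* embedded so call_rcu() can queue us */
	void *data;
};

static void demo_free_rcu(struct rcu_head *head)
{
	/* Recover the enclosing object from its embedded rcu_head. */
	struct demo_entries *e = container_of(head, struct demo_entries, rcu_head);

	kfree(e->data);
	kfree(e);
}

static void demo_release(struct demo_entries *e)
{
	/* Defer the actual free until all pre-existing RCU readers are done. */
	call_rcu(&e->rcu_head, demo_free_rcu);
}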
|
D | core.c |
    6992  perf_output_copy(handle, data->br_stack->entries, size);  in perf_output_sample()
|
/kernel/ |
D | stacktrace.c |
    23    void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,  in stack_trace_print() argument
    28    if (WARN_ON(!entries))  in stack_trace_print()
    32    printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);  in stack_trace_print()
    46    int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,  in stack_trace_snprint() argument
    51    if (WARN_ON(!entries))  in stack_trace_snprint()
    56    (void *)entries[i]);  in stack_trace_snprint()
    275   .entries = store,  in stack_trace_save()
    299   .entries = store,  in stack_trace_save_tsk()
    322   .entries = store,  in stack_trace_save_regs()
    348   .entries = store,  in stack_trace_save_tsk_reliable()
    [all …]
|
D | backtracetest.c |
    47    unsigned long entries[8];  in backtrace_test_saved() local
    53    nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);  in backtrace_test_saved()
    54    stack_trace_print(entries, nr_entries, 0);  in backtrace_test_saved()
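
Taken together, the stacktrace.c and backtracetest.c hits show the consumer side of the stacktrace API: save the current stack into a fixed-size buffer with stack_trace_save(), then dump it with stack_trace_print(). A minimal sketch of that pattern, assuming a kernel build context (demo_dump_stack() is a hypothetical caller):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void demo_dump_stack(void)
{
	unsigned long entries[8];	/* fixed-size buffer for return addresses */
	unsigned int nr_entries;

	/* Capture up to ARRAY_SIZE(entries) frames; skipnr 0 keeps the caller. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* Print each saved address with symbol resolution (%pS). */
	stack_trace_print(entries, nr_entries, 0);
}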
|
D | jump_label.c |
    383   struct jump_entry *entries)  in static_key_set_entries() argument
    387   WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);  in static_key_set_entries()
    389   key->entries = entries;  in static_key_set_entries()
    522   struct jump_entry *entries;  member
    585   if (!mod->entries)  in __jump_label_mod_update()
    593   __jump_label_update(key, mod->entries, stop,  in __jump_label_mod_update()
    665   jlm2->entries = static_key_entries(key);  in jump_label_add_module()
    671   jlm->entries = iter;  in jump_label_add_module()
    727   static_key_set_entries(key, jlm->entries);  in jump_label_del_module()
|
/kernel/trace/ |
D | tracing_map.c |
    940   void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,  in tracing_map_destroy_sort_entries() argument
    946   destroy_sort_entry(entries[i]);  in tracing_map_destroy_sort_entries()
    948   vfree(entries);  in tracing_map_destroy_sort_entries()
    1004  const struct tracing_map_sort_entry **entries,  in sort_secondary() argument
    1024  const struct tracing_map_sort_entry **a = &entries[i];  in sort_secondary()
    1025  const struct tracing_map_sort_entry **b = &entries[i + 1];  in sort_secondary()
    1040  sort(&entries[start], n_sub,  in sort_secondary()
    1080  struct tracing_map_sort_entry *sort_entry, **entries;  in tracing_map_sort_entries() local
    1083  entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts));  in tracing_map_sort_entries()
    1084  if (!entries)  in tracing_map_sort_entries()
    [all …]
|
D | ring_buffer_benchmark.c |
    234   unsigned long long entries;  in ring_buffer_producer() local
    299   entries = ring_buffer_entries(buffer);  in ring_buffer_producer()
    332   trace_printk("Entries: %lld\n", entries);  in ring_buffer_producer()
    333   trace_printk("Total: %lld\n", entries + overruns + read);  in ring_buffer_producer()
|
D | ring_buffer.c |
    333   local_t entries; /* entries on this page */  member
    514   local_t entries;  member
    1413  old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);  in rb_tail_page_update()
    1443  (void)local_cmpxchg(&next_page->entries, old_entries, eval);  in rb_tail_page_update()
    1805  return local_read(&bpage->entries) & RB_WRITE_MASK;  in rb_page_entries()
    2366  int entries;  in rb_handle_head_page() local
    2370  entries = rb_page_entries(next_page);  in rb_handle_head_page()
    2398  local_add(entries, &cpu_buffer->overrun);  in rb_handle_head_page()
    3068  local_inc(&cpu_buffer->entries);  in rb_commit()
    3422  local_inc(&tail_page->entries);  in __rb_reserve_next()
    [all …]
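
The ring_buffer.c hits revolve around event counters kept in local_t and updated with local_inc()/local_add(): writers stay on the CPU that owns the buffer, so CPU-local atomics suffice. A tiny sketch of that counter style (struct demo_page is hypothetical, not the real buffer_page layout):

#include <asm/local.h>

struct demo_page {
	local_t entries;	/* events written to this page */
};

static void demo_account_event(struct demo_page *page)
{
	/* CPU-local increment; cross-CPU atomicity is not needed here. */
	local_inc(&page->entries);
}

static long demo_page_entries(struct demo_page *page)
{
	return local_read(&page->entries);
}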
|
D | trace.c |
    3669  unsigned long entries = 0;  in tracing_iter_reset() local
    3688  entries++;  in tracing_iter_reset()
    3692  per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;  in tracing_iter_reset()
    3779  unsigned long *entries, int cpu)  in get_total_entries_cpu() argument
    3796  *entries = count;  in get_total_entries_cpu()
    3801  unsigned long *total, unsigned long *entries)  in get_total_entries() argument
    3807  *entries = 0;  in get_total_entries()
    3812  *entries += e;  in get_total_entries()
    3818  unsigned long total, entries;  in trace_total_entries_cpu() local
    3823  get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);  in trace_total_entries_cpu()
    [all …]
|
D | tracing_map.h |
    286   tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
|
D | trace_events_hist.c |
    4567  unsigned long entries[HIST_STACKTRACE_DEPTH];  in event_hist_trigger() local
    4582  memset(entries, 0, HIST_STACKTRACE_SIZE);  in event_hist_trigger()
    4583  stack_trace_save(entries, HIST_STACKTRACE_DEPTH,  in event_hist_trigger()
    4585  key = entries;  in event_hist_trigger()
|
D | trace.h |
    171   unsigned long entries;  member
|
D | bpf_trace.c |
    1555  memcpy(buf, br_stack->entries, to_copy);  in BPF_CALL_4()
|
/kernel/livepatch/ |
D | transition.c |
    193   static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,  in klp_check_stack_func() argument
    201   address = entries[i];  in klp_check_stack_func()
    244   static unsigned long entries[MAX_STACK_ENTRIES];  in klp_check_stack() local
    249   ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));  in klp_check_stack()
    262   ret = klp_check_stack_func(func, entries, nr_entries);  in klp_check_stack()
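
The livepatch hits show a stack being saved "reliably" for a task and each return address compared against a function's address range. A rough sketch of that check, with error handling simplified (the demo_* names and the 64-entry depth are assumptions):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/types.h>

#define DEMO_MAX_STACK_ENTRIES 64

/* Return true if any saved return address lies inside [addr, addr + size). */
static bool demo_stack_hits_range(struct task_struct *task,
				  unsigned long addr, unsigned long size)
{
	static unsigned long entries[DEMO_MAX_STACK_ENTRIES];	/* single caller assumed */
	int nr_entries, i;

	nr_entries = stack_trace_save_tsk_reliable(task, entries,
						   ARRAY_SIZE(entries));
	if (nr_entries < 0)
		return true;	/* stack not reliable: let the caller retry later */

	for (i = 0; i < nr_entries; i++)
		if (entries[i] >= addr && entries[i] < addr + size)
			return true;

	return false;
}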
|
/kernel/bpf/ |
D | devmap.c |
    89    static struct hlist_head *dev_map_create_hash(unsigned int entries,  in dev_map_create_hash() argument
    95    hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);  in dev_map_create_hash()
    97    for (i = 0; i < entries; i++)  in dev_map_create_hash()
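
The devmap.c hits show a hash-bucket array sized from an entry count, with each bucket head initialized after allocation. A minimal sketch of the same shape, using plain kvcalloc() rather than the BPF map allocator (demo_create_hash() is hypothetical; free the result with kvfree()):

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Allocate 'entries' hash buckets and initialize every list head. */
static struct hlist_head *demo_create_hash(unsigned int entries)
{
	struct hlist_head *hash;
	unsigned int i;

	hash = kvcalloc(entries, sizeof(*hash), GFP_KERNEL);
	if (hash)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}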
|
/kernel/power/ |
D | swap.c |
    81    sector_t entries[MAP_PAGE_ENTRIES];  member
    460   handle->cur->entries[handle->k++] = offset;  in swap_write_page()
    1026  offset = handle->cur->entries[handle->k];  in swap_read_page()
    1311  handle->cur->entries[handle->k]) {  in load_image_lzo()
|
/kernel/locking/ |
D | lockdep.c |
    510   unsigned long entries[] __aligned(sizeof(unsigned long));
    523   memcmp(t1->entries, t2->entries,  in traces_identical()
    524   t1->nr_entries * sizeof(t1->entries[0])) == 0;  in traces_identical()
    550   trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);  in save_trace()
    552   hash = jhash(trace->entries, trace->nr_entries *  in save_trace()
    553   sizeof(trace->entries[0]), 0);  in save_trace()
    1804  stack_trace_print(trace->entries, trace->nr_entries, spaces);  in print_lock_trace()
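
The lockdep.c hits sketch how saved lock traces are deduplicated: the entries array is captured with stack_trace_save(), hashed with jhash(), and compared entry-for-entry with memcmp(). A simplified sketch of that scheme (struct demo_trace and the 32-entry depth are assumptions, not lockdep's real layout):

#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/string.h>

struct demo_trace {
	unsigned int nr_entries;
	u32 hash;
	unsigned long entries[32];
};

/* Capture the current stack and hash it so identical traces can be shared. */
static void demo_save_trace(struct demo_trace *t)
{
	/* Skip a few innermost frames, as save_trace() does with skipnr = 3. */
	t->nr_entries = stack_trace_save(t->entries, ARRAY_SIZE(t->entries), 3);
	t->hash = jhash(t->entries, t->nr_entries * sizeof(t->entries[0]), 0);
}

/* Two traces match only if length, hash and every entry agree. */
static bool demo_traces_identical(const struct demo_trace *a,
				  const struct demo_trace *b)
{
	return a->nr_entries == b->nr_entries && a->hash == b->hash &&
	       !memcmp(a->entries, b->entries,
		       a->nr_entries * sizeof(a->entries[0]));
}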
|