Searched refs:entries (Results 1 – 19 of 19) sorted by relevance

/kernel/events/
callchain.c
50 struct callchain_cpus_entries *entries; in release_callchain_buffers_rcu() local
53 entries = container_of(head, struct callchain_cpus_entries, rcu_head); in release_callchain_buffers_rcu()
56 kfree(entries->cpu_entries[cpu]); in release_callchain_buffers_rcu()
58 kfree(entries); in release_callchain_buffers_rcu()
63 struct callchain_cpus_entries *entries; in release_callchain_buffers() local
65 entries = callchain_cpus_entries; in release_callchain_buffers()
67 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); in release_callchain_buffers()
74 struct callchain_cpus_entries *entries; in alloc_callchain_buffers() local
83 entries = kzalloc(size, GFP_KERNEL); in alloc_callchain_buffers()
84 if (!entries) in alloc_callchain_buffers()
[all …]
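
The callchain.c hits are the classic RCU-deferred free: the rcu_head embedded in the buffer structure is handed to call_rcu(), and the callback recovers the enclosing allocation with container_of() before freeing it. A minimal sketch, with the struct abbreviated from the real one:

    #include <linux/perf_event.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Abbreviated from kernel/events/callchain.c: rcu_head lives inside
     * the object so the callback can recover the whole allocation. */
    struct callchain_cpus_entries {
            struct rcu_head rcu_head;
            struct perf_callchain_entry *cpu_entries[];
    };

    static void release_callchain_buffers_rcu(struct rcu_head *head)
    {
            struct callchain_cpus_entries *entries;

            entries = container_of(head, struct callchain_cpus_entries, rcu_head);
            /* the real code also kfree()s each cpu_entries[cpu] here */
            kfree(entries);
    }

    /* Queuing side: nothing is freed until a grace period has elapsed,
     * so concurrent RCU readers can keep using the old buffers. */
    static void release_callchain_buffers(struct callchain_cpus_entries *entries)
    {
            call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
    }
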
core.c
7193 perf_output_copy(handle, data->br_stack->entries, size); in perf_output_sample()
/kernel/
stacktrace.c
24 void stack_trace_print(const unsigned long *entries, unsigned int nr_entries, in stack_trace_print() argument
29 if (WARN_ON(!entries)) in stack_trace_print()
33 printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]); in stack_trace_print()
47 int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, in stack_trace_snprint() argument
52 if (WARN_ON(!entries)) in stack_trace_snprint()
57 (void *)entries[i]); in stack_trace_snprint()
273 .entries = store, in stack_trace_save()
297 .entries = store, in stack_trace_save_tsk()
320 .entries = store, in stack_trace_save_regs()
346 .entries = store, in stack_trace_save_tsk_reliable()
[all …]
backtracetest.c
47 unsigned long entries[8]; in backtrace_test_saved() local
53 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); in backtrace_test_saved()
54 stack_trace_print(entries, nr_entries, 0); in backtrace_test_saved()
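
backtracetest.c is the smallest consumer of the stacktrace.c API above: stack_trace_save() fills a caller-supplied array and returns how many entries it stored, and exactly that many are then printed. A hedged sketch of the same pairing:

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>

    static void dump_current_stack(void)
    {
            unsigned long entries[8];
            unsigned int nr_entries;

            /* skipnr == 0: keep this function's own frame in the trace */
            nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
            stack_trace_print(entries, nr_entries, 0);
    }
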
jump_label.c
379 struct jump_entry *entries) in static_key_set_entries() argument
383 WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK); in static_key_set_entries()
385 key->entries = entries; in static_key_set_entries()
519 struct jump_entry *entries; member
582 if (!mod->entries) in __jump_label_mod_update()
590 __jump_label_update(key, mod->entries, stop, in __jump_label_mod_update()
638 jlm2->entries = static_key_entries(key); in jump_label_add_module()
644 jlm->entries = iter; in jump_label_add_module()
700 static_key_set_entries(key, jlm->entries); in jump_label_del_module()
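
The WARN_ON_ONCE in static_key_set_entries() exists because the low bits of the entries pointer double as JUMP_TYPE_* flags: pointer and flags share one word, so the pointer must be aligned enough to leave those bits free. A sketch reconstructing the packing, abbreviated from <linux/jump_label.h> and jump_label.c:

    #include <linux/atomic.h>
    #include <linux/bug.h>

    #define JUMP_TYPE_MASK 3UL      /* low two bits carry the flags */

    struct jump_entry;

    struct static_key {
            atomic_t enabled;
            union {
                    unsigned long type;          /* flag bits live here */
                    struct jump_entry *entries;  /* ... under the pointer */
            };
    };

    static void static_key_set_entries(struct static_key *key,
                                       struct jump_entry *entries)
    {
            unsigned long type;

            WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
            type = key->type & JUMP_TYPE_MASK;  /* save current flag bits */
            key->entries = entries;
            key->type |= type;                  /* re-apply them over the union */
    }
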
/kernel/trace/
tracing_map.c
940 void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries, in tracing_map_destroy_sort_entries() argument
946 destroy_sort_entry(entries[i]); in tracing_map_destroy_sort_entries()
948 vfree(entries); in tracing_map_destroy_sort_entries()
1003 const struct tracing_map_sort_entry **entries, in sort_secondary() argument
1023 const struct tracing_map_sort_entry **a = &entries[i]; in sort_secondary()
1024 const struct tracing_map_sort_entry **b = &entries[i + 1]; in sort_secondary()
1039 sort(&entries[start], n_sub, in sort_secondary()
1080 struct tracing_map_sort_entry *sort_entry, **entries; in tracing_map_sort_entries() local
1083 entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts)); in tracing_map_sort_entries()
1084 if (!entries) in tracing_map_sort_entries()
[all …]
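
Note the split allocation in tracing_map.c: the pointer array is sized by max_elts and comes from vmalloc() (with overflow-checked array_size()), while each sort entry is destroyed individually. The teardown, reconstructed from the hits above with the per-entry destructor left as in the real file:

    #include <linux/vmalloc.h>

    void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
                                          unsigned int n_entries)
    {
            unsigned int i;

            for (i = 0; i < n_entries; i++)
                    destroy_sort_entry(entries[i]);  /* frees one entry's parts */

            vfree(entries);  /* the array itself came from vmalloc() */
    }
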
ring_buffer_benchmark.c
234 unsigned long long entries; in ring_buffer_producer() local
299 entries = ring_buffer_entries(buffer); in ring_buffer_producer()
332 trace_printk("Entries: %lld\n", entries); in ring_buffer_producer()
333 trace_printk("Total: %lld\n", entries + overruns + read); in ring_buffer_producer()
ring_buffer.c
306 local_t entries; /* entries on this page */ member
478 local_t entries; member
1468 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); in rb_tail_page_update()
1498 (void)local_cmpxchg(&next_page->entries, old_entries, eval); in rb_tail_page_update()
1885 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
2446 int entries; in rb_handle_head_page() local
2450 entries = rb_page_entries(next_page); in rb_handle_head_page()
2478 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
3117 local_inc(&cpu_buffer->entries); in rb_commit()
3621 local_inc(&tail_page->entries); in __rb_reserve_next()
[all …]
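
The local_t entries field on each buffer page is two counters in one word: the low bits (RB_WRITE_MASK) count entries on the page, while the upper bits count writers that interrupted a tail-page update, bumped in RB_WRITE_INTCNT steps. A sketch using the constants as defined in ring_buffer.c:

    #include <asm/local.h>

    #define RB_WRITE_MASK   0xfffff         /* low 20 bits: entry count */
    #define RB_WRITE_INTCNT (1 << 20)       /* upper bits: interrupting writers */

    static unsigned long page_entries(local_t *entries)
    {
            /* mask off the interrupt-nesting bits to get the real count */
            return local_read(entries) & RB_WRITE_MASK;
    }
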
trace.c
4071 unsigned long entries = 0; in tracing_iter_reset() local
4090 entries++; in tracing_iter_reset()
4094 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; in tracing_iter_reset()
4181 unsigned long *entries, int cpu) in get_total_entries_cpu() argument
4198 *entries = count; in get_total_entries_cpu()
4203 unsigned long *total, unsigned long *entries) in get_total_entries() argument
4209 *entries = 0; in get_total_entries()
4214 *entries += e; in get_total_entries()
4220 unsigned long total, entries; in trace_total_entries_cpu() local
4225 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); in trace_total_entries_cpu()
[all …]
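
get_total_entries() is a plain per-CPU reduction: one helper fills a pair of counts for each CPU, and the totals are accumulated across all tracing CPUs. A simplified sketch; get_cpu_counts() is a hypothetical stand-in for get_total_entries_cpu() and its buffer argument:

    static void get_total_entries(unsigned long *total, unsigned long *entries,
                                  int nr_cpus)
    {
            int cpu;

            *total = 0;
            *entries = 0;
            for (cpu = 0; cpu < nr_cpus; cpu++) {
                    unsigned long t, e;

                    get_cpu_counts(cpu, &t, &e);  /* hypothetical stand-in */
                    *total += t;
                    *entries += e;
            }
    }
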
tracing_map.h
286 tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
trace_events_hist.c
5180 unsigned long entries[HIST_STACKTRACE_DEPTH]; in event_hist_trigger() local
5198 memset(entries, 0, HIST_STACKTRACE_SIZE); in event_hist_trigger()
5199 stack_trace_save(entries, HIST_STACKTRACE_DEPTH, in event_hist_trigger()
5201 key = entries; in event_hist_trigger()
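
For stacktrace keys the array is zeroed before stack_trace_save() fills it, so unused tail slots hash and compare identically across events; the whole array then serves as the map key. The macros below follow the shape of the real HIST_STACKTRACE_* definitions:

    #include <linux/stacktrace.h>
    #include <linux/string.h>

    #define HIST_STACKTRACE_DEPTH 16
    #define HIST_STACKTRACE_SIZE  (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
    #define HIST_STACKTRACE_SKIP  5    /* drop the tracing-internal frames */

    static void fill_stacktrace_key(unsigned long *entries)
    {
            /* zero first: every byte of the array is part of the key */
            memset(entries, 0, HIST_STACKTRACE_SIZE);
            stack_trace_save(entries, HIST_STACKTRACE_DEPTH, HIST_STACKTRACE_SKIP);
    }
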
trace.h
166 unsigned long entries; member
bpf_trace.c
1743 memcpy(buf, br_stack->entries, to_copy); in BPF_CALL_4()
Kconfig
268 entries and exits. This also can probe multiple functions by one
/kernel/printk/
index.c
22 struct pi_entry **entries; in pi_get_entry() local
27 entries = mod->printk_index_start; in pi_get_entry()
33 entries = __start_printk_index; in pi_get_entry()
40 return entries[pos]; in pi_get_entry()
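
pi_get_entry() picks its entries table from one of two places: a module's own printk index section, or the built-in __start_printk_index range for vmlinux, and then indexes it by position. A sketch with the bounds checks elided:

    static struct pi_entry *pi_get_entry(const struct module *mod, loff_t pos)
    {
            struct pi_entry **entries;

            if (mod)
                    entries = mod->printk_index_start;  /* module's section */
            else
                    entries = __start_printk_index;     /* built-in kernel */

            /* the real code also range-checks pos against the section end */
            return entries[pos];
    }
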
/kernel/livepatch/
transition.c
192 static int klp_check_stack_func(struct klp_func *func, unsigned long *entries, in klp_check_stack_func() argument
200 address = entries[i]; in klp_check_stack_func()
243 static unsigned long entries[MAX_STACK_ENTRIES]; in klp_check_stack() local
248 ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries)); in klp_check_stack()
257 ret = klp_check_stack_func(func, entries, nr_entries); in klp_check_stack()
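
The livepatch check leans on the _reliable_ variant: a negative return means the architecture could not vouch for the completeness of the trace, so the task must not be transitioned on the strength of it. A sketch of the flow; MAX_STACK_ENTRIES is the array bound defined in transition.c:

    static int klp_check_task_stack(struct task_struct *task)
    {
            static unsigned long entries[MAX_STACK_ENTRIES];
            int ret;

            ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
            if (ret < 0)
                    return ret;  /* unreliable trace: retry this task later */

            /* ... then klp_check_stack_func(func, entries, ret) per patched
             * function, failing the transition if any frame matches ... */
            return 0;
    }
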
/kernel/power/
swap.c
83 sector_t entries[MAP_PAGE_ENTRIES]; member
465 handle->cur->entries[handle->k++] = offset; in swap_write_page()
1034 offset = handle->cur->entries[handle->k]; in swap_read_page()
1316 handle->cur->entries[handle->k]) { in load_image_lzo()
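
The map pages behind these hits are fixed-size arrays of sector numbers chained into a list: suspend appends offsets through the (cur, k) cursor and resume replays them in the same order. The layout, as declared in swap.c:

    #include <linux/types.h>

    /* one page of sectors; the trailing slot links to the next map page */
    #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)

    struct swap_map_page {
            sector_t entries[MAP_PAGE_ENTRIES];
            sector_t next_swap;
    };
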
/kernel/bpf/
devmap.c
91 static struct hlist_head *dev_map_create_hash(unsigned int entries, in dev_map_create_hash() argument
97 hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node); in dev_map_create_hash()
99 for (i = 0; i < entries; i++) in dev_map_create_hash()
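
dev_map_create_hash() widens the size calculation to u64 before allocating, so a large entries count cannot overflow a 32-bit multiply. A sketch completing the truncated hit above:

    #include <linux/bpf.h>
    #include <linux/list.h>

    static struct hlist_head *dev_map_create_hash(unsigned int entries,
                                                  int numa_node)
    {
            int i;
            struct hlist_head *hash;

            hash = bpf_map_area_alloc((u64)entries * sizeof(*hash), numa_node);
            if (hash)
                    for (i = 0; i < entries; i++)
                            INIT_HLIST_HEAD(&hash[i]);

            return hash;
    }
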
/kernel/locking/
lockdep.c
542 unsigned long entries[] __aligned(sizeof(unsigned long));
555 memcmp(t1->entries, t2->entries, in traces_identical()
556 t1->nr_entries * sizeof(t1->entries[0])) == 0; in traces_identical()
582 trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); in save_trace()
584 hash = jhash(trace->entries, trace->nr_entries * in save_trace()
585 sizeof(trace->entries[0]), 0); in save_trace()
1850 stack_trace_print(trace->entries, trace->nr_entries, spaces); in print_lock_trace()
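
lockdep deduplicates stack traces by hashing the raw entry words with jhash() and comparing length, hash, then bytes; entries[] is a flexible array member at the struct tail. A sketch with the struct abbreviated (the real one also carries a hash-list node):

    #include <linux/jhash.h>
    #include <linux/string.h>

    struct lock_trace {
            u32 hash;
            u32 nr_entries;
            unsigned long entries[] __aligned(sizeof(unsigned long));
    };

    static u32 trace_hash(const struct lock_trace *t)
    {
            return jhash(t->entries, t->nr_entries * sizeof(t->entries[0]), 0);
    }

    static bool traces_identical(const struct lock_trace *t1,
                                 const struct lock_trace *t2)
    {
            return t1->hash == t2->hash &&
                   t1->nr_entries == t2->nr_entries &&
                   memcmp(t1->entries, t2->entries,
                          t1->nr_entries * sizeof(t1->entries[0])) == 0;
    }
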