/kernel/trace/
D | trace_selftest.c |
      97  static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)  in warn_failed_init_tracer() argument
     100  trace->name, init_ret);  in warn_failed_init_tracer()
     356  static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,  in trace_selftest_startup_dynamic_tracing() argument
     386  ret = tracer_init(trace, tr);  in trace_selftest_startup_dynamic_tracing()
     388  warn_failed_init_tracer(trace, ret);  in trace_selftest_startup_dynamic_tracing()
     424  trace->reset(tr);  in trace_selftest_startup_dynamic_tracing()
     432  trace->reset(tr);  in trace_selftest_startup_dynamic_tracing()
     572  # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })  argument
     691  trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)  in trace_selftest_startup_function() argument
     710  ret = tracer_init(trace, tr);  in trace_selftest_startup_function()
    [all …]

D | fgraph.c |
     125  struct ftrace_graph_ent trace;  in function_graph_enter() local
     138  trace.func = func;  in function_graph_enter()
     139  trace.depth = ++current->curr_ret_depth;  in function_graph_enter()
     145  if (!ftrace_graph_entry(&trace))  in function_graph_enter()
     158  ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,  in ftrace_pop_return_trace() argument
     202  trace->func = current->ret_stack[index].func;  in ftrace_pop_return_trace()
     203  trace->calltime = current->ret_stack[index].calltime;  in ftrace_pop_return_trace()
     204  trace->overrun = atomic_read(&current->trace_overrun);  in ftrace_pop_return_trace()
     205  trace->depth = current->curr_ret_depth--;  in ftrace_pop_return_trace()
     245  struct ftrace_graph_ret trace;  in ftrace_return_to_handler() local
    [all …]

D | trace_functions_graph.c |
      98  struct ftrace_graph_ent *trace,  in __trace_graph_entry() argument
     111  entry->graph_ent = *trace;  in __trace_graph_entry()
     126  int trace_graph_entry(struct ftrace_graph_ent *trace)  in trace_graph_entry() argument
     146  if (ftrace_graph_notrace_addr(trace->func)) {  in trace_graph_entry()
     158  if (ftrace_graph_ignore_func(trace))  in trace_graph_entry()
     177  ret = __trace_graph_entry(tr, trace, trace_ctx);  in trace_graph_entry()
     217  struct ftrace_graph_ret *trace,  in __trace_graph_return() argument
     230  entry->ret = *trace;  in __trace_graph_return()
     235  void trace_graph_return(struct ftrace_graph_ret *trace)  in trace_graph_return() argument
     244  ftrace_graph_addr_finish(trace);  in trace_graph_return()
    [all …]

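For context, trace_graph_entry() and trace_graph_return() above are the callback pair the graph tracer plugs into fgraph.c. A minimal sketch of a custom pair with the same shapes, assuming the fgraph_ops registration API that ships with fgraph.c (all my_* names are hypothetical, and later kernels also pass the fgraph_ops to the callbacks as an extra argument):

    #include <linux/ftrace.h>

    /* Entry handler: a nonzero return means "trace this call". */
    static int my_graph_entry(struct ftrace_graph_ent *trace)
    {
    	return 1;
    }

    /* Return handler: calltime/rettime allow duration accounting,
     * much as profile_graph_return() in ftrace.c does. */
    static void my_graph_return(struct ftrace_graph_ret *trace)
    {
    	unsigned long long duration = trace->rettime - trace->calltime;

    	(void)duration;		/* e.g. accumulate per-function totals */
    }

    static struct fgraph_ops my_gops = {
    	.entryfunc	= my_graph_entry,
    	.retfunc	= my_graph_return,
    };

    /* Enable with register_ftrace_graph(&my_gops); tear down with
     * unregister_ftrace_graph(&my_gops). */
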
D | trace_stat.c |
     308  int register_stat_tracer(struct tracer_stat *trace)  in register_stat_tracer() argument
     313  if (!trace)  in register_stat_tracer()
     316  if (!trace->stat_start || !trace->stat_next || !trace->stat_show)  in register_stat_tracer()
     322  if (node->ts == trace)  in register_stat_tracer()
     332  session->ts = trace;  in register_stat_tracer()
     351  void unregister_stat_tracer(struct tracer_stat *trace)  in unregister_stat_tracer() argument
     357  if (node->ts == trace) {  in unregister_stat_tracer()

D | trace.h |
     499  struct tracer *trace;  member
     554  int (*selftest)(struct tracer *trace,
     651  void trace_graph_return(struct ftrace_graph_ret *trace);
     652  int trace_graph_entry(struct ftrace_graph_ent *trace);
     749  extern int trace_selftest_startup_function(struct tracer *trace,
     751  extern int trace_selftest_startup_function_graph(struct tracer *trace,
     753  extern int trace_selftest_startup_irqsoff(struct tracer *trace,
     755  extern int trace_selftest_startup_preemptoff(struct tracer *trace,
     757  extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
     759  extern int trace_selftest_startup_wakeup(struct tracer *trace,
    [all …]

D | trace_syscalls.c |
     133  struct syscall_trace_enter *trace;  in print_syscall_enter() local
     137  trace = (typeof(trace))ent;  in print_syscall_enter()
     138  syscall = trace->nr;  in print_syscall_enter()
     162  trace->args[i],  in print_syscall_enter()
     179  struct syscall_trace_exit *trace;  in print_syscall_exit() local
     183  trace = (typeof(trace))ent;  in print_syscall_exit()
     184  syscall = trace->nr;  in print_syscall_exit()
     198  trace->ret);  in print_syscall_exit()
     272  struct syscall_trace_enter trace;  in syscall_enter_define_fields() local
     274  int offset = offsetof(typeof(trace), args);  in syscall_enter_define_fields()
    [all …]

D | Kconfig |
      27  See Documentation/trace/ftrace-design.rst
      32  See Documentation/trace/ftrace-design.rst
      37  See Documentation/trace/ftrace-design.rst
      63  See Documentation/trace/ftrace-design.rst
      68  See Documentation/trace/ftrace-design.rst
     197  Enable the kernel to trace every kernel function. This is done
     213  Enable the kernel to trace a function at both its return
     215  Its first purpose is to trace the duration of functions and
     230  can function trace, and this table is linked into the kernel
     299  stack-trace saved. If this is configured with DYNAMIC_FTRACE
    [all …]

D | trace_stat.h |
      16  void *(*stat_start)(struct tracer_stat *trace);
      31  extern int register_stat_tracer(struct tracer_stat *trace);
      32  extern void unregister_stat_tracer(struct tracer_stat *trace);

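register_stat_tracer() above rejects a tracer_stat missing stat_start, stat_next or stat_show (the check at trace_stat.c:316). A minimal sketch satisfying those three callbacks, with a hypothetical static data set (the my_* names are illustrative):

    #include <linux/kernel.h>
    #include <linux/seq_file.h>
    #include "trace_stat.h"

    static unsigned long my_hits[3] = { 10, 20, 30 };

    /* stat_start(): hand back the first entry, or NULL if none. */
    static void *my_stat_start(struct tracer_stat *trace)
    {
    	return &my_hits[0];
    }

    /* stat_next(): hand back entry @idx; NULL ends the iteration. */
    static void *my_stat_next(void *prev, int idx)
    {
    	return idx < (int)ARRAY_SIZE(my_hits) ? &my_hits[idx] : NULL;
    }

    /* stat_show(): print one entry into the trace_stat file. */
    static int my_stat_show(struct seq_file *s, void *p)
    {
    	seq_printf(s, "%lu\n", *(unsigned long *)p);
    	return 0;
    }

    static struct tracer_stat my_stats = {
    	.name		= "my_stats",
    	.stat_start	= my_stat_start,
    	.stat_next	= my_stat_next,
    	.stat_show	= my_stat_show,
    };

    /* register_stat_tracer(&my_stats) creates trace_stat/my_stats under
     * tracefs; unregister_stat_tracer(&my_stats) removes it. */
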
D | trace_irqsoff.c |
     178  static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)  in irqsoff_graph_entry() argument
     186  if (ftrace_graph_ignore_func(trace))  in irqsoff_graph_entry()
     195  if (ftrace_graph_notrace_addr(trace->func))  in irqsoff_graph_entry()
     202  ret = __trace_graph_entry(tr, trace, trace_ctx);  in irqsoff_graph_entry()
     208  static void irqsoff_graph_return(struct ftrace_graph_ret *trace)  in irqsoff_graph_return() argument
     215  ftrace_graph_addr_finish(trace);  in irqsoff_graph_return()
     221  __trace_graph_return(tr, trace, trace_ctx);  in irqsoff_graph_return()

D | trace_output.c |
     795  if (event->funcs->trace == NULL)  in register_trace_event()
     796  event->funcs->trace = trace_nop_print;  in register_trace_event()
     921  .trace = trace_fn_trace,
    1061  .trace = trace_ctx_print,
    1073  .trace = trace_wake_print,
    1113  .trace = trace_stack_print,
    1166  .trace = trace_user_stack_print,
    1229  .trace = trace_hwlat_print,
    1299  .trace = trace_osnoise_print,
    1345  .trace = trace_timerlat_print,
    [all …]

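The `.trace` member populated in the initializers above is the per-event output callback; register_trace_event() substitutes trace_nop_print when it is NULL (trace_output.c:795). A hedged sketch of supplying one for a custom event type (names are hypothetical):

    #include <linux/trace_events.h>
    #include <linux/trace_seq.h>

    static enum print_line_t my_event_print(struct trace_iterator *iter,
    					int flags, struct trace_event *event)
    {
    	trace_seq_puts(&iter->seq, "my event\n");
    	return trace_handle_return(&iter->seq);
    }

    static struct trace_event_functions my_event_funcs = {
    	.trace	= my_event_print,
    };

    static struct trace_event my_event = {
    	.funcs	= &my_event_funcs,
    };

    /* register_trace_event(&my_event) assigns the event a type id,
     * returning 0 on failure. */
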
D | trace_sched_wakeup.c |
     115  static int wakeup_graph_entry(struct ftrace_graph_ent *trace)  in wakeup_graph_entry() argument
     122  if (ftrace_graph_ignore_func(trace))  in wakeup_graph_entry()
     131  if (ftrace_graph_notrace_addr(trace->func))  in wakeup_graph_entry()
     137  ret = __trace_graph_entry(tr, trace, trace_ctx);  in wakeup_graph_entry()
     144  static void wakeup_graph_return(struct ftrace_graph_ret *trace)  in wakeup_graph_return() argument
     150  ftrace_graph_addr_finish(trace);  in wakeup_graph_return()
     155  __trace_graph_return(tr, trace, trace_ctx);  in wakeup_graph_return()

D | trace.c |
    2138  type->flags->trace = type;  in register_tracer()
    2877  !event_call->event.funcs->trace)  in output_printk()
    2891  event_call->event.funcs->trace(iter, 0, event);  in output_printk()
    4117  if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {  in s_start()
    4119  if (iter->trace->close)  in s_start()
    4120  iter->trace->close(iter);  in s_start()
    4121  *iter->trace = *tr->current_trace;  in s_start()
    4123  if (iter->trace->open)  in s_start()
    4124  iter->trace->open(iter);  in s_start()
    4129  if (iter->snapshot && iter->trace->use_max_tr)  in s_start()
    [all …]

D | trace_branch.c |
     165  .trace = trace_branch_print,
     317  static void *annotated_branch_stat_start(struct tracer_stat *trace)  in annotated_branch_stat_start() argument
     407  static void *all_branch_stat_start(struct tracer_stat *trace)  in all_branch_stat_start() argument

D | Makefile |
      49  obj-$(CONFIG_TRACING) += trace.o

D | trace_events_synth.c |
     135  struct synth_trace_event trace;  in synth_event_define_fields() local
     136  int offset = offsetof(typeof(trace), fields);  in synth_event_define_fields()
     442  .trace = print_synth_event

D | ftrace.c |
     446  static void *function_stat_start(struct tracer_stat *trace)  in function_stat_start() argument
     449  container_of(trace, struct ftrace_profile_stat, stat);  in function_stat_start()
     789  static int profile_graph_entry(struct ftrace_graph_ent *trace)  in profile_graph_entry() argument
     793  function_profile_call(trace->func, 0, NULL, NULL);  in profile_graph_entry()
     806  static void profile_graph_return(struct ftrace_graph_ret *trace)  in profile_graph_return() argument
     820  if (!trace->calltime)  in profile_graph_return()
     823  calltime = trace->rettime - trace->calltime;  in profile_graph_return()
     839  rec = ftrace_find_profiled_func(stat, trace->func);  in profile_graph_return()

D | trace_kprobe.c |
    1760  .trace = print_kretprobe_event
    1764  .trace = print_kprobe_event

/kernel/
D | stacktrace.c |
     250  save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)  in save_stack_trace_tsk() argument
     256  save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)  in save_stack_trace_regs() argument
     272  struct stack_trace trace = {  in stack_trace_save() local
     278  save_stack_trace(&trace);  in stack_trace_save()
     279  return trace.nr_entries;  in stack_trace_save()
     296  struct stack_trace trace = {  in stack_trace_save_tsk() local
     303  save_stack_trace_tsk(task, &trace);  in stack_trace_save_tsk()
     304  return trace.nr_entries;  in stack_trace_save_tsk()
     319  struct stack_trace trace = {  in stack_trace_save_regs() local
     325  save_stack_trace_regs(regs, &trace);  in stack_trace_save_regs()
    [all …]

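The stack_trace_save*() functions above wrap the older save_stack_trace*() API in a struct stack_trace and return trace.nr_entries. A minimal caller-side sketch (buffer size and skip count are arbitrary choices):

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>

    static void dump_current_stack(void)
    {
    	unsigned long entries[16];
    	unsigned int nr;

    	/* Record up to 16 return addresses, skipping this frame. */
    	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

    	/* Print them via printk, indented by one space. */
    	stack_trace_print(entries, nr, 1);
    }
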
D | Makefile |
     100  obj-$(CONFIG_FUNCTION_TRACER) += trace/
     101  obj-$(CONFIG_TRACING) += trace/
     102  obj-$(CONFIG_TRACE_CLOCK) += trace/
     103  obj-$(CONFIG_RING_BUFFER) += trace/
     104  obj-$(CONFIG_TRACEPOINTS) += trace/
     105  obj-$(CONFIG_RETHOOK) += trace/

D | fork.c |
    2105  int trace,  in copy_process() argument
    2552  ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);  in copy_process()
    2751  int trace = 0;  in kernel_clone() local
    2776  trace = PTRACE_EVENT_VFORK;  in kernel_clone()
    2778  trace = PTRACE_EVENT_CLONE;  in kernel_clone()
    2780  trace = PTRACE_EVENT_FORK;  in kernel_clone()
    2782  if (likely(!ptrace_event_enabled(current, trace)))  in kernel_clone()
    2783  trace = 0;  in kernel_clone()
    2786  p = copy_process(NULL, trace, NUMA_NO_NODE, args);  in kernel_clone()
    2822  if (unlikely(trace))  in kernel_clone()
    [all …]

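In kernel_clone() above, `trace` carries the ptrace event to report and is cleared unless the parent's tracer enabled that event, which is what ptrace_event_enabled() checks. A userspace-side sketch of turning those events on (error handling elided; attachment to the tracee is assumed to have happened already):

    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* Ask to stop the tracee at fork/vfork/clone; these options set the
     * flags that make ptrace_event_enabled(current, trace) return true. */
    static void enable_fork_events(pid_t tracee)
    {
    	ptrace(PTRACE_SETOPTIONS, tracee, 0,
    	       PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK |
    	       PTRACE_O_TRACECLONE);
    }
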
/kernel/bpf/
D | stackmap.c |
     214  struct perf_callchain_entry *trace, u64 flags)  in __bpf_get_stackid() argument
     224  if (trace->nr <= skip)  in __bpf_get_stackid()
     228  trace_nr = trace->nr - skip;  in __bpf_get_stackid()
     230  ips = trace->ip + skip;  in __bpf_get_stackid()
     289  struct perf_callchain_entry *trace;  in BPF_CALL_3() local
     300  trace = get_perf_callchain(regs, 0, kernel, user, max_depth,  in BPF_CALL_3()
     303  if (unlikely(!trace))  in BPF_CALL_3()
     307  return __bpf_get_stackid(map, trace, flags);  in BPF_CALL_3()
     319  static __u64 count_kernel_ip(struct perf_callchain_entry *trace)  in count_kernel_ip() argument
     323  while (nr_kernel < trace->nr) {  in count_kernel_ip()
    [all …]

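__bpf_get_stackid() above backs the bpf_get_stackid() helper, which stores a callchain in a BPF_MAP_TYPE_STACK_TRACE map and returns its id. A minimal BPF-program sketch of the calling side (map sizing and the kprobe attach point are illustrative):

    #include <linux/bpf.h>
    #include <linux/ptrace.h>
    #include <bpf/bpf_helpers.h>

    #define MAX_STACK_DEPTH	127	/* PERF_MAX_STACK_DEPTH */

    struct {
    	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
    	__uint(max_entries, 1024);
    	__uint(key_size, sizeof(__u32));
    	__uint(value_size, MAX_STACK_DEPTH * sizeof(__u64));
    } stack_traces SEC(".maps");

    SEC("kprobe/kfree")
    int probe_kfree(struct pt_regs *ctx)
    {
    	/* flags == 0: kernel stack, no frames skipped. */
    	long id = bpf_get_stackid(ctx, &stack_traces, 0);

    	if (id >= 0)
    		bpf_printk("stackid=%ld", id);
    	return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
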
D | cpumap.c |
     787  goto trace;  in cpu_map_generic_redirect()
     790  trace:  in cpu_map_generic_redirect()

/kernel/locking/
D | lockdep.c |
     561  struct lock_trace *trace, *t2;  in save_trace() local
     569  trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);  in save_trace()
     582  trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);  in save_trace()
     584  hash = jhash(trace->entries, trace->nr_entries *  in save_trace()
     585  sizeof(trace->entries[0]), 0);  in save_trace()
     586  trace->hash = hash;  in save_trace()
     589  if (traces_identical(trace, t2))  in save_trace()
     592  nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;  in save_trace()
     593  hlist_add_head(&trace->hash_entry, hash_head);  in save_trace()
     595  return trace;  in save_trace()
    [all …]

/kernel/trace/rv/
D | Kconfig |
      21  theorem proving). RV works by analyzing the trace of the system's
      26  Documentation/trace/rv/runtime-verification.rst
      39  Documentation/trace/rv/monitor_wip.rst
      51  Documentation/trace/rv/monitor_wwnr.rst

/kernel/power/
D | Kconfig |
     252  functions from <linux/resume-trace.h> as well as the
     253  <asm/resume-trace.h> header with a TRACE_RESUME() macro.