/kernel/trace/ |
D | trace_events.c |
    98  trace_find_event_field(struct trace_event_call *call, char *name)  in trace_find_event_field()  argument
    103  head = trace_get_fields(call);  in trace_find_event_field()
    142  int trace_define_field(struct trace_event_call *call, const char *type,  in trace_define_field()  argument
    148  if (WARN_ON(!call->class))  in trace_define_field()
    151  head = trace_get_fields(call);  in trace_define_field()
    200  static void trace_destroy_fields(struct trace_event_call *call)  in trace_destroy_fields()  argument
    205  head = trace_get_fields(call);  in trace_destroy_fields()
    216  int trace_event_get_offsets(struct trace_event_call *call)  in trace_event_get_offsets()  argument
    221  head = trace_get_fields(call);  in trace_event_get_offsets()
    234  static bool test_field(const char *fmt, struct trace_event_call *call)  in test_field()  argument
    [all …]
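Alongside the trace_define_field() hits above, here is a minimal, hedged sketch of how a ->define_fields callback typically registers one field of an event record, assuming the usual in-kernel signature trace_define_field(call, type, name, offset, size, is_signed, filter_type); struct my_entry and my_event_define_fields() are illustrative names, not code from trace_events.c.

    #include <linux/trace_events.h>

    /* Illustrative event record; real events embed struct trace_entry first. */
    struct my_entry {
    	struct trace_entry	ent;
    	unsigned long		ip;
    };

    /* Sketch of a ->define_fields callback registering a single field. */
    static int my_event_define_fields(struct trace_event_call *call)
    {
    	return trace_define_field(call, "unsigned long", "ip",
    				  offsetof(struct my_entry, ip),
    				  sizeof(unsigned long),
    				  0 /* is_signed */, FILTER_OTHER);
    }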
|
D | trace_syscalls.c |
    24  syscall_get_enter_fields(struct trace_event_call *call)  in syscall_get_enter_fields()  argument
    26  struct syscall_metadata *entry = call->data;  in syscall_get_enter_fields()
    239  static int __init set_syscall_print_fmt(struct trace_event_call *call)  in set_syscall_print_fmt()  argument
    243  struct syscall_metadata *entry = call->data;  in set_syscall_print_fmt()
    245  if (entry->enter_event != call) {  in set_syscall_print_fmt()
    246  call->print_fmt = "\"0x%lx\", REC->ret";  in set_syscall_print_fmt()
    259  call->print_fmt = print_fmt;  in set_syscall_print_fmt()
    264  static void __init free_syscall_print_fmt(struct trace_event_call *call)  in free_syscall_print_fmt()  argument
    266  struct syscall_metadata *entry = call->data;  in free_syscall_print_fmt()
    268  if (entry->enter_event == call)  in free_syscall_print_fmt()
    [all …]
|
D | trace_export.c |
    18  static int ftrace_event_register(struct trace_event_call *call,  in ftrace_event_register()  argument
    161  #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, regfn) \  argument
    162  static struct trace_event_class __refdata event_class_ftrace_##call = { \
    164  .fields_array = ftrace_event_fields_##call, \
    165  .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
    169  struct trace_event_call __used event_##call = { \
    170  .class = &event_class_ftrace_##call, \
    172  .name = #call, \
    179  __section("_ftrace_events") *__event_##call = &event_##call;
    182  #define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \  argument
    [all …]
|
D | trace_events_inject.c |
    36  parse_field(char *str, struct trace_event_call *call,  in parse_field()  argument
    60  field = trace_find_event_field(call, field_name);  in parse_field()
    138  static int trace_get_entry_size(struct trace_event_call *call)  in trace_get_entry_size()  argument
    144  head = trace_get_fields(call);  in trace_get_entry_size()
    153  static void *trace_alloc_entry(struct trace_event_call *call, int *size)  in trace_alloc_entry()  argument
    155  int entry_size = trace_get_entry_size(call);  in trace_alloc_entry()
    165  head = trace_get_fields(call);  in trace_alloc_entry()
    192  static int parse_entry(char *str, struct trace_event_call *call, void **pentry)  in parse_entry()  argument
    200  entry = trace_alloc_entry(call, &entry_size);  in parse_entry()
    205  tracing_generic_entry_update(entry, call->event.type,  in parse_entry()
    [all …]
|
D | trace_dynevent.c |
    24  struct trace_event_call *call;  in trace_event_dyn_try_get_ref()  local
    31  list_for_each_entry(call, &ftrace_events, list) {  in trace_event_dyn_try_get_ref()
    32  if (call == dyn_call) {  in trace_event_dyn_try_get_ref()
    41  void trace_event_dyn_put_ref(struct trace_event_call *call)  in trace_event_dyn_put_ref()  argument
    43  if (WARN_ON_ONCE(!(call->flags & TRACE_EVENT_FL_DYNAMIC)))  in trace_event_dyn_put_ref()
    46  if (WARN_ON_ONCE(atomic_read(&call->refcnt) <= 0)) {  in trace_event_dyn_put_ref()
    47  atomic_set(&call->refcnt, 0);  in trace_event_dyn_put_ref()
    51  atomic_dec(&call->refcnt);  in trace_event_dyn_put_ref()
    54  bool trace_event_dyn_busy(struct trace_event_call *call)  in trace_event_dyn_busy()  argument
    56  return atomic_read(&call->refcnt) != 0;  in trace_event_dyn_busy()
|
D | trace_probe.h |
    207  bool trace_kprobe_on_func_entry(struct trace_event_call *call);
    208  bool trace_kprobe_error_injectable(struct trace_event_call *call);
    210  static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)  in trace_kprobe_on_func_entry()  argument
    215  static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)  in trace_kprobe_error_injectable()  argument
    242  struct trace_event_call call;  member
    286  return trace_event_name(&tp->event->call);  in trace_probe_name()
    291  return tp->event->call.class->system;  in trace_probe_group_name()
    297  return &tp->event->call;  in trace_probe_event_call()
    303  return container_of(event_call, struct trace_probe_event, call);  in trace_probe_event_from_call()
    307  trace_probe_primary_from_call(struct trace_event_call *call)  in trace_probe_primary_from_call()  argument
    [all …]
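The trace_probe.h hits show the pattern behind trace_probe_event_from_call(): the trace_event_call is embedded as a member of the probe event, so the enclosing object is recovered with container_of(). Below is a hedged sketch under that assumption; trace_probe_event_like and event_from_call() are illustrative names, not the kernel's own types.

    #include <linux/trace_events.h>
    #include <linux/container_of.h>

    struct trace_probe_event_like {
    	unsigned int		flags;	/* illustrative extra member */
    	struct trace_event_call	call;	/* embedded, as at line 242 above */
    };

    /* Recover the enclosing probe event from a pointer to its ->call member. */
    static struct trace_probe_event_like *
    event_from_call(struct trace_event_call *event_call)
    {
    	return container_of(event_call, struct trace_probe_event_like, call);
    }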
|
D | trace_functions_graph.c |
    101  struct trace_event_call *call = &event_funcgraph_entry;  in __trace_graph_entry()  local
    112  if (!call_filter_check_discard(call, entry, buffer, event))  in __trace_graph_entry()
    220  struct trace_event_call *call = &event_funcgraph_exit;  in __trace_graph_return()  local
    231  if (!call_filter_check_discard(call, entry, buffer, event))  in __trace_graph_return()
    632  struct ftrace_graph_ent *call;  in print_graph_entry_leaf()  local
    638  call = &entry->graph_ent;  in print_graph_entry_leaf()
    651  cpu_data->depth = call->depth - 1;  in print_graph_entry_leaf()
    654  if (call->depth < FTRACE_RETFUNC_DEPTH &&  in print_graph_entry_leaf()
    655  !WARN_ON_ONCE(call->depth < 0))  in print_graph_entry_leaf()
    656  cpu_data->enter_funcs[call->depth] = 0;  in print_graph_entry_leaf()
    [all …]
|
D | trace_events_synth.c |
    114  static int synth_event_define_fields(struct trace_event_call *call)  in synth_event_define_fields()  argument
    118  struct synth_event *event = call->data;  in synth_event_define_fields()
    129  ret = trace_define_field(call, type, name, offset, size,  in synth_event_define_fields()
    339  se = container_of(event, struct synth_event, call.event);  in print_synth_event()
    596  static void free_synth_event_print_fmt(struct trace_event_call *call)  in free_synth_event_print_fmt()  argument
    598  if (call) {  in free_synth_event_print_fmt()
    599  kfree(call->print_fmt);  in free_synth_event_print_fmt()
    600  call->print_fmt = NULL;  in free_synth_event_print_fmt()
    642  static int set_synth_event_print_fmt(struct trace_event_call *call)  in set_synth_event_print_fmt()  argument
    644  struct synth_event *event = call->data;  in set_synth_event_print_fmt()
    [all …]
|
D | trace_kprobe.c |
    206  trace_kprobe_primary_from_call(struct trace_event_call *call)  in trace_kprobe_primary_from_call()  argument
    210  tp = trace_probe_primary_from_call(call);  in trace_kprobe_primary_from_call()
    217  bool trace_kprobe_on_func_entry(struct trace_event_call *call)  in trace_kprobe_on_func_entry()  argument
    219  struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);  in trace_kprobe_on_func_entry()
    226  bool trace_kprobe_error_injectable(struct trace_event_call *call)  in trace_kprobe_error_injectable()  argument
    228  struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);  in trace_kprobe_error_injectable()
    349  static int enable_trace_kprobe(struct trace_event_call *call,  in enable_trace_kprobe()  argument
    357  tp = trace_probe_primary_from_call(call);  in enable_trace_kprobe()
    400  static int disable_trace_kprobe(struct trace_event_call *call,  in disable_trace_kprobe()  argument
    405  tp = trace_probe_primary_from_call(call);  in disable_trace_kprobe()
    [all …]
|
D | trace_uprobe.c |
    321  trace_uprobe_primary_from_call(struct trace_event_call *call)  in trace_uprobe_primary_from_call()  argument
    325  tp = trace_probe_primary_from_call(call);  in trace_uprobe_primary_from_call()
    957  struct trace_event_call *call = trace_probe_event_call(&tu->tp);  in __uprobe_trace_func()  local
    959  WARN_ON(call != trace_file->event_call);  in __uprobe_trace_func()
    970  call->event.type, size, 0);  in __uprobe_trace_func()
    1095  static int probe_event_enable(struct trace_event_call *call,  in probe_event_enable()  argument
    1103  tp = trace_probe_primary_from_call(call);  in probe_event_enable()
    1156  static void probe_event_disable(struct trace_event_call *call,  in probe_event_disable()  argument
    1161  tp = trace_probe_primary_from_call(call);  in probe_event_disable()
    1276  static int uprobe_perf_close(struct trace_event_call *call,  in uprobe_perf_close()  argument
    [all …]
|
D | trace_eprobe.c |
    504  struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp);  in __eprobe_trace_func()  local
    508  if (WARN_ON_ONCE(call != edata->file->event_call))  in __eprobe_trace_func()
    522  call->event.type,  in __eprobe_trace_func()
    721  static int enable_trace_eprobe(struct trace_event_call *call,  in enable_trace_eprobe()  argument
    730  tp = trace_probe_primary_from_call(call);  in enable_trace_eprobe()
    780  static int disable_trace_eprobe(struct trace_event_call *call,  in disable_trace_eprobe()  argument
    786  tp = trace_probe_primary_from_call(call);  in disable_trace_eprobe()
    844  struct trace_event_call *call = trace_probe_event_call(&ep->tp);  in init_trace_eprobe_call()  local
    846  call->flags = TRACE_EVENT_FL_EPROBE;  in init_trace_eprobe_call()
    847  call->event.funcs = &eprobe_funcs;  in init_trace_eprobe_call()
    [all …]
|
D | trace_events_filter.c |
    1227  struct trace_event_call *call = data;  in parse_pred()  local
    1260  field = trace_find_event_field(call, field_name);  in parse_pred()
    1303  if (ftrace_event_is_function(call)) {  in parse_pred()
    1598  static int process_preds(struct trace_event_call *call,  in process_preds()  argument
    1628  parse_pred, call, pe);  in process_preds()
    1806  struct trace_event_call *call,  in create_filter()  argument
    1821  err = process_preds(call, filter_string, *filterp, pe);  in create_filter()
    1830  struct trace_event_call *call,  in create_event_filter()  argument
    1834  return create_filter(tr, call, filter_str, set_str, filterp);  in create_event_filter()
    1871  struct trace_event_call *call = file->event_call;  in apply_event_filter()  local
    [all …]
|
D | trace_probe.c |
    958  struct trace_event_call *call = trace_probe_event_call(tp);  in traceprobe_set_print_fmt()  local
    970  call->print_fmt = print_fmt;  in traceprobe_set_print_fmt()
    1003  kfree(tpe->call.name);  in trace_probe_event_free()
    1004  kfree(tpe->call.print_fmt);  in trace_probe_event_free()
    1044  struct trace_event_call *call;  in trace_probe_init()  local
    1064  call = trace_probe_event_call(tp);  in trace_probe_init()
    1065  call->class = &tp->event->class;  in trace_probe_init()
    1066  call->name = kstrdup(event, GFP_KERNEL);  in trace_probe_init()
    1067  if (!call->name) {  in trace_probe_init()
    1106  struct trace_event_call *call = trace_probe_event_call(tp);  in trace_probe_register_event_call()  local
    [all …]
|
D | trace_mmiotrace.c |
    297  struct trace_event_call *call = &event_mmiotrace_rw;  in __trace_mmiotrace_rw()  local
    313  if (!call_filter_check_discard(call, entry, buffer, event))  in __trace_mmiotrace_rw()
    328  struct trace_event_call *call = &event_mmiotrace_map;  in __trace_mmiotrace_map()  local
    344  if (!call_filter_check_discard(call, entry, buffer, event))  in __trace_mmiotrace_map()
|
D | trace.h |
    1117  bool ftrace_event_is_function(struct trace_event_call *call);
    1317  extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
    1498  struct trace_event_call *call,
    1504  trace_find_event_field(struct trace_event_call *call, char *name);
    1862  #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \  argument
    1864  __aligned(4) event_##call;
    1866  #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \  argument
    1867  FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
    1869  #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \  argument
    1870  FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
    [all …]
|
D | trace_dynevent.h |
    80  struct trace_event_call *call)  in dyn_event_add()  argument
    87  call->flags |= TRACE_EVENT_FL_DYNAMIC;  in dyn_event_add()
|
D | trace_sched_wakeup.c |
    379  struct trace_event_call *call = &event_context_switch;  in tracing_sched_switch_trace()  local
    397  if (!call_filter_check_discard(call, entry, buffer, event))  in tracing_sched_switch_trace()
    407  struct trace_event_call *call = &event_wakeup;  in tracing_sched_wakeup_trace()  local
    425  if (!call_filter_check_discard(call, entry, buffer, event))  in tracing_sched_wakeup_trace()
|
D | trace_synth.h | 34 struct trace_event_call call; member
|
D | trace_osnoise.c |
    331  struct trace_event_call *call = &event_osnoise;  in trace_osnoise_sample()  local
    349  if (!call_filter_check_discard(call, entry, buffer, event))  in trace_osnoise_sample()
    396  struct trace_event_call *call = &event_osnoise;  in trace_timerlat_sample()  local
    410  if (!call_filter_check_discard(call, entry, buffer, event))  in trace_timerlat_sample()
    460  struct trace_event_call *call = &event_osnoise;  in timerlat_dump_stack()  local
    482  if (!call_filter_check_discard(call, entry, buffer, event))  in timerlat_dump_stack()
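The funcgraph, mmiotrace, sched_wakeup, and osnoise hits all share one idiom: a per-tracer trace_event_call is used to filter a just-written ring-buffer entry before it is committed. Below is a rough sketch of that reserve/fill/filter/commit flow; the helper names and signatures follow kernel/trace/trace.h as best I recall them, and MY_ENTRY_TYPE plus struct my_entry are placeholders rather than real kernel symbols.

    /* Placeholder entry-type id and record layout for illustration only. */
    #define MY_ENTRY_TYPE	0

    struct my_entry {
    	struct trace_entry	ent;
    	unsigned long		ip;
    };

    static void __trace_my_sample(struct trace_event_call *call,
    			      struct trace_buffer *buffer,
    			      unsigned long ip, unsigned int trace_ctx)
    {
    	struct ring_buffer_event *event;
    	struct my_entry *entry;

    	event = trace_buffer_lock_reserve(buffer, MY_ENTRY_TYPE,
    					  sizeof(*entry), trace_ctx);
    	if (!event)
    		return;
    	entry = ring_buffer_event_data(event);
    	entry->ip = ip;

    	/* Commit only if the event filter does not discard this record. */
    	if (!call_filter_check_discard(call, entry, buffer, event))
    		trace_buffer_unlock_commit_nostack(buffer, event);
    }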
|
/kernel/bpf/preload/iterators/ |
D | Makefile |
    33  $(call msg,CLEAN)
    37  $(call msg,GEN-SKEL,$@)
    42  $(call msg,BPF,$@)
    48  $(call msg,MKDIR,$@)
|
/kernel/ |
D | Makefile |
    43  CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
    146  $(call if_changed,gzip)
    151  $(call filechk,cat)
    158  $(call cmd,genikh)
|
/kernel/rcu/ |
D | rcutorture.c |
    341  call_rcu_func_t call;  member
    505  .call = call_rcu,
    554  .call = call_rcu_busted,
    669  .call = srcu_torture_call,
    702  .call = srcu_torture_call,
    722  .call = srcu_torture_call,
    763  .call = call_rcu_tasks,
    834  .call = call_rcu_tasks_rude,
    874  .call = call_rcu_tasks_trace,
    1662  if (cur_ops->call) {  in rcu_torture_timer()
    [all …]
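The rcutorture hits show the per-flavor ops pattern: each flavor's ops table fills a call_rcu_func_t ->call slot (call_rcu, srcu_torture_call, call_rcu_tasks, and so on), and paths such as rcu_torture_timer() queue a callback only when ->call is non-NULL. A hedged sketch of that pattern is below; my_torture_ops, my_head, and my_cb are illustrative names, and the header for the typedef is my assumption.

    #include <linux/rcupdate_wait.h>	/* call_rcu_func_t, as I recall */

    struct my_torture_ops {
    	call_rcu_func_t call;		/* mirrors the member at line 341 */
    };

    static struct rcu_head my_head;

    static void my_cb(struct rcu_head *rhp)
    {
    	/* Runs after a grace period of the selected flavor. */
    }

    static void my_queue_cb(struct my_torture_ops *ops)
    {
    	if (ops->call)			/* mirrors "if (cur_ops->call)" */
    		ops->call(&my_head, my_cb);
    }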
|
/kernel/bpf/ |
D | Kconfig |
    27  bool "Enable bpf() system call"
    35  Enable the bpf() system call that allows to manipulate BPF programs
|
/kernel/debug/kdb/ |
D | Makefile | 24 $(call cmd,gen-kdb)
|
/kernel/kcsan/ |
D | Makefile | 10 CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
|