/kernel/events/
D | core.c |
   182  static bool is_kernel_event(struct perf_event *event)   in is_kernel_event() argument
   184      return READ_ONCE(event->owner) == TASK_TOMBSTONE;   in is_kernel_event()
   210      struct perf_event *event;   member
   218      struct perf_event *event = efs->event;   in event_function() local
   219      struct perf_event_context *ctx = event->ctx;   in event_function()
   254      efs->func(event, cpuctx, ctx, efs->data);   in event_function()
   261  static void event_function_call(struct perf_event *event, event_f func, void *data)   in event_function_call() argument
   263      struct perf_event_context *ctx = event->ctx;   in event_function_call()
   266          .event = event,   in event_function_call()
   271      if (!event->parent) {   in event_function_call()
   [all …]
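The is_kernel_event() hit above shows the sentinel-pointer idiom: a kernel-owned event is marked by storing the distinguished TASK_TOMBSTONE value in event->owner, so ownership is answered by a single pointer compare. A minimal userspace sketch of the same idiom; the struct layouts and the TOMBSTONE object here are illustrative stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; };

/* A distinguished non-NULL pointer that can never alias a real task:
 * the address of a private static object. */
static struct task tombstone_storage;
#define TOMBSTONE (&tombstone_storage)

struct event { struct task *owner; };

static bool is_kernel_event(const struct event *ev)
{
    /* the kernel additionally wraps this read in READ_ONCE() for
     * lockless readers */
    return ev->owner == TOMBSTONE;
}

int main(void)
{
    struct task t = { "user-task" };
    struct event user_ev = { .owner = &t };
    struct event kern_ev = { .owner = TOMBSTONE };

    printf("user_ev kernel-owned? %d\n", is_kernel_event(&user_ev)); /* 0 */
    printf("kern_ev kernel-owned? %d\n", is_kernel_event(&kern_ev)); /* 1 */
    return 0;
}

Because the sentinel is the address of a private static object, it can never collide with a genuine owner pointer, which is why no extra flag field is needed.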
D | ring_buffer.c |
    24      handle->event->pending_wakeup = 1;   in perf_output_wakeup()
    25      irq_work_queue(&handle->event->pending_irq);   in perf_output_wakeup()
   151          struct perf_event *event, unsigned int size,   in __perf_output_begin() argument
   167      if (event->parent)   in __perf_output_begin()
   168          event = event->parent;   in __perf_output_begin()
   170      rb = rcu_dereference(event->rb);   in __perf_output_begin()
   177          atomic64_inc(&event->lost_samples);   in __perf_output_begin()
   183      handle->event = event;   in __perf_output_begin()
   188      if (event->attr.sample_id_all)   in __perf_output_begin()
   189          size += event->id_header_size;   in __perf_output_begin()
   [all …]
/kernel/trace/
D | trace_events_synth.c |
    91  static bool synth_event_match(const char *system, const char *event,
   114      struct synth_event *event = to_synth_event(ev);   in synth_event_is_busy() local
   116      return event->ref != 0;   in synth_event_is_busy()
   119  static bool synth_event_match(const char *system, const char *event,   in synth_event_match() argument
   124      return strcmp(sev->name, event) == 0 &&   in synth_event_match()
   137      struct synth_event *event = call->data;   in synth_event_define_fields() local
   143      for (i = 0, n_u64 = 0; i < event->n_fields; i++) {   in synth_event_define_fields()
   144          size = event->fields[i]->size;   in synth_event_define_fields()
   145          is_signed = event->fields[i]->is_signed;   in synth_event_define_fields()
   146          type = event->fields[i]->type;   in synth_event_define_fields()
   [all …]
D | ring_buffer.c |
   144  #define skip_time_extend(event) \   argument
   145      ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
   147  #define extended_time(event) \   argument
   148      (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
   150  static inline int rb_null_event(struct ring_buffer_event *event)   in rb_null_event() argument
   152      return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;   in rb_null_event()
   155  static void rb_event_set_padding(struct ring_buffer_event *event)   in rb_event_set_padding() argument
   158      event->type_len = RINGBUF_TYPE_PADDING;   in rb_event_set_padding()
   159      event->time_delta = 0;   in rb_event_set_padding()
   163  rb_event_data_length(struct ring_buffer_event *event)   in rb_event_data_length() argument
   [all …]
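The macros and helpers above all key off a small discriminator in the event header: type_len values at the top of the 5-bit range mark special records (padding, time extend, absolute timestamp), and padding with a zero time_delta means "no event here". A self-contained sketch of that header layout and the null/padding tests; the field widths, enum values, and record size are modeled on, but not copied from, the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { TYPE_PADDING = 29, TYPE_TIME_EXTEND = 30, TYPE_TIME_STAMP = 31 };

struct rb_event {
    uint32_t type_len   : 5;    /* length for small events, or a special type */
    uint32_t time_delta : 27;   /* delta from the previous event's timestamp */
};

/* size of a time-extend record in this sketch (assumed; the kernel's
 * RB_LEN_TIME_EXTEND is likewise a fixed constant) */
#define LEN_TIME_EXTEND sizeof(struct rb_event)

static bool is_null_event(const struct rb_event *ev)
{
    /* padding with a zero delta marks "nothing here" */
    return ev->type_len == TYPE_PADDING && ev->time_delta == 0;
}

static bool has_extended_time(const struct rb_event *ev)
{
    return ev->type_len >= TYPE_TIME_EXTEND;
}

static const struct rb_event *skip_time_extend(const struct rb_event *ev)
{
    return (const struct rb_event *)((const char *)ev + LEN_TIME_EXTEND);
}

int main(void)
{
    struct rb_event buf[2] = {
        { .type_len = TYPE_TIME_EXTEND, .time_delta = 123 },
        { .type_len = TYPE_PADDING,     .time_delta = 0 },
    };
    const struct rb_event *ev = &buf[0];

    if (has_extended_time(ev))
        ev = skip_time_extend(ev);   /* step over the time-extend record */
    printf("null event after skip? %d\n", is_null_event(ev));   /* prints 1 */
    return 0;
}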
D | trace_output.c |
   296      struct trace_event_call *event;   in trace_raw_output_prep() local
   301      event = container_of(trace_event, struct trace_event_call, event);   in trace_raw_output_prep()
   304      if (entry->type != event->event.type) {   in trace_raw_output_prep()
   310      trace_seq_printf(s, "%s: ", trace_event_name(event));   in trace_raw_output_prep()
   678      struct trace_event *event;   in ftrace_find_event() local
   683      hlist_for_each_entry(event, &event_hash[key], node) {   in ftrace_find_event()
   684          if (event->type == type)   in ftrace_find_event()
   685              return event;   in ftrace_find_event()
   751  int register_trace_event(struct trace_event *event)   in register_trace_event() argument
   758      if (WARN_ON(!event))   in register_trace_event()
   [all …]
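ftrace_find_event() above is a plain chained-hash lookup: hash the numeric event type into a bucket, then walk the bucket comparing types. A sketch with an ordinary singly linked chain standing in for the kernel's hlist; the bucket count and hash function are illustrative:

#include <stddef.h>
#include <stdio.h>

#define NBUCKETS 128

struct trace_event {
    int type;                    /* numeric event type, the lookup key */
    struct trace_event *next;    /* chain within one hash bucket */
};

static struct trace_event *event_hash[NBUCKETS];

static unsigned int hash_type(int type)
{
    return (unsigned int)type % NBUCKETS;
}

static void register_event(struct trace_event *ev)
{
    unsigned int key = hash_type(ev->type);

    ev->next = event_hash[key];
    event_hash[key] = ev;
}

static struct trace_event *find_event(int type)
{
    for (struct trace_event *ev = event_hash[hash_type(type)]; ev; ev = ev->next)
        if (ev->type == type)
            return ev;
    return NULL;
}

int main(void)
{
    struct trace_event e1 = { .type = 42 };

    register_event(&e1);
    printf("found 42? %d\n", find_event(42) == &e1);   /* 1 */
    printf("found 43? %d\n", find_event(43) != NULL);  /* 0 */
    return 0;
}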
D | ring_buffer_benchmark.c |
    87      struct ring_buffer_event *event;   in read_event() local
    91      event = ring_buffer_consume(buffer, cpu, &ts, NULL);   in read_event()
    92      if (!event)   in read_event()
    95      entry = ring_buffer_event_data(event);   in read_event()
   107      struct ring_buffer_event *event;   in read_page() local
   133      event = (void *)&rpage->data[i];   in read_page()
   134      switch (event->type_len) {   in read_page()
   137          if (!event->time_delta)   in read_page()
   139          inc = event->array[0] + 4;   in read_page()
   145          entry = ring_buffer_event_data(event);   in read_page()
   [all …]
D | trace_uprobe.c |
    43  static bool trace_uprobe_match(const char *system, const char *event,
   312  static bool trace_uprobe_match(const char *system, const char *event,   in trace_uprobe_match() argument
   317      return (event[0] == '\0' ||   in trace_uprobe_match()
   318          strcmp(trace_probe_name(&tu->tp), event) == 0) &&   in trace_uprobe_match()
   339  alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)   in alloc_trace_uprobe() argument
   348      ret = trace_probe_init(&tu->tp, event, group, true);   in alloc_trace_uprobe()
   356      init_trace_uprobe_filter(tu->tp.event->filter);   in alloc_trace_uprobe()
   376  static struct trace_uprobe *find_probe_event(const char *event, const char *group)   in find_probe_event() argument
   382      if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&   in find_probe_event()
   415      struct trace_probe_event *tpe = orig->tp.event;   in trace_uprobe_has_same_uprobe()
   [all …]
D | trace_eprobe.c |
    33      struct trace_event_call *event;   member
    53      if (ep->event)   in trace_event_probe_cleanup()
    54          trace_event_put_ref(ep->event);   in trace_event_probe_cleanup()
   123  static bool eprobe_dyn_event_match(const char *system, const char *event,   in eprobe_dyn_event_match() argument
   152      if (event[0] != '\0' && strcmp(trace_probe_name(&ep->tp), event) != 0)   in eprobe_dyn_event_match()
   192          struct trace_event_call *event,   in alloc_event_probe() argument
   200      if (!event)   in alloc_event_probe()
   203      sys_name = event->class->system;   in alloc_event_probe()
   204      event_name = trace_event_name(event);   in alloc_event_probe()
   208      trace_event_put_ref(event);   in alloc_event_probe()
   [all …]
D | trace_events_filter.c |
   639  static int filter_pred_##type(struct filter_pred *pred, void *event) \
   643      type *addr = (type *)(event + pred->offset); \
   648      type *addr = (type *)(event + pred->offset); \
   653      type *addr = (type *)(event + pred->offset); \
   658      type *addr = (type *)(event + pred->offset); \
   663      type *addr = (type *)(event + pred->offset); \
   673  static int filter_pred_##size(struct filter_pred *pred, void *event) \
   675      u##size *addr = (u##size *)(event + pred->offset); \
   745  static int filter_pred_string(struct filter_pred *pred, void *event)   in filter_pred_string() argument
   747      char *addr = (char *)(event + pred->offset);   in filter_pred_string()
   [all …]
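The `##` lines above come from a macro that stamps out one predicate function per field type, each reading its operand at pred->offset inside the raw event record. A compilable userspace sketch of that token-pasting pattern; the predicate set and struct fields are reduced to a minimum (the kernel generates several comparison ops per type, not just equality):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct filter_pred {
    int       offset;   /* byte offset of the field inside the record */
    long long val;      /* constant to compare against */
};

/* One macro instantiation per field type, in the spirit of
 * trace_events_filter.c. memcpy() avoids unaligned access. */
#define DEFINE_EQ_PRED(type)                                           \
static int filter_pred_##type(struct filter_pred *pred, void *event)  \
{                                                                      \
    type field;                                                        \
                                                                       \
    memcpy(&field, (char *)event + pred->offset, sizeof(field));       \
    return field == (type)pred->val;                                   \
}

DEFINE_EQ_PRED(int32_t)
DEFINE_EQ_PRED(int64_t)

struct sample_event { int32_t pid; int64_t ts; };

int main(void)
{
    struct sample_event ev = { .pid = 1234, .ts = 99 };
    struct filter_pred pid_pred = {
        .offset = offsetof(struct sample_event, pid),
        .val    = 1234,
    };
    struct filter_pred ts_pred = {
        .offset = offsetof(struct sample_event, ts),
        .val    = 100,
    };

    printf("pid == 1234? %d\n", filter_pred_int32_t(&pid_pred, &ev));  /* 1 */
    printf("ts  == 100?  %d\n", filter_pred_int64_t(&ts_pred, &ev));   /* 0 */
    return 0;
}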
D | trace_kprobe.c |
    44  static bool trace_kprobe_match(const char *system, const char *event,
   162  static bool trace_kprobe_match(const char *system, const char *event,   in trace_kprobe_match() argument
   167      return (event[0] == '\0' ||   in trace_kprobe_match()
   168          strcmp(trace_probe_name(&tk->tp), event) == 0) &&   in trace_kprobe_match()
   257          const char *event,   in alloc_trace_kprobe() argument
   293      ret = trace_probe_init(&tk->tp, event, group, false);   in alloc_trace_kprobe()
   304  static struct trace_kprobe *find_trace_kprobe(const char *event,   in find_trace_kprobe() argument
   311      if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&   in find_trace_kprobe()
   561      struct trace_probe_event *tpe = orig->tp.event;   in trace_kprobe_has_same_kprobe()
   774      const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;   in __trace_kprobe_create() local
   [all …]
D | trace_probe.c |
   234      const char *slash, *event = *pevent;   in traceprobe_parse_event_name() local
   237      slash = strchr(event, '/');   in traceprobe_parse_event_name()
   239          slash = strchr(event, '.');   in traceprobe_parse_event_name()
   242      if (slash == event) {   in traceprobe_parse_event_name()
   246      if (slash - event + 1 > MAX_EVENT_NAME_LEN) {   in traceprobe_parse_event_name()
   250      strlcpy(buf, event, slash - event + 1);   in traceprobe_parse_event_name()
   257      offset += slash - event + 1;   in traceprobe_parse_event_name()
   258      event = *pevent;   in traceprobe_parse_event_name()
   260      len = strlen(event);   in traceprobe_parse_event_name()
   272      if (!is_good_name(event)) {   in traceprobe_parse_event_name()
   [all …]
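traceprobe_parse_event_name() above splits a "[group/]event" spec at the separator (a '.' is also accepted on some paths), bounds-checks the group length, and validates the event name. A simplified standalone version of that split; MAX_EVENT_NAME_LEN, the helper name, and the error handling are reduced, and the kernel additionally records an error offset for user feedback:

#include <stdio.h>
#include <string.h>

#define MAX_EVENT_NAME_LEN 64

/* Split "group/event" (or a bare "event") into the two buffers.
 * Returns 0 on success, -1 on malformed input. */
static int parse_event_name(const char *spec, char *group, char *event)
{
    const char *slash = strchr(spec, '/');

    if (slash) {
        size_t glen = (size_t)(slash - spec);

        if (glen == 0 || glen >= MAX_EVENT_NAME_LEN)
            return -1;          /* empty or oversized group name */
        memcpy(group, spec, glen);
        group[glen] = '\0';
        spec = slash + 1;       /* what remains is the event name */
    } else {
        group[0] = '\0';        /* caller falls back to a default group */
    }
    if (spec[0] == '\0' || strlen(spec) >= MAX_EVENT_NAME_LEN)
        return -1;
    strcpy(event, spec);
    return 0;
}

int main(void)
{
    char group[MAX_EVENT_NAME_LEN], event[MAX_EVENT_NAME_LEN];

    if (parse_event_name("mygroup/myevent", group, event) == 0)
        printf("group=%s event=%s\n", group, event);
    return 0;
}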
D | trace_syscalls.c |
    18  static int syscall_enter_register(struct trace_event_call *event,
    20  static int syscall_exit_register(struct trace_event_call *event,
   128              struct trace_event *event)   in print_syscall_enter() argument
   144      if (entry->enter_event->event.type != ent->type) {   in print_syscall_enter()
   175              struct trace_event *event)   in print_syscall_exit() argument
   192      if (entry->exit_event->event.type != ent->type) {   in print_syscall_exit()
   324      entry = ring_buffer_event_data(fbuffer.event);   in ftrace_syscall_enter()
   361      entry = ring_buffer_event_data(fbuffer.event);   in ftrace_syscall_exit()
   618          sys_data->enter_event->event.type, 1, regs,   in perf_syscall_enter()
   714      perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,   in perf_syscall_exit()
   [all …]
D | trace_probe.h |
   249      struct trace_probe_event *event;   member
   263      return !!(tp->event->flags & flag);   in trace_probe_test_flag()
   269      tp->event->flags |= flag;   in trace_probe_set_flag()
   275      tp->event->flags &= ~flag;   in trace_probe_clear_flag()
   285      return trace_event_name(&tp->event->call);   in trace_probe_name()
   290      return tp->event->call.class->system;   in trace_probe_group_name()
   296      return &tp->event->call;   in trace_probe_event_call()
   315      return &tp->event->probes;   in trace_probe_probe_list()
   328      return trace_remove_event_call(&tp->event->call);   in trace_probe_unregister_event_call()
   333      return !!list_is_singular(&tp->event->files);   in trace_probe_has_single_file()
   [all …]
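trace_probe.h keeps the mutable state on a separate trace_probe_event reached through tp->event, so several probes can share one flags word and event call. A sketch of those test/set/clear helpers; the flag values and struct names here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define TP_FLAG_REGISTERED (1u << 0)
#define TP_FLAG_PROFILE    (1u << 1)

struct probe_event { unsigned int flags; };
struct probe { struct probe_event *event; };   /* shared state, as above */

static inline bool probe_test_flag(struct probe *p, unsigned int flag)
{
    return !!(p->event->flags & flag);   /* !! normalizes to 0 or 1 */
}

static inline void probe_set_flag(struct probe *p, unsigned int flag)
{
    p->event->flags |= flag;
}

static inline void probe_clear_flag(struct probe *p, unsigned int flag)
{
    p->event->flags &= ~flag;
}

int main(void)
{
    struct probe_event ev = { 0 };
    struct probe p = { &ev };

    probe_set_flag(&p, TP_FLAG_REGISTERED);
    printf("registered? %d\n", probe_test_flag(&p, TP_FLAG_REGISTERED)); /* 1 */
    probe_clear_flag(&p, TP_FLAG_REGISTERED);
    printf("registered? %d\n", probe_test_flag(&p, TP_FLAG_REGISTERED)); /* 0 */
    return 0;
}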
D | trace_mmiotrace.c |
   299      struct ring_buffer_event *event;   in __trace_mmiotrace_rw() local
   304      event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,   in __trace_mmiotrace_rw()
   306      if (!event) {   in __trace_mmiotrace_rw()
   310      entry = ring_buffer_event_data(event);   in __trace_mmiotrace_rw()
   313      if (!call_filter_check_discard(call, entry, buffer, event))   in __trace_mmiotrace_rw()
   314          trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);   in __trace_mmiotrace_rw()
   330      struct ring_buffer_event *event;   in __trace_mmiotrace_map() local
   335      event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,   in __trace_mmiotrace_map()
   337      if (!event) {   in __trace_mmiotrace_map()
   341      entry = ring_buffer_event_data(event);   in __trace_mmiotrace_map()
   [all …]
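__trace_mmiotrace_rw() above follows the standard reserve/fill/commit shape: reserve space for the entry, bail out quietly if the buffer refuses, fill the entry in place, then commit to publish it. The same shape recurs in __trace_graph_entry() and tracing_sched_switch_trace() below. A toy bump-allocator rendition of the pattern; the names are illustrative, and the kernel's reserve/commit additionally handle per-cpu buffers, preemption, and discard-on-filter:

#include <stddef.h>
#include <stdio.h>

static char buffer[4096];
static size_t head;    /* next free byte */

/* Reserve len bytes; NULL means the buffer is full and the event is dropped. */
static void *buffer_reserve(size_t len)
{
    if (head + len > sizeof(buffer))
        return NULL;
    return &buffer[head];
}

/* Publish the entry only after it has been fully written. */
static void buffer_commit(size_t len)
{
    head += len;
}

struct mmio_entry { unsigned long addr; int is_write; };

static void trace_mmio(unsigned long addr, int is_write)
{
    struct mmio_entry *entry = buffer_reserve(sizeof(*entry));

    if (!entry)
        return;                     /* mirrors the "if (!event)" early-out */
    entry->addr = addr;
    entry->is_write = is_write;
    buffer_commit(sizeof(*entry)); /* entry becomes visible to readers */
}

int main(void)
{
    trace_mmio(0xdeadb000ul, 1);
    printf("bytes used: %zu\n", head);
    return 0;
}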
D | trace_dynevent.c |
    75      char *system = NULL, *event, *p;   in dyn_event_release() local
    88          event = &argv[0][2];   in dyn_event_release()
    90          event = strchr(argv[0], ':');   in dyn_event_release()
    91          if (!event) {   in dyn_event_release()
    95          event++;   in dyn_event_release()
    98      p = strchr(event, '/');   in dyn_event_release()
   100          system = event;   in dyn_event_release()
   101          event = p + 1;   in dyn_event_release()
   104      if (!system && event[0] == '\0') {   in dyn_event_release()
   113          if (!pos->ops->match(system, event,   in dyn_event_release()
D | trace_event_perf.c |
   223          if (tp_event->event.type == event_id &&   in perf_trace_init()
   437      struct perf_event *event;   in perf_ftrace_function_call() local
   453      event = container_of(ops, struct perf_event, ftrace_ops);   in perf_ftrace_function_call()
   461      head.first = &event->hlist_entry;   in perf_ftrace_function_call()
   485  static int perf_ftrace_function_register(struct perf_event *event)   in perf_ftrace_function_register() argument
   487      struct ftrace_ops *ops = &event->ftrace_ops;   in perf_ftrace_function_register()
   495  static int perf_ftrace_function_unregister(struct perf_event *event)   in perf_ftrace_function_unregister() argument
   497      struct ftrace_ops *ops = &event->ftrace_ops;   in perf_ftrace_function_unregister()
   506      struct perf_event *event = data;   in perf_ftrace_event_register() local
   520      event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();   in perf_ftrace_event_register()
   [all …]
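perf_ftrace_function_call() above receives only the embedded ftrace_ops and climbs back to the enclosing perf_event with container_of(). A self-contained demonstration of that recovery; the two-field struct is illustrative, and the kernel's container_of adds compile-time type checking on top of this arithmetic:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct ftrace_ops { void *private; };

struct perf_event {
    int id;
    struct ftrace_ops ftrace_ops;   /* embedded member, as in the kernel */
};

/* The callback is handed only the embedded ops... */
static void callback(struct ftrace_ops *ops)
{
    /* ...and climbs back to the structure that contains it. */
    struct perf_event *event = container_of(ops, struct perf_event, ftrace_ops);

    printf("event id: %d\n", event->id);
}

int main(void)
{
    struct perf_event ev = { .id = 7 };

    callback(&ev.ftrace_ops);   /* prints "event id: 7" */
    return 0;
}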
D | trace_functions_graph.c |
   102      struct ring_buffer_event *event;   in __trace_graph_entry() local
   106      event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,   in __trace_graph_entry()
   108      if (!event)   in __trace_graph_entry()
   110      entry = ring_buffer_event_data(event);   in __trace_graph_entry()
   112      if (!call_filter_check_discard(call, entry, buffer, event))   in __trace_graph_entry()
   113          trace_buffer_unlock_commit_nostack(buffer, event);   in __trace_graph_entry()
   221      struct ring_buffer_event *event;   in __trace_graph_return() local
   225      event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,   in __trace_graph_return()
   227      if (!event)   in __trace_graph_return()
   229      entry = ring_buffer_event_data(event);   in __trace_graph_return()
   [all …]
D | trace_events_hist.c |
    90              void *event);
   205              void *event);
   211              void *event)   in hist_field_const() argument
   220              void *event)   in hist_field_counter() argument
   229              void *event)   in hist_field_string() argument
   231      char *addr = (char *)(event + hist_field->field->offset);   in hist_field_string()
   240              void *event)   in hist_field_dynstring() argument
   242      u32 str_item = *(u32 *)(event + hist_field->field->offset);   in hist_field_dynstring()
   244      char *addr = (char *)(event + str_loc);   in hist_field_dynstring()
   253              void *event)   in hist_field_reldynstring() argument
   [all …]
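The hist_field_*() functions above are per-type accessors: each field description carries a function pointer that knows how to extract its value from the raw event record, so the histogram core can stay type-agnostic. A reduced sketch of that dispatch; the struct members and field kinds here are illustrative stand-ins for the kernel's hist_field:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hist_field;
typedef uint64_t (*hist_fn)(const struct hist_field *f, const void *event);

struct hist_field {
    int      offset;     /* byte offset of the field in the raw record */
    uint64_t constant;   /* used only by the constant accessor */
    hist_fn  fn;         /* type-specific extractor */
};

static uint64_t field_u32(const struct hist_field *f, const void *event)
{
    uint32_t v;

    memcpy(&v, (const char *)event + f->offset, sizeof(v));
    return v;
}

static uint64_t field_const(const struct hist_field *f, const void *event)
{
    (void)event;
    return f->constant;   /* cf. hist_field_const() above */
}

struct rec { uint32_t pid; };

int main(void)
{
    struct rec r = { .pid = 4242 };
    struct hist_field pid_field = { .offset = 0, .fn = field_u32 };
    struct hist_field one = { .constant = 1, .fn = field_const };

    printf("pid = %llu, const = %llu\n",
           (unsigned long long)pid_field.fn(&pid_field, &r),
           (unsigned long long)one.fn(&one, &r));
    return 0;
}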
D | trace_boot.c |
   102  trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)   in trace_boot_add_kprobe_event() argument
   113      ret = kprobe_event_gen_cmd_start(&cmd, event, val);   in trace_boot_add_kprobe_event()
   130  trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)   in trace_boot_add_kprobe_event() argument
   139  trace_boot_add_synth_event(struct xbc_node *node, const char *event)   in trace_boot_add_synth_event() argument
   149      ret = synth_event_gen_cmd_start(&cmd, event, NULL);   in trace_boot_add_synth_event()
   167  trace_boot_add_synth_event(struct xbc_node *node, const char *event)   in trace_boot_add_synth_event() argument
   468      const char *p, *group, *event;   in trace_boot_init_one_event() local
   471      event = xbc_node_get_data(enode);   in trace_boot_init_one_event()
   474      if (trace_boot_add_kprobe_event(enode, event) < 0)   in trace_boot_init_one_event()
   477      if (trace_boot_add_synth_event(enode, event) < 0)   in trace_boot_init_one_event()
   [all …]
D | trace.c |
   293              struct ring_buffer_event *event, int flag)   in trace_process_export() argument
   299      entry = ring_buffer_event_data(event);   in trace_process_export()
   300      size = ring_buffer_event_length(event);   in trace_process_export()
   337  static void ftrace_exports(struct ring_buffer_event *event, int flag)   in ftrace_exports() argument
   345      trace_process_export(export, event, flag);   in ftrace_exports()
   520              struct ring_buffer_event *event)   in call_filter_check_discard() argument
   524      __trace_event_discard_commit(buffer, event);   in call_filter_check_discard()
   937  trace_event_setup(struct ring_buffer_event *event,   in trace_event_setup() argument
   940      struct trace_entry *ent = ring_buffer_event_data(event);   in trace_event_setup()
   951      struct ring_buffer_event *event;   in __trace_buffer_lock_reserve() local
   [all …]
D | trace_sched_wakeup.c |
   381      struct ring_buffer_event *event;   in tracing_sched_switch_trace() local
   384      event = trace_buffer_lock_reserve(buffer, TRACE_CTX,   in tracing_sched_switch_trace()
   386      if (!event)   in tracing_sched_switch_trace()
   388      entry = ring_buffer_event_data(event);   in tracing_sched_switch_trace()
   397      if (!call_filter_check_discard(call, entry, buffer, event))   in tracing_sched_switch_trace()
   398          trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);   in tracing_sched_switch_trace()
   408      struct ring_buffer_event *event;   in tracing_sched_wakeup_trace() local
   412      event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,   in tracing_sched_wakeup_trace()
   414      if (!event)   in tracing_sched_wakeup_trace()
   416      entry = ring_buffer_event_data(event);   in tracing_sched_wakeup_trace()
   [all …]
D | bpf_trace.c |
   567      return perf_event_read_local(ee->event, value, enabled, running);   in get_map_perf_counter()
   628      struct perf_event *event;   in __bpf_perf_event_output() local
   639      event = ee->event;   in __bpf_perf_event_output()
   640      if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||   in __bpf_perf_event_output()
   641              event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))   in __bpf_perf_event_output()
   644      if (unlikely(event->oncpu != cpu))   in __bpf_perf_event_output()
   647      return perf_event_output(event, sd, regs);   in __bpf_perf_event_output()
  1126      return ctx->event->bpf_cookie;   in BPF_CALL_1()
  1701      err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,   in BPF_CALL_3()
  2125  int perf_event_attach_bpf_prog(struct perf_event *event,   in perf_event_attach_bpf_prog() argument
   [all …]
/kernel/
D | watchdog_hld.c |
   110  static void watchdog_overflow_callback(struct perf_event *event,   in watchdog_overflow_callback() argument
   115      event->hw.interrupts = 0;   in watchdog_overflow_callback()
   207      struct perf_event *event = this_cpu_read(watchdog_ev);   in hardlockup_detector_perf_disable() local
   209      if (event) {   in hardlockup_detector_perf_disable()
   210          perf_event_disable(event);   in hardlockup_detector_perf_disable()
   212          this_cpu_write(dead_event, event);   in hardlockup_detector_perf_disable()
   228      struct perf_event *event = per_cpu(dead_event, cpu);   in hardlockup_detector_perf_cleanup() local
   234      if (event)   in hardlockup_detector_perf_cleanup()
   235          perf_event_release_kernel(event);   in hardlockup_detector_perf_cleanup()
   253      struct perf_event *event = per_cpu(watchdog_ev, cpu);   in hardlockup_detector_perf_stop() local
   [all …]
D | compat.c |
   179  int get_compat_sigevent(struct sigevent *event,   in get_compat_sigevent() argument
   182      memset(event, 0, sizeof(*event));   in get_compat_sigevent()
   184      __get_user(event->sigev_value.sival_int,   in get_compat_sigevent()
   186          __get_user(event->sigev_signo, &u_event->sigev_signo) ||   in get_compat_sigevent()
   187          __get_user(event->sigev_notify, &u_event->sigev_notify) ||   in get_compat_sigevent()
   188          __get_user(event->sigev_notify_thread_id,   in get_compat_sigevent()
/kernel/locking/
D | lock_events.h |
    37  static inline void __lockevent_inc(enum lock_events event, bool cond)   in __lockevent_inc() argument
    40          raw_cpu_inc(lockevents[event]);   in __lockevent_inc()
    46  static inline void __lockevent_add(enum lock_events event, int inc)   in __lockevent_add() argument
    48      raw_cpu_add(lockevents[event], inc);   in __lockevent_add()
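The two helpers above bump statically allocated counters indexed by the lock_events enum, using raw per-cpu operations so no cache line is shared between CPUs. A userspace approximation with thread-local storage standing in for per-cpu data; the enum values are illustrative, and raw_cpu_inc/raw_cpu_add have no direct userspace equivalent:

#include <stdbool.h>
#include <stdio.h>

enum lock_events { LOCKEVENT_contended, LOCKEVENT_handoff, LOCKEVENT_NUM };

/* The kernel uses a per-cpu array; _Thread_local is the closest portable
 * userspace stand-in. */
static _Thread_local unsigned long lockevents[LOCKEVENT_NUM];

static inline void __lockevent_inc(enum lock_events event, bool cond)
{
    if (cond)
        lockevents[event]++;    /* raw_cpu_inc(lockevents[event]) in the kernel */
}

static inline void __lockevent_add(enum lock_events event, int inc)
{
    lockevents[event] += inc;   /* raw_cpu_add(lockevents[event], inc) */
}

int main(void)
{
    __lockevent_inc(LOCKEVENT_contended, true);
    __lockevent_inc(LOCKEVENT_contended, false);   /* guarded out by cond */
    __lockevent_add(LOCKEVENT_handoff, 3);

    printf("contended=%lu handoff=%lu\n",
           lockevents[LOCKEVENT_contended], lockevents[LOCKEVENT_handoff]);
    return 0;
}

Keeping the counters per CPU (or per thread, as here) makes the hot path a single uncontended increment; aggregation across CPUs only happens when the statistics are read out.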