/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
union bpf_attr;

/* Used for event string fields when they are NULL */
#define EVENT_NULL_STR		"(null)"

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);

const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
			 int prefix_type, int rowsize, int groupsize,
			 const void *buf, size_t len, bool ascii);

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);
extern __printf(2, 3)
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);

/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
	u16	len;
	u16	offset;
#else
	u16	offset;
	u16	len;
#endif
} __packed;

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
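/*
 * With the 2-byte "type" field in struct trace_entry above, this works
 * out to (1 << 16) - 1 = 65535 possible event type IDs.
 */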

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct array_buffer	*array_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;
	void			*temp;	/* temp holder */
	unsigned int		temp_size;
	char			*fmt;	/* modified format holder */
	unsigned int		fmt_size;
	atomic_t		wait_index;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* Set when the file is closed to prevent new waiters */
	bool			closed;

	/* it's true when the currently open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);
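/*
 * Example (illustrative sketch only, not part of this header): a typical
 * trace_print_func writes into iter->seq and lets trace_handle_return()
 * map the trace_seq state to a print_line_t. "foo_trace_output" and the
 * printed fields are hypothetical names.
 *
 *	static enum print_line_t
 *	foo_trace_output(struct trace_iterator *iter, int flags,
 *			 struct trace_event *event)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		trace_seq_printf(s, "foo: cpu=%d ts=%llu\n", iter->cpu, iter->ts);
 *		return trace_handle_return(s);	// PARTIAL_LINE if seq overflowed
 *	}
 */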

static inline void tracing_generic_entry_update(struct trace_entry *entry,
						unsigned short type,
						unsigned int trace_ctx)
{
	entry->preempt_count	= trace_ctx & 0xff;
	entry->pid		= current->pid;
	entry->type		= type;
	entry->flags		= trace_ctx >> 16;
}
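/*
 * As consumed above, trace_ctx packs the context into a single word:
 * the preemption count in the low byte and the trace_flag_type bits
 * (see the enum below) starting at bit 16.
 */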

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
	TRACE_FLAG_BH_OFF		= 0x80,
};

#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
		TRACE_FLAG_IRQS_OFF : 0;
	return tracing_gen_ctx_irq_test(irq_status);
}
static inline unsigned int tracing_gen_ctx(void)
{
	unsigned long irqflags;

	local_save_flags(irqflags);
	return tracing_gen_ctx_flags(irqflags);
}
#else

static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
}
static inline unsigned int tracing_gen_ctx(void)
{
	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
}
#endif

static inline unsigned int tracing_gen_ctx_dec(void)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();
	/*
	 * Subtract one from the preemption counter if preemption is enabled,
	 * see trace_event_buffer_reserve() for details.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION))
		trace_ctx--;
	return trace_ctx;
}

struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned int trace_ctx);

#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
	__printf(3, 4);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

#define TRACE_FUNCTION_TYPE ((const char *)~0UL)

struct trace_event_fields {
	const char *type;
	union {
		struct {
			const char	*name;
			const int	size;
			const int	align;
			const unsigned int is_signed:1;
			unsigned int	needs_test:1;
			const int	filter_type;
			const int	len;
		};
		int (*define_fields)(struct trace_event_call *);
	};
};
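/*
 * Example (illustrative sketch only): a fields array as the TRACE_EVENT()
 * machinery would generate it, one entry per field plus an empty
 * terminating entry. The "my_event" names are hypothetical.
 *
 *	static struct trace_event_fields my_event_fields[] = {
 *		{ .type = "int", .name = "my_val",
 *		  .size = sizeof(int), .align = __alignof__(int),
 *		  .is_signed = 1, .filter_type = FILTER_OTHER },
 *		{}
 *	};
 */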

struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	struct trace_event_fields *fields_array;
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);

struct trace_event_buffer {
	struct trace_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned int			trace_ctx;
	struct pt_regs			*regs;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
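/*
 * Example (illustrative sketch only): the reserve/fill/commit pattern the
 * generated event probes follow. "struct trace_event_raw_foo" and its
 * "my_val" field are hypothetical names; real probes are emitted by the
 * TRACE_EVENT() macros.
 *
 *	struct trace_event_buffer fbuffer;
 *	struct trace_event_raw_foo *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->my_val = 42;			// fill in event fields
 *	trace_event_buffer_commit(&fbuffer);	// publish to the ring buffer
 */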

enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_DYNAMIC_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
	TRACE_EVENT_FL_EPROBE_BIT,
	TRACE_EVENT_FL_FPROBE_BIT,
	TRACE_EVENT_FL_CUSTOM_BIT,
	TRACE_EVENT_FL_TEST_STR_BIT,
};

/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT	  - Event is a tracepoint
 *  DYNAMIC	  - Event is a dynamic event (created at run time)
 *  KPROBE	  - Event is a kprobe
 *  UPROBE	  - Event is a uprobe
 *  EPROBE	  - Event is an event probe
 *  FPROBE	  - Event is a function probe
 *  CUSTOM	  - Event is a custom event (to be attached to an existing tracepoint).
 *		    This is set while the custom event has not yet been attached
 *		    to a tracepoint, and cleared once it is.
 *  TEST_STR	  - The event has a "%s" that points to a string outside the event
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_DYNAMIC		= (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
	TRACE_EVENT_FL_EPROBE		= (1 << TRACE_EVENT_FL_EPROBE_BIT),
	TRACE_EVENT_FL_FPROBE		= (1 << TRACE_EVENT_FL_FPROBE_BIT),
	TRACE_EVENT_FL_CUSTOM		= (1 << TRACE_EVENT_FL_CUSTOM_BIT),
	TRACE_EVENT_FL_TEST_STR		= (1 << TRACE_EVENT_FL_TEST_STR_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		const char	*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint *tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	/*
	 * Static events can disappear with modules,
	 * whereas dynamic ones need their own ref count.
	 */
	union {
		void		*module;
		atomic_t	refcnt;
	};
	void			*data;

	/* See the TRACE_EVENT_FL_* flags above */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_DYNAMIC_EVENTS
bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
#else
static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
{
	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
	return false;
}
static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
{
}
static inline bool trace_event_dyn_busy(struct trace_event_call *call)
{
	/* Nothing should call this without DYNAMIC_EVENTS configured. */
	return true;
}
#endif

static inline bool trace_event_try_get_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		return trace_event_dyn_try_get_ref(call);
	else
		return try_module_get(call->module);
}

static inline void trace_event_put_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		trace_event_dyn_put_ref(call);
	else
		module_put(call->module);
}
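/*
 * Example (illustrative sketch only): hold a reference across any use of
 * a trace_event_call that could otherwise go away underneath you (module
 * unload for static events, removal for dynamic ones).
 *
 *	if (!trace_event_try_get_ref(call))
 *		return -ENODEV;
 *	// ... safe to use "call" here ...
 *	trace_event_put_ref(call);
 */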

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside the rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside the rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
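/*
 * Example (illustrative sketch only): the usual caller pattern, as in the
 * perf probe paths - cheap unlocked check first, with the real
 * dereference done under RCU inside trace_call_bpf() (declared below).
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, ctx))
 *		return;		// the BPF programs filtered this event out
 */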
#endif

static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_CUSTOM)
		return call->name;
	else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}

static inline struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

struct trace_subsystem_dir;

enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
	EVENT_FILE_FL_FREED_BIT,
};

extern struct trace_event_file *trace_get_event_file(const char *instance,
						     const char *system,
						     const char *event);
extern void trace_put_event_file(struct trace_event_file *file);
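/*
 * Example (illustrative sketch only): look up an event file in the
 * top-level trace instance (NULL) and pin it while in use; the return
 * value is an ERR_PTR() on failure.
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	// ... e.g. attach a trigger to the event ...
 *	trace_put_event_file(file);
 */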

#define MAX_DYNEVENT_CMD_LEN	(2048)

enum dynevent_type {
	DYNEVENT_TYPE_SYNTH = 1,
	DYNEVENT_TYPE_KPROBE,
	DYNEVENT_TYPE_NONE,
};

struct dynevent_cmd;

typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);

struct dynevent_cmd {
	struct seq_buf		seq;
	const char		*event_name;
	unsigned int		n_fields;
	enum dynevent_type	type;
	dynevent_create_fn_t	run_command;
	void			*private_data;
};

extern int dynevent_create(struct dynevent_cmd *cmd);

extern int synth_event_delete(const char *name);

extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
				 char *buf, int maxlen);

extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
				       const char *name,
				       struct module *mod, ...);

#define synth_event_gen_cmd_start(cmd, name, mod, ...)	\
	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)

struct synth_field_desc {
	const char *type;
	const char *name;
};

extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
					   const char *name,
					   struct module *mod,
					   struct synth_field_desc *fields,
					   unsigned int n_fields);
extern int synth_event_create(const char *name,
			      struct synth_field_desc *fields,
			      unsigned int n_fields, struct module *mod);

extern int synth_event_add_field(struct dynevent_cmd *cmd,
				 const char *type,
				 const char *name);
extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
				     const char *type_name);
extern int synth_event_add_fields(struct dynevent_cmd *cmd,
				  struct synth_field_desc *fields,
				  unsigned int n_fields);

#define synth_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)
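/*
 * Example (illustrative sketch only): building and creating a synthetic
 * event at run time with the dynevent cmd API above. The event and field
 * names are hypothetical; the variadic args are type/name string pairs.
 *
 *	char buf[MAX_DYNEVENT_CMD_LEN];
 *	struct dynevent_cmd cmd;
 *	int ret;
 *
 *	synth_event_cmd_init(&cmd, buf, sizeof(buf));
 *	ret = synth_event_gen_cmd_start(&cmd, "my_synth", THIS_MODULE,
 *					"u64", "my_lat",
 *					"pid_t", "my_pid");
 *	if (ret)
 *		return ret;
 *	ret = synth_event_add_field(&cmd, "unsigned int", "my_cpu");
 *	if (ret)
 *		return ret;
 *	ret = synth_event_gen_cmd_end(&cmd);	// registers the event
 */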

struct synth_event;

struct synth_event_trace_state {
	struct trace_event_buffer fbuffer;
	struct synth_trace_event *entry;
	struct trace_buffer	*buffer;
	struct synth_event	*event;
	unsigned int		cur_field;
	unsigned int		n_u64;
	bool			disabled;
	bool			add_next;
	bool			add_name;
};

extern int synth_event_trace(struct trace_event_file *file,
			     unsigned int n_vals, ...);
extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
				   unsigned int n_vals);
extern int synth_event_trace_start(struct trace_event_file *file,
				   struct synth_event_trace_state *trace_state);
extern int synth_event_add_next_val(u64 val,
				    struct synth_event_trace_state *trace_state);
extern int synth_event_add_val(const char *field_name, u64 val,
			       struct synth_event_trace_state *trace_state);
extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
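/*
 * Example (illustrative sketch only): generating a trace record for an
 * already-created synthetic event, adding field values piecewise in
 * field-definition order. "file" would come from trace_get_event_file().
 *
 *	struct synth_event_trace_state state;
 *	int ret;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *	ret = synth_event_add_next_val(777, &state);	// first field
 *	if (!ret)
 *		ret = synth_event_add_next_val(42, &state);
 *	ret = synth_event_trace_end(&state);	// always close the trace
 */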

extern int kprobe_event_delete(const char *name);

extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
				  char *buf, int maxlen);

#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)

#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)

extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
					bool kretprobe,
					const char *name,
					const char *loc, ...);

#define kprobe_event_add_fields(cmd, ...)	\
	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)

#define kprobe_event_add_field(cmd, field)	\
	__kprobe_event_add_fields(cmd, field, NULL)

extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);

#define kprobe_event_gen_cmd_end(cmd)		\
	dynevent_create(cmd)

#define kretprobe_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)
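/*
 * Example (illustrative sketch only): creating a kprobe event at run
 * time, mirroring the synthetic-event flow above. The event name, probe
 * location and fetch args are hypothetical.
 *
 *	char buf[MAX_DYNEVENT_CMD_LEN];
 *	struct dynevent_cmd cmd;
 *	int ret;
 *
 *	kprobe_event_cmd_init(&cmd, buf, sizeof(buf));
 *	ret = kprobe_event_gen_cmd_start(&cmd, "my_kprobe", "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (ret)
 *		return ret;
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 *	if (ret)
 *		return ret;
 *	ret = kprobe_event_gen_cmd_end(&cmd);	// registers the event
 */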

/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 *  WAS_ENABLED	  - Set when enabled to know to clear trace on module removal
 *  FREED	  - File descriptor is freed, all fields should be considered invalid
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
	EVENT_FILE_FL_FREED		= (1 << EVENT_FILE_FL_FREED_BIT),
};

struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct eventfs_inode		*ei;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:	enabled
	 *   bit 1:	enabled cmd record
	 *   bit 2:	enable/disable with the soft disable bit
	 *   bit 3:	soft disabled
	 *   bit 4:	trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	refcount_t		ref;	/* ref count for opened files */
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

#ifdef CONFIG_HIST_TRIGGERS
extern struct irq_work hist_poll_work;
extern wait_queue_head_t hist_poll_wq;

static inline void hist_poll_wakeup(void)
{
	if (wq_has_sleeper(&hist_poll_wq))
		irq_work_queue(&hist_poll_work);
}

#define hist_poll_wait(file, wait)	\
	poll_wait(file, &hist_poll_wq, wait)
#endif

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event,	\
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);

#define PERF_MAX_TRACE_SIZE	8192

#define MAX_FILTER_STR_VAL	256U	/* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
	ETT_EVENT_EPROBE	= (1 << 6),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

bool __trace_trigger_soft_disabled(struct trace_event_file *file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
			       EVENT_FILE_FL_SOFT_DISABLED |
			       EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
		return false;

	return __trace_trigger_soft_disabled(file);
}
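/*
 * Example (illustrative sketch only): generated probes call this before
 * doing any work, so a soft-disabled event that has event triggers still
 * fires them without recording anything.
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 *	// ... reserve, fill and commit the event ...
 */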

#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);

struct bpf_raw_tp_link;
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr,
			    unsigned long *missed);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}
struct bpf_raw_tp_link;
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
					  u32 *prog_id, u32 *fd_type,
					  const char **buf, u64 *probe_offset,
					  u64 *probe_addr, unsigned long *missed)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_RDYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_CPUMASK,
	FILTER_COMM,
	FILTER_CPU,
	FILTER_STACKTRACE,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable);
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable with fmt when it is not a
 * constant, even though the outer if statement would optimize it out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __section("__trace_printk_fmt") =			\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **symbol,
			       u64 *probe_offset, u64 *probe_addr,
			       unsigned long *missed,
			       bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event,
			     unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **filename,
			       u64 *probe_offset, u64 *probe_addr,
			       bool perf_type_tracepoint);
#endif
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_free_bpf_prog(struct perf_event *event);

void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1);
void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3);
void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8);
void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
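/*
 * Example (illustrative sketch only): the allocate/fill/submit pattern
 * the perf probes follow. "size" follows the probes' convention of being
 * rounded so that size + sizeof(u32) is a multiple of sizeof(u64); the
 * "entry" type and its field are hypothetical names.
 *
 *	struct trace_event_raw_foo *entry;
 *	struct pt_regs *regs;
 *	int rctx;
 *
 *	entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	perf_fetch_caller_regs(regs);
 *	entry->my_val = 42;
 *	perf_trace_buf_submit(entry, size, rctx, event_type, 1, regs,
 *			      head, NULL);
 */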

#endif

#define TRACE_EVENT_STR_MAX	512

/*
 * gcc warns that you cannot use a va_list in an inlined
 * function, but lets you do it in a macro :-/
 */
#define __trace_event_vstr_len(fmt, va)			\
({							\
	va_list __ap;					\
	int __ret;					\
							\
	va_copy(__ap, *(va));				\
	__ret = vsnprintf(NULL, 0, fmt, __ap) + 1;	\
	va_end(__ap);					\
							\
	min(__ret, TRACE_EVENT_STR_MAX);		\
})
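/*
 * Example (illustrative sketch only): sizing a buffer for a
 * vsnprintf()-style event string; the function name is hypothetical.
 * Note the macro takes a pointer to the va_list.
 *
 *	static void foo_vprintk(const char *fmt, va_list ap)
 *	{
 *		int len = __trace_event_vstr_len(fmt, &ap);
 *
 *		// len includes the terminating NUL and is capped
 *		// at TRACE_EVENT_STR_MAX
 *	}
 */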

#endif /* _LINUX_TRACE_EVENT_H */

/*
 * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
 * This is due to the way trace custom events work. If a file includes two
 * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
 * will override the TRACE_CUSTOM_EVENT and break the second include.
 */

#ifndef TRACE_CUSTOM_EVENT

#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)

#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */