// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

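/*
 * Illustrative sketch (hypothetical entry, not one of the real entries
 * in trace_entries.h): with the macros above, an invocation such as
 *
 *	FTRACE_ENTRY(example, example_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		F_printk("%lx <-- %lx", __entry->ip, __entry->parent_ip),
 *		FILTER_TRACE_FN)
 *
 * expands to a plain struct whose first member is the common header:
 *
 *	struct example_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */
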
#include "trace_entries.h"

/*
 * Syscalls are special and need special handling; that is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};

struct tracer;
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function. That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array. Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_cond_snapshot_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_cond_snapshot_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held. The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken. Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	to in association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};
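
/*
 * Illustrative sketch (hypothetical, names made up for the example):
 * a cond_update_fn_t implementation that only allows the snapshot when
 * a caller-supplied threshold is exceeded.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = cond_data;
 *
 *		// 'true' means: take the snapshot
 *		return some_measured_value() > *threshold;
 *	}
 *
 * The callback and cond_data would be registered through the enable
 * function described above, and the snapshot triggered later via
 * tracing_snapshot_cond(tr, &my_threshold).
 */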

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr, so
	 * it needs to be defined outside of the CONFIG_TRACER_MAX_TRACE
	 * block.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			time_stamp_abs_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);

extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
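
/*
 * Illustrative sketch (hypothetical): typical use of trace_assign_type()
 * in an output callback. The entry pointer from the iterator is
 * type-checked against the expected entry struct before its fields are
 * touched; a mismatched id triggers the WARN_ON() in IF_ASSIGN().
 *
 *	static enum print_line_t example_print(struct trace_iterator *iter)
 *	{
 *		struct trace_entry *ent = iter->ent;
 *		struct print_entry *field;
 *
 *		trace_assign_type(field, ent);	// WARNs unless TRACE_PRINT
 *		trace_seq_printf(&iter->seq, "%s", field->buf);
 *		return trace_handle_return(&iter->seq);
 *	}
 */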

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
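
/*
 * Illustrative sketch (hypothetical names): how a tracer defines its
 * private options with TRACER_OPT(). The option "myopt" appears in the
 * trace_options file and toggles bit 0x1 of the tracer's flags->val.
 *
 *	#define TRACE_MYOPT	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(myopt, TRACE_MYOPT) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = TRACE_MYOPT,	// initial value, set by the tracer
 *		.opts = my_opts,
 *	};
 */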


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals that one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If the arch does not support an ftrace feature:
 *    call an internal function (uses INTERNAL bits) which calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,
	TRACE_FTRACE_TRANSITION_BIT,

	/* Internal use recursion bits */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,
	TRACE_INTERNAL_TRANSITION_BIT,

	TRACE_BRANCH_BIT,
	/*
	 * Abuse of the trace_recursion: we need a way to maintain state
	 * if we are tracing the function graph in irq context, because
	 * we want to trace a particular function that was called in irq
	 * context while irq tracing is off. Since this can only be
	 * modified by current, we can reuse trace_recursion.
	 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at the start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context! Luckily, it can't be greater
	 * than 3, so the next two bits are a mask of what the depth
	 * is when we set TRACE_GRAPH_BIT.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT

#define TRACE_LIST_START	TRACE_INTERNAL_BIT

#define TRACE_CONTEXT_MASK	((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
	TRACE_CTX_TRANSITION,
};

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = TRACE_CTX_NMI;
		else if (in_irq())
			bit = TRACE_CTX_IRQ;
		else
			bit = TRACE_CTX_SOFTIRQ;
	} else
		bit = TRACE_CTX_NORMAL;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start)
{
	unsigned int val = current->trace_recursion;
	int bit;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit))) {
		/*
		 * It could be that preempt_count has not been updated during
		 * a switch between contexts. Allow for a single recursion.
		 */
		bit = start + TRACE_CTX_TRANSITION;
		if (trace_recursion_test(bit))
			return -1;
		trace_recursion_set(bit);
		barrier();
		return bit;
	}

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
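
/*
 * Illustrative sketch (hypothetical callback): the expected pairing of
 * trace_test_and_set_recursion() and trace_clear_recursion() inside a
 * function-trace callback. A negative return means we are recursing in
 * the same context and must bail out.
 *
 *	static void my_ftrace_callback(unsigned long ip,
 *				       unsigned long parent_ip,
 *				       struct ftrace_ops *op,
 *				       struct pt_regs *regs)
 *	{
 *		int bit;
 *
 *		bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
 *		if (bit < 0)
 *			return;		// recursion detected, drop the event
 *
 *		do_the_actual_tracing();
 *
 *		trace_clear_recursion(bit);
 *	}
 */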

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
int tracing_open_file_tr(struct inode *inode, struct file *filp);
int tracing_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
					struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_REL_TIME	0x40
#define TRACE_GRAPH_PRINT_IRQS		0x80
#define TRACE_GRAPH_PRINT_TAIL		0x100
#define TRACE_GRAPH_SLEEP_TIME		0x200
#define TRACE_GRAPH_GRAPH_TIME		0x400
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {

		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		trace_recursion_set(TRACE_GRAPH_BIT);
		trace_recursion_set_depth(trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{
	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
	    trace->depth == trace_recursion_depth())
		trace_recursion_clear(TRACE_GRAPH_BIT);
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* Trace it when it is nested in, or is, an enabled function. */
	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
		 ftrace_graph_addr(trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
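
/*
 * Illustrative sketch (hypothetical write handler): the usual life
 * cycle of a trace_parser in a tracefs write callback, reading one
 * space-separated token per trace_get_user() call.
 *
 *	ssize_t my_write(struct file *filp, const char __user *ubuf,
 *			 size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_parser parser;
 *		ssize_t read;
 *
 *		if (trace_parser_get_init(&parser, 64))	// 64-byte buffer
 *			return -ENOMEM;
 *
 *		read = trace_get_user(&parser, ubuf, cnt, ppos);
 *		if (read >= 0 && trace_parser_loaded(&parser))
 *			handle_token(parser.buffer);	// parsed token
 *
 *		trace_parser_put(&parser);
 *		return read;
 *	}
 */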

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
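
/*
 * Illustrative sketch: for a single entry such as
 * C(PRINT_PARENT, "print-parent"), the two expansions above produce
 *
 *	TRACE_ITER_PRINT_PARENT_BIT,		// in trace_iterator_bits
 *	TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT),
 *
 * while trace.c redefines C(a, b) once more to pick up the
 * "print-parent" string for the trace_options file. Expanding the same
 * TRACE_FLAGS list three ways is what keeps the bit positions, the
 * masks, and the option names in sync.
 */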

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct prog_entry;

struct event_filter {
	struct prog_entry	__rcu *prog;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry, event);

	if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
				    EVENT_FILE_FL_FILTERED |
				    EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
		goto discard;

	if (file->flags & EVENT_FILE_FL_FILTERED &&
	    !filter_match_preds(file->filter, entry))
		goto discard;

	if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(file))
		goto discard;

	return false;
 discard:
	__trace_event_discard_commit(buffer, event);
	return true;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt);
}
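
/*
 * Illustrative sketch (hypothetical event, made-up entry struct and
 * fields): the reserve/fill/commit pattern this helper is designed
 * for, as used by event probes.
 *
 *	struct ring_buffer_event *event;
 *	struct my_event_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, event_type,
 *					  sizeof(*entry), irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->my_field = value;
 *
 *	// Runs any attached triggers, then either discards the event
 *	// (soft disabled / filtered out) or commits it to the buffer.
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    irq_flags, pc);
 */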

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
1497 static inline void
event_trigger_unlock_commit_regs(struct trace_event_file * file,struct ring_buffer * buffer,struct ring_buffer_event * event,void * entry,unsigned long irq_flags,int pc,struct pt_regs * regs)1498 event_trigger_unlock_commit_regs(struct trace_event_file *file,
1499 struct ring_buffer *buffer,
1500 struct ring_buffer_event *event,
1501 void *entry, unsigned long irq_flags, int pc,
1502 struct pt_regs *regs)
1503 {
1504 enum event_trigger_type tt = ETT_NONE;
1505
1506 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1507 trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1508 irq_flags, pc, regs);
1509
1510 if (tt)
1511 event_triggers_post_call(file, tt);
1512 }
1513
1514 #define FILTER_PRED_INVALID ((unsigned short)-1)
1515 #define FILTER_PRED_IS_RIGHT (1 << 15)
1516 #define FILTER_PRED_FOLD (1 << 15)
1517
1518 /*
1519 * The max preds is the size of unsigned short with
1520 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1521 * and FOLD flags. The other is reserved.
1522 *
1523 * 2^14 preds is way more than enough.
1524 */
1525 #define MAX_FILTER_PRED 16384
1526
1527 struct filter_pred;
1528 struct regex;
1529
1530 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1531
1532 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1533
1534 enum regex_type {
1535 MATCH_FULL = 0,
1536 MATCH_FRONT_ONLY,
1537 MATCH_MIDDLE_ONLY,
1538 MATCH_END_ONLY,
1539 MATCH_GLOB,
1540 MATCH_INDEX,
1541 };
1542
1543 struct regex {
1544 char pattern[MAX_FILTER_STR_VAL];
1545 int len;
1546 int field_len;
1547 regex_match_func match;
1548 };
1549
1550 struct filter_pred {
1551 filter_pred_fn_t fn;
1552 u64 val;
1553 struct regex regex;
1554 unsigned short *ops;
1555 struct ftrace_event_field *field;
1556 int offset;
1557 int not;
1558 int op;
1559 };
1560
is_string_field(struct ftrace_event_field * field)1561 static inline bool is_string_field(struct ftrace_event_field *field)
1562 {
1563 return field->filter_type == FILTER_DYN_STRING ||
1564 field->filter_type == FILTER_STATIC_STRING ||
1565 field->filter_type == FILTER_PTR_STRING ||
1566 field->filter_type == FILTER_COMM;
1567 }
1568
is_function_field(struct ftrace_event_field * field)1569 static inline bool is_function_field(struct ftrace_event_field *field)
1570 {
1571 return field->filter_type == FILTER_TRACE_FN;
1572 }
1573
1574 extern enum regex_type
1575 filter_parse_regex(char *buff, int len, char **search, int *not);
1576 extern void print_event_filter(struct trace_event_file *file,
1577 struct trace_seq *s);
1578 extern int apply_event_filter(struct trace_event_file *file,
1579 char *filter_string);
1580 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1581 char *filter_string);
1582 extern void print_subsystem_event_filter(struct event_subsystem *system,
1583 struct trace_seq *s);
1584 extern int filter_assign_type(const char *type);
1585 extern int create_event_filter(struct trace_array *tr,
1586 struct trace_event_call *call,
1587 char *filter_str, bool set_str,
1588 struct event_filter **filterp);
1589 extern void free_event_filter(struct event_filter *filter);
1590
1591 struct ftrace_event_field *
1592 trace_find_event_field(struct trace_event_call *call, char *name);
1593
1594 extern void trace_event_enable_cmd_record(bool enable);
1595 extern void trace_event_enable_tgid_record(bool enable);
1596
1597 extern int event_trace_init(void);
1598 extern int init_events(void);
1599 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1600 extern int event_trace_del_tracer(struct trace_array *tr);
1601
1602 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1603 const char *system,
1604 const char *event);
1605 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1606 const char *system,
1607 const char *event);
1608
event_file_data(struct file * filp)1609 static inline void *event_file_data(struct file *filp)
1610 {
1611 return READ_ONCE(file_inode(filp)->i_private);
1612 }
1613
1614 extern struct mutex event_mutex;
1615 extern struct list_head ftrace_events;
1616
1617 extern const struct file_operations event_trigger_fops;
1618 extern const struct file_operations event_hist_fops;
1619
1620 #ifdef CONFIG_HIST_TRIGGERS
1621 extern int register_trigger_hist_cmd(void);
1622 extern int register_trigger_hist_enable_disable_cmds(void);
1623 #else
register_trigger_hist_cmd(void)1624 static inline int register_trigger_hist_cmd(void) { return 0; }
register_trigger_hist_enable_disable_cmds(void)1625 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1626 #endif
1627
1628 extern int register_trigger_cmds(void);
1629 extern void clear_event_triggers(struct trace_array *tr);
1630
1631 struct event_trigger_data {
1632 unsigned long count;
1633 int ref;
1634 struct event_trigger_ops *ops;
1635 struct event_command *cmd_ops;
1636 struct event_filter __rcu *filter;
1637 char *filter_str;
1638 void *private_data;
1639 bool paused;
1640 bool paused_tmp;
1641 struct list_head list;
1642 char *name;
1643 struct list_head named_list;
1644 struct event_trigger_data *named_data;
1645 };
1646
/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command @reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_events_trigger.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_events_trigger.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_events_trigger.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec,
					struct ring_buffer_event *rbe);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

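/*
 * Illustrative sketch (kept in a comment): a minimal event_trigger_ops
 * instance for a hypothetical counting trigger. event_trigger_init()
 * is the generic @init helper declared above; the free/print callbacks
 * here are hypothetical placeholders:
 *
 *	static void example_trigger(struct event_trigger_data *data,
 *				    void *rec, struct ring_buffer_event *rbe)
 *	{
 *		if (!data->count)
 *			return;			// count exhausted, do nothing
 *		if (data->count != -1)
 *			(data->count)--;	// -1 means "fire forever"
 *		// perform the actual trigger action here
 *	}
 *
 *	static struct event_trigger_ops example_trigger_ops = {
 *		.func	= example_trigger,
 *		.init	= event_trigger_init,
 *		.free	= example_trigger_free,		// hypothetical
 *		.print	= example_trigger_print,	// hypothetical
 *	};
 */
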
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @flags, must be set for
 * each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes. The first is to ensure
 *	that only one trigger of the same type can be set at a given
 *	time for a particular event; e.g. it doesn't make sense to
 *	have both a traceon and a traceoff trigger attached to a
 *	single event at the same time, so traceon and traceoff have
 *	the same type though they have different names. The
 *	@trigger_type value is also used as a bit value for deferring
 *	the actual trigger action until after the current event is
 *	finished. Some commands need to do this if they themselves
 *	log to the trace buffer (see the EVENT_CMD_FL_POST_TRIGGER
 *	flag below). @trigger_type values are defined by adding new
 *	values to the trigger_type enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_events_trigger.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_events_trigger.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, then
 *	releases it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_events_trigger.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_events_trigger.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};

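/*
 * Illustrative sketch (kept in a comment): wiring a hypothetical
 * command into the trigger machinery at init time. ETT_EXAMPLE and
 * the example_* callbacks are placeholders; real commands typically
 * point @func, @reg and @unreg at the generic helpers named in the
 * comment above:
 *
 *	static struct event_command example_cmd = {
 *		.name			= "example",
 *		.trigger_type		= ETT_EXAMPLE,	// hypothetical type bit
 *		.flags			= EVENT_CMD_FL_NEEDS_REC,
 *		.func			= example_cmd_func,
 *		.reg			= example_cmd_reg,
 *		.unreg			= example_cmd_unreg,
 *		.get_trigger_ops	= example_get_trigger_ops,
 *	};
 *
 *	ret = register_event_command(&example_cmd);
 */
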
/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

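/*
 * Illustrative sketch (kept in a comment) of the two-phase dispatch
 * described above, with filter checks elided: while the record is
 * still open, post-trigger commands only set their type bit; all
 * others fire immediately. Deferred triggers run once the record has
 * been committed or discarded:
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *	struct event_trigger_data *data;
 *
 *	list_for_each_entry_rcu(data, &file->triggers, list) {
 *		if (event_command_post_trigger(data->cmd_ops))
 *			tt |= data->cmd_ops->trigger_type;	// defer
 *		else
 *			data->ops->func(data, rec, rbe);	// fire now
 *	}
 *	// ... commit/discard the record, then invoke the deferred
 *	// triggers for each bit set in "tt" ...
 */
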
extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
					cond_update_fn_t update);
extern int tracing_snapshot_cond_disable(struct trace_array *tr);
extern void *tracing_cond_snapshot_data(struct trace_array *tr);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

#define MAX_EVENT_NAME_LEN	64

extern int trace_run_command(const char *buf, int (*createfn)(int, char **));
extern ssize_t trace_parse_run_command(struct file *file,
				       const char __user *buffer, size_t count,
				       loff_t *ppos,
				       int (*createfn)(int, char **));

extern unsigned int err_pos(char *cmd, const char *str);
extern void tracing_log_err(struct trace_array *tr,
			    const char *loc, const char *cmd,
			    const char **errs, u8 type, u8 pos);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). internal_trace_puts() exists for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

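/*
 * Example use (illustrative), e.g. from a warning path inside the
 * tracer itself:
 *
 *	internal_trace_puts("*** EXAMPLE WARNING ***\n");
 */
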
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
#endif

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

/*
 * Reset the state of the trace_iterator so that it can read consumed
 * data. Normally, the trace_iterator is used for reading the data
 * when it is not consumed, and must retain state between reads.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	const size_t offset = offsetof(struct trace_iterator, seq);

	/*
	 * Keep gcc from complaining about overwriting more than just one
	 * member in the structure.
	 */
	memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);

	iter->pos = -1;
}

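/*
 * Illustrative sketch (kept in a comment): a consuming reader resets
 * the iterator before each walk so per-read state does not leak from
 * the previous read, then re-initializes the seq buffer that the
 * memset above just cleared:
 *
 *	trace_iterator_reset(iter);
 *	trace_seq_init(&iter->seq);
 */
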
#endif /* _LINUX_KERNEL_TRACE_H */