1 /*
2  * Ftrace header.  For implementation details beyond the random comments
3  * scattered below, see: Documentation/trace/ftrace-design.txt
4  */
5 
6 #ifndef _LINUX_FTRACE_H
7 #define _LINUX_FTRACE_H
8 
9 #include <linux/trace_clock.h>
10 #include <linux/kallsyms.h>
11 #include <linux/linkage.h>
12 #include <linux/bitops.h>
13 #include <linux/ptrace.h>
14 #include <linux/ktime.h>
15 #include <linux/sched.h>
16 #include <linux/types.h>
17 #include <linux/init.h>
18 #include <linux/fs.h>
19 
20 #include <asm/ftrace.h>
21 
22 /*
23  * If the arch supports passing the variable contents of
24  * function_trace_op as the third parameter back from the
25  * mcount call, then the arch should define this as 1.
26  */
27 #ifndef ARCH_SUPPORTS_FTRACE_OPS
28 #define ARCH_SUPPORTS_FTRACE_OPS 0
29 #endif
30 
31 /*
32  * If the arch's mcount caller does not support all of ftrace's
33  * features, then it must call an indirect function that
34  * does. Or at least does enough to prevent any unwelcome side effects.
35  */
36 #if !ARCH_SUPPORTS_FTRACE_OPS
37 # define FTRACE_FORCE_LIST_FUNC 1
38 #else
39 # define FTRACE_FORCE_LIST_FUNC 0
40 #endif
41 
42 /* Main tracing buffer and events set up */
43 #ifdef CONFIG_TRACING
44 void trace_init(void);
45 #else
46 static inline void trace_init(void) { }
47 #endif
48 
49 struct module;
50 struct ftrace_hash;
51 
52 #ifdef CONFIG_FUNCTION_TRACER
53 
54 extern int ftrace_enabled;
55 extern int
56 ftrace_enable_sysctl(struct ctl_table *table, int write,
57 		     void __user *buffer, size_t *lenp,
58 		     loff_t *ppos);
59 
60 struct ftrace_ops;
61 
62 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
63 			      struct ftrace_ops *op, struct pt_regs *regs);
64 
65 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
66 
67 /*
68  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
69  * set in the flags member.
70  * PER_CPU, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
71  * IPMODIFY are attribute flags which can only be set before
72  * registering the ftrace_ops, and cannot be modified while registered.
73  * Changing those attribute flags after registering the ftrace_ops will
74  * cause unexpected results.
75  *
76  * ENABLED - set/unset when ftrace_ops is registered/unregistered
77  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
78  *           allocated ftrace_ops which need special care
79  * PER_CPU - set manually by the ftrace_ops user to denote that the ftrace_ops
80  *           can be controlled by the following calls:
81  *             ftrace_function_local_enable
82  *             ftrace_function_local_disable
83  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
84  *            and passed to the callback. If this flag is set, but the
85  *            architecture does not support passing regs
86  *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
87  *            ftrace_ops will fail to register, unless the next flag
88  *            is set.
89  * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
90  *            handler can handle an arch that does not save regs
91  *            (the handler tests if regs == NULL), then it can set
92  *            this flag instead. It will not fail registering the ftrace_ops
93  *            but the regs field will be NULL if the arch does not support
94  *            passing regs to the handler.
95  *            Note, if this flag is set, the SAVE_REGS flag will automatically
96  *            get set upon registering the ftrace_ops, if the arch supports it.
97  * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
98  *            that the callback has its own recursion protection. If it does
99  *            not set this, then the ftrace infrastructure will add recursion
100  *            protection for the caller.
101  * STUB   - The ftrace_ops is just a place holder.
102  * INITIALIZED - The ftrace_ops has already been initialized (the first time
103  *            register_ftrace_function() is called, it will initialize the ops)
104  * DELETED - The ops are being deleted, do not let them be registered again.
105  * ADDING  - The ops is in the process of being added.
106  * REMOVING - The ops is in the process of being removed.
107  * MODIFYING - The ops is in the process of changing its filter functions.
108  * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
109  *            The arch specific code sets this flag when it allocated a
110  *            trampoline. This lets the arch know that it can update the
111  *            trampoline in case the callback function changes.
112  *            The ftrace_ops trampoline can be set by the ftrace users, and
113  *            in such cases the arch must not modify it. Only the arch ftrace
114  *            core code should set this flag.
115  * IPMODIFY - The ops can modify the IP register. This can only be set with
116  *            SAVE_REGS. If another ops with this flag set is already registered
117  *            for any of the functions that this ops will be registered for, then
118  *            this ops will fail to register or set_filter_ip.
119  * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
120  */
121 enum {
122 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
123 	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
124 	FTRACE_OPS_FL_PER_CPU			= 1 << 2,
125 	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
126 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
127 	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
128 	FTRACE_OPS_FL_STUB			= 1 << 6,
129 	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
130 	FTRACE_OPS_FL_DELETED			= 1 << 8,
131 	FTRACE_OPS_FL_ADDING			= 1 << 9,
132 	FTRACE_OPS_FL_REMOVING			= 1 << 10,
133 	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
134 	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
135 	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
136 	FTRACE_OPS_FL_PID			= 1 << 14,
137 	FTRACE_OPS_FL_RCU			= 1 << 15,
138 };
139 
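/*
 * Illustrative sketch (not part of this header; my_regs_callback and my_ops
 * are hypothetical names): a callback registered with
 * FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED must tolerate regs == NULL on arches
 * that do not have CONFIG_DYNAMIC_FTRACE_WITH_REGS, as described above.
 *
 *      static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
 *                                   struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              if (!regs)
 *                      return;
 *              trace_printk("%ps pc=%lx\n", (void *)ip, instruction_pointer(regs));
 *      }
 *
 *      static struct ftrace_ops my_ops = {
 *              .func   = my_regs_callback,
 *              .flags  = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *      };
 */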
140 #ifdef CONFIG_DYNAMIC_FTRACE
141 /* The hash used to know what functions the callbacks trace */
142 struct ftrace_ops_hash {
143 	struct ftrace_hash		*notrace_hash;
144 	struct ftrace_hash		*filter_hash;
145 	struct mutex			regex_lock;
146 };
147 #endif
148 
149 /*
150  * Note, ftrace_ops can be referenced outside of RCU protection, unless
151  * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
152  * core data, unregistering it will schedule on all CPUs
153  * to make sure that there are no more users. Depending on the load of the
154  * system, that may take a bit of time.
155  *
156  * Care must also be taken not to free any private data prematurely; if private
157  * data is added to a ftrace_ops that is in core code, the user of the
158  * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
159  */
160 struct ftrace_ops {
161 	ftrace_func_t			func;
162 	struct ftrace_ops		*next;
163 	unsigned long			flags;
164 	void				*private;
165 	ftrace_func_t			saved_func;
166 	int __percpu			*disabled;
167 #ifdef CONFIG_DYNAMIC_FTRACE
168 	struct ftrace_ops_hash		local_hash;
169 	struct ftrace_ops_hash		*func_hash;
170 	struct ftrace_ops_hash		old_hash;
171 	unsigned long			trampoline;
172 	unsigned long			trampoline_size;
173 #endif
174 };
175 
176 /*
177  * Type of the current tracing.
178  */
179 enum ftrace_tracing_type_t {
180 	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
181 	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
182 };
183 
184 /* Current tracing type, default is FTRACE_TYPE_ENTER */
185 extern enum ftrace_tracing_type_t ftrace_tracing_type;
186 
187 /*
188  * The ftrace_ops must be static and should also
189  * be read_mostly.  These functions do modify read_mostly variables
190  * so use them sparingly. Never free an ftrace_ops or modify the
191  * next pointer after it has been registered. Even after unregistering
192  * it, the next pointer may still be used internally.
193  */
194 int register_ftrace_function(struct ftrace_ops *ops);
195 int unregister_ftrace_function(struct ftrace_ops *ops);
196 void clear_ftrace_function(void);
197 
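/*
 * Illustrative sketch of the rules above (not part of this header;
 * my_callback and my_ops are hypothetical names). The ops is static, marked
 * __read_mostly, and is never freed once it has been registered:
 *
 *      static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                              struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func   = my_callback,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *
 * and, when done tracing:
 *
 *      unregister_ftrace_function(&my_ops);
 */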
198 /**
199  * ftrace_function_local_enable - enable ftrace_ops on current cpu
200  *
201  * This function enables tracing on current cpu by decreasing
202  * the per cpu control variable.
203  * It must be called with preemption disabled and only on ftrace_ops
204  * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
205  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
206  */
207 static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
208 {
209 	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
210 		return;
211 
212 	(*this_cpu_ptr(ops->disabled))--;
213 }
214 
215 /**
216  * ftrace_function_local_disable - disable ftrace_ops on current cpu
217  *
218  * This function disables tracing on current cpu by increasing
219  * the per cpu control variable.
220  * It must be called with preemption disabled and only on ftrace_ops
221  * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
222  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
223  */
224 static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
225 {
226 	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
227 		return;
228 
229 	(*this_cpu_ptr(ops->disabled))++;
230 }
231 
232 /**
233  * ftrace_function_local_disabled - returns ftrace_ops disabled value
234  *                                  on current cpu
235  *
236  * This function returns value of ftrace_ops::disabled on current cpu.
237  * It must be called with preemption disabled and only on ftrace_ops
238  * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
239  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
240  */
241 static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
242 {
243 	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
244 	return *this_cpu_ptr(ops->disabled);
245 }
246 
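/*
 * Illustrative use of the three helpers above (a sketch; my_per_cpu_ops and
 * my_quiet_work() are hypothetical, and the ops must have been registered
 * with FTRACE_OPS_FL_PER_CPU). The callback is skipped on this CPU between
 * the disable and the enable:
 *
 *      preempt_disable();
 *      ftrace_function_local_disable(&my_per_cpu_ops);
 *      my_quiet_work();
 *      ftrace_function_local_enable(&my_per_cpu_ops);
 *      preempt_enable();
 */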
247 extern void ftrace_stub(unsigned long a0, unsigned long a1,
248 			struct ftrace_ops *op, struct pt_regs *regs);
249 
250 #else /* !CONFIG_FUNCTION_TRACER */
251 /*
252  * (un)register_ftrace_function must be a macro since the ops parameter
253  * must not be evaluated.
254  */
255 #define register_ftrace_function(ops) ({ 0; })
256 #define unregister_ftrace_function(ops) ({ 0; })
257 static inline int ftrace_nr_registered_ops(void)
258 {
259 	return 0;
260 }
261 static inline void clear_ftrace_function(void) { }
262 static inline void ftrace_kill(void) { }
263 #endif /* CONFIG_FUNCTION_TRACER */
264 
265 #ifdef CONFIG_STACK_TRACER
266 
267 #define STACK_TRACE_ENTRIES 500
268 
269 struct stack_trace;
270 
271 extern unsigned stack_trace_index[];
272 extern struct stack_trace stack_trace_max;
273 extern unsigned long stack_trace_max_size;
274 extern arch_spinlock_t stack_trace_max_lock;
275 
276 extern int stack_tracer_enabled;
277 void stack_trace_print(void);
278 int
279 stack_trace_sysctl(struct ctl_table *table, int write,
280 		   void __user *buffer, size_t *lenp,
281 		   loff_t *ppos);
282 #endif
283 
284 struct ftrace_func_command {
285 	struct list_head	list;
286 	char			*name;
287 	int			(*func)(struct ftrace_hash *hash,
288 					char *func, char *cmd,
289 					char *params, int enable);
290 };
291 
292 #ifdef CONFIG_DYNAMIC_FTRACE
293 
294 int ftrace_arch_code_modify_prepare(void);
295 int ftrace_arch_code_modify_post_process(void);
296 
297 struct dyn_ftrace;
298 
299 enum ftrace_bug_type {
300 	FTRACE_BUG_UNKNOWN,
301 	FTRACE_BUG_INIT,
302 	FTRACE_BUG_NOP,
303 	FTRACE_BUG_CALL,
304 	FTRACE_BUG_UPDATE,
305 };
306 extern enum ftrace_bug_type ftrace_bug_type;
307 
308 /*
309  * Archs can set this to point to a variable that holds the value that was
310  * expected at the call site before calling ftrace_bug().
311  */
312 extern const void *ftrace_expected;
313 
314 void ftrace_bug(int err, struct dyn_ftrace *rec);
315 
316 struct seq_file;
317 
318 struct ftrace_probe_ops {
319 	void			(*func)(unsigned long ip,
320 					unsigned long parent_ip,
321 					void **data);
322 	int			(*init)(struct ftrace_probe_ops *ops,
323 					unsigned long ip, void **data);
324 	void			(*free)(struct ftrace_probe_ops *ops,
325 					unsigned long ip, void **data);
326 	int			(*print)(struct seq_file *m,
327 					 unsigned long ip,
328 					 struct ftrace_probe_ops *ops,
329 					 void *data);
330 };
331 
332 extern int
333 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
334 			      void *data);
335 extern void
336 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
337 				void *data);
338 extern void
339 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
340 extern void unregister_ftrace_function_probe_all(char *glob);
341 
342 extern int ftrace_text_reserved(const void *start, const void *end);
343 
344 extern int ftrace_nr_registered_ops(void);
345 
346 bool is_ftrace_trampoline(unsigned long addr);
347 
348 /*
349  * The dyn_ftrace record's flags field is split into two parts.
350  * The first part, which is '0-FTRACE_REF_MAX', is a counter of
351  * the number of callbacks that have registered the function that
352  * the dyn_ftrace descriptor represents.
353  *
354  * The second part is a mask:
355  *  ENABLED - the function is being traced
356  *  REGS    - the record wants the function to save regs
357  *  REGS_EN - the function is set up to save regs.
358  *  IPMODIFY - the record allows for the IP address to be changed.
359  *  DISABLED - the record is not ready to be touched yet
360  *
361  * When a new ftrace_ops is registered and wants a function to save
362  * pt_regs, the REGS flag is set in rec->flags. When the function has been
363  * set up to save regs, the REGS_EN flag is set. Once a function
364  * starts saving regs it will do so until all ftrace_ops are removed
365  * from tracing that function.
366  */
367 enum {
368 	FTRACE_FL_ENABLED	= (1UL << 31),
369 	FTRACE_FL_REGS		= (1UL << 30),
370 	FTRACE_FL_REGS_EN	= (1UL << 29),
371 	FTRACE_FL_TRAMP		= (1UL << 28),
372 	FTRACE_FL_TRAMP_EN	= (1UL << 27),
373 	FTRACE_FL_IPMODIFY	= (1UL << 26),
374 	FTRACE_FL_DISABLED	= (1UL << 25),
375 };
376 
377 #define FTRACE_REF_MAX_SHIFT	25
378 #define FTRACE_FL_BITS		7
379 #define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
380 #define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
381 #define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
382 
383 #define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
384 
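/*
 * Worked example of the split described above: with FTRACE_REF_MAX_SHIFT = 25
 * and FTRACE_FL_BITS = 7, bits 25-31 of dyn_ftrace.flags hold the FTRACE_FL_*
 * state bits (FTRACE_FL_MASK == 0xfe000000) and bits 0-24 hold the reference
 * count, so ftrace_rec_count() can report at most
 * FTRACE_REF_MAX = (1 << 25) - 1 = 33554431 callbacks registered on a record.
 */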
385 struct dyn_ftrace {
386 	unsigned long		ip; /* address of mcount call-site */
387 	unsigned long		flags;
388 	struct dyn_arch_ftrace	arch;
389 };
390 
391 int ftrace_force_update(void);
392 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
393 			 int remove, int reset);
394 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
395 		       int len, int reset);
396 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
397 			int len, int reset);
398 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
399 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
400 void ftrace_free_filter(struct ftrace_ops *ops);
401 
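/*
 * Illustrative sketch of the filter API above (my_ops and my_callback are
 * hypothetical; the function name used is only an example). Typically the
 * filter is set up before registering the ops, and reset == 1 clears any
 * filter that was set previously:
 *
 *      static struct ftrace_ops my_ops = {
 *              .func   = my_callback,
 *      };
 *
 *      ftrace_set_filter(&my_ops, "do_page_fault", strlen("do_page_fault"), 1);
 *      register_ftrace_function(&my_ops);
 */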
402 int register_ftrace_command(struct ftrace_func_command *cmd);
403 int unregister_ftrace_command(struct ftrace_func_command *cmd);
404 
405 enum {
406 	FTRACE_UPDATE_CALLS		= (1 << 0),
407 	FTRACE_DISABLE_CALLS		= (1 << 1),
408 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
409 	FTRACE_START_FUNC_RET		= (1 << 3),
410 	FTRACE_STOP_FUNC_RET		= (1 << 4),
411 };
412 
413 /*
414  * The FTRACE_UPDATE_* enum is used to pass information back
415  * from the ftrace_update_record() and ftrace_test_record()
416  * functions. These are called by the code update routines
417  * to find out what is to be done for a given function.
418  *
419  *  IGNORE           - The function is already what we want it to be
420  *  MAKE_CALL        - Start tracing the function
421  *  MODIFY_CALL      - Stop saving regs for the function
422  *  MAKE_NOP         - Stop tracing the function
423  */
424 enum {
425 	FTRACE_UPDATE_IGNORE,
426 	FTRACE_UPDATE_MAKE_CALL,
427 	FTRACE_UPDATE_MODIFY_CALL,
428 	FTRACE_UPDATE_MAKE_NOP,
429 };
430 
431 enum {
432 	FTRACE_ITER_FILTER	= (1 << 0),
433 	FTRACE_ITER_NOTRACE	= (1 << 1),
434 	FTRACE_ITER_PRINTALL	= (1 << 2),
435 	FTRACE_ITER_DO_HASH	= (1 << 3),
436 	FTRACE_ITER_HASH	= (1 << 4),
437 	FTRACE_ITER_ENABLED	= (1 << 5),
438 };
439 
440 void arch_ftrace_update_code(int command);
441 
442 struct ftrace_rec_iter;
443 
444 struct ftrace_rec_iter *ftrace_rec_iter_start(void);
445 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
446 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
447 
448 #define for_ftrace_rec_iter(iter)		\
449 	for (iter = ftrace_rec_iter_start();	\
450 	     iter;				\
451 	     iter = ftrace_rec_iter_next(iter))
452 
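/*
 * Illustrative sketch of walking all mcount records with the iterator above
 * (the counting body is hypothetical; callers are the arch code update paths):
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *      int pending = 0;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              if (ftrace_test_record(rec, 1) == FTRACE_UPDATE_MAKE_CALL)
 *                      pending++;
 *      }
 */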
453 
454 int ftrace_update_record(struct dyn_ftrace *rec, int enable);
455 int ftrace_test_record(struct dyn_ftrace *rec, int enable);
456 void ftrace_run_stop_machine(int command);
457 unsigned long ftrace_location(unsigned long ip);
458 unsigned long ftrace_location_range(unsigned long start, unsigned long end);
459 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
460 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
461 
462 extern ftrace_func_t ftrace_trace_function;
463 
464 int ftrace_regex_open(struct ftrace_ops *ops, int flag,
465 		  struct inode *inode, struct file *file);
466 ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
467 			    size_t cnt, loff_t *ppos);
468 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
469 			     size_t cnt, loff_t *ppos);
470 int ftrace_regex_release(struct inode *inode, struct file *file);
471 
472 void __init
473 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
474 
475 /* defined in arch */
476 extern int ftrace_ip_converted(unsigned long ip);
477 extern int ftrace_dyn_arch_init(void);
478 extern void ftrace_replace_code(int enable);
479 extern int ftrace_update_ftrace_func(ftrace_func_t func);
480 extern void ftrace_caller(void);
481 extern void ftrace_regs_caller(void);
482 extern void ftrace_call(void);
483 extern void ftrace_regs_call(void);
484 extern void mcount_call(void);
485 
486 void ftrace_modify_all_code(int command);
487 
488 #ifndef FTRACE_ADDR
489 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
490 #endif
491 
492 #ifndef FTRACE_GRAPH_ADDR
493 #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
494 #endif
495 
496 #ifndef FTRACE_REGS_ADDR
497 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
498 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
499 #else
500 # define FTRACE_REGS_ADDR FTRACE_ADDR
501 #endif
502 #endif
503 
504 /*
505  * If an arch would like functions that are only traced
506  * by the function graph tracer to jump directly to its own
507  * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
508  * to be that address to jump to.
509  */
510 #ifndef FTRACE_GRAPH_TRAMP_ADDR
511 #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
512 #endif
513 
514 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
515 extern void ftrace_graph_caller(void);
516 extern int ftrace_enable_ftrace_graph_caller(void);
517 extern int ftrace_disable_ftrace_graph_caller(void);
518 #else
519 static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
520 static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
521 #endif
522 
523 /**
524  * ftrace_make_nop - convert code into nop
525  * @mod: module structure if called by module load initialization
526  * @rec: the mcount call site record
527  * @addr: the address that the call site should be calling
528  *
529  * This is a very sensitive operation and great care needs
530  * to be taken by the arch.  The operation should carefully
531  * read the location, check to see if what is read is indeed
532  * what we expect it to be, and then on success of the compare,
533  * it should write to the location.
534  *
535  * The code segment at @rec->ip should be a caller to @addr
536  *
537  * Return must be:
538  *  0 on success
539  *  -EFAULT on error reading the location
540  *  -EINVAL on a failed compare of the contents
541  *  -EPERM  on error writing to the location
542  * Any other value will be considered a failure.
543  */
544 extern int ftrace_make_nop(struct module *mod,
545 			   struct dyn_ftrace *rec, unsigned long addr);
546 
547 /**
548  * ftrace_make_call - convert a nop call site into a call to addr
549  * @rec: the mcount call site record
550  * @addr: the address that the call site should call
551  *
552  * This is a very sensitive operation and great care needs
553  * to be taken by the arch.  The operation should carefully
554  * read the location, check to see if what is read is indeed
555  * what we expect it to be, and then on success of the compare,
556  * it should write to the location.
557  *
558  * The code segment at @rec->ip should be a nop
559  *
560  * Return must be:
561  *  0 on success
562  *  -EFAULT on error reading the location
563  *  -EINVAL on a failed compare of the contents
564  *  -EPERM  on error writing to the location
565  * Any other value will be considered a failure.
566  */
567 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
568 
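/*
 * Schematic of the read/compare/write pattern that ftrace_make_nop() and
 * ftrace_make_call() are expected to follow (a sketch only; the real code is
 * arch specific, my_arch_patch_text() is hypothetical, and expected/replacement
 * would be the old and new instruction bytes for this call site):
 *
 *      unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *      if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *              return -EFAULT;
 *      if (memcmp(cur, expected, MCOUNT_INSN_SIZE) != 0)
 *              return -EINVAL;
 *      if (my_arch_patch_text((void *)rec->ip, replacement, MCOUNT_INSN_SIZE))
 *              return -EPERM;
 *      return 0;
 */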
569 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
570 /**
571  * ftrace_modify_call - convert from one addr to another (no nop)
572  * @rec: the mcount call site record
573  * @old_addr: the address expected to be currently called to
574  * @addr: the address to change to
575  *
576  * This is a very sensitive operation and great care needs
577  * to be taken by the arch.  The operation should carefully
578  * read the location, check to see if what is read is indeed
579  * what we expect it to be, and then on success of the compare,
580  * it should write to the location.
581  *
582  * The code segment at @rec->ip should be a caller to @old_addr
583  *
584  * Return must be:
585  *  0 on success
586  *  -EFAULT on error reading the location
587  *  -EINVAL on a failed compare of the contents
588  *  -EPERM  on error writing to the location
589  * Any other value will be considered a failure.
590  */
591 extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
592 			      unsigned long addr);
593 #else
594 /* Should never be called */
595 static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
596 				     unsigned long addr)
597 {
598 	return -EINVAL;
599 }
600 #endif
601 
602 /* May be defined in arch */
603 extern int ftrace_arch_read_dyn_info(char *buf, int size);
604 
605 extern int skip_trace(unsigned long ip);
606 extern void ftrace_module_init(struct module *mod);
607 extern void ftrace_module_enable(struct module *mod);
608 extern void ftrace_release_mod(struct module *mod);
609 
610 extern void ftrace_disable_daemon(void);
611 extern void ftrace_enable_daemon(void);
612 #else /* CONFIG_DYNAMIC_FTRACE */
613 static inline int skip_trace(unsigned long ip) { return 0; }
614 static inline int ftrace_force_update(void) { return 0; }
615 static inline void ftrace_disable_daemon(void) { }
616 static inline void ftrace_enable_daemon(void) { }
617 static inline void ftrace_module_init(struct module *mod) { }
618 static inline void ftrace_module_enable(struct module *mod) { }
619 static inline void ftrace_release_mod(struct module *mod) { }
620 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
621 {
622 	return -EINVAL;
623 }
624 static inline __init int unregister_ftrace_command(char *cmd_name)
625 {
626 	return -EINVAL;
627 }
628 static inline int ftrace_text_reserved(const void *start, const void *end)
629 {
630 	return 0;
631 }
632 static inline unsigned long ftrace_location(unsigned long ip)
633 {
634 	return 0;
635 }
636 
637 /*
638  * Again, users of functions that have ftrace_ops may not
639  * have them defined when ftrace is not enabled, but these
640  * functions may still be called. Use a macro instead of inline.
641  */
642 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
643 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
644 #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
645 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
646 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
647 #define ftrace_free_filter(ops) do { } while (0)
648 
649 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
650 			    size_t cnt, loff_t *ppos) { return -ENODEV; }
651 static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
652 			     size_t cnt, loff_t *ppos) { return -ENODEV; }
653 static inline int
654 ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
655 
656 static inline bool is_ftrace_trampoline(unsigned long addr)
657 {
658 	return false;
659 }
660 #endif /* CONFIG_DYNAMIC_FTRACE */
661 
662 /* totally disable ftrace - cannot re-enable after this */
663 void ftrace_kill(void);
664 
665 static inline void tracer_disable(void)
666 {
667 #ifdef CONFIG_FUNCTION_TRACER
668 	ftrace_enabled = 0;
669 #endif
670 }
671 
672 /*
673  * Ftrace disable/restore without lock. Some synchronization mechanism
674  * must be used to prevent ftrace_enabled from being changed between
675  * disable/restore.
676  */
677 static inline int __ftrace_enabled_save(void)
678 {
679 #ifdef CONFIG_FUNCTION_TRACER
680 	int saved_ftrace_enabled = ftrace_enabled;
681 	ftrace_enabled = 0;
682 	return saved_ftrace_enabled;
683 #else
684 	return 0;
685 #endif
686 }
687 
688 static inline void __ftrace_enabled_restore(int enabled)
689 {
690 #ifdef CONFIG_FUNCTION_TRACER
691 	ftrace_enabled = enabled;
692 #endif
693 }
694 
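/*
 * Typical use of the save/restore pair above (a sketch; my_atomic_section()
 * is hypothetical, and the caller provides whatever synchronization keeps
 * ftrace_enabled from changing in between):
 *
 *      int saved;
 *
 *      saved = __ftrace_enabled_save();
 *      my_atomic_section();
 *      __ftrace_enabled_restore(saved);
 */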
695 /* All archs should have this, but we define it for consistency */
696 #ifndef ftrace_return_address0
697 # define ftrace_return_address0 __builtin_return_address(0)
698 #endif
699 
700 /* Archs may use other ways for ADDR1 and beyond */
701 #ifndef ftrace_return_address
702 # ifdef CONFIG_FRAME_POINTER
703 #  define ftrace_return_address(n) __builtin_return_address(n)
704 # else
705 #  define ftrace_return_address(n) 0UL
706 # endif
707 #endif
708 
709 #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
710 #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
711 #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
712 #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
713 #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
714 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
715 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
716 
717 static inline unsigned long get_lock_parent_ip(void)
718 {
719 	unsigned long addr = CALLER_ADDR0;
720 
721 	if (!in_lock_functions(addr))
722 		return addr;
723 	addr = CALLER_ADDR1;
724 	if (!in_lock_functions(addr))
725 		return addr;
726 	return CALLER_ADDR2;
727 }
728 
729 #ifdef CONFIG_IRQSOFF_TRACER
730   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
731   extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
732 #else
733   static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
734   static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
735 #endif
736 
737 #if defined(CONFIG_PREEMPT_TRACER) || \
738 	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
739   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
740   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
741 #else
742 /*
743  * Use defines instead of static inlines because some arches will generate
744  * code for the CALLER_ADDR macros, when we really want these to be a real nop.
745  */
746 # define trace_preempt_on(a0, a1) do { } while (0)
747 # define trace_preempt_off(a0, a1) do { } while (0)
748 #endif
749 
750 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
751 extern void ftrace_init(void);
752 #else
753 static inline void ftrace_init(void) { }
754 #endif
755 
756 /*
757  * Structure that defines an entry function trace.
758  * It's already packed but the attribute "packed" is needed
759  * to remove extra padding at the end.
760  */
761 struct ftrace_graph_ent {
762 	unsigned long func; /* Current function */
763 	int depth;
764 } __packed;
765 
766 /*
767  * Structure that defines a return function trace.
768  * It's already packed but the attribute "packed" is needed
769  * to remove extra padding at the end.
770  */
771 struct ftrace_graph_ret {
772 	unsigned long func; /* Current function */
773 	/* Number of functions that overran the depth limit for current task */
774 	unsigned long overrun;
775 	unsigned long long calltime;
776 	unsigned long long rettime;
777 	int depth;
778 } __packed;
779 
780 /* Type of the callback handlers for tracing function graph */
781 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
782 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
783 
784 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
785 
786 /* for init task */
787 #define INIT_FTRACE_GRAPH		.ret_stack = NULL,
788 
789 /*
790  * Stack of return addresses for functions
791  * of a thread.
792  * Used in struct thread_info
793  */
794 struct ftrace_ret_stack {
795 	unsigned long ret;
796 	unsigned long func;
797 	unsigned long long calltime;
798 #ifdef CONFIG_FUNCTION_PROFILER
799 	unsigned long long subtime;
800 #endif
801 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
802 	unsigned long fp;
803 #endif
804 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
805 	unsigned long *retp;
806 #endif
807 };
808 
809 /*
810  * Primary handler of a function return.
810  * It relies on ftrace_return_to_handler.
812  * Defined in entry_32/64.S
813  */
814 extern void return_to_handler(void);
815 
816 extern int
817 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
818 			 unsigned long frame_pointer, unsigned long *retp);
819 
820 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
821 				    unsigned long ret, unsigned long *retp);
822 
823 /*
824  * Sometimes we don't want to trace a function with the function
824  * graph tracer but we still want it to be traced by the usual function
826  * tracer if the function graph tracer is not configured.
827  */
828 #define __notrace_funcgraph		notrace
829 
830 #define FTRACE_NOTRACE_DEPTH 65536
831 #define FTRACE_RETFUNC_DEPTH 50
832 #define FTRACE_RETSTACK_ALLOC_SIZE 32
833 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
834 				trace_func_graph_ent_t entryfunc);
835 
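/*
 * Illustrative sketch of the graph tracer hooks (my_graph_entry and
 * my_graph_return are hypothetical). The entry handler returns nonzero to
 * trace the call and 0 to skip it; the return handler sees the timing filled
 * in by the core code:
 *
 *      static int my_graph_entry(struct ftrace_graph_ent *ent)
 *      {
 *              return 1;
 *      }
 *
 *      static void my_graph_return(struct ftrace_graph_ret *ret)
 *      {
 *      }
 *
 *      register_ftrace_graph(my_graph_return, my_graph_entry);
 *
 * and later:
 *
 *      unregister_ftrace_graph();
 */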
836 extern bool ftrace_graph_is_dead(void);
837 extern void ftrace_graph_stop(void);
838 
839 /* The current handlers in use */
840 extern trace_func_graph_ret_t ftrace_graph_return;
841 extern trace_func_graph_ent_t ftrace_graph_entry;
842 
843 extern void unregister_ftrace_graph(void);
844 
845 extern void ftrace_graph_init_task(struct task_struct *t);
846 extern void ftrace_graph_exit_task(struct task_struct *t);
847 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
848 
849 static inline int task_curr_ret_stack(struct task_struct *t)
850 {
851 	return t->curr_ret_stack;
852 }
853 
854 static inline void pause_graph_tracing(void)
855 {
856 	atomic_inc(&current->tracing_graph_pause);
857 }
858 
859 static inline void unpause_graph_tracing(void)
860 {
861 	atomic_dec(&current->tracing_graph_pause);
862 }
863 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
864 
865 #define __notrace_funcgraph
866 #define INIT_FTRACE_GRAPH
867 
868 static inline void ftrace_graph_init_task(struct task_struct *t) { }
869 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
870 static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
871 
872 static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
873 			  trace_func_graph_ent_t entryfunc)
874 {
875 	return -1;
876 }
877 static inline void unregister_ftrace_graph(void) { }
878 
879 static inline int task_curr_ret_stack(struct task_struct *tsk)
880 {
881 	return -1;
882 }
883 
884 static inline unsigned long
885 ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
886 		      unsigned long *retp)
887 {
888 	return ret;
889 }
890 
891 static inline void pause_graph_tracing(void) { }
892 static inline void unpause_graph_tracing(void) { }
893 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
894 
895 #ifdef CONFIG_TRACING
896 
897 /* flags for current->trace */
898 enum {
899 	TSK_TRACE_FL_TRACE_BIT	= 0,
900 	TSK_TRACE_FL_GRAPH_BIT	= 1,
901 };
902 enum {
903 	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
904 	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
905 };
906 
907 static inline void set_tsk_trace_trace(struct task_struct *tsk)
908 {
909 	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
910 }
911 
912 static inline void clear_tsk_trace_trace(struct task_struct *tsk)
913 {
914 	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
915 }
916 
917 static inline int test_tsk_trace_trace(struct task_struct *tsk)
918 {
919 	return tsk->trace & TSK_TRACE_FL_TRACE;
920 }
921 
922 static inline void set_tsk_trace_graph(struct task_struct *tsk)
923 {
924 	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
925 }
926 
927 static inline void clear_tsk_trace_graph(struct task_struct *tsk)
928 {
929 	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
930 }
931 
932 static inline int test_tsk_trace_graph(struct task_struct *tsk)
933 {
934 	return tsk->trace & TSK_TRACE_FL_GRAPH;
935 }
936 
937 enum ftrace_dump_mode;
938 
939 extern enum ftrace_dump_mode ftrace_dump_on_oops;
940 extern int tracepoint_printk;
941 
942 extern void disable_trace_on_warning(void);
943 extern int __disable_trace_on_warning;
944 
945 #ifdef CONFIG_PREEMPT
946 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
947 #endif
948 
949 #else /* CONFIG_TRACING */
950 static inline void  disable_trace_on_warning(void) { }
951 #endif /* CONFIG_TRACING */
952 
953 #ifndef INIT_TRACE_RECURSION
954 #define INIT_TRACE_RECURSION
955 #endif
956 
957 #ifdef CONFIG_FTRACE_SYSCALLS
958 
959 unsigned long arch_syscall_addr(int nr);
960 
961 #endif /* CONFIG_FTRACE_SYSCALLS */
962 
963 #endif /* _LINUX_FTRACE_H */
964