// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our options */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}

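/*
 * Map the currently set option bits to the matching trace callback.
 * Returns NULL for combinations that have no handler.
 */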
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

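/*
 * Lazily allocate the per-CPU state used to merge repeated calls to
 * the same function. Returns false only if that allocation fails.
 */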
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

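/*
 * The plain function callback: record one function entry event,
 * guarded by the ftrace recursion lock and the per-CPU "disabled"
 * counter.
 */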
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx_dec();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

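/*
 * If this call repeats the previously recorded <ip, parent_ip> pair,
 * just bump the per-CPU repeat count (it stops at U16_MAX) and note
 * the timestamp; no event is written for the repeat itself.
 */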
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

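/*
 * Flush a pending repeat count as a single "last repeats" event and
 * start tracking the new <ip, parent_ip> pair.
 */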
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	trace_ctx = tracing_gen_ctx_dec();
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

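/*
 * Tracer options, toggled at run time through tracefs while the
 * function tracer is active, e.g.:
 *
 *   echo func_stack_trace > trace_options
 */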
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

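/*
 * function_enabled is kept clear until the ftrace_ops is fully
 * registered, so the callbacks above stay nops while registration
 * is still in progress.
 */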
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

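/*
 * Called when a tracer option bit is flipped. Changing the callback
 * requires a full unregister/register cycle on tr->ops.
 */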
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
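/*
 * The probes below back the traceon/traceoff/stacktrace/dump/cpudump
 * commands of set_ftrace_filter, e.g.:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'schedule:stacktrace:5' > set_ftrace_filter
 */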
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before updating the counter to
	 * one less than its old value. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	__trace_stack(tr, tracing_gen_ctx_dec(), FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

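/*
 * Decrement the probe's trigger count, if one was given. Returns 1
 * if the probe should still fire, 0 once the count is used up. Note
 * that unlike ftrace_stacktrace_count() this update is not atomic.
 */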
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

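/*
 * Shared seq_file helper for listing active probes; output looks like
 * "<function>:<command>:count=<n>" or "<function>:<command>:unlimited".
 */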
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

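/*
 * Shared init/free callbacks for the counted probes: the counter
 * lives in an ftrace_func_mapper keyed by the probed function's ip.
 */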
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

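/*
 * Common argument parsing for all probe commands. The accepted
 * syntax is:
 *
 *   <function>:<command>[:<count>]
 *
 * and a leading '!' removes a previously registered probe.
 */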
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

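/*
 * Register all of the above commands, unwinding in reverse order if
 * any registration fails.
 */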
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}