1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   *
4   * Function graph tracer.
5   * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
6   * Mostly borrowed from function tracer which
7   * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
8   *
9   */
10  #include <linux/uaccess.h>
11  #include <linux/ftrace.h>
12  #include <linux/interrupt.h>
13  #include <linux/slab.h>
14  #include <linux/fs.h>
15  
16  #include "trace.h"
17  #include "trace_output.h"
18  
19  /* When set, irq functions will be ignored */
20  static int ftrace_graph_skip_irqs;
21  
22  struct fgraph_cpu_data {
23  	pid_t		last_pid;
24  	int		depth;
25  	int		depth_irq;
26  	int		ignore;
27  	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
28  };
29  
30  struct fgraph_data {
31  	struct fgraph_cpu_data __percpu *cpu_data;
32  
33  	/* Place to preserve last processed entry. */
34  	struct ftrace_graph_ent_entry	ent;
35  	struct ftrace_graph_ret_entry	ret;
36  	int				failed;
37  	int				cpu;
38  };
39  
40  #define TRACE_GRAPH_INDENT	2
41  
42  unsigned int fgraph_max_depth;
43  
44  static struct tracer_opt trace_opts[] = {
45  	/* Display overruns? (for self-debug purposes) */
46  	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
47  	/* Display CPU? */
48  	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
49  	/* Display overhead? */
50  	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
51  	/* Display proc name/pid */
52  	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
53  	/* Display duration of execution */
54  	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
55  	/* Display absolute time of an entry */
56  	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
57  	/* Display interrupts */
58  	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
59  	/* Display function name after trailing } */
60  	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
61  	/* Include sleep time (scheduled out) between entry and return */
62  	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
63  
64  #ifdef CONFIG_FUNCTION_PROFILER
65  	/* Include time within nested functions */
66  	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
67  #endif
68  
69  	{ } /* Empty entry */
70  };
71  
72  static struct tracer_flags tracer_flags = {
73  	/* Don't display overruns, proc, or tail by default */
74  	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
75  	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
76  	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
77  	.opts = trace_opts
78  };
79  
80  static struct trace_array *graph_array;
81  
82  /*
83   * The DURATION column is also used to display IRQ signs; the
84   * following values are used by print_graph_irq and others
85   * to fill in space in the DURATION column.
86   */
87  enum {
88  	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
89  	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
90  	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
91  };
92  
93  static void
94  print_graph_duration(struct trace_array *tr, unsigned long long duration,
95  		     struct trace_seq *s, u32 flags);
96  
97  int __trace_graph_entry(struct trace_array *tr,
98  				struct ftrace_graph_ent *trace,
99  				unsigned long flags,
100  				int pc)
101  {
102  	struct trace_event_call *call = &event_funcgraph_entry;
103  	struct ring_buffer_event *event;
104  	struct trace_buffer *buffer = tr->array_buffer.buffer;
105  	struct ftrace_graph_ent_entry *entry;
106  
107  	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
108  					  sizeof(*entry), flags, pc);
109  	if (!event)
110  		return 0;
111  	entry	= ring_buffer_event_data(event);
112  	entry->graph_ent			= *trace;
113  	if (!call_filter_check_discard(call, entry, buffer, event))
114  		trace_buffer_unlock_commit_nostack(buffer, event);
115  
116  	return 1;
117  }
118  
119  static inline int ftrace_graph_ignore_irqs(void)
120  {
121  	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
122  		return 0;
123  
124  	return in_irq();
125  }
126  
127  int trace_graph_entry(struct ftrace_graph_ent *trace)
128  {
129  	struct trace_array *tr = graph_array;
130  	struct trace_array_cpu *data;
131  	unsigned long flags;
132  	long disabled;
133  	int ret;
134  	int cpu;
135  	int pc;
136  
137  	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
138  		return 0;
139  
140  	/*
141  	 * Do not trace a function if it's filtered by set_graph_notrace.
142  	 * Make the index of ret stack negative to indicate that it should
143  	 * ignore further functions.  But it needs its own ret stack entry
144  	 * to recover the original index in order to continue tracing after
145  	 * returning from the function.
146  	 */
147  	if (ftrace_graph_notrace_addr(trace->func)) {
148  		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
149  		/*
150  		 * Need to return 1 to have the return called
151  		 * that will clear the NOTRACE bit.
152  		 */
153  		return 1;
154  	}
155  
156  	if (!ftrace_trace_task(tr))
157  		return 0;
158  
159  	if (ftrace_graph_ignore_func(trace))
160  		return 0;
161  
162  	if (ftrace_graph_ignore_irqs())
163  		return 0;
164  
165  	/*
166  	 * Stop here if tracing_thresh is set. We only write function return
167  	 * events to the ring buffer.
168  	 */
169  	if (tracing_thresh)
170  		return 1;
171  
172  	local_irq_save(flags);
173  	cpu = raw_smp_processor_id();
174  	data = per_cpu_ptr(tr->array_buffer.data, cpu);
175  	disabled = atomic_inc_return(&data->disabled);
176  	if (likely(disabled == 1)) {
177  		pc = preempt_count();
178  		ret = __trace_graph_entry(tr, trace, flags, pc);
179  	} else {
180  		ret = 0;
181  	}
182  
183  	atomic_dec(&data->disabled);
184  	local_irq_restore(flags);
185  
186  	return ret;
187  }
188  
189  static void
190  __trace_graph_function(struct trace_array *tr,
191  		unsigned long ip, unsigned long flags, int pc)
192  {
193  	u64 time = trace_clock_local();
194  	struct ftrace_graph_ent ent = {
195  		.func  = ip,
196  		.depth = 0,
197  	};
198  	struct ftrace_graph_ret ret = {
199  		.func     = ip,
200  		.depth    = 0,
201  		.calltime = time,
202  		.rettime  = time,
203  	};
204  
205  	__trace_graph_entry(tr, &ent, flags, pc);
206  	__trace_graph_return(tr, &ret, flags, pc);
207  }
208  
209  void
210  trace_graph_function(struct trace_array *tr,
211  		unsigned long ip, unsigned long parent_ip,
212  		unsigned long flags, int pc)
213  {
214  	__trace_graph_function(tr, ip, flags, pc);
215  }
216  
217  void __trace_graph_return(struct trace_array *tr,
218  				struct ftrace_graph_ret *trace,
219  				unsigned long flags,
220  				int pc)
221  {
222  	struct trace_event_call *call = &event_funcgraph_exit;
223  	struct ring_buffer_event *event;
224  	struct trace_buffer *buffer = tr->array_buffer.buffer;
225  	struct ftrace_graph_ret_entry *entry;
226  
227  	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
228  					  sizeof(*entry), flags, pc);
229  	if (!event)
230  		return;
231  	entry	= ring_buffer_event_data(event);
232  	entry->ret				= *trace;
233  	if (!call_filter_check_discard(call, entry, buffer, event))
234  		trace_buffer_unlock_commit_nostack(buffer, event);
235  }
236  
237  void trace_graph_return(struct ftrace_graph_ret *trace)
238  {
239  	struct trace_array *tr = graph_array;
240  	struct trace_array_cpu *data;
241  	unsigned long flags;
242  	long disabled;
243  	int cpu;
244  	int pc;
245  
246  	ftrace_graph_addr_finish(trace);
247  
248  	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
249  		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
250  		return;
251  	}
252  
253  	local_irq_save(flags);
254  	cpu = raw_smp_processor_id();
255  	data = per_cpu_ptr(tr->array_buffer.data, cpu);
256  	disabled = atomic_inc_return(&data->disabled);
257  	if (likely(disabled == 1)) {
258  		pc = preempt_count();
259  		__trace_graph_return(tr, trace, flags, pc);
260  	}
261  	atomic_dec(&data->disabled);
262  	local_irq_restore(flags);
263  }
264  
265  void set_graph_array(struct trace_array *tr)
266  {
267  	graph_array = tr;
268  
269  	/* Make graph_array visible before we start tracing */
270  
271  	smp_mb();
272  }
273  
274  static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
275  {
276  	ftrace_graph_addr_finish(trace);
277  
278  	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
279  		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
280  		return;
281  	}
282  
283  	if (tracing_thresh &&
284  	    (trace->rettime - trace->calltime < tracing_thresh))
285  		return;
286  	else
287  		trace_graph_return(trace);
288  }
289  
290  static struct fgraph_ops funcgraph_thresh_ops = {
291  	.entryfunc = &trace_graph_entry,
292  	.retfunc = &trace_graph_thresh_return,
293  };
294  
295  static struct fgraph_ops funcgraph_ops = {
296  	.entryfunc = &trace_graph_entry,
297  	.retfunc = &trace_graph_return,
298  };
299  
300  static int graph_trace_init(struct trace_array *tr)
301  {
302  	int ret;
303  
304  	set_graph_array(tr);
305  	if (tracing_thresh)
306  		ret = register_ftrace_graph(&funcgraph_thresh_ops);
307  	else
308  		ret = register_ftrace_graph(&funcgraph_ops);
309  	if (ret)
310  		return ret;
311  	tracing_start_cmdline_record();
312  
313  	return 0;
314  }
315  
316  static void graph_trace_reset(struct trace_array *tr)
317  {
318  	tracing_stop_cmdline_record();
319  	if (tracing_thresh)
320  		unregister_ftrace_graph(&funcgraph_thresh_ops);
321  	else
322  		unregister_ftrace_graph(&funcgraph_ops);
323  }
324  
325  static int graph_trace_update_thresh(struct trace_array *tr)
326  {
327  	graph_trace_reset(tr);
328  	return graph_trace_init(tr);
329  }
330  
331  static int max_bytes_for_cpu;
332  
333  static void print_graph_cpu(struct trace_seq *s, int cpu)
334  {
335  	/*
336  	 * Start with a space character - to make it stand out
337  	 * to the right a bit when trace output is pasted into
338  	 * email:
339  	 */
340  	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
341  }
342  
343  #define TRACE_GRAPH_PROCINFO_LENGTH	14
344  
345  static void print_graph_proc(struct trace_seq *s, pid_t pid)
346  {
347  	char comm[TASK_COMM_LEN];
348  	/* sign + log10(MAX_INT) + '\0' */
349  	char pid_str[11];
350  	int spaces = 0;
351  	int len;
352  	int i;
353  
354  	trace_find_cmdline(pid, comm);
355  	comm[7] = '\0';
356  	sprintf(pid_str, "%d", pid);
357  
358  	/* 1 stands for the "-" character */
359  	len = strlen(comm) + strlen(pid_str) + 1;
360  
361  	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
362  		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
363  
364  	/* First spaces to align center */
365  	for (i = 0; i < spaces / 2; i++)
366  		trace_seq_putc(s, ' ');
367  
368  	trace_seq_printf(s, "%s-%s", comm, pid_str);
369  
370  	/* Last spaces to align center */
371  	for (i = 0; i < spaces - (spaces / 2); i++)
372  		trace_seq_putc(s, ' ');
373  }
374  
375  
376  static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
377  {
378  	trace_seq_putc(s, ' ');
379  	trace_print_lat_fmt(s, entry);
380  	trace_seq_puts(s, " | ");
381  }
382  
383  /* If the pid changed since the last trace, output this event */
384  static void
385  verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
386  {
387  	pid_t prev_pid;
388  	pid_t *last_pid;
389  
390  	if (!data)
391  		return;
392  
393  	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
394  
395  	if (*last_pid == pid)
396  		return;
397  
398  	prev_pid = *last_pid;
399  	*last_pid = pid;
400  
401  	if (prev_pid == -1)
402  		return;
403  /*
404   * Context-switch trace line:
405  
406   ------------------------------------------
407   | 1)  migration/0--1  =>  sshd-1755
408   ------------------------------------------
409  
410   */
411  	trace_seq_puts(s, " ------------------------------------------\n");
412  	print_graph_cpu(s, cpu);
413  	print_graph_proc(s, prev_pid);
414  	trace_seq_puts(s, " => ");
415  	print_graph_proc(s, pid);
416  	trace_seq_puts(s, "\n ------------------------------------------\n\n");
417  }
418  
419  static struct ftrace_graph_ret_entry *
420  get_return_for_leaf(struct trace_iterator *iter,
421  		struct ftrace_graph_ent_entry *curr)
422  {
423  	struct fgraph_data *data = iter->private;
424  	struct ring_buffer_iter *ring_iter = NULL;
425  	struct ring_buffer_event *event;
426  	struct ftrace_graph_ret_entry *next;
427  
428  	/*
429  	 * If the previous output failed to write to the seq buffer,
430  	 * then we just reuse the data from before.
431  	 */
432  	if (data && data->failed) {
433  		curr = &data->ent;
434  		next = &data->ret;
435  	} else {
436  
437  		ring_iter = trace_buffer_iter(iter, iter->cpu);
438  
439  		/* First peek to compare current entry and the next one */
440  		if (ring_iter)
441  			event = ring_buffer_iter_peek(ring_iter, NULL);
442  		else {
443  			/*
444  			 * We need to consume the current entry to see
445  			 * the next one.
446  			 */
447  			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
448  					    NULL, NULL);
449  			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
450  						 NULL, NULL);
451  		}
452  
453  		if (!event)
454  			return NULL;
455  
456  		next = ring_buffer_event_data(event);
457  
458  		if (data) {
459  			/*
460  			 * Save current and next entries for later reference
461  			 * if the output fails.
462  			 */
463  			data->ent = *curr;
464  			/*
465  			 * If the next event is not a return type, then
466  			 * we only care about what type it is. Otherwise we can
467  			 * safely copy the entire event.
468  			 */
469  			if (next->ent.type == TRACE_GRAPH_RET)
470  				data->ret = *next;
471  			else
472  				data->ret.ent.type = next->ent.type;
473  		}
474  	}
475  
476  	if (next->ent.type != TRACE_GRAPH_RET)
477  		return NULL;
478  
479  	if (curr->ent.pid != next->ent.pid ||
480  			curr->graph_ent.func != next->ret.func)
481  		return NULL;
482  
483  	/* this is a leaf, now advance the iterator */
484  	if (ring_iter)
485  		ring_buffer_iter_advance(ring_iter);
486  
487  	return next;
488  }
489  
490  static void print_graph_abs_time(u64 t, struct trace_seq *s)
491  {
492  	unsigned long usecs_rem;
493  
494  	usecs_rem = do_div(t, NSEC_PER_SEC);
495  	usecs_rem /= 1000;
496  
497  	trace_seq_printf(s, "%5lu.%06lu |  ",
498  			 (unsigned long)t, usecs_rem);
499  }
500  
501  static void
502  print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
503  {
504  	unsigned long long usecs;
505  
506  	usecs = iter->ts - iter->array_buffer->time_start;
507  	do_div(usecs, NSEC_PER_USEC);
508  
509  	trace_seq_printf(s, "%9llu us |  ", usecs);
510  }
511  
512  static void
513  print_graph_irq(struct trace_iterator *iter, unsigned long addr,
514  		enum trace_type type, int cpu, pid_t pid, u32 flags)
515  {
516  	struct trace_array *tr = iter->tr;
517  	struct trace_seq *s = &iter->seq;
518  	struct trace_entry *ent = iter->ent;
519  
520  	if (addr < (unsigned long)__irqentry_text_start ||
521  		addr >= (unsigned long)__irqentry_text_end)
522  		return;
523  
524  	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
525  		/* Absolute time */
526  		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
527  			print_graph_abs_time(iter->ts, s);
528  
529  		/* Relative time */
530  		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
531  			print_graph_rel_time(iter, s);
532  
533  		/* Cpu */
534  		if (flags & TRACE_GRAPH_PRINT_CPU)
535  			print_graph_cpu(s, cpu);
536  
537  		/* Proc */
538  		if (flags & TRACE_GRAPH_PRINT_PROC) {
539  			print_graph_proc(s, pid);
540  			trace_seq_puts(s, " | ");
541  		}
542  
543  		/* Latency format */
544  		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
545  			print_graph_lat_fmt(s, ent);
546  	}
547  
548  	/* No overhead */
549  	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
550  
551  	if (type == TRACE_GRAPH_ENT)
552  		trace_seq_puts(s, "==========>");
553  	else
554  		trace_seq_puts(s, "<==========");
555  
556  	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
557  	trace_seq_putc(s, '\n');
558  }
559  
560  void
561  trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
562  {
563  	unsigned long nsecs_rem = do_div(duration, 1000);
564  	/* log10(ULONG_MAX) + '\0' */
565  	char usecs_str[21];
566  	char nsecs_str[5];
567  	int len;
568  	int i;
569  
570  	sprintf(usecs_str, "%lu", (unsigned long) duration);
571  
572  	/* Print usecs */
573  	trace_seq_printf(s, "%s", usecs_str);
574  
575  	len = strlen(usecs_str);
576  
577  	/* Print nsecs (we don't want to exceed 7 digits) */
578  	if (len < 7) {
579  		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
580  
581  		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
582  		trace_seq_printf(s, ".%s", nsecs_str);
583  		len += strlen(nsecs_str) + 1;
584  	}
585  
586  	trace_seq_puts(s, " us ");
587  
588  	/* Print remaining spaces to fit the row's width */
589  	for (i = len; i < 8; i++)
590  		trace_seq_putc(s, ' ');
591  }
592  
593  static void
594  print_graph_duration(struct trace_array *tr, unsigned long long duration,
595  		     struct trace_seq *s, u32 flags)
596  {
597  	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
598  	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
599  		return;
600  
601  	/* No real data, just filling the column with spaces */
602  	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
603  	case FLAGS_FILL_FULL:
604  		trace_seq_puts(s, "              |  ");
605  		return;
606  	case FLAGS_FILL_START:
607  		trace_seq_puts(s, "  ");
608  		return;
609  	case FLAGS_FILL_END:
610  		trace_seq_puts(s, " |");
611  		return;
612  	}
613  
614  	/* Signal an overhead of execution time to the output */
615  	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
616  		trace_seq_printf(s, "%c ", trace_find_mark(duration));
617  	else
618  		trace_seq_puts(s, "  ");
619  
620  	trace_print_graph_duration(duration, s);
621  	trace_seq_puts(s, "|  ");
622  }
623  
624  /* Case of a leaf function on its call entry */
625  static enum print_line_t
626  print_graph_entry_leaf(struct trace_iterator *iter,
627  		struct ftrace_graph_ent_entry *entry,
628  		struct ftrace_graph_ret_entry *ret_entry,
629  		struct trace_seq *s, u32 flags)
630  {
631  	struct fgraph_data *data = iter->private;
632  	struct trace_array *tr = iter->tr;
633  	struct ftrace_graph_ret *graph_ret;
634  	struct ftrace_graph_ent *call;
635  	unsigned long long duration;
636  	int cpu = iter->cpu;
637  	int i;
638  
639  	graph_ret = &ret_entry->ret;
640  	call = &entry->graph_ent;
641  	duration = graph_ret->rettime - graph_ret->calltime;
642  
643  	if (data) {
644  		struct fgraph_cpu_data *cpu_data;
645  
646  		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
647  
648  		/*
649  		 * Comments display at depth + 1. Since
650  		 * this is a leaf function, keep the comments
651  		 * equal to this depth.
652  		 */
653  		cpu_data->depth = call->depth - 1;
654  
655  		/* No need to keep this function around for this depth */
656  		if (call->depth < FTRACE_RETFUNC_DEPTH &&
657  		    !WARN_ON_ONCE(call->depth < 0))
658  			cpu_data->enter_funcs[call->depth] = 0;
659  	}
660  
661  	/* Overhead and duration */
662  	print_graph_duration(tr, duration, s, flags);
663  
664  	/* Function */
665  	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
666  		trace_seq_putc(s, ' ');
667  
668  	trace_seq_printf(s, "%ps();\n", (void *)call->func);
669  
670  	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
671  			cpu, iter->ent->pid, flags);
672  
673  	return trace_handle_return(s);
674  }
675  
676  static enum print_line_t
677  print_graph_entry_nested(struct trace_iterator *iter,
678  			 struct ftrace_graph_ent_entry *entry,
679  			 struct trace_seq *s, int cpu, u32 flags)
680  {
681  	struct ftrace_graph_ent *call = &entry->graph_ent;
682  	struct fgraph_data *data = iter->private;
683  	struct trace_array *tr = iter->tr;
684  	int i;
685  
686  	if (data) {
687  		struct fgraph_cpu_data *cpu_data;
688  		int cpu = iter->cpu;
689  
690  		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
691  		cpu_data->depth = call->depth;
692  
693  		/* Save this function pointer to see if the exit matches */
694  		if (call->depth < FTRACE_RETFUNC_DEPTH &&
695  		    !WARN_ON_ONCE(call->depth < 0))
696  			cpu_data->enter_funcs[call->depth] = call->func;
697  	}
698  
699  	/* No time */
700  	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
701  
702  	/* Function */
703  	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
704  		trace_seq_putc(s, ' ');
705  
706  	trace_seq_printf(s, "%ps() {\n", (void *)call->func);
707  
708  	if (trace_seq_has_overflowed(s))
709  		return TRACE_TYPE_PARTIAL_LINE;
710  
711  	/*
712  	 * we already consumed the current entry to check the next one
713  	 * and see if this is a leaf.
714  	 */
715  	return TRACE_TYPE_NO_CONSUME;
716  }
717  
718  static void
719  print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
720  		     int type, unsigned long addr, u32 flags)
721  {
722  	struct fgraph_data *data = iter->private;
723  	struct trace_entry *ent = iter->ent;
724  	struct trace_array *tr = iter->tr;
725  	int cpu = iter->cpu;
726  
727  	/* Pid */
728  	verif_pid(s, ent->pid, cpu, data);
729  
730  	if (type)
731  		/* Interrupt */
732  		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
733  
734  	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
735  		return;
736  
737  	/* Absolute time */
738  	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
739  		print_graph_abs_time(iter->ts, s);
740  
741  	/* Relative time */
742  	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
743  		print_graph_rel_time(iter, s);
744  
745  	/* Cpu */
746  	if (flags & TRACE_GRAPH_PRINT_CPU)
747  		print_graph_cpu(s, cpu);
748  
749  	/* Proc */
750  	if (flags & TRACE_GRAPH_PRINT_PROC) {
751  		print_graph_proc(s, ent->pid);
752  		trace_seq_puts(s, " | ");
753  	}
754  
755  	/* Latency format */
756  	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
757  		print_graph_lat_fmt(s, ent);
758  
759  	return;
760  }
761  
762  /*
763   * Entry check for irq code
764   *
765   * returns 1 if
766   *  - we are inside irq code
767   *  - we just entered irq code
768   *
769   * returns 0 if
770   *  - funcgraph-interrupts option is set
771   *  - we are not inside irq code
772   */
773  static int
774  check_irq_entry(struct trace_iterator *iter, u32 flags,
775  		unsigned long addr, int depth)
776  {
777  	int cpu = iter->cpu;
778  	int *depth_irq;
779  	struct fgraph_data *data = iter->private;
780  
781  	/*
782  	 * If we are either displaying irqs, or we got called as
783  	 * a graph event and private data does not exist,
784  	 * then we bypass the irq check.
785  	 */
786  	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
787  	    (!data))
788  		return 0;
789  
790  	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
791  
792  	/*
793  	 * We are inside the irq code
794  	 */
795  	if (*depth_irq >= 0)
796  		return 1;
797  
798  	if ((addr < (unsigned long)__irqentry_text_start) ||
799  	    (addr >= (unsigned long)__irqentry_text_end))
800  		return 0;
801  
802  	/*
803  	 * We are entering irq code.
804  	 */
805  	*depth_irq = depth;
806  	return 1;
807  }
808  
809  /*
810   * Return check for irq code
811   *
812   * returns 1 if
813   *  - we are inside irq code
814   *  - we just left irq code
815   *
816   * returns 0 if
817   *  - funcgraph-interrupts option is set
818   *  - we are not inside irq code
819   */
820  static int
821  check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
822  {
823  	int cpu = iter->cpu;
824  	int *depth_irq;
825  	struct fgraph_data *data = iter->private;
826  
827  	/*
828  	 * If we are either displaying irqs, or we got called as
829  	 * a graph event and private data does not exist,
830  	 * then we bypass the irq check.
831  	 */
832  	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
833  	    (!data))
834  		return 0;
835  
836  	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
837  
838  	/*
839  	 * We are not inside the irq code.
840  	 */
841  	if (*depth_irq == -1)
842  		return 0;
843  
844  	/*
845  	 * We are inside the irq code, and this is the return from the entry.
846  	 * Let's not trace it and clear the entry depth, since
847  	 * we are out of irq code.
848  	 *
849  	 * This condition ensures that we 'leave the irq code' once
850  	 * we are out of the entry depth. Thus protecting us from
851  	 * the RETURN entry loss.
852  	 */
853  	if (*depth_irq >= depth) {
854  		*depth_irq = -1;
855  		return 1;
856  	}
857  
858  	/*
859  	 * We are inside the irq code, and this is not the entry.
860  	 */
861  	return 1;
862  }
863  
864  static enum print_line_t
865  print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
866  			struct trace_iterator *iter, u32 flags)
867  {
868  	struct fgraph_data *data = iter->private;
869  	struct ftrace_graph_ent *call = &field->graph_ent;
870  	struct ftrace_graph_ret_entry *leaf_ret;
871  	static enum print_line_t ret;
872  	int cpu = iter->cpu;
873  
874  	if (check_irq_entry(iter, flags, call->func, call->depth))
875  		return TRACE_TYPE_HANDLED;
876  
877  	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
878  
879  	leaf_ret = get_return_for_leaf(iter, field);
880  	if (leaf_ret)
881  		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
882  	else
883  		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
884  
885  	if (data) {
886  		/*
887  		 * If we failed to write our output, then we need to make
888  		 * note of it, because we already consumed our entry.
889  		 */
890  		if (s->full) {
891  			data->failed = 1;
892  			data->cpu = cpu;
893  		} else
894  			data->failed = 0;
895  	}
896  
897  	return ret;
898  }
899  
900  static enum print_line_t
901  print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
902  		   struct trace_entry *ent, struct trace_iterator *iter,
903  		   u32 flags)
904  {
905  	unsigned long long duration = trace->rettime - trace->calltime;
906  	struct fgraph_data *data = iter->private;
907  	struct trace_array *tr = iter->tr;
908  	pid_t pid = ent->pid;
909  	int cpu = iter->cpu;
910  	int func_match = 1;
911  	int i;
912  
913  	if (check_irq_return(iter, flags, trace->depth))
914  		return TRACE_TYPE_HANDLED;
915  
916  	if (data) {
917  		struct fgraph_cpu_data *cpu_data;
918  		int cpu = iter->cpu;
919  
920  		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
921  
922  		/*
923  		 * Comments display at depth + 1. This is the
924  		 * return from a function, so we now want the comments
925  		 * to display at the same level as the bracket.
926  		 */
927  		cpu_data->depth = trace->depth - 1;
928  
929  		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
930  		    !WARN_ON_ONCE(trace->depth < 0)) {
931  			if (cpu_data->enter_funcs[trace->depth] != trace->func)
932  				func_match = 0;
933  			cpu_data->enter_funcs[trace->depth] = 0;
934  		}
935  	}
936  
937  	print_graph_prologue(iter, s, 0, 0, flags);
938  
939  	/* Overhead and duration */
940  	print_graph_duration(tr, duration, s, flags);
941  
942  	/* Closing brace */
943  	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
944  		trace_seq_putc(s, ' ');
945  
946  	/*
947  	 * If the return function does not have a matching entry,
948  	 * then the entry was lost. Instead of just printing
949  	 * the '}' and letting the user guess what function this
950  	 * belongs to, write out the function name. Always do
951  	 * that if the funcgraph-tail option is enabled.
952  	 */
953  	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
954  		trace_seq_puts(s, "}\n");
955  	else
956  		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
957  
958  	/* Overrun */
959  	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
960  		trace_seq_printf(s, " (Overruns: %lu)\n",
961  				 trace->overrun);
962  
963  	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
964  			cpu, pid, flags);
965  
966  	return trace_handle_return(s);
967  }
968  
969  static enum print_line_t
970  print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
971  		    struct trace_iterator *iter, u32 flags)
972  {
973  	struct trace_array *tr = iter->tr;
974  	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
975  	struct fgraph_data *data = iter->private;
976  	struct trace_event *event;
977  	int depth = 0;
978  	int ret;
979  	int i;
980  
981  	if (data)
982  		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
983  
984  	print_graph_prologue(iter, s, 0, 0, flags);
985  
986  	/* No time */
987  	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
988  
989  	/* Indentation */
990  	if (depth > 0)
991  		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
992  			trace_seq_putc(s, ' ');
993  
994  	/* The comment */
995  	trace_seq_puts(s, "/* ");
996  
997  	switch (iter->ent->type) {
998  	case TRACE_BPUTS:
999  		ret = trace_print_bputs_msg_only(iter);
1000  		if (ret != TRACE_TYPE_HANDLED)
1001  			return ret;
1002  		break;
1003  	case TRACE_BPRINT:
1004  		ret = trace_print_bprintk_msg_only(iter);
1005  		if (ret != TRACE_TYPE_HANDLED)
1006  			return ret;
1007  		break;
1008  	case TRACE_PRINT:
1009  		ret = trace_print_printk_msg_only(iter);
1010  		if (ret != TRACE_TYPE_HANDLED)
1011  			return ret;
1012  		break;
1013  	default:
1014  		event = ftrace_find_event(ent->type);
1015  		if (!event)
1016  			return TRACE_TYPE_UNHANDLED;
1017  
1018  		ret = event->funcs->trace(iter, sym_flags, event);
1019  		if (ret != TRACE_TYPE_HANDLED)
1020  			return ret;
1021  	}
1022  
1023  	if (trace_seq_has_overflowed(s))
1024  		goto out;
1025  
1026  	/* Strip ending newline */
1027  	if (s->buffer[s->seq.len - 1] == '\n') {
1028  		s->buffer[s->seq.len - 1] = '\0';
1029  		s->seq.len--;
1030  	}
1031  
1032  	trace_seq_puts(s, " */\n");
1033   out:
1034  	return trace_handle_return(s);
1035  }
1036  
1037  
1038  enum print_line_t
1039  print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1040  {
1041  	struct ftrace_graph_ent_entry *field;
1042  	struct fgraph_data *data = iter->private;
1043  	struct trace_entry *entry = iter->ent;
1044  	struct trace_seq *s = &iter->seq;
1045  	int cpu = iter->cpu;
1046  	int ret;
1047  
1048  	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1049  		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1050  		return TRACE_TYPE_HANDLED;
1051  	}
1052  
1053  	/*
1054  	 * If the last output failed, there's a possibility we need
1055  	 * to print out the missing entry which would never go out.
1056  	 */
1057  	if (data && data->failed) {
1058  		field = &data->ent;
1059  		iter->cpu = data->cpu;
1060  		ret = print_graph_entry(field, s, iter, flags);
1061  		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1062  			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1063  			ret = TRACE_TYPE_NO_CONSUME;
1064  		}
1065  		iter->cpu = cpu;
1066  		return ret;
1067  	}
1068  
1069  	switch (entry->type) {
1070  	case TRACE_GRAPH_ENT: {
1071  		/*
1072  		 * print_graph_entry() may consume the current event,
1073  		 * thus @field may become invalid, so we need to save it.
1074  		 * sizeof(struct ftrace_graph_ent_entry) is very small,
1075  		 * so it can be safely saved on the stack.
1076  		 */
1077  		struct ftrace_graph_ent_entry saved;
1078  		trace_assign_type(field, entry);
1079  		saved = *field;
1080  		return print_graph_entry(&saved, s, iter, flags);
1081  	}
1082  	case TRACE_GRAPH_RET: {
1083  		struct ftrace_graph_ret_entry *field;
1084  		trace_assign_type(field, entry);
1085  		return print_graph_return(&field->ret, s, entry, iter, flags);
1086  	}
1087  	case TRACE_STACK:
1088  	case TRACE_FN:
1089  		/* don't trace stack and functions as comments */
1090  		return TRACE_TYPE_UNHANDLED;
1091  
1092  	default:
1093  		return print_graph_comment(s, entry, iter, flags);
1094  	}
1095  
1096  	return TRACE_TYPE_HANDLED;
1097  }
1098  
1099  static enum print_line_t
1100  print_graph_function(struct trace_iterator *iter)
1101  {
1102  	return print_graph_function_flags(iter, tracer_flags.val);
1103  }
1104  
1105  static enum print_line_t
1106  print_graph_function_event(struct trace_iterator *iter, int flags,
1107  			   struct trace_event *event)
1108  {
1109  	return print_graph_function(iter);
1110  }
1111  
1112  static void print_lat_header(struct seq_file *s, u32 flags)
1113  {
1114  	static const char spaces[] = "                "	/* 16 spaces */
1115  		"    "					/* 4 spaces */
1116  		"                 ";			/* 17 spaces */
1117  	int size = 0;
1118  
1119  	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1120  		size += 16;
1121  	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1122  		size += 16;
1123  	if (flags & TRACE_GRAPH_PRINT_CPU)
1124  		size += 4;
1125  	if (flags & TRACE_GRAPH_PRINT_PROC)
1126  		size += 17;
1127  
1128  	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1129  	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1130  	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1131  	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1132  	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1133  }
1134  
1135  static void __print_graph_headers_flags(struct trace_array *tr,
1136  					struct seq_file *s, u32 flags)
1137  {
1138  	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1139  
1140  	if (lat)
1141  		print_lat_header(s, flags);
1142  
1143  	/* 1st line */
1144  	seq_putc(s, '#');
1145  	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1146  		seq_puts(s, "     TIME       ");
1147  	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1148  		seq_puts(s, "   REL TIME     ");
1149  	if (flags & TRACE_GRAPH_PRINT_CPU)
1150  		seq_puts(s, " CPU");
1151  	if (flags & TRACE_GRAPH_PRINT_PROC)
1152  		seq_puts(s, "  TASK/PID       ");
1153  	if (lat)
1154  		seq_puts(s, "||||   ");
1155  	if (flags & TRACE_GRAPH_PRINT_DURATION)
1156  		seq_puts(s, "  DURATION   ");
1157  	seq_puts(s, "               FUNCTION CALLS\n");
1158  
1159  	/* 2nd line */
1160  	seq_putc(s, '#');
1161  	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1162  		seq_puts(s, "      |         ");
1163  	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1164  		seq_puts(s, "      |         ");
1165  	if (flags & TRACE_GRAPH_PRINT_CPU)
1166  		seq_puts(s, " |  ");
1167  	if (flags & TRACE_GRAPH_PRINT_PROC)
1168  		seq_puts(s, "   |    |        ");
1169  	if (lat)
1170  		seq_puts(s, "||||   ");
1171  	if (flags & TRACE_GRAPH_PRINT_DURATION)
1172  		seq_puts(s, "   |   |      ");
1173  	seq_puts(s, "               |   |   |   |\n");
1174  }
1175  
1176  static void print_graph_headers(struct seq_file *s)
1177  {
1178  	print_graph_headers_flags(s, tracer_flags.val);
1179  }
1180  
1181  void print_graph_headers_flags(struct seq_file *s, u32 flags)
1182  {
1183  	struct trace_iterator *iter = s->private;
1184  	struct trace_array *tr = iter->tr;
1185  
1186  	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1187  		return;
1188  
1189  	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1190  		/* print nothing if the buffers are empty */
1191  		if (trace_empty(iter))
1192  			return;
1193  
1194  		print_trace_header(s, iter);
1195  	}
1196  
1197  	__print_graph_headers_flags(tr, s, flags);
1198  }
1199  
1200  void graph_trace_open(struct trace_iterator *iter)
1201  {
1202  	/* pid and depth on the last trace processed */
1203  	struct fgraph_data *data;
1204  	gfp_t gfpflags;
1205  	int cpu;
1206  
1207  	iter->private = NULL;
1208  
1209  	/* We can be called in atomic context via ftrace_dump() */
1210  	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1211  
1212  	data = kzalloc(sizeof(*data), gfpflags);
1213  	if (!data)
1214  		goto out_err;
1215  
1216  	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1217  	if (!data->cpu_data)
1218  		goto out_err_free;
1219  
1220  	for_each_possible_cpu(cpu) {
1221  		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1222  		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1223  		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1224  		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1225  
1226  		*pid = -1;
1227  		*depth = 0;
1228  		*ignore = 0;
1229  		*depth_irq = -1;
1230  	}
1231  
1232  	iter->private = data;
1233  
1234  	return;
1235  
1236   out_err_free:
1237  	kfree(data);
1238   out_err:
1239  	pr_warn("function graph tracer: not enough memory\n");
1240  }
1241  
1242  void graph_trace_close(struct trace_iterator *iter)
1243  {
1244  	struct fgraph_data *data = iter->private;
1245  
1246  	if (data) {
1247  		free_percpu(data->cpu_data);
1248  		kfree(data);
1249  	}
1250  }
1251  
1252  static int
1253  func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1254  {
1255  	if (bit == TRACE_GRAPH_PRINT_IRQS)
1256  		ftrace_graph_skip_irqs = !set;
1257  
1258  	if (bit == TRACE_GRAPH_SLEEP_TIME)
1259  		ftrace_graph_sleep_time_control(set);
1260  
1261  	if (bit == TRACE_GRAPH_GRAPH_TIME)
1262  		ftrace_graph_graph_time_control(set);
1263  
1264  	return 0;
1265  }
1266  
1267  static struct trace_event_functions graph_functions = {
1268  	.trace		= print_graph_function_event,
1269  };
1270  
1271  static struct trace_event graph_trace_entry_event = {
1272  	.type		= TRACE_GRAPH_ENT,
1273  	.funcs		= &graph_functions,
1274  };
1275  
1276  static struct trace_event graph_trace_ret_event = {
1277  	.type		= TRACE_GRAPH_RET,
1278  	.funcs		= &graph_functions
1279  };
1280  
1281  static struct tracer graph_trace __tracer_data = {
1282  	.name		= "function_graph",
1283  	.update_thresh	= graph_trace_update_thresh,
1284  	.open		= graph_trace_open,
1285  	.pipe_open	= graph_trace_open,
1286  	.close		= graph_trace_close,
1287  	.pipe_close	= graph_trace_close,
1288  	.init		= graph_trace_init,
1289  	.reset		= graph_trace_reset,
1290  	.print_line	= print_graph_function,
1291  	.print_header	= print_graph_headers,
1292  	.flags		= &tracer_flags,
1293  	.set_flag	= func_graph_set_flag,
1294  #ifdef CONFIG_FTRACE_SELFTEST
1295  	.selftest	= trace_selftest_startup_function_graph,
1296  #endif
1297  };
1298  
1299  
1300  static ssize_t
1301  graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1302  		  loff_t *ppos)
1303  {
1304  	unsigned long val;
1305  	int ret;
1306  
1307  	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1308  	if (ret)
1309  		return ret;
1310  
1311  	fgraph_max_depth = val;
1312  
1313  	*ppos += cnt;
1314  
1315  	return cnt;
1316  }
1317  
1318  static ssize_t
1319  graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1320  		 loff_t *ppos)
1321  {
1322  	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
1323  	int n;
1324  
1325  	n = sprintf(buf, "%d\n", fgraph_max_depth);
1326  
1327  	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1328  }
1329  
1330  static const struct file_operations graph_depth_fops = {
1331  	.open		= tracing_open_generic,
1332  	.write		= graph_depth_write,
1333  	.read		= graph_depth_read,
1334  	.llseek		= generic_file_llseek,
1335  };
1336  
1337  static __init int init_graph_tracefs(void)
1338  {
1339  	int ret;
1340  
1341  	ret = tracing_init_dentry();
1342  	if (ret)
1343  		return 0;
1344  
1345  	trace_create_file("max_graph_depth", 0644, NULL,
1346  			  NULL, &graph_depth_fops);
1347  
1348  	return 0;
1349  }
1350  fs_initcall(init_graph_tracefs);
1351  
1352  static __init int init_graph_trace(void)
1353  {
1354  	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1355  
1356  	if (!register_trace_event(&graph_trace_entry_event)) {
1357  		pr_warn("Warning: could not register graph trace events\n");
1358  		return 1;
1359  	}
1360  
1361  	if (!register_trace_event(&graph_trace_ret_event)) {
1362  		pr_warn("Warning: could not register graph trace events\n");
1363  		return 1;
1364  	}
1365  
1366  	return register_tracer(&graph_trace);
1367  }
1368  
1369  core_initcall(init_graph_trace);
1370