// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;
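/*
 * fgraph_max_depth is exposed below as the "max_graph_depth" tracefs
 * file; the function-graph core presumably consults it to cap how deep
 * a call chain is recorded (with 0 apparently meaning no limit).
 */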

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq() and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int ret;
	int cpu;

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
		/*
		 * Need to return 1 to have the return handler called,
		 * which will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		ret = __trace_graph_entry(tr, trace, trace_ctx);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
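/*
 * The atomic_inc_return()/atomic_dec() pair above acts as a per-CPU
 * reentrancy guard: only the outermost caller on a CPU sees
 * disabled == 1 and records an event, so the tracer recursing into
 * itself on that CPU cannot write nested entries.
 */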
187 
188 static void
__trace_graph_function(struct trace_array * tr,unsigned long ip,unsigned int trace_ctx)189 __trace_graph_function(struct trace_array *tr,
190 		unsigned long ip, unsigned int trace_ctx)
191 {
192 	u64 time = trace_clock_local();
193 	struct ftrace_graph_ent ent = {
194 		.func  = ip,
195 		.depth = 0,
196 	};
197 	struct ftrace_graph_ret ret = {
198 		.func     = ip,
199 		.depth    = 0,
200 		.calltime = time,
201 		.rettime  = time,
202 	};
203 
204 	__trace_graph_entry(tr, &ent, trace_ctx);
205 	__trace_graph_return(tr, &ret, trace_ctx);
206 }
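/*
 * Both halves of the synthesized pair above share a single timestamp,
 * so a call injected via trace_graph_function() shows up in the output
 * as a zero-duration leaf.
 */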

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int cpu;

	ftrace_graph_addr_finish(trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		__trace_graph_return(tr, trace, trace_ctx);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	ftrace_graph_addr_finish(trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;

	trace_graph_return(trace);
}
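/*
 * Note that when tracing_thresh is set, trace_graph_entry() records no
 * entry events at all (it returns 1 without writing anything), so with
 * these ops only return events whose duration meets the threshold ever
 * reach the ring buffer.
 */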

static struct fgraph_ops funcgraph_thresh_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_thresh_return,
};

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&funcgraph_thresh_ops);
	else
		ret = register_ftrace_graph(&funcgraph_ops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	if (tracing_thresh)
		unregister_ftrace_graph(&funcgraph_thresh_ops);
	else
		unregister_ftrace_graph(&funcgraph_ops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}
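/*
 * A threshold update has to unregister and re-register the tracer
 * because the retfunc (thresh vs. plain) is chosen only once, in
 * graph_trace_init(), based on whether tracing_thresh is set.
 */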

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* Leading spaces to center the field */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Trailing spaces to center the field */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
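/*
 * The result is a "comm-pid" token centered in a field of
 * TRACE_GRAPH_PROCINFO_LENGTH (14) columns, e.g. "  bash-2015   "
 * (example values only, with comm truncated to 7 characters above).
 */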


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}
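/*
 * In short: an entry event immediately followed by its own return event
 * (same pid, same function) denotes a leaf call, which lets the printer
 * collapse the pair into a single "func();" line with a duration.
 */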

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
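/*
 * Only addresses within the __irqentry_text section reach the output
 * above, so the "==========>" / "<==========" markers bracket hard-irq
 * entry and exit, drawn inside the DURATION column via the
 * FLAGS_FILL_START/FLAGS_FILL_END padding calls.
 */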

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
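/*
 * Worked example: duration = 12345 ns gives a do_div() quotient of 12
 * and a remainder of 345, so the output is "12.345 us " padded with
 * trailing spaces to a fixed-width column.
 */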

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at cpu_data->depth + 1. Since this
		 * is a leaf function, back the depth off by one so the
		 * comments line up with this function.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
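/*
 * A nested call thus prints as "func() {" with an empty DURATION
 * column, while a leaf collapses to "func();". TRACE_TYPE_NO_CONSUME
 * matches get_return_for_leaf() above, which may have already consumed
 * the current event while peeking at the next one.
 */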

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return of the
	 * entry. Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
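/*
 * depth_irq lifecycle: it stays at -1 while outside irq code, latches
 * the call depth when an __irqentry_text address is entered, and is
 * reset to -1 once a return at or above that depth is seen. While it
 * is set, both checks report 1, so the printer suppresses the events
 * whenever the funcgraph-irqs option is off.
 */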

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at cpu_data->depth + 1. This is the
		 * return from a function, so we now want the comments
		 * to display at the same level as the closing bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would otherwise never
	 * go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can safely be saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
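/*
 * Usage sketch (assuming tracefs is mounted in the usual place):
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth	# record 3 levels deep
 *   echo 0 > /sys/kernel/tracing/max_graph_depth	# no depth limit
 */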

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);