/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_FLAT		0x80

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	/* Use standard trace formatting rather than hierarchical */
	{ TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
	{ } /* Empty entry */
};
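
/*
 * Illustrative sketch (not part of the original file): these tracer
 * options are toggled at runtime through the trace_options interface in
 * tracefs, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	echo function_graph > /sys/kernel/tracing/current_tracer
 *	echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *	echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 *
 * Prefixing an option name with "no" clears it; the defaults are set in
 * tracer_flags.val below.
 */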

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs.
 * The following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative
	 * index it will ignore the record. The index gets recovered when
	 * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
	 * back, and then it'll continue to record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}
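
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * caller of ftrace_push_return_trace() is the arch's function-entry
 * hook, conventionally named prepare_ftrace_return(). It swaps the
 * return address on the stack for return_to_handler and pushes the
 * original one, undoing the swap if the push fails:
 *
 *	void prepare_ftrace_return(unsigned long self_addr,
 *				   unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long old = *parent;
 *		struct ftrace_graph_ent trace;
 *
 *		*parent = (unsigned long)&return_to_handler;
 *		trace.func = self_addr;
 *		trace.depth = current->curr_ret_stack + 1;
 *
 *		if (!ftrace_graph_entry(&trace) ||
 *		    ftrace_push_return_trace(old, self_addr, &trace.depth,
 *					     frame_pointer, parent) == -EBUSY)
 *			*parent = old;	// restore on failure
 *	}
 */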

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
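
/*
 * Illustrative note (an assumption, not code from this file): the caller
 * of ftrace_return_to_handler() is the arch's return_to_handler
 * trampoline, written in assembly. Roughly, it:
 *
 *	1. saves the function's return-value registers,
 *	2. calls ftrace_return_to_handler() with the frame pointer,
 *	3. restores the saved registers, and
 *	4. jumps to the original return address that was handed back.
 */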

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
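
/*
 * Illustrative sketch (an assumption, not code from this file): a stack
 * unwinder would thread one 'graph_idx' through all of its calls so that
 * repeated occurrences of return_to_handler resolve to successive saved
 * return addresses:
 *
 *	int graph_idx = 0;
 *	unsigned long addr;
 *
 *	while (unwind_next_frame(&state)) {
 *		addr = ftrace_graph_ret_addr(task, &graph_idx,
 *					     state.ip, state.sp);
 *		// 'addr' is now the real caller, even if state.ip
 *		// had been rewritten to return_to_handler.
 *	}
 *
 * 'state', 'unwind_next_frame()' and the field names are hypothetical.
 */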

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	/* Trace it when it is nested in, or is, an enabled function. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of the ret stack negative to indicate that it
	 * should ignore further functions. But it needs its own ret stack
	 * entry to recover the original index in order to continue tracing
	 * after returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
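
/*
 * Illustrative example (an assumption about the wider tracing code, not
 * something defined in this file): tracing_thresh is set via the
 * tracing_thresh file in tracefs in microseconds and stored in
 * nanoseconds, so
 *
 *	echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * makes the filter above drop every function that completed in under
 * 100 us; only the longer-running returns reach the ring buffer.
 */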

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
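
/*
 * Illustrative example (output format only, the values are made up):
 * when an entry event is immediately followed by its matching return
 * event, get_return_for_leaf() lets the printer collapse the pair into
 * a single leaf line:
 *
 *	1)   0.633 us    |        fget_light();
 *
 * instead of the open/close pair used for nested calls:
 *
 *	1)               |        sys_read() {
 *	1)   0.633 us    |          fget_light();
 *	1) + 12.341 us   |        }
 */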

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
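
/*
 * Worked example (values chosen for illustration): a duration of
 * 1234567 ns is split by do_div() into duration = 1234 (us) with
 * nsecs_rem = 567, so the line shows "1234.567 us ". Durations of
 * seven or more digits of microseconds skip the fractional part.
 */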

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (flags & TRACE_GRAPH_PRINT_FLAT)
		return TRACE_TYPE_UNHANDLED;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}
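
/*
 * Illustrative example (spacing approximate): with the default flags
 * (cpu, overhead, duration) the header printed above looks like:
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */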

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (flags & TRACE_GRAPH_PRINT_FLAT) {
		trace_default_header(s);
		return;
	}

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}
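
/*
 * Usage sketch (the mount point is an assumption): this handler backs
 * the max_graph_depth file created in init_graph_tracefs() below, so
 *
 *	echo 7 > /sys/kernel/tracing/max_graph_depth
 *
 * limits tracing to seven levels of nesting, and writing 0 removes the
 * limit (a max_depth of 0 disables the depth check in
 * trace_graph_entry()).
 */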

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);