/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}
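/*
 * A minimal usage sketch (this is exactly how the hot paths below
 * use it): bail out early once the tracer has been marked dead.
 *
 *	if (unlikely(ftrace_graph_is_dead()))
 *		return -EBUSY;
 */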

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2
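/*
 * Example (function names illustrative): with TRACE_GRAPH_INDENT == 2,
 * each nesting level shifts the function column by two spaces:
 *
 *	do_one_initcall() {
 *	  some_helper() {
 *	    leaf_func();
 *	  }
 *	}
 */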

/* Flag options */
#define TRACE_GRAPH_PRINT_FLAT		0x80

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Use standard trace formatting rather than hierarchical */
	{ TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * DURATION column is being also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index to ftrace return stack of
	 * current task. Its value should be in [0, FTRACE_RETFUNC_
	 * DEPTH) when the function graph tracer is used. To support
	 * filtering out specific functions, it makes the index
	 * negative by subtracting a huge value (FTRACE_NOTRACE_DEPTH)
	 * so when it sees a negative index the ftrace will ignore
	 * the record. And the index gets recovered when returning
	 * from the filtered function by adding the FTRACE_NOTRACE_
	 * DEPTH and then it'll continue to record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
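/*
 * Worked example of the notrace offset (values illustrative): with
 * curr_ret_stack == 3, entering a function matched by set_graph_notrace
 * stores the frame at index 3 but leaves curr_ret_stack at
 * 3 - FTRACE_NOTRACE_DEPTH, i.e. deeply negative. Every push seeing the
 * negative index bails out, so nothing underneath is recorded; the
 * matching pop that follows adds FTRACE_NOTRACE_DEPTH back to find
 * index 3 again and tracing resumes.
 */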

/* Retrieve a function return address to the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover index to get an original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
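/*
 * Note on the call path (arch-specific sketch): the architecture's
 * return trampoline, typically named return_to_handler, jumps here
 * after the traced function returns; the address returned above is
 * where the trampoline resumes execution.
 */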

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it if it is nested in, or is itself, an enabled function. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
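/*
 * How the tracing_thresh pair above fits together (restating the code):
 * when a threshold is set, trace_graph_thresh_entry() records nothing at
 * function entry, and trace_graph_thresh_return() only emits an event
 * once the call's duration reaches tracing_thresh, so the buffer holds
 * only the slow calls.
 */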

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
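/*
 * Worked example (illustrative): comm "sshd" (truncated to at most 7
 * chars above) and pid 1755 give "sshd-1755", 9 chars. That leaves 5
 * padding spaces, split 2 before and 3 after, producing the centered
 * 14-column field "  sshd-1755   ".
 */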


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
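/*
 * Example output (illustrative timestamp): t = 16690521000 ns prints as
 * "   16.690521 |  " - whole seconds, then the remainder scaled down to
 * microseconds.
 */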

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
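/*
 * Example of the resulting markers in the DURATION column (illustrative,
 * in the style of typical funcgraph output):
 *
 *  1)   ==========> |
 *  1)   6.411 us    |  smp_apic_timer_interrupt();
 *  1)   <========== |
 */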

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
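/*
 * Worked example (restating the code above): duration arrives in
 * nanoseconds, so 1234567 ns becomes 1234 after do_div() with 567 left
 * over, and prints as "1234.567 us " - despite the "msecs" naming, the
 * integral part is microseconds.
 */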

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catching here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
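/*
 * The two entry printers above produce the two output shapes (function
 * names and durations illustrative): a leaf, whose matching return was
 * found, prints on one line with its duration, while a nested entry
 * opens a bracket that a later return event closes:
 *
 *  1)   1.015 us    |    kfree();
 *  1)               |    schedule() {
 */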

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are displayed, not skipped)
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are displayed, not skipped)
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
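/*
 * Example of a comment line (illustrative): a trace_printk() emitted
 * while the graph tracer runs is rendered inline as a C-style comment,
 * one level deeper than the enclosing function, e.g.
 *
 *  1)               |      kmem_cache_free() {
 *  1)               |        (* hello from trace_printk *)
 *  1)   2.113 us    |      }
 *
 * where "(*" and "*)" stand in for the real comment delimiters, which
 * cannot appear literally inside this block comment.
 */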


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (flags & TRACE_GRAPH_PRINT_FLAT)
		return TRACE_TYPE_UNHANDLED;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    " /* 4 spaces */
		"                 "; /* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
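/*
 * With the default flags (CPU, overhead, duration, irqs) the two header
 * lines come out roughly as:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */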

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (flags & TRACE_GRAPH_PRINT_FLAT) {
		trace_default_header(s);
		return;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}


static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
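/*
 * Usage sketch for the file below (path depends on where tracefs is
 * mounted, commonly /sys/kernel/debug/tracing or /sys/kernel/tracing):
 *
 *	echo 7 > /sys/kernel/debug/tracing/max_graph_depth
 *
 * A value of 0, the default, means no depth limit (see the max_depth
 * check in trace_graph_entry()).
 */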

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);