/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_FLAT  0x80

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        /* Use standard trace formatting rather than hierarchical */
        { TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};
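
/*
 * These options can be toggled at run time through the trace_options
 * file, e.g. (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo funcgraph-proc   > /sys/kernel/tracing/trace_options
 *   echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 */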

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
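
/*
 * For illustration: FLAGS_FILL_FULL pads the whole DURATION column
 * (used for nested entries and comment lines), while FLAGS_FILL_START
 * and FLAGS_FILL_END pad the partial column before and after the
 * "==========>" / "<==========" irq markers printed by print_graph_irq().
 */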

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * The curr_ret_stack is an index into the ftrace return stack of
         * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
         * when the function graph tracer is used. To support filtering out
         * specific functions, the index is made negative by subtracting a
         * huge value (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative
         * index it will ignore the record. The index is recovered when
         * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
         * back, and then recording continues normally.
         *
         * The curr_ret_stack is initialized to -1 and gets incremented
         * in this function. So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = current->curr_ret_stack;

        return 0;
}
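
/*
 * A worked example of the notrace index trick (illustrative only,
 * assuming FTRACE_NOTRACE_DEPTH is 65536): with curr_ret_stack == 3,
 * pushing a function matched by set_graph_notrace stores its frame at
 * index 4 but leaves curr_ret_stack at 4 - 65536 = -65532. Any deeper
 * entries then see a negative depth and are skipped. On return, the pop
 * recovers index 4 by adding 65536 back, and after the decrement in
 * ftrace_return_to_handler() curr_ret_stack ends up at 3 again.
 */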

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function. Recover the index to get the original
         * return address. See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt comes in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
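
/*
 * Note: on most architectures this is called from the arch-specific
 * return_to_handler trampoline, which was substituted for the real
 * return address at function entry and then jumps to the address
 * handed back here.
 */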

int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* Trace it only when it is nested in, or is itself, an enabled function */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}
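
/*
 * Worked example (illustrative): comm "kworker/0:1" is truncated to
 * "kworker" above, so with pid 123 the field "kworker-123" is 11 chars.
 * Three padding spaces remain out of TRACE_GRAPH_PROCINFO_LENGTH (14),
 * split as one leading and two trailing: " kworker-123  ".
 */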

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
        /*
         * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

         */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}
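
/*
 * When a leaf is detected above, the entry/return pair is collapsed
 * into a single line, e.g. (illustrative output):
 *
 *  1)   0.391 us    |        rcu_irq_exit();
 *
 * instead of the nested "rcu_irq_exit() {" ... "}" form.
 */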

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}
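
/*
 * Worked example (illustrative): duration = 3125 ns. do_div() leaves
 * duration = 3 and nsecs_rem = 125, so "3" and then ".125" are emitted,
 * len becomes 5, and " us " plus three pad spaces follow, yielding
 * "3.125 us    " as the column text.
 */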

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of time execution to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int cpu = iter->cpu;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /* If a graph tracer ignored set_graph_notrace */
                if (call->depth < -1)
                        call->depth += FTRACE_NOTRACE_DEPTH;

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
                        cpu, iter->ent->pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                /* If a graph tracer ignored set_graph_notrace */
                if (call->depth < -1)
                        call->depth += FTRACE_NOTRACE_DEPTH;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, so we now want the comments
                 * to display at the same level as the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(trace->depth < 0)) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}
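
// For example (illustrative): with funcgraph-tail enabled, or when the
// matching entry was lost, the closing brace above is annotated with
// the function name:
//
//  1)   2.731 us    |  } /* irq_exit */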

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (flags & TRACE_GRAPH_PRINT_FLAT)
                return TRACE_TYPE_UNHANDLED;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * so it can safely be saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "      /* 16 spaces */
                "    "                                       /* 4 spaces */
                "                 ";                         /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (flags & TRACE_GRAPH_PRINT_FLAT) {
                trace_default_header(s);
                return;
        }

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}


static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};
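
/*
 * Typical usage from user space (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 */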

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};
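
/*
 * Usage example (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth   # trace 3 levels deep
 *   echo 0 > /sys/kernel/tracing/max_graph_depth   # no depth limit
 */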

static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);