// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
 * Copyright (c) 2020 Changbin Du <changbin.du@gmail.com>, significant enhancement.
 */

#include "builtin.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <poll.h>
#include <linux/capability.h>
#include <linux/string.h>

#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "strfilter.h"
#include "util/cap.h"
#include "util/config.h"
#include "util/units.h"
#include "util/parse-sublevel-options.h"

#define DEFAULT_TRACER  "function_graph"

struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	int			graph_depth;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	unsigned int		initial_delay;
};

struct filter_entry {
	struct list_head	list;
	char			name[];
};

static volatile int workload_exec_errno;
static bool done;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails,
 * since we asked for it by setting its exec_error callback to the
 * function below, ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = true;
}

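/*
 * Write @val to the tracefs file @name.  ftrace control files distinguish
 * truncating writes (replace the current setting) from appending writes
 * (add to a list, e.g. another function filter), hence the @append flag.
 * A '\n' is appended explicitly so the kernel reports write errors
 * instead of silently ignoring a partial token.
 */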
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	val_copy[size] = '\n';	/* overwrites the NUL terminator from strdup() */

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}

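/*
 * Copy the contents of the tracefs file @name to stdout, e.g. to show
 * the column headers of the 'trace' file before streaming 'trace_pipe'.
 */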
static int read_tracing_file_to_stdout(const char *name)
{
	char buf[4096];
	char *file;
	int fd;
	int ret = -1;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, buf, sizeof(buf)));
		goto out;
	}

	/* read contents to stdout */
	while (true) {
		int n = read(fd, buf, sizeof(buf));
		if (n == 0)
			break;
		else if (n < 0)
			goto out_close;

		if (fwrite(buf, n, 1, stdout) != 1)
			goto out_close;
	}
	ret = 0;

out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int read_tracing_file_by_line(const char *name,
				     void (*cb)(char *str, void *arg),
				     void *cb_arg)
{
	char *line = NULL;
	size_t len = 0;
	char *file;
	FILE *fp;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fp = fopen(file, "r");
	if (fp == NULL) {
		pr_debug("cannot open tracing file: %s\n", name);
		put_tracing_file(file);
		return -1;
	}

	while (getline(&line, &len, fp) != -1) {
		cb(line, cb_arg);
	}

	free(line);
	fclose(fp);
	put_tracing_file(file);
	return 0;
}

static int write_tracing_file_int(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	if (write_tracing_file(name, buf) < 0)
		return -1;

	return 0;
}

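/*
 * Tracer options live as individual boolean files under the tracefs
 * 'options/' directory, e.g. 'options/func_stack_trace'.  Build that
 * relative path and write @val through __write_tracing_file().
 */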
static int write_tracing_option_file(const char *name, const char *val)
{
	char *file;
	int ret;

	if (asprintf(&file, "options/%s", name) < 0)
		return -1;

	ret = __write_tracing_file(file, val, false);
	free(file);
	return ret;
}

static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);

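/*
 * Restore every option this command may have touched to its default
 * value, so a run does not leak settings into later tracing sessions.
 */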
static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
{
	write_tracing_option_file("function-fork", "0");
	write_tracing_option_file("func_stack_trace", "0");
	write_tracing_option_file("sleep-time", "1");
	write_tracing_option_file("funcgraph-irqs", "1");
	write_tracing_option_file("funcgraph-proc", "0");
	write_tracing_option_file("funcgraph-abstime", "0");
	write_tracing_option_file("latency-format", "0");
	write_tracing_option_file("irq-info", "0");
}

static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
	if (write_tracing_file("tracing_on", "0") < 0)
		return -1;

	if (write_tracing_file("current_tracer", "nop") < 0)
		return -1;

	/* writing a blank clears the pid list */
	if (write_tracing_file("set_ftrace_pid", " ") < 0)
		return -1;

	if (reset_tracing_cpu() < 0)
		return -1;

	if (write_tracing_file("max_graph_depth", "0") < 0)
		return -1;

	if (write_tracing_file("tracing_thresh", "0") < 0)
		return -1;

	reset_tracing_filters();
	reset_tracing_options(ftrace);
	return 0;
}

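/*
 * When tracing specific tasks (rather than whole CPUs), append each
 * thread id to 'set_ftrace_pid' so the kernel only traces those tasks.
 * Appending (O_APPEND) adds to the pid list instead of replacing it,
 * which is why append_tracing_file() is used per thread.
 */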
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  perf_thread_map__pid(ftrace->evlist->core.threads, i));
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}

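/*
 * 'tracing_cpumask' takes a hex mask with a ',' between every 32-bit
 * group (e.g. "000000ff,ffffffff").  Each hex digit covers 4 cpus, so
 * the buffer needs last_cpu/4 + 2 bytes (the last digit plus the
 * terminating NUL) plus one separator per 32 cpus.
 */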
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* a ',' is needed for every 32 cpus */

	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}

static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;

	if (!target__has_cpu(&ftrace->target))
		return 0;

	return set_tracing_cpumask(cpumap);
}

static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_stack_trace)
		return 0;

	if (write_tracing_option_file("func_stack_trace", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_irq_info)
		return 0;

	if (write_tracing_option_file("irq-info", "1") < 0)
		return -1;

	return 0;
}

static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
	int ret;

	ret = set_tracing_cpumask(cpumap);
	perf_cpu_map__put(cpumap);
	return ret;
}

static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}

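/*
 * Apply the four filter lists to their tracefs files: set_ftrace_filter
 * and set_ftrace_notrace for the function tracer, set_graph_function
 * and set_graph_notrace for the function_graph tracer.
 */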
static int set_tracing_filters(struct perf_ftrace *ftrace)
{
	int ret;

	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
	if (ret < 0)
		return ret;

	/* old kernels do not have this filter */
	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);

	return ret;
}

static void reset_tracing_filters(void)
{
	write_tracing_file("set_ftrace_filter", " ");
	write_tracing_file("set_ftrace_notrace", " ");
	write_tracing_file("set_graph_function", " ");
	write_tracing_file("set_graph_notrace", " ");
}

static int set_tracing_depth(struct perf_ftrace *ftrace)
{
	if (ftrace->graph_depth == 0)
		return 0;

	if (ftrace->graph_depth < 0) {
		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
		return -1;
	}

	if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
		return -1;

	return 0;
}

static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->percpu_buffer_size == 0)
		return 0;

	/* buffer_size_kb is in kilobytes */
	ret = write_tracing_file_int("buffer_size_kb",
				     ftrace->percpu_buffer_size / 1024);
	if (ret < 0)
		return ret;

	return 0;
}

static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
{
	if (!ftrace->inherit)
		return 0;

	if (write_tracing_option_file("function-fork", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_nosleep_time)
		return 0;

	if (write_tracing_option_file("sleep-time", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_noirqs)
		return 0;

	if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_verbose)
		return 0;

	if (write_tracing_option_file("funcgraph-proc", "1") < 0)
		return -1;

	if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
		return -1;

	if (write_tracing_option_file("latency-format", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_thresh(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->graph_thresh == 0)
		return 0;

	/* tracing_thresh is in microseconds */
	ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
	if (ret < 0)
		return ret;

	return 0;
}

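/*
 * Push all requested settings into tracefs before the tracer is enabled.
 * Each helper above is a no-op when its option was not requested, so
 * only explicitly set options are written.
 */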
static int set_tracing_options(struct perf_ftrace *ftrace)
{
	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		return -1;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		return -1;
	}

	if (set_tracing_func_stack_trace(ftrace) < 0) {
		pr_err("failed to set tracing option func_stack_trace\n");
		return -1;
	}

	if (set_tracing_func_irqinfo(ftrace) < 0) {
		pr_err("failed to set tracing option irq-info\n");
		return -1;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		return -1;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		return -1;
	}

	if (set_tracing_percpu_buffer_size(ftrace) < 0) {
		pr_err("failed to set tracing per-cpu buffer size\n");
		return -1;
	}

	if (set_tracing_trace_inherit(ftrace) < 0) {
		pr_err("failed to set tracing option function-fork\n");
		return -1;
	}

	if (set_tracing_sleep_time(ftrace) < 0) {
		pr_err("failed to set tracing option sleep-time\n");
		return -1;
	}

	if (set_tracing_funcgraph_irqs(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-irqs\n");
		return -1;
	}

	if (set_tracing_funcgraph_verbose(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
		return -1;
	}

	if (set_tracing_thresh(ftrace) < 0) {
		pr_err("failed to set tracing thresh\n");
		return -1;
	}

	return 0;
}

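/*
 * Main tracing loop: reset tracefs to a clean state, clear the ring
 * buffer, prepare the (optional) workload, apply all options, select the
 * tracer, then stream 'trace_pipe' to stdout via poll() until the
 * workload exits or the user interrupts.  A final non-blocking drain
 * picks up whatever remains in the ring buffer after tracing_on is
 * cleared.
 */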
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
				&ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_options(ftrace) < 0)
		goto out_reset;

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	/* display column headers */
	read_tracing_file_to_stdout("trace");

	if (!ftrace->initial_delay) {
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	perf_evlist__start_workload(ftrace->evlist);

	if (ftrace->initial_delay) {
		usleep(ftrace->initial_delay * 1000);
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so below error msg appears at the end. */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}

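/*
 * Handle the 'ftrace.tracer' key from perfconfig, which pre-selects the
 * tracer.  Only "function_graph" and "function" are accepted; any other
 * key under the "ftrace." section is an error.
 */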
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;

	if (strcmp(var, "ftrace.tracer"))
		return -1;

	if (!strcmp(value, "function_graph") ||
	    !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}

static void list_function_cb(char *str, void *arg)
{
	struct strfilter *filter = (struct strfilter *)arg;

	if (strfilter__compare(filter, str))
		printf("%s", str);
}

static int opt_list_avail_functions(const struct option *opt __maybe_unused,
				    const char *str, int unset)
{
	struct strfilter *filter;
	const char *err = NULL;
	int ret;

	if (unset || !str)
		return -1;

	filter = strfilter__new(str, &err);
	if (!filter)
		return err ? -EINVAL : -ENOMEM;

	ret = strfilter__or(filter, str, &err);
	if (ret == -EINVAL) {
		pr_err("Filter parse error at %td.\n", err - str + 1);
		pr_err("Source: \"%s\"\n", str);
		pr_err("         %*c\n", (int)(err - str + 1), '^');
		strfilter__delete(filter);
		return ret;
	}

	ret = read_tracing_file_by_line("available_filter_functions",
					list_function_cb, filter);
	strfilter__delete(filter);
	if (ret < 0)
		return ret;

	exit(0);
}

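/*
 * Store one function filter pattern.  struct filter_entry keeps the name
 * in a flexible array member, so a single allocation holds both the list
 * node and the string.
 */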
static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct list_head *head = opt->value;
	struct filter_entry *entry;

	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, head);

	return 0;
}

static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del_init(&pos->list);
		free(pos);
	}
}

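/*
 * Parse the -m/--buffer-size argument.  Accepts a B/K/M/G suffix via
 * parse_tag_value() and rejects sizes below 1KB, since the value is
 * later written to buffer_size_kb in kilobyte units.
 */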
static int parse_buffer_size(const struct option *opt,
			     const char *str, int unset)
{
	unsigned long *s = (unsigned long *)opt->value;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	unsigned long val;

	if (unset) {
		*s = 0;
		return 0;
	}

	val = parse_tag_value(str, tags_size);
	if (val != (unsigned long) -1) {
		if (val < 1024) {
			pr_err("buffer size too small, must be larger than 1KB.");
			return -1;
		}
		*s = val;
		return 0;
	}

	return -1;
}

static int parse_func_tracer_opts(const struct option *opt,
				  const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option func_tracer_opts[] = {
		{ .name = "call-graph",	.value_ptr = &ftrace->func_stack_trace },
		{ .name = "irq-info",	.value_ptr = &ftrace->func_irq_info },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, func_tracer_opts);
	if (ret)
		return ret;

	return 0;
}

static int parse_graph_tracer_opts(const struct option *opt,
				   const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option graph_tracer_opts[] = {
		{ .name = "nosleep-time",	.value_ptr = &ftrace->graph_nosleep_time },
		{ .name = "noirqs",		.value_ptr = &ftrace->graph_noirqs },
		{ .name = "verbose",		.value_ptr = &ftrace->graph_verbose },
		{ .name = "thresh",		.value_ptr = &ftrace->graph_thresh },
		{ .name = "depth",		.value_ptr = &ftrace->graph_depth },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, graph_tracer_opts);
	if (ret)
		return ret;

	return 0;
}

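/*
 * Pick the tracer implied by the filter options: any -G/-g filter
 * selects function_graph, otherwise any -T/-N filter selects function;
 * with no filters the configured default (function_graph) is kept.
 */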
static void select_tracer(struct perf_ftrace *ftrace)
{
	bool graph = !list_empty(&ftrace->graph_funcs) ||
		     !list_empty(&ftrace->nograph_funcs);
	bool func = !list_empty(&ftrace->filters) ||
		    !list_empty(&ftrace->notrace);

	/* The function_graph tracer has priority over the function tracer. */
	if (graph)
		ftrace->tracer = "function_graph";
	else if (func)
		ftrace->tracer = "function";
	/* Otherwise, the default tracer is used. */

	pr_debug("%s tracer is used\n", ftrace->tracer);
}

int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "Tracer to use: function_graph(default) or function"),
	OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
			     "Show available functions to filter",
			     opt_list_avail_functions, "*"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "Trace on existing process id"),
	/* TODO: Add short option -t after -t/--tracer can be removed. */
	OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_INCR('v', "verbose", &verbose,
		 "Be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		   "List of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "Do not trace given functions", parse_filter_func),
	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
		     "Function tracer options, available options: call-graph,irq-info",
		     parse_func_tracer_opts),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
		     parse_graph_tracer_opts),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "Size of per-cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
		    "Trace children processes"),
	OPT_UINTEGER('D', "delay", &ftrace.initial_delay,
		     "Number of milliseconds to wait before starting tracing after program start"),
	OPT_END()
	};

	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;

	select_tracer(&ftrace);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}