/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever-growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"

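/*
 * Page size of the running system; set in __cmd_test() via
 * sysconf(_SC_PAGE_SIZE) and used below as the tolerated skew when
 * comparing symbol end addresses between vmlinux and kallsyms.
 */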
static long page_size;

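/*
 * Filter used while loading the vmlinux symbol table: symbol__priv()
 * returns the per-symbol private area (sized via symbol_conf.priv_size
 * in cmd_test()), where each vmlinux symbol is flagged as visited.
 */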
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}

static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold the kernel and the modules obtained
	 * from both the vmlinux + .ko files and from /proc/kallsyms split by
	 * modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally sorted on demand by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use to
	 * see if the running kernel was relocated, by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol; if we find it we'll
	 * have its ref_reloc_sym.unrelocated_addr, and then maps__reloc_vmlinux
	 * will notice and set the proper ->[un]map_ip routines to fix up the
	 * symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue; /* ignore zero-sized symbols */

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

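		/*
		 * kallsyms can have several symbols (aliases) at the same
		 * address; if the name of the one we landed on doesn't match,
		 * the detour below walks the neighbouring entries that share
		 * sym->start, first backwards, then forwards from first_pair,
		 * looking for one whose name does match.
		 */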
		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have symbol end addresses, so
				 * we set each one to the next symbol's start - 1.
				 * In some cases we get this wrong by up to a page:
				 * trace_kmalloc, when I was developing this code,
				 * was one such example, 2106 bytes off the real
				 * size. More than that and we _really_ have a
				 * problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

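	/*
	 * The rest of the checks are only printed with -v: compare the
	 * vmlinux and kallsyms map tables in both directions.
	 */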
	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]")
		 * in both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

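/*
 * Look up the id of a syscall tracepoint by reading
 * /sys/kernel/debug/tracing/events/syscalls/<evname>/id from debugfs;
 * returns the id, or -1 if debugfs isn't mounted there.
 */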
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "/sys/kernel/debug/tracing/events/syscalls/%s/id",
		     evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		ssize_t n = read(fd, id, sizeof(id) - 1);
		if (n > 0) {
			id[n] = '\0'; /* make sure atoi() sees a terminated string */
			err = atoi(id);
		}
		close(fd);
	}

	free(filename);
	return err;
}

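/*
 * Sanity check the sys_enter_open tracepoint: open a counter for the
 * current thread only, do a fixed number of open("/etc/passwd") calls
 * and verify that the count read back matches.
 */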
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

#include <sched.h>

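/*
 * Same sanity check, but system wide: open the counter on all CPUs,
 * then pin ourselves to each CPU in turn with sched_setaffinity() and
 * do a CPU-dependent number of open() calls, so that each per-CPU
 * count can be verified independently.
 */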
static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit, though :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, because if
	 * we used the auto allocation it would allocate just for 1 cpu,
	 * as we start with cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * these syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each event back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	/* ANDROID_CHANGE_BEGIN */
#ifdef __BIONIC__
	return 0;
#else
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_TRACEPOINT,
		.read_format	= PERF_FORMAT_ID,
		.sample_type	= PERF_SAMPLE_ID,
		.watermark	= 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	/* the casts paper over the pid_t argument getsid() and getpgid() take */
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;
	int sample_size = __perf_evsel__sample_size(attr.sample_type);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo; /* just to quell set-but-unused warnings */
		}

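	/*
	 * Drain mmap buffer 0: since we pinned ourselves to cpus->map[0]
	 * above, all the samples land in that buffer. Each
	 * PERF_RECORD_SAMPLE carries the PERF_SAMPLE_ID we asked for,
	 * which perf_evlist__id2evsel() maps back to the originating evsel.
	 */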
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_event__parse_sample(event, attr.sample_type, sample_size,
					       false, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 event_name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
#endif
	/* ANDROID_CHANGE_END */
}

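/*
 * Table of all the tests run by 'perf test'; __cmd_test() walks it
 * until it hits the NULL .func terminator.
 */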
static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.func = NULL,
	},
};

static int __cmd_test(void)
{
	int i = 0;

	page_size = sysconf(_SC_PAGE_SIZE);

	while (tests[i].func) {
		int err;
		pr_info("%2d: %s:", i + 1, tests[i].desc);
		pr_debug("\n--- start ---\n");
		err = tests[i].func();
		pr_debug("---- end ----\n%s:", tests[i].desc);
		pr_info(" %s\n", err ? "FAILED!" : "Ok");
		++i;
	}

	return 0;
}

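/*
 * No positional arguments are accepted, only the options below, so
 * example invocations look like:
 *
 *	$ perf test
 *	$ perf test --verbose=1
 */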
static const char * const test_usage[] = {
	"perf test [<options>]",
	NULL,
};

static const struct option test_options[] = {
	OPT_INTEGER('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
};

int cmd_test(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc)
		usage_with_options(test_usage, test_options);

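	/*
	 * Symbol library setup needed by test__vmlinux_matches_kallsyms:
	 * priv_size reserves the per-symbol space used as the 'visited'
	 * flag by vmlinux_matches_kallsyms_filter(), sort_by_name allows
	 * looking up the "_stext" ref reloc symbol by name, and
	 * try_vmlinux_path sets up the list of usual vmlinux locations
	 * that machine__load_vmlinux_path() will try.
	 */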
	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	setup_pager();

	return __cmd_test();
}