1 #include <dirent.h>
2 #include <errno.h>
3 #include <stdlib.h>
4 #include <stdio.h>
5 #include <string.h>
6 #include <sys/types.h>
7 #include <sys/stat.h>
8 #include <sys/param.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 #include "build-id.h"
13 #include "util.h"
14 #include "debug.h"
15 #include "machine.h"
16 #include "symbol.h"
17 #include "strlist.h"
18 #include "intlist.h"
19 #include "header.h"
20 
21 #include <elf.h>
22 #include <limits.h>
23 #include <symbol/kallsyms.h>
24 #include <sys/utsname.h>
25 
26 static int dso__load_kernel_sym(struct dso *dso, struct map *map,
27 				symbol_filter_t filter);
28 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
29 			symbol_filter_t filter);
30 int vmlinux_path__nr_entries;
31 char **vmlinux_path;
32 
33 struct symbol_conf symbol_conf = {
34 	.use_modules		= true,
35 	.try_vmlinux_path	= true,
36 	.annotate_src		= true,
37 	.demangle		= true,
38 	.demangle_kernel	= false,
39 	.cumulate_callchain	= true,
40 	.show_hist_headers	= true,
41 	.symfs			= "",
42 };
43 
44 static enum dso_binary_type binary_type_symtab[] = {
45 	DSO_BINARY_TYPE__KALLSYMS,
46 	DSO_BINARY_TYPE__GUEST_KALLSYMS,
47 	DSO_BINARY_TYPE__JAVA_JIT,
48 	DSO_BINARY_TYPE__DEBUGLINK,
49 	DSO_BINARY_TYPE__BUILD_ID_CACHE,
50 	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
51 	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
52 	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
53 	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
54 	DSO_BINARY_TYPE__GUEST_KMODULE,
55 	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
56 	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
57 	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
58 	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
59 	DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
60 	DSO_BINARY_TYPE__NOT_FOUND,
61 };
62 
63 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
64 
65 bool symbol_type__is_a(char symbol_type, enum map_type map_type)
66 {
67 	symbol_type = toupper(symbol_type);
68 
69 	switch (map_type) {
70 	case MAP__FUNCTION:
71 		return symbol_type == 'T' || symbol_type == 'W';
72 	case MAP__VARIABLE:
73 		return symbol_type == 'D';
74 	default:
75 		return false;
76 	}
77 }
78 
79 static int prefix_underscores_count(const char *str)
80 {
81 	const char *tail = str;
82 
83 	while (*tail == '_')
84 		tail++;
85 
86 	return tail - str;
87 }
88 
89 int __weak arch__choose_best_symbol(struct symbol *syma,
90 				    struct symbol *symb __maybe_unused)
91 {
92 	/* Avoid "SyS" kernel syscall aliases */
93 	if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
94 		return SYMBOL_B;
95 	if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
96 		return SYMBOL_B;
97 
98 	return SYMBOL_A;
99 }
100 
101 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
102 {
103 	s64 a;
104 	s64 b;
105 	size_t na, nb;
106 
107 	/* Prefer a symbol with non zero length */
108 	a = syma->end - syma->start;
109 	b = symb->end - symb->start;
110 	if ((b == 0) && (a > 0))
111 		return SYMBOL_A;
112 	else if ((a == 0) && (b > 0))
113 		return SYMBOL_B;
114 
115 	/* Prefer a non weak symbol over a weak one */
116 	a = syma->binding == STB_WEAK;
117 	b = symb->binding == STB_WEAK;
118 	if (b && !a)
119 		return SYMBOL_A;
120 	if (a && !b)
121 		return SYMBOL_B;
122 
123 	/* Prefer a global symbol over a non global one */
124 	a = syma->binding == STB_GLOBAL;
125 	b = symb->binding == STB_GLOBAL;
126 	if (a && !b)
127 		return SYMBOL_A;
128 	if (b && !a)
129 		return SYMBOL_B;
130 
131 	/* Prefer the symbol with fewer leading underscores */
132 	a = prefix_underscores_count(syma->name);
133 	b = prefix_underscores_count(symb->name);
134 	if (b > a)
135 		return SYMBOL_A;
136 	else if (a > b)
137 		return SYMBOL_B;
138 
139 	/* Choose the symbol with the longest name */
140 	na = strlen(syma->name);
141 	nb = strlen(symb->name);
142 	if (na > nb)
143 		return SYMBOL_A;
144 	else if (na < nb)
145 		return SYMBOL_B;
146 
147 	return arch__choose_best_symbol(syma, symb);
148 }
149 
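/*
 * kallsyms can list several symbols at the same start address (aliases).
 * Unless symbol_conf.allow_aliases is set, walk the tree and keep only the
 * alias that choose_best_symbol() prefers, deleting the others.
 */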
150 void symbols__fixup_duplicate(struct rb_root *symbols)
151 {
152 	struct rb_node *nd;
153 	struct symbol *curr, *next;
154 
155 	if (symbol_conf.allow_aliases)
156 		return;
157 
158 	nd = rb_first(symbols);
159 
160 	while (nd) {
161 		curr = rb_entry(nd, struct symbol, rb_node);
162 again:
163 		nd = rb_next(&curr->rb_node);
164 		next = rb_entry(nd, struct symbol, rb_node);
165 
166 		if (!nd)
167 			break;
168 
169 		if (curr->start != next->start)
170 			continue;
171 
172 		if (choose_best_symbol(curr, next) == SYMBOL_A) {
173 			rb_erase(&next->rb_node, symbols);
174 			symbol__delete(next);
175 			goto again;
176 		} else {
177 			nd = rb_next(&curr->rb_node);
178 			rb_erase(&curr->rb_node, symbols);
179 			symbol__delete(curr);
180 		}
181 	}
182 }
183 
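/*
 * Symbols parsed from kallsyms are created with zero length; give each such
 * symbol an end address equal to the start of the next symbol, and give the
 * last symbol an arbitrary page-aligned end.
 */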
184 void symbols__fixup_end(struct rb_root *symbols)
185 {
186 	struct rb_node *nd, *prevnd = rb_first(symbols);
187 	struct symbol *curr, *prev;
188 
189 	if (prevnd == NULL)
190 		return;
191 
192 	curr = rb_entry(prevnd, struct symbol, rb_node);
193 
194 	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
195 		prev = curr;
196 		curr = rb_entry(nd, struct symbol, rb_node);
197 
198 		if (prev->end == prev->start && prev->end != curr->start)
199 			prev->end = curr->start;
200 	}
201 
202 	/* Last entry */
203 	if (curr->end == curr->start)
204 		curr->end = roundup(curr->start, 4096) + 4096;
205 }
206 
207 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
208 {
209 	struct maps *maps = &mg->maps[type];
210 	struct map *next, *curr;
211 
212 	pthread_rwlock_wrlock(&maps->lock);
213 
214 	curr = maps__first(maps);
215 	if (curr == NULL)
216 		goto out_unlock;
217 
218 	for (next = map__next(curr); next; next = map__next(curr)) {
219 		curr->end = next->start;
220 		curr = next;
221 	}
222 
223 	/*
224 	 * We still don't have the actual symbols, so guess the
225 	 * last map's final address.
226 	 */
227 	curr->end = ~0ULL;
228 
229 out_unlock:
230 	pthread_rwlock_unlock(&maps->lock);
231 }
232 
233 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
234 {
235 	size_t namelen = strlen(name) + 1;
236 	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
237 					sizeof(*sym) + namelen));
238 	if (sym == NULL)
239 		return NULL;
240 
241 	if (symbol_conf.priv_size)
242 		sym = ((void *)sym) + symbol_conf.priv_size;
243 
244 	sym->start   = start;
245 	sym->end     = len ? start + len : start;
246 	sym->binding = binding;
247 	sym->namelen = namelen - 1;
248 
249 	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
250 		  __func__, name, start, sym->end);
251 	memcpy(sym->name, name, namelen);
252 
253 	return sym;
254 }
255 
256 void symbol__delete(struct symbol *sym)
257 {
258 	free(((void *)sym) - symbol_conf.priv_size);
259 }
260 
261 size_t symbol__fprintf(struct symbol *sym, FILE *fp)
262 {
263 	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
264 		       sym->start, sym->end,
265 		       sym->binding == STB_GLOBAL ? 'g' :
266 		       sym->binding == STB_LOCAL  ? 'l' : 'w',
267 		       sym->name);
268 }
269 
270 size_t symbol__fprintf_symname_offs(const struct symbol *sym,
271 				    const struct addr_location *al, FILE *fp)
272 {
273 	unsigned long offset;
274 	size_t length;
275 
276 	if (sym && sym->name) {
277 		length = fprintf(fp, "%s", sym->name);
278 		if (al) {
279 			if (al->addr < sym->end)
280 				offset = al->addr - sym->start;
281 			else
282 				offset = al->addr - al->map->start - sym->start;
283 			length += fprintf(fp, "+0x%lx", offset);
284 		}
285 		return length;
286 	} else
287 		return fprintf(fp, "[unknown]");
288 }
289 
290 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
291 {
292 	return symbol__fprintf_symname_offs(sym, NULL, fp);
293 }
294 
295 void symbols__delete(struct rb_root *symbols)
296 {
297 	struct symbol *pos;
298 	struct rb_node *next = rb_first(symbols);
299 
300 	while (next) {
301 		pos = rb_entry(next, struct symbol, rb_node);
302 		next = rb_next(&pos->rb_node);
303 		rb_erase(&pos->rb_node, symbols);
304 		symbol__delete(pos);
305 	}
306 }
307 
308 void symbols__insert(struct rb_root *symbols, struct symbol *sym)
309 {
310 	struct rb_node **p = &symbols->rb_node;
311 	struct rb_node *parent = NULL;
312 	const u64 ip = sym->start;
313 	struct symbol *s;
314 
315 	while (*p != NULL) {
316 		parent = *p;
317 		s = rb_entry(parent, struct symbol, rb_node);
318 		if (ip < s->start)
319 			p = &(*p)->rb_left;
320 		else
321 			p = &(*p)->rb_right;
322 	}
323 	rb_link_node(&sym->rb_node, parent, p);
324 	rb_insert_color(&sym->rb_node, symbols);
325 }
326 
327 static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
328 {
329 	struct rb_node *n;
330 
331 	if (symbols == NULL)
332 		return NULL;
333 
334 	n = symbols->rb_node;
335 
336 	while (n) {
337 		struct symbol *s = rb_entry(n, struct symbol, rb_node);
338 
339 		if (ip < s->start)
340 			n = n->rb_left;
341 		else if (ip >= s->end)
342 			n = n->rb_right;
343 		else
344 			return s;
345 	}
346 
347 	return NULL;
348 }
349 
350 static struct symbol *symbols__first(struct rb_root *symbols)
351 {
352 	struct rb_node *n = rb_first(symbols);
353 
354 	if (n)
355 		return rb_entry(n, struct symbol, rb_node);
356 
357 	return NULL;
358 }
359 
360 static struct symbol *symbols__next(struct symbol *sym)
361 {
362 	struct rb_node *n = rb_next(&sym->rb_node);
363 
364 	if (n)
365 		return rb_entry(n, struct symbol, rb_node);
366 
367 	return NULL;
368 }
369 
370 struct symbol_name_rb_node {
371 	struct rb_node	rb_node;
372 	struct symbol	sym;
373 };
374 
375 static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
376 {
377 	struct rb_node **p = &symbols->rb_node;
378 	struct rb_node *parent = NULL;
379 	struct symbol_name_rb_node *symn, *s;
380 
381 	symn = container_of(sym, struct symbol_name_rb_node, sym);
382 
383 	while (*p != NULL) {
384 		parent = *p;
385 		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
386 		if (strcmp(sym->name, s->sym.name) < 0)
387 			p = &(*p)->rb_left;
388 		else
389 			p = &(*p)->rb_right;
390 	}
391 	rb_link_node(&symn->rb_node, parent, p);
392 	rb_insert_color(&symn->rb_node, symbols);
393 }
394 
395 static void symbols__sort_by_name(struct rb_root *symbols,
396 				  struct rb_root *source)
397 {
398 	struct rb_node *nd;
399 
400 	for (nd = rb_first(source); nd; nd = rb_next(nd)) {
401 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
402 		symbols__insert_by_name(symbols, pos);
403 	}
404 }
405 
406 static struct symbol *symbols__find_by_name(struct rb_root *symbols,
407 					    const char *name)
408 {
409 	struct rb_node *n;
410 	struct symbol_name_rb_node *s = NULL;
411 
412 	if (symbols == NULL)
413 		return NULL;
414 
415 	n = symbols->rb_node;
416 
417 	while (n) {
418 		int cmp;
419 
420 		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
421 		cmp = arch__compare_symbol_names(name, s->sym.name);
422 
423 		if (cmp < 0)
424 			n = n->rb_left;
425 		else if (cmp > 0)
426 			n = n->rb_right;
427 		else
428 			break;
429 	}
430 
431 	if (n == NULL)
432 		return NULL;
433 
434 	/* return the first symbol that has the same name (if any) */
435 	for (n = rb_prev(n); n; n = rb_prev(n)) {
436 		struct symbol_name_rb_node *tmp;
437 
438 		tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
439 		if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
440 			break;
441 
442 		s = tmp;
443 	}
444 
445 	return &s->sym;
446 }
447 
448 void dso__reset_find_symbol_cache(struct dso *dso)
449 {
450 	enum map_type type;
451 
452 	for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) {
453 		dso->last_find_result[type].addr   = 0;
454 		dso->last_find_result[type].symbol = NULL;
455 	}
456 }
457 
458 struct symbol *dso__find_symbol(struct dso *dso,
459 				enum map_type type, u64 addr)
460 {
461 	if (dso->last_find_result[type].addr != addr) {
462 		dso->last_find_result[type].addr   = addr;
463 		dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr);
464 	}
465 
466 	return dso->last_find_result[type].symbol;
467 }
468 
469 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
470 {
471 	return symbols__first(&dso->symbols[type]);
472 }
473 
474 struct symbol *dso__next_symbol(struct symbol *sym)
475 {
476 	return symbols__next(sym);
477 }
478 
479 struct symbol *symbol__next_by_name(struct symbol *sym)
480 {
481 	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
482 	struct rb_node *n = rb_next(&s->rb_node);
483 
484 	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
485 }
486 
487  /*
488   * Returns the first symbol that matches @name.
489   */
490 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
491 					const char *name)
492 {
493 	return symbols__find_by_name(&dso->symbol_names[type], name);
494 }
495 
496 void dso__sort_by_name(struct dso *dso, enum map_type type)
497 {
498 	dso__set_sorted_by_name(dso, type);
499 	return symbols__sort_by_name(&dso->symbol_names[type],
500 				     &dso->symbols[type]);
501 }
502 
503 size_t dso__fprintf_symbols_by_name(struct dso *dso,
504 				    enum map_type type, FILE *fp)
505 {
506 	size_t ret = 0;
507 	struct rb_node *nd;
508 	struct symbol_name_rb_node *pos;
509 
510 	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
511 		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
512 		fprintf(fp, "%s\n", pos->sym.name);
513 	}
514 
515 	return ret;
516 }
517 
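/*
 * Parse a /proc/modules style file: for each line, extract the module name
 * and load address and hand them to process_module() as ("[name]", start).
 * Returns 0 on success, -1 on I/O error, or the callback's non-zero return.
 */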
518 int modules__parse(const char *filename, void *arg,
519 		   int (*process_module)(void *arg, const char *name,
520 					 u64 start))
521 {
522 	char *line = NULL;
523 	size_t n;
524 	FILE *file;
525 	int err = 0;
526 
527 	file = fopen(filename, "r");
528 	if (file == NULL)
529 		return -1;
530 
531 	while (1) {
532 		char name[PATH_MAX];
533 		u64 start;
534 		char *sep;
535 		ssize_t line_len;
536 
537 		line_len = getline(&line, &n, file);
538 		if (line_len < 0) {
539 			if (feof(file))
540 				break;
541 			err = -1;
542 			goto out;
543 		}
544 
545 		if (!line) {
546 			err = -1;
547 			goto out;
548 		}
549 
550 		line[--line_len] = '\0'; /* \n */
551 
552 		sep = strrchr(line, 'x');
553 		if (sep == NULL)
554 			continue;
555 
556 		hex2u64(sep + 1, &start);
557 
558 		sep = strchr(line, ' ');
559 		if (sep == NULL)
560 			continue;
561 
562 		*sep = '\0';
563 
564 		scnprintf(name, sizeof(name), "[%s]", line);
565 
566 		err = process_module(arg, name, start);
567 		if (err)
568 			break;
569 	}
570 out:
571 	free(line);
572 	fclose(file);
573 	return err;
574 }
575 
576 struct process_kallsyms_args {
577 	struct map *map;
578 	struct dso *dso;
579 };
580 
581 /*
582  * These are symbols in the kernel image, so make sure that
583  * sym is from a kernel DSO.
584  */
585 bool symbol__is_idle(struct symbol *sym)
586 {
587 	const char * const idle_symbols[] = {
588 		"cpu_idle",
589 		"cpu_startup_entry",
590 		"intel_idle",
591 		"default_idle",
592 		"native_safe_halt",
593 		"enter_idle",
594 		"exit_idle",
595 		"mwait_idle",
596 		"mwait_idle_with_hints",
597 		"poll_idle",
598 		"ppc64_runlatch_off",
599 		"pseries_dedicated_idle_sleep",
600 		NULL
601 	};
602 
603 	int i;
604 
605 	if (!sym)
606 		return false;
607 
608 	for (i = 0; idle_symbols[i]; i++) {
609 		if (!strcmp(idle_symbols[i], sym->name))
610 			return true;
611 	}
612 
613 	return false;
614 }
615 
616 static int map__process_kallsym_symbol(void *arg, const char *name,
617 				       char type, u64 start)
618 {
619 	struct symbol *sym;
620 	struct process_kallsyms_args *a = arg;
621 	struct rb_root *root = &a->dso->symbols[a->map->type];
622 
623 	if (!symbol_type__is_a(type, a->map->type))
624 		return 0;
625 
626 	/*
627 	 * module symbols are not sorted so we add all
628 	 * symbols, setting length to 0, and rely on
629 	 * symbols__fixup_end() to fix it up.
630 	 */
631 	sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
632 	if (sym == NULL)
633 		return -ENOMEM;
634 	/*
635 	 * We will pass the symbols to the filter later, in
636 	 * map__split_kallsyms, when we have split the maps per module
637 	 */
638 	symbols__insert(root, sym);
639 
640 	return 0;
641 }
642 
643 /*
644  * Loads the function entries in /proc/kallsyms into kernel_map->dso,
645  * so that we can in the next step set the symbol ->end address and then
646  * call kernel_maps__split_kallsyms.
647  */
648 static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
649 				  struct map *map)
650 {
651 	struct process_kallsyms_args args = { .map = map, .dso = dso, };
652 	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
653 }
654 
655 static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
656 					 symbol_filter_t filter)
657 {
658 	struct map_groups *kmaps = map__kmaps(map);
659 	struct map *curr_map;
660 	struct symbol *pos;
661 	int count = 0;
662 	struct rb_root old_root = dso->symbols[map->type];
663 	struct rb_root *root = &dso->symbols[map->type];
664 	struct rb_node *next = rb_first(root);
665 
666 	if (!kmaps)
667 		return -1;
668 
669 	*root = RB_ROOT;
670 
671 	while (next) {
672 		char *module;
673 
674 		pos = rb_entry(next, struct symbol, rb_node);
675 		next = rb_next(&pos->rb_node);
676 
677 		rb_erase_init(&pos->rb_node, &old_root);
678 
679 		module = strchr(pos->name, '\t');
680 		if (module)
681 			*module = '\0';
682 
683 		curr_map = map_groups__find(kmaps, map->type, pos->start);
684 
685 		if (!curr_map || (filter && filter(curr_map, pos))) {
686 			symbol__delete(pos);
687 			continue;
688 		}
689 
690 		pos->start -= curr_map->start - curr_map->pgoff;
691 		if (pos->end)
692 			pos->end -= curr_map->start - curr_map->pgoff;
693 		symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
694 		++count;
695 	}
696 
697 	/* Symbols have been adjusted */
698 	dso->adjust_symbols = 1;
699 
700 	return count;
701 }
702 
703 /*
704  * Split the symbols into maps, making sure there are no overlaps, i.e. the
705  * kernel range is broken into several maps, named [kernel].N, as we don't have
706  * the original ELF section names that vmlinux has.
707  */
708 static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
709 			       symbol_filter_t filter)
710 {
711 	struct map_groups *kmaps = map__kmaps(map);
712 	struct machine *machine;
713 	struct map *curr_map = map;
714 	struct symbol *pos;
715 	int count = 0, moved = 0;
716 	struct rb_root *root = &dso->symbols[map->type];
717 	struct rb_node *next = rb_first(root);
718 	int kernel_range = 0;
719 
720 	if (!kmaps)
721 		return -1;
722 
723 	machine = kmaps->machine;
724 
725 	while (next) {
726 		char *module;
727 
728 		pos = rb_entry(next, struct symbol, rb_node);
729 		next = rb_next(&pos->rb_node);
730 
731 		module = strchr(pos->name, '\t');
732 		if (module) {
733 			if (!symbol_conf.use_modules)
734 				goto discard_symbol;
735 
736 			*module++ = '\0';
737 
738 			if (strcmp(curr_map->dso->short_name, module)) {
739 				if (curr_map != map &&
740 				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
741 				    machine__is_default_guest(machine)) {
742 					/*
743 					 * We assume all symbols of a module are
744 					 * contiguous in kallsyms, so curr_map
745 					 * points to a module and all its
746 					 * symbols are in its kmap. Mark it as
747 					 * loaded.
748 					 */
749 					dso__set_loaded(curr_map->dso,
750 							curr_map->type);
751 				}
752 
753 				curr_map = map_groups__find_by_name(kmaps,
754 							map->type, module);
755 				if (curr_map == NULL) {
756 					pr_debug("%s/proc/{kallsyms,modules} "
757 					         "inconsistency while looking "
758 						 "for \"%s\" module!\n",
759 						 machine->root_dir, module);
760 					curr_map = map;
761 					goto discard_symbol;
762 				}
763 
764 				if (curr_map->dso->loaded &&
765 				    !machine__is_default_guest(machine))
766 					goto discard_symbol;
767 			}
768 			/*
769 			 * So that we look just like we get from .ko files,
770 			 * i.e. not prelinked, relative to map->start.
771 			 */
772 			pos->start = curr_map->map_ip(curr_map, pos->start);
773 			pos->end   = curr_map->map_ip(curr_map, pos->end);
774 		} else if (curr_map != map) {
775 			char dso_name[PATH_MAX];
776 			struct dso *ndso;
777 
778 			if (delta) {
779 				/* Kernel was relocated at boot time */
780 				pos->start -= delta;
781 				pos->end -= delta;
782 			}
783 
784 			if (count == 0) {
785 				curr_map = map;
786 				goto filter_symbol;
787 			}
788 
789 			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
790 				snprintf(dso_name, sizeof(dso_name),
791 					"[guest.kernel].%d",
792 					kernel_range++);
793 			else
794 				snprintf(dso_name, sizeof(dso_name),
795 					"[kernel].%d",
796 					kernel_range++);
797 
798 			ndso = dso__new(dso_name);
799 			if (ndso == NULL)
800 				return -1;
801 
802 			ndso->kernel = dso->kernel;
803 
804 			curr_map = map__new2(pos->start, ndso, map->type);
805 			if (curr_map == NULL) {
806 				dso__put(ndso);
807 				return -1;
808 			}
809 
810 			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
811 			map_groups__insert(kmaps, curr_map);
812 			++kernel_range;
813 		} else if (delta) {
814 			/* Kernel was relocated at boot time */
815 			pos->start -= delta;
816 			pos->end -= delta;
817 		}
818 filter_symbol:
819 		if (filter && filter(curr_map, pos)) {
820 discard_symbol:		rb_erase(&pos->rb_node, root);
821 			symbol__delete(pos);
822 		} else {
823 			if (curr_map != map) {
824 				rb_erase(&pos->rb_node, root);
825 				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
826 				++moved;
827 			} else
828 				++count;
829 		}
830 	}
831 
832 	if (curr_map != map &&
833 	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
834 	    machine__is_default_guest(kmaps->machine)) {
835 		dso__set_loaded(curr_map->dso, curr_map->type);
836 	}
837 
838 	return count + moved;
839 }
840 
841 bool symbol__restricted_filename(const char *filename,
842 				 const char *restricted_filename)
843 {
844 	bool restricted = false;
845 
846 	if (symbol_conf.kptr_restrict) {
847 		char *r = realpath(filename, NULL);
848 
849 		if (r != NULL) {
850 			restricted = strcmp(r, restricted_filename) == 0;
851 			free(r);
852 			return restricted;
853 		}
854 	}
855 
856 	return restricted;
857 }
858 
859 struct module_info {
860 	struct rb_node rb_node;
861 	char *name;
862 	u64 start;
863 };
864 
865 static void add_module(struct module_info *mi, struct rb_root *modules)
866 {
867 	struct rb_node **p = &modules->rb_node;
868 	struct rb_node *parent = NULL;
869 	struct module_info *m;
870 
871 	while (*p != NULL) {
872 		parent = *p;
873 		m = rb_entry(parent, struct module_info, rb_node);
874 		if (strcmp(mi->name, m->name) < 0)
875 			p = &(*p)->rb_left;
876 		else
877 			p = &(*p)->rb_right;
878 	}
879 	rb_link_node(&mi->rb_node, parent, p);
880 	rb_insert_color(&mi->rb_node, modules);
881 }
882 
883 static void delete_modules(struct rb_root *modules)
884 {
885 	struct module_info *mi;
886 	struct rb_node *next = rb_first(modules);
887 
888 	while (next) {
889 		mi = rb_entry(next, struct module_info, rb_node);
890 		next = rb_next(&mi->rb_node);
891 		rb_erase(&mi->rb_node, modules);
892 		zfree(&mi->name);
893 		free(mi);
894 	}
895 }
896 
897 static struct module_info *find_module(const char *name,
898 				       struct rb_root *modules)
899 {
900 	struct rb_node *n = modules->rb_node;
901 
902 	while (n) {
903 		struct module_info *m;
904 		int cmp;
905 
906 		m = rb_entry(n, struct module_info, rb_node);
907 		cmp = strcmp(name, m->name);
908 		if (cmp < 0)
909 			n = n->rb_left;
910 		else if (cmp > 0)
911 			n = n->rb_right;
912 		else
913 			return m;
914 	}
915 
916 	return NULL;
917 }
918 
919 static int __read_proc_modules(void *arg, const char *name, u64 start)
920 {
921 	struct rb_root *modules = arg;
922 	struct module_info *mi;
923 
924 	mi = zalloc(sizeof(struct module_info));
925 	if (!mi)
926 		return -ENOMEM;
927 
928 	mi->name = strdup(name);
929 	mi->start = start;
930 
931 	if (!mi->name) {
932 		free(mi);
933 		return -ENOMEM;
934 	}
935 
936 	add_module(mi, modules);
937 
938 	return 0;
939 }
940 
941 static int read_proc_modules(const char *filename, struct rb_root *modules)
942 {
943 	if (symbol__restricted_filename(filename, "/proc/modules"))
944 		return -1;
945 
946 	if (modules__parse(filename, modules, __read_proc_modules)) {
947 		delete_modules(modules);
948 		return -1;
949 	}
950 
951 	return 0;
952 }
953 
954 int compare_proc_modules(const char *from, const char *to)
955 {
956 	struct rb_root from_modules = RB_ROOT;
957 	struct rb_root to_modules = RB_ROOT;
958 	struct rb_node *from_node, *to_node;
959 	struct module_info *from_m, *to_m;
960 	int ret = -1;
961 
962 	if (read_proc_modules(from, &from_modules))
963 		return -1;
964 
965 	if (read_proc_modules(to, &to_modules))
966 		goto out_delete_from;
967 
968 	from_node = rb_first(&from_modules);
969 	to_node = rb_first(&to_modules);
970 	while (from_node) {
971 		if (!to_node)
972 			break;
973 
974 		from_m = rb_entry(from_node, struct module_info, rb_node);
975 		to_m = rb_entry(to_node, struct module_info, rb_node);
976 
977 		if (from_m->start != to_m->start ||
978 		    strcmp(from_m->name, to_m->name))
979 			break;
980 
981 		from_node = rb_next(from_node);
982 		to_node = rb_next(to_node);
983 	}
984 
985 	if (!from_node && !to_node)
986 		ret = 0;
987 
988 	delete_modules(&to_modules);
989 out_delete_from:
990 	delete_modules(&from_modules);
991 
992 	return ret;
993 }
994 
995 static int do_validate_kcore_modules(const char *filename, struct map *map,
996 				  struct map_groups *kmaps)
997 {
998 	struct rb_root modules = RB_ROOT;
999 	struct map *old_map;
1000 	int err;
1001 
1002 	err = read_proc_modules(filename, &modules);
1003 	if (err)
1004 		return err;
1005 
1006 	old_map = map_groups__first(kmaps, map->type);
1007 	while (old_map) {
1008 		struct map *next = map_groups__next(old_map);
1009 		struct module_info *mi;
1010 
1011 		if (old_map == map || old_map->start == map->start) {
1012 			/* The kernel map */
1013 			old_map = next;
1014 			continue;
1015 		}
1016 
1017 		/* Module must be in memory at the same address */
1018 		mi = find_module(old_map->dso->short_name, &modules);
1019 		if (!mi || mi->start != old_map->start) {
1020 			err = -EINVAL;
1021 			goto out;
1022 		}
1023 
1024 		old_map = next;
1025 	}
1026 out:
1027 	delete_modules(&modules);
1028 	return err;
1029 }
1030 
1031 /*
1032  * If kallsyms is referenced by name then we look for filename in the same
1033  * directory.
1034  */
1035 static bool filename_from_kallsyms_filename(char *filename,
1036 					    const char *base_name,
1037 					    const char *kallsyms_filename)
1038 {
1039 	char *name;
1040 
1041 	strcpy(filename, kallsyms_filename);
1042 	name = strrchr(filename, '/');
1043 	if (!name)
1044 		return false;
1045 
1046 	name += 1;
1047 
1048 	if (!strcmp(name, "kallsyms")) {
1049 		strcpy(name, base_name);
1050 		return true;
1051 	}
1052 
1053 	return false;
1054 }
1055 
1056 static int validate_kcore_modules(const char *kallsyms_filename,
1057 				  struct map *map)
1058 {
1059 	struct map_groups *kmaps = map__kmaps(map);
1060 	char modules_filename[PATH_MAX];
1061 
1062 	if (!kmaps)
1063 		return -EINVAL;
1064 
1065 	if (!filename_from_kallsyms_filename(modules_filename, "modules",
1066 					     kallsyms_filename))
1067 		return -EINVAL;
1068 
1069 	if (do_validate_kcore_modules(modules_filename, map, kmaps))
1070 		return -EINVAL;
1071 
1072 	return 0;
1073 }
1074 
1075 static int validate_kcore_addresses(const char *kallsyms_filename,
1076 				    struct map *map)
1077 {
1078 	struct kmap *kmap = map__kmap(map);
1079 
1080 	if (!kmap)
1081 		return -EINVAL;
1082 
1083 	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1084 		u64 start;
1085 
1086 		start = kallsyms__get_function_start(kallsyms_filename,
1087 						     kmap->ref_reloc_sym->name);
1088 		if (start != kmap->ref_reloc_sym->addr)
1089 			return -EINVAL;
1090 	}
1091 
1092 	return validate_kcore_modules(kallsyms_filename, map);
1093 }
1094 
1095 struct kcore_mapfn_data {
1096 	struct dso *dso;
1097 	enum map_type type;
1098 	struct list_head maps;
1099 };
1100 
1101 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1102 {
1103 	struct kcore_mapfn_data *md = data;
1104 	struct map *map;
1105 
1106 	map = map__new2(start, md->dso, md->type);
1107 	if (map == NULL)
1108 		return -ENOMEM;
1109 
1110 	map->end = map->start + len;
1111 	map->pgoff = pgoff;
1112 
1113 	list_add(&map->node, &md->maps);
1114 
1115 	return 0;
1116 }
1117 
1118 static int dso__load_kcore(struct dso *dso, struct map *map,
1119 			   const char *kallsyms_filename)
1120 {
1121 	struct map_groups *kmaps = map__kmaps(map);
1122 	struct machine *machine;
1123 	struct kcore_mapfn_data md;
1124 	struct map *old_map, *new_map, *replacement_map = NULL;
1125 	bool is_64_bit;
1126 	int err, fd;
1127 	char kcore_filename[PATH_MAX];
1128 	struct symbol *sym;
1129 
1130 	if (!kmaps)
1131 		return -EINVAL;
1132 
1133 	machine = kmaps->machine;
1134 
1135 	/* This function requires that the map is the kernel map */
1136 	if (map != machine->vmlinux_maps[map->type])
1137 		return -EINVAL;
1138 
1139 	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1140 					     kallsyms_filename))
1141 		return -EINVAL;
1142 
1143 	/* Modules and kernel must be present at their original addresses */
1144 	if (validate_kcore_addresses(kallsyms_filename, map))
1145 		return -EINVAL;
1146 
1147 	md.dso = dso;
1148 	md.type = map->type;
1149 	INIT_LIST_HEAD(&md.maps);
1150 
1151 	fd = open(kcore_filename, O_RDONLY);
1152 	if (fd < 0) {
1153 		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1154 			 kcore_filename);
1155 		return -EINVAL;
1156 	}
1157 
1158 	/* Read new maps into temporary lists */
1159 	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
1160 			      &is_64_bit);
1161 	if (err)
1162 		goto out_err;
1163 	dso->is_64_bit = is_64_bit;
1164 
1165 	if (list_empty(&md.maps)) {
1166 		err = -EINVAL;
1167 		goto out_err;
1168 	}
1169 
1170 	/* Remove old maps */
1171 	old_map = map_groups__first(kmaps, map->type);
1172 	while (old_map) {
1173 		struct map *next = map_groups__next(old_map);
1174 
1175 		if (old_map != map)
1176 			map_groups__remove(kmaps, old_map);
1177 		old_map = next;
1178 	}
1179 
1180 	/* Find the kernel map using the first symbol */
1181 	sym = dso__first_symbol(dso, map->type);
1182 	list_for_each_entry(new_map, &md.maps, node) {
1183 		if (sym && sym->start >= new_map->start &&
1184 		    sym->start < new_map->end) {
1185 			replacement_map = new_map;
1186 			break;
1187 		}
1188 	}
1189 
1190 	if (!replacement_map)
1191 		replacement_map = list_entry(md.maps.next, struct map, node);
1192 
1193 	/* Add new maps */
1194 	while (!list_empty(&md.maps)) {
1195 		new_map = list_entry(md.maps.next, struct map, node);
1196 		list_del_init(&new_map->node);
1197 		if (new_map == replacement_map) {
1198 			map->start	= new_map->start;
1199 			map->end	= new_map->end;
1200 			map->pgoff	= new_map->pgoff;
1201 			map->map_ip	= new_map->map_ip;
1202 			map->unmap_ip	= new_map->unmap_ip;
1203 			/* Ensure maps are correctly ordered */
1204 			map__get(map);
1205 			map_groups__remove(kmaps, map);
1206 			map_groups__insert(kmaps, map);
1207 			map__put(map);
1208 		} else {
1209 			map_groups__insert(kmaps, new_map);
1210 		}
1211 
1212 		map__put(new_map);
1213 	}
1214 
1215 	/*
1216 	 * Set the data type and long name so that kcore can be read via
1217 	 * dso__data_read_addr().
1218 	 */
1219 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1220 		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1221 	else
1222 		dso->binary_type = DSO_BINARY_TYPE__KCORE;
1223 	dso__set_long_name(dso, strdup(kcore_filename), true);
1224 
1225 	close(fd);
1226 
1227 	if (map->type == MAP__FUNCTION)
1228 		pr_debug("Using %s for kernel object code\n", kcore_filename);
1229 	else
1230 		pr_debug("Using %s for kernel data\n", kcore_filename);
1231 
1232 	return 0;
1233 
1234 out_err:
1235 	while (!list_empty(&md.maps)) {
1236 		map = list_entry(md.maps.next, struct map, node);
1237 		list_del_init(&map->node);
1238 		map__put(map);
1239 	}
1240 	close(fd);
1241 	return -EINVAL;
1242 }
1243 
1244 /*
1245  * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
1246  * delta based on the relocation reference symbol.
1247  */
1248 static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
1249 {
1250 	struct kmap *kmap = map__kmap(map);
1251 	u64 addr;
1252 
1253 	if (!kmap)
1254 		return -1;
1255 
1256 	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1257 		return 0;
1258 
1259 	addr = kallsyms__get_function_start(filename,
1260 					    kmap->ref_reloc_sym->name);
1261 	if (!addr)
1262 		return -1;
1263 
1264 	*delta = addr - kmap->ref_reloc_sym->addr;
1265 	return 0;
1266 }
1267 
1268 int dso__load_kallsyms(struct dso *dso, const char *filename,
1269 		       struct map *map, symbol_filter_t filter)
1270 {
1271 	u64 delta = 0;
1272 
1273 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1274 		return -1;
1275 
1276 	if (dso__load_all_kallsyms(dso, filename, map) < 0)
1277 		return -1;
1278 
1279 	if (kallsyms__delta(map, filename, &delta))
1280 		return -1;
1281 
1282 	symbols__fixup_end(&dso->symbols[map->type]);
1283 	symbols__fixup_duplicate(&dso->symbols[map->type]);
1284 
1285 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1286 		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1287 	else
1288 		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1289 
1290 	if (!dso__load_kcore(dso, map, filename))
1291 		return dso__split_kallsyms_for_kcore(dso, map, filter);
1292 	else
1293 		return dso__split_kallsyms(dso, map, delta, filter);
1294 }
1295 
1296 static int dso__load_perf_map(struct dso *dso, struct map *map,
1297 			      symbol_filter_t filter)
1298 {
1299 	char *line = NULL;
1300 	size_t n;
1301 	FILE *file;
1302 	int nr_syms = 0;
1303 
1304 	file = fopen(dso->long_name, "r");
1305 	if (file == NULL)
1306 		goto out_failure;
1307 
1308 	while (!feof(file)) {
1309 		u64 start, size;
1310 		struct symbol *sym;
1311 		int line_len, len;
1312 
1313 		line_len = getline(&line, &n, file);
1314 		if (line_len < 0)
1315 			break;
1316 
1317 		if (!line)
1318 			goto out_failure;
1319 
1320 		line[--line_len] = '\0'; /* \n */
1321 
1322 		len = hex2u64(line, &start);
1323 
1324 		len++;
1325 		if (len + 2 >= line_len)
1326 			continue;
1327 
1328 		len += hex2u64(line + len, &size);
1329 
1330 		len++;
1331 		if (len + 2 >= line_len)
1332 			continue;
1333 
1334 		sym = symbol__new(start, size, STB_GLOBAL, line + len);
1335 
1336 		if (sym == NULL)
1337 			goto out_delete_line;
1338 
1339 		if (filter && filter(map, sym))
1340 			symbol__delete(sym);
1341 		else {
1342 			symbols__insert(&dso->symbols[map->type], sym);
1343 			nr_syms++;
1344 		}
1345 	}
1346 
1347 	free(line);
1348 	fclose(file);
1349 
1350 	return nr_syms;
1351 
1352 out_delete_line:
1353 	free(line);
1354 out_failure:
1355 	return -1;
1356 }
1357 
1358 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1359 					   enum dso_binary_type type)
1360 {
1361 	switch (type) {
1362 	case DSO_BINARY_TYPE__JAVA_JIT:
1363 	case DSO_BINARY_TYPE__DEBUGLINK:
1364 	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1365 	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1366 	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1367 	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
1368 	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1369 	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1370 		return !kmod && dso->kernel == DSO_TYPE_USER;
1371 
1372 	case DSO_BINARY_TYPE__KALLSYMS:
1373 	case DSO_BINARY_TYPE__VMLINUX:
1374 	case DSO_BINARY_TYPE__KCORE:
1375 		return dso->kernel == DSO_TYPE_KERNEL;
1376 
1377 	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1378 	case DSO_BINARY_TYPE__GUEST_VMLINUX:
1379 	case DSO_BINARY_TYPE__GUEST_KCORE:
1380 		return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1381 
1382 	case DSO_BINARY_TYPE__GUEST_KMODULE:
1383 	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1384 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1385 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1386 		/*
1387 		 * kernel modules know their symtab type - it's set when
1388 		 * creating a module dso in machine__findnew_module_map().
1389 		 */
1390 		return kmod && dso->symtab_type == type;
1391 
1392 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1393 		return true;
1394 
1395 	case DSO_BINARY_TYPE__NOT_FOUND:
1396 	default:
1397 		return false;
1398 	}
1399 }
1400 
1401 int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1402 {
1403 	char *name;
1404 	int ret = -1;
1405 	u_int i;
1406 	struct machine *machine;
1407 	char *root_dir = (char *) "";
1408 	int ss_pos = 0;
1409 	struct symsrc ss_[2];
1410 	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1411 	bool kmod;
1412 	unsigned char build_id[BUILD_ID_SIZE];
1413 
1414 	pthread_mutex_lock(&dso->lock);
1415 
1416 	/* check again under the dso->lock */
1417 	if (dso__loaded(dso, map->type)) {
1418 		ret = 1;
1419 		goto out;
1420 	}
1421 
1422 	if (dso->kernel) {
1423 		if (dso->kernel == DSO_TYPE_KERNEL)
1424 			ret = dso__load_kernel_sym(dso, map, filter);
1425 		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1426 			ret = dso__load_guest_kernel_sym(dso, map, filter);
1427 
1428 		goto out;
1429 	}
1430 
1431 	if (map->groups && map->groups->machine)
1432 		machine = map->groups->machine;
1433 	else
1434 		machine = NULL;
1435 
1436 	dso->adjust_symbols = 0;
1437 
1438 	if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
1439 		struct stat st;
1440 
1441 		if (lstat(dso->name, &st) < 0)
1442 			goto out;
1443 
1444 		if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
1445 			pr_warning("File %s not owned by current user or root, "
1446 				   "ignoring it (use -f to override).\n", dso->name);
1447 			goto out;
1448 		}
1449 
1450 		ret = dso__load_perf_map(dso, map, filter);
1451 		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1452 					     DSO_BINARY_TYPE__NOT_FOUND;
1453 		goto out;
1454 	}
1455 
1456 	if (machine)
1457 		root_dir = machine->root_dir;
1458 
1459 	name = malloc(PATH_MAX);
1460 	if (!name)
1461 		goto out;
1462 
1463 	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1464 		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1465 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1466 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1467 
1468 
1469 	/*
1470 	 * Read the build id if possible. This is required for
1471 	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1472 	 */
1473 	if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)
1474 		dso__set_build_id(dso, build_id);
1475 
1476 	/*
1477 	 * Iterate over candidate debug images.
1478 	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1479 	 * and/or opd section) for processing.
1480 	 */
1481 	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1482 		struct symsrc *ss = &ss_[ss_pos];
1483 		bool next_slot = false;
1484 
1485 		enum dso_binary_type symtab_type = binary_type_symtab[i];
1486 
1487 		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1488 			continue;
1489 
1490 		if (dso__read_binary_type_filename(dso, symtab_type,
1491 						   root_dir, name, PATH_MAX))
1492 			continue;
1493 
1494 		/* Name is now the name of the next image to try */
1495 		if (symsrc__init(ss, dso, name, symtab_type) < 0)
1496 			continue;
1497 
1498 		if (!syms_ss && symsrc__has_symtab(ss)) {
1499 			syms_ss = ss;
1500 			next_slot = true;
1501 			if (!dso->symsrc_filename)
1502 				dso->symsrc_filename = strdup(name);
1503 		}
1504 
1505 		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1506 			runtime_ss = ss;
1507 			next_slot = true;
1508 		}
1509 
1510 		if (next_slot) {
1511 			ss_pos++;
1512 
1513 			if (syms_ss && runtime_ss)
1514 				break;
1515 		} else {
1516 			symsrc__destroy(ss);
1517 		}
1518 
1519 	}
1520 
1521 	if (!runtime_ss && !syms_ss)
1522 		goto out_free;
1523 
1524 	if (runtime_ss && !syms_ss) {
1525 		syms_ss = runtime_ss;
1526 	}
1527 
1528 	/* We'll have to hope for the best */
1529 	if (!runtime_ss && syms_ss)
1530 		runtime_ss = syms_ss;
1531 
1532 	if (syms_ss)
1533 		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
1534 	else
1535 		ret = -1;
1536 
1537 	if (ret > 0) {
1538 		int nr_plt;
1539 
1540 		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
1541 		if (nr_plt > 0)
1542 			ret += nr_plt;
1543 	}
1544 
1545 	for (; ss_pos > 0; ss_pos--)
1546 		symsrc__destroy(&ss_[ss_pos - 1]);
1547 out_free:
1548 	free(name);
1549 	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1550 		ret = 0;
1551 out:
1552 	dso__set_loaded(dso, map->type);
1553 	pthread_mutex_unlock(&dso->lock);
1554 
1555 	return ret;
1556 }
1557 
1558 struct map *map_groups__find_by_name(struct map_groups *mg,
1559 				     enum map_type type, const char *name)
1560 {
1561 	struct maps *maps = &mg->maps[type];
1562 	struct map *map;
1563 
1564 	pthread_rwlock_rdlock(&maps->lock);
1565 
1566 	for (map = maps__first(maps); map; map = map__next(map)) {
1567 		if (map->dso && strcmp(map->dso->short_name, name) == 0)
1568 			goto out_unlock;
1569 	}
1570 
1571 	map = NULL;
1572 
1573 out_unlock:
1574 	pthread_rwlock_unlock(&maps->lock);
1575 	return map;
1576 }
1577 
1578 int dso__load_vmlinux(struct dso *dso, struct map *map,
1579 		      const char *vmlinux, bool vmlinux_allocated,
1580 		      symbol_filter_t filter)
1581 {
1582 	int err = -1;
1583 	struct symsrc ss;
1584 	char symfs_vmlinux[PATH_MAX];
1585 	enum dso_binary_type symtab_type;
1586 
1587 	if (vmlinux[0] == '/')
1588 		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1589 	else
1590 		symbol__join_symfs(symfs_vmlinux, vmlinux);
1591 
1592 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1593 		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1594 	else
1595 		symtab_type = DSO_BINARY_TYPE__VMLINUX;
1596 
1597 	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1598 		return -1;
1599 
1600 	err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
1601 	symsrc__destroy(&ss);
1602 
1603 	if (err > 0) {
1604 		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1605 			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1606 		else
1607 			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1608 		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1609 		dso__set_loaded(dso, map->type);
1610 		pr_debug("Using %s for symbols\n", symfs_vmlinux);
1611 	}
1612 
1613 	return err;
1614 }
1615 
1616 int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1617 			   symbol_filter_t filter)
1618 {
1619 	int i, err = 0;
1620 	char *filename = NULL;
1621 
1622 	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1623 		 vmlinux_path__nr_entries + 1);
1624 
1625 	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1626 		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
1627 		if (err > 0)
1628 			goto out;
1629 	}
1630 
1631 	if (!symbol_conf.ignore_vmlinux_buildid)
1632 		filename = dso__build_id_filename(dso, NULL, 0);
1633 	if (filename != NULL) {
1634 		err = dso__load_vmlinux(dso, map, filename, true, filter);
1635 		if (err > 0)
1636 			goto out;
1637 		free(filename);
1638 	}
1639 out:
1640 	return err;
1641 }
1642 
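/*
 * Scan the subdirectories of @dir for a kallsyms file whose addresses match
 * the current kernel maps (see validate_kcore_addresses()); on success copy
 * that kallsyms path back into @dir.
 */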
1643 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1644 {
1645 	char kallsyms_filename[PATH_MAX];
1646 	struct dirent *dent;
1647 	int ret = -1;
1648 	DIR *d;
1649 
1650 	d = opendir(dir);
1651 	if (!d)
1652 		return -1;
1653 
1654 	while (1) {
1655 		dent = readdir(d);
1656 		if (!dent)
1657 			break;
1658 		if (dent->d_type != DT_DIR)
1659 			continue;
1660 		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1661 			  "%s/%s/kallsyms", dir, dent->d_name);
1662 		if (!validate_kcore_addresses(kallsyms_filename, map)) {
1663 			strlcpy(dir, kallsyms_filename, dir_sz);
1664 			ret = 0;
1665 			break;
1666 		}
1667 	}
1668 
1669 	closedir(d);
1670 
1671 	return ret;
1672 }
1673 
1674 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1675 {
1676 	u8 host_build_id[BUILD_ID_SIZE];
1677 	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1678 	bool is_host = false;
1679 	char path[PATH_MAX];
1680 
1681 	if (!dso->has_build_id) {
1682 		/*
1683 		 * Last resort, if we don't have a build-id and couldn't find
1684 		 * any vmlinux file, try the running kernel kallsyms table.
1685 		 */
1686 		goto proc_kallsyms;
1687 	}
1688 
1689 	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1690 				 sizeof(host_build_id)) == 0)
1691 		is_host = dso__build_id_equal(dso, host_build_id);
1692 
1693 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1694 
1695 	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
1696 		  sbuild_id);
1697 
1698 	/* Use /proc/kallsyms if possible */
1699 	if (is_host) {
1700 		DIR *d;
1701 		int fd;
1702 
1703 		/* If no cached kcore go with /proc/kallsyms */
1704 		d = opendir(path);
1705 		if (!d)
1706 			goto proc_kallsyms;
1707 		closedir(d);
1708 
1709 		/*
1710 		 * Do not check the build-id cache, until we know we cannot use
1711 		 * /proc/kcore.
1712 		 */
1713 		fd = open("/proc/kcore", O_RDONLY);
1714 		if (fd != -1) {
1715 			close(fd);
1716 			/* If module maps match go with /proc/kallsyms */
1717 			if (!validate_kcore_addresses("/proc/kallsyms", map))
1718 				goto proc_kallsyms;
1719 		}
1720 
1721 		/* Find kallsyms in build-id cache with kcore */
1722 		if (!find_matching_kcore(map, path, sizeof(path)))
1723 			return strdup(path);
1724 
1725 		goto proc_kallsyms;
1726 	}
1727 
1728 	/* Find kallsyms in build-id cache with kcore */
1729 	if (!find_matching_kcore(map, path, sizeof(path)))
1730 		return strdup(path);
1731 
1732 	scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
1733 		  buildid_dir, sbuild_id);
1734 
1735 	if (access(path, F_OK)) {
1736 		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1737 		       sbuild_id);
1738 		return NULL;
1739 	}
1740 
1741 	return strdup(path);
1742 
1743 proc_kallsyms:
1744 	return strdup("/proc/kallsyms");
1745 }
1746 
1747 static int dso__load_kernel_sym(struct dso *dso, struct map *map,
1748 				symbol_filter_t filter)
1749 {
1750 	int err;
1751 	const char *kallsyms_filename = NULL;
1752 	char *kallsyms_allocated_filename = NULL;
1753 	/*
1754 	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
1755 	 * it and only it, reporting errors to the user if it cannot be used.
1756 	 *
1757 	 * For instance, try to analyse an ARM perf.data file _without_ a
1758 	 * build-id, or if the user specifies the wrong path to the right
1759 	 * vmlinux file, obviously we can't fall back to another vmlinux (an
1760 	 * x86_64 one, on the machine where analysis is being performed, say),
1761 	 * or worse, /proc/kallsyms.
1762 	 *
1763 	 * If the specified file _has_ a build-id and there is a build-id
1764 	 * section in the perf.data file, we will still do the expected
1765 	 * validation in dso__load_vmlinux and will bail out if they don't
1766 	 * match.
1767 	 */
1768 	if (symbol_conf.kallsyms_name != NULL) {
1769 		kallsyms_filename = symbol_conf.kallsyms_name;
1770 		goto do_kallsyms;
1771 	}
1772 
1773 	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
1774 		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name,
1775 					 false, filter);
1776 	}
1777 
1778 	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
1779 		err = dso__load_vmlinux_path(dso, map, filter);
1780 		if (err > 0)
1781 			return err;
1782 	}
1783 
1784 	/* do not try local files if a symfs was given */
1785 	if (symbol_conf.symfs[0] != 0)
1786 		return -1;
1787 
1788 	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
1789 	if (!kallsyms_allocated_filename)
1790 		return -1;
1791 
1792 	kallsyms_filename = kallsyms_allocated_filename;
1793 
1794 do_kallsyms:
1795 	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
1796 	if (err > 0)
1797 		pr_debug("Using %s for symbols\n", kallsyms_filename);
1798 	free(kallsyms_allocated_filename);
1799 
1800 	if (err > 0 && !dso__is_kcore(dso)) {
1801 		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
1802 		dso__set_long_name(dso, "[kernel.kallsyms]", false);
1803 		map__fixup_start(map);
1804 		map__fixup_end(map);
1805 	}
1806 
1807 	return err;
1808 }
1809 
1810 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
1811 				      symbol_filter_t filter)
1812 {
1813 	int err;
1814 	const char *kallsyms_filename = NULL;
1815 	struct machine *machine;
1816 	char path[PATH_MAX];
1817 
1818 	if (!map->groups) {
1819 		pr_debug("Guest kernel map has no pointer to map groups\n");
1820 		return -1;
1821 	}
1822 	machine = map->groups->machine;
1823 
1824 	if (machine__is_default_guest(machine)) {
1825 		/*
1826 		 * if the user specified a vmlinux filename, use it and only
1827 		 * it, reporting errors to the user if it cannot be used.
1828 		 * Otherwise use the guest kallsyms file supplied on the command line.
1829 		 */
1830 		if (symbol_conf.default_guest_vmlinux_name != NULL) {
1831 			err = dso__load_vmlinux(dso, map,
1832 						symbol_conf.default_guest_vmlinux_name,
1833 						false, filter);
1834 			return err;
1835 		}
1836 
1837 		kallsyms_filename = symbol_conf.default_guest_kallsyms;
1838 		if (!kallsyms_filename)
1839 			return -1;
1840 	} else {
1841 		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1842 		kallsyms_filename = path;
1843 	}
1844 
1845 	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
1846 	if (err > 0)
1847 		pr_debug("Using %s for symbols\n", kallsyms_filename);
1848 	if (err > 0 && !dso__is_kcore(dso)) {
1849 		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1850 		machine__mmap_name(machine, path, sizeof(path));
1851 		dso__set_long_name(dso, strdup(path), true);
1852 		map__fixup_start(map);
1853 		map__fixup_end(map);
1854 	}
1855 
1856 	return err;
1857 }
1858 
1859 static void vmlinux_path__exit(void)
1860 {
1861 	while (--vmlinux_path__nr_entries >= 0)
1862 		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
1863 	vmlinux_path__nr_entries = 0;
1864 
1865 	zfree(&vmlinux_path);
1866 }
1867 
1868 static int vmlinux_path__init(struct perf_env *env)
1869 {
1870 	struct utsname uts;
1871 	char bf[PATH_MAX];
1872 	char *kernel_version;
1873 
1874 	vmlinux_path = malloc(sizeof(char *) * 6);
1875 	if (vmlinux_path == NULL)
1876 		return -1;
1877 
1878 	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
1879 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1880 		goto out_fail;
1881 	++vmlinux_path__nr_entries;
1882 	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
1883 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1884 		goto out_fail;
1885 	++vmlinux_path__nr_entries;
1886 
1887 	/* only try kernel version if no symfs was given */
1888 	if (symbol_conf.symfs[0] != 0)
1889 		return 0;
1890 
1891 	if (env) {
1892 		kernel_version = env->os_release;
1893 	} else {
1894 		if (uname(&uts) < 0)
1895 			goto out_fail;
1896 
1897 		kernel_version = uts.release;
1898 	}
1899 
1900 	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version);
1901 	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1902 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1903 		goto out_fail;
1904 	++vmlinux_path__nr_entries;
1905 	snprintf(bf, sizeof(bf), "/usr/lib/debug/boot/vmlinux-%s",
1906 		 kernel_version);
1907 	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1908 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1909 		goto out_fail;
1910 	++vmlinux_path__nr_entries;
1911 	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version);
1912 	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1913 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1914 		goto out_fail;
1915 	++vmlinux_path__nr_entries;
1916 	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
1917 		 kernel_version);
1918 	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1919 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1920 		goto out_fail;
1921 	++vmlinux_path__nr_entries;
1922 
1923 	return 0;
1924 
1925 out_fail:
1926 	vmlinux_path__exit();
1927 	return -1;
1928 }
1929 
1930 int setup_list(struct strlist **list, const char *list_str,
1931 		      const char *list_name)
1932 {
1933 	if (list_str == NULL)
1934 		return 0;
1935 
1936 	*list = strlist__new(list_str, NULL);
1937 	if (!*list) {
1938 		pr_err("problems parsing %s list\n", list_name);
1939 		return -1;
1940 	}
1941 
1942 	symbol_conf.has_filter = true;
1943 	return 0;
1944 }
1945 
1946 int setup_intlist(struct intlist **list, const char *list_str,
1947 		  const char *list_name)
1948 {
1949 	if (list_str == NULL)
1950 		return 0;
1951 
1952 	*list = intlist__new(list_str);
1953 	if (!*list) {
1954 		pr_err("problems parsing %s list\n", list_name);
1955 		return -1;
1956 	}
1957 	return 0;
1958 }
1959 
1960 static bool symbol__read_kptr_restrict(void)
1961 {
1962 	bool value = false;
1963 
1964 	if (geteuid() != 0) {
1965 		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
1966 		if (fp != NULL) {
1967 			char line[8];
1968 
1969 			if (fgets(line, sizeof(line), fp) != NULL)
1970 				value = atoi(line) != 0;
1971 
1972 			fclose(fp);
1973 		}
1974 	}
1975 
1976 	return value;
1977 }
1978 
1979 int symbol__init(struct perf_env *env)
1980 {
1981 	const char *symfs;
1982 
1983 	if (symbol_conf.initialized)
1984 		return 0;
1985 
1986 	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
1987 
1988 	symbol__elf_init();
1989 
1990 	if (symbol_conf.sort_by_name)
1991 		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
1992 					  sizeof(struct symbol));
1993 
1994 	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
1995 		return -1;
1996 
1997 	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
1998 		pr_err("'.' is the only invalid --field-separator argument\n");
1999 		return -1;
2000 	}
2001 
2002 	if (setup_list(&symbol_conf.dso_list,
2003 		       symbol_conf.dso_list_str, "dso") < 0)
2004 		return -1;
2005 
2006 	if (setup_list(&symbol_conf.comm_list,
2007 		       symbol_conf.comm_list_str, "comm") < 0)
2008 		goto out_free_dso_list;
2009 
2010 	if (setup_intlist(&symbol_conf.pid_list,
2011 		       symbol_conf.pid_list_str, "pid") < 0)
2012 		goto out_free_comm_list;
2013 
2014 	if (setup_intlist(&symbol_conf.tid_list,
2015 		       symbol_conf.tid_list_str, "tid") < 0)
2016 		goto out_free_pid_list;
2017 
2018 	if (setup_list(&symbol_conf.sym_list,
2019 		       symbol_conf.sym_list_str, "symbol") < 0)
2020 		goto out_free_tid_list;
2021 
2022 	/*
2023 	 * A path to symbols of "/" is identical to ""
2024 	 * reset here for simplicity.
2025 	 */
2026 	symfs = realpath(symbol_conf.symfs, NULL);
2027 	if (symfs == NULL)
2028 		symfs = symbol_conf.symfs;
2029 	if (strcmp(symfs, "/") == 0)
2030 		symbol_conf.symfs = "";
2031 	if (symfs != symbol_conf.symfs)
2032 		free((void *)symfs);
2033 
2034 	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2035 
2036 	symbol_conf.initialized = true;
2037 	return 0;
2038 
2039 out_free_tid_list:
2040 	intlist__delete(symbol_conf.tid_list);
2041 out_free_pid_list:
2042 	intlist__delete(symbol_conf.pid_list);
2043 out_free_comm_list:
2044 	strlist__delete(symbol_conf.comm_list);
2045 out_free_dso_list:
2046 	strlist__delete(symbol_conf.dso_list);
2047 	return -1;
2048 }
2049 
2050 void symbol__exit(void)
2051 {
2052 	if (!symbol_conf.initialized)
2053 		return;
2054 	strlist__delete(symbol_conf.sym_list);
2055 	strlist__delete(symbol_conf.dso_list);
2056 	strlist__delete(symbol_conf.comm_list);
2057 	intlist__delete(symbol_conf.tid_list);
2058 	intlist__delete(symbol_conf.pid_list);
2059 	vmlinux_path__exit();
2060 	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2061 	symbol_conf.initialized = false;
2062 }
2063