1 #include <dirent.h>
2 #include <errno.h>
3 #include <stdlib.h>
4 #include <stdio.h>
5 #include <string.h>
6 #include <sys/types.h>
7 #include <sys/stat.h>
8 #include <sys/param.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 #include "build-id.h"
13 #include "util.h"
14 #include "debug.h"
15 #include "machine.h"
16 #include "symbol.h"
17 #include "strlist.h"
18 #include "header.h"
19
20 #include <elf.h>
21 #include <limits.h>
22 #include <symbol/kallsyms.h>
23 #include <sys/utsname.h>
24
25 static int dso__load_kernel_sym(struct dso *dso, struct map *map,
26 symbol_filter_t filter);
27 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
28 symbol_filter_t filter);
29 int vmlinux_path__nr_entries;
30 char **vmlinux_path;
31
32 struct symbol_conf symbol_conf = {
33 .use_modules = true,
34 .try_vmlinux_path = true,
35 .annotate_src = true,
36 .demangle = true,
37 .demangle_kernel = false,
38 .cumulate_callchain = true,
39 .show_hist_headers = true,
40 .symfs = "",
41 };
42
43 static enum dso_binary_type binary_type_symtab[] = {
44 DSO_BINARY_TYPE__KALLSYMS,
45 DSO_BINARY_TYPE__GUEST_KALLSYMS,
46 DSO_BINARY_TYPE__JAVA_JIT,
47 DSO_BINARY_TYPE__DEBUGLINK,
48 DSO_BINARY_TYPE__BUILD_ID_CACHE,
49 DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
50 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
51 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
52 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
53 DSO_BINARY_TYPE__GUEST_KMODULE,
54 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
55 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
56 DSO_BINARY_TYPE__NOT_FOUND,
57 };
58
59 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
60
61 bool symbol_type__is_a(char symbol_type, enum map_type map_type)
62 {
63 symbol_type = toupper(symbol_type);
64
65 switch (map_type) {
66 case MAP__FUNCTION:
67 return symbol_type == 'T' || symbol_type == 'W';
68 case MAP__VARIABLE:
69 return symbol_type == 'D';
70 default:
71 return false;
72 }
73 }
74
75 static int prefix_underscores_count(const char *str)
76 {
77 const char *tail = str;
78
79 while (*tail == '_')
80 tail++;
81
82 return tail - str;
83 }
84
85 #define SYMBOL_A 0
86 #define SYMBOL_B 1
87
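/*
 * Pick which of two symbols sharing the same start address to keep:
 * prefer non-zero length, then non-weak binding, then global binding,
 * then fewer leading underscores, then the longer name, and finally
 * avoid the "SyS"/"compat_SyS" syscall aliases.
 */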
88 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
89 {
90 s64 a;
91 s64 b;
92 size_t na, nb;
93
94 /* Prefer a symbol with non-zero length */
95 a = syma->end - syma->start;
96 b = symb->end - symb->start;
97 if ((b == 0) && (a > 0))
98 return SYMBOL_A;
99 else if ((a == 0) && (b > 0))
100 return SYMBOL_B;
101
102 /* Prefer a non-weak symbol over a weak one */
103 a = syma->binding == STB_WEAK;
104 b = symb->binding == STB_WEAK;
105 if (b && !a)
106 return SYMBOL_A;
107 if (a && !b)
108 return SYMBOL_B;
109
110 /* Prefer a global symbol over a non-global one */
111 a = syma->binding == STB_GLOBAL;
112 b = symb->binding == STB_GLOBAL;
113 if (a && !b)
114 return SYMBOL_A;
115 if (b && !a)
116 return SYMBOL_B;
117
118 /* Prefer a symbol with fewer leading underscores */
119 a = prefix_underscores_count(syma->name);
120 b = prefix_underscores_count(symb->name);
121 if (b > a)
122 return SYMBOL_A;
123 else if (a > b)
124 return SYMBOL_B;
125
126 /* Choose the symbol with the longest name */
127 na = strlen(syma->name);
128 nb = strlen(symb->name);
129 if (na > nb)
130 return SYMBOL_A;
131 else if (na < nb)
132 return SYMBOL_B;
133
134 /* Avoid "SyS" kernel syscall aliases */
135 if (na >= 3 && !strncmp(syma->name, "SyS", 3))
136 return SYMBOL_B;
137 if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
138 return SYMBOL_B;
139
140 return SYMBOL_A;
141 }
142
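/*
 * Walk the tree and, whenever two symbols share a start address, keep
 * only the one preferred by choose_best_symbol() and delete the other.
 */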
143 void symbols__fixup_duplicate(struct rb_root *symbols)
144 {
145 struct rb_node *nd;
146 struct symbol *curr, *next;
147
148 nd = rb_first(symbols);
149
150 while (nd) {
151 curr = rb_entry(nd, struct symbol, rb_node);
152 again:
153 nd = rb_next(&curr->rb_node);
154 next = rb_entry(nd, struct symbol, rb_node);
155
156 if (!nd)
157 break;
158
159 if (curr->start != next->start)
160 continue;
161
162 if (choose_best_symbol(curr, next) == SYMBOL_A) {
163 rb_erase(&next->rb_node, symbols);
164 symbol__delete(next);
165 goto again;
166 } else {
167 nd = rb_next(&curr->rb_node);
168 rb_erase(&curr->rb_node, symbols);
169 symbol__delete(curr);
170 }
171 }
172 }
173
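/*
 * kallsyms only gives us start addresses, so zero-length symbols get
 * their end set to the start of the next symbol; the last symbol gets a
 * page-rounded guess for its end.
 */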
174 void symbols__fixup_end(struct rb_root *symbols)
175 {
176 struct rb_node *nd, *prevnd = rb_first(symbols);
177 struct symbol *curr, *prev;
178
179 if (prevnd == NULL)
180 return;
181
182 curr = rb_entry(prevnd, struct symbol, rb_node);
183
184 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
185 prev = curr;
186 curr = rb_entry(nd, struct symbol, rb_node);
187
188 if (prev->end == prev->start && prev->end != curr->start)
189 prev->end = curr->start;
190 }
191
192 /* Last entry */
193 if (curr->end == curr->start)
194 curr->end = roundup(curr->start, 4096) + 4096;
195 }
196
197 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
198 {
199 struct map *prev, *curr;
200 struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);
201
202 if (prevnd == NULL)
203 return;
204
205 curr = rb_entry(prevnd, struct map, rb_node);
206
207 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
208 prev = curr;
209 curr = rb_entry(nd, struct map, rb_node);
210 prev->end = curr->start;
211 }
212
213 /*
214 * We still don't have the actual symbols, so guess the
215 * last map's final address.
216 */
217 curr->end = ~0ULL;
218 }
219
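/*
 * Allocate a symbol with symbol_conf.priv_size bytes of private data in
 * front of it and return a pointer just past that private area, with
 * the name copied inline after the struct.
 */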
220 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
221 {
222 size_t namelen = strlen(name) + 1;
223 struct symbol *sym = calloc(1, (symbol_conf.priv_size +
224 sizeof(*sym) + namelen));
225 if (sym == NULL)
226 return NULL;
227
228 if (symbol_conf.priv_size)
229 sym = ((void *)sym) + symbol_conf.priv_size;
230
231 sym->start = start;
232 sym->end = len ? start + len : start;
233 sym->binding = binding;
234 sym->namelen = namelen - 1;
235
236 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
237 __func__, name, start, sym->end);
238 memcpy(sym->name, name, namelen);
239
240 return sym;
241 }
242
243 void symbol__delete(struct symbol *sym)
244 {
245 free(((void *)sym) - symbol_conf.priv_size);
246 }
247
248 size_t symbol__fprintf(struct symbol *sym, FILE *fp)
249 {
250 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
251 sym->start, sym->end,
252 sym->binding == STB_GLOBAL ? 'g' :
253 sym->binding == STB_LOCAL ? 'l' : 'w',
254 sym->name);
255 }
256
257 size_t symbol__fprintf_symname_offs(const struct symbol *sym,
258 const struct addr_location *al, FILE *fp)
259 {
260 unsigned long offset;
261 size_t length;
262
263 if (sym && sym->name) {
264 length = fprintf(fp, "%s", sym->name);
265 if (al) {
266 if (al->addr < sym->end)
267 offset = al->addr - sym->start;
268 else
269 offset = al->addr - al->map->start - sym->start;
270 length += fprintf(fp, "+0x%lx", offset);
271 }
272 return length;
273 } else
274 return fprintf(fp, "[unknown]");
275 }
276
277 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
278 {
279 return symbol__fprintf_symname_offs(sym, NULL, fp);
280 }
281
282 void symbols__delete(struct rb_root *symbols)
283 {
284 struct symbol *pos;
285 struct rb_node *next = rb_first(symbols);
286
287 while (next) {
288 pos = rb_entry(next, struct symbol, rb_node);
289 next = rb_next(&pos->rb_node);
290 rb_erase(&pos->rb_node, symbols);
291 symbol__delete(pos);
292 }
293 }
294
295 void symbols__insert(struct rb_root *symbols, struct symbol *sym)
296 {
297 struct rb_node **p = &symbols->rb_node;
298 struct rb_node *parent = NULL;
299 const u64 ip = sym->start;
300 struct symbol *s;
301
302 while (*p != NULL) {
303 parent = *p;
304 s = rb_entry(parent, struct symbol, rb_node);
305 if (ip < s->start)
306 p = &(*p)->rb_left;
307 else
308 p = &(*p)->rb_right;
309 }
310 rb_link_node(&sym->rb_node, parent, p);
311 rb_insert_color(&sym->rb_node, symbols);
312 }
313
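/* Return the symbol whose half-open range [start, end) contains ip. */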
314 static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
315 {
316 struct rb_node *n;
317
318 if (symbols == NULL)
319 return NULL;
320
321 n = symbols->rb_node;
322
323 while (n) {
324 struct symbol *s = rb_entry(n, struct symbol, rb_node);
325
326 if (ip < s->start)
327 n = n->rb_left;
328 else if (ip >= s->end)
329 n = n->rb_right;
330 else
331 return s;
332 }
333
334 return NULL;
335 }
336
337 static struct symbol *symbols__first(struct rb_root *symbols)
338 {
339 struct rb_node *n = rb_first(symbols);
340
341 if (n)
342 return rb_entry(n, struct symbol, rb_node);
343
344 return NULL;
345 }
346
347 static struct symbol *symbols__next(struct symbol *sym)
348 {
349 struct rb_node *n = rb_next(&sym->rb_node);
350
351 if (n)
352 return rb_entry(n, struct symbol, rb_node);
353
354 return NULL;
355 }
356
357 struct symbol_name_rb_node {
358 struct rb_node rb_node;
359 struct symbol sym;
360 };
361
362 static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
363 {
364 struct rb_node **p = &symbols->rb_node;
365 struct rb_node *parent = NULL;
366 struct symbol_name_rb_node *symn, *s;
367
368 symn = container_of(sym, struct symbol_name_rb_node, sym);
369
370 while (*p != NULL) {
371 parent = *p;
372 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
373 if (strcmp(sym->name, s->sym.name) < 0)
374 p = &(*p)->rb_left;
375 else
376 p = &(*p)->rb_right;
377 }
378 rb_link_node(&symn->rb_node, parent, p);
379 rb_insert_color(&symn->rb_node, symbols);
380 }
381
382 static void symbols__sort_by_name(struct rb_root *symbols,
383 struct rb_root *source)
384 {
385 struct rb_node *nd;
386
387 for (nd = rb_first(source); nd; nd = rb_next(nd)) {
388 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
389 symbols__insert_by_name(symbols, pos);
390 }
391 }
392
393 static struct symbol *symbols__find_by_name(struct rb_root *symbols,
394 const char *name)
395 {
396 struct rb_node *n;
397
398 if (symbols == NULL)
399 return NULL;
400
401 n = symbols->rb_node;
402
403 while (n) {
404 struct symbol_name_rb_node *s;
405 int cmp;
406
407 s = rb_entry(n, struct symbol_name_rb_node, rb_node);
408 cmp = strcmp(name, s->sym.name);
409
410 if (cmp < 0)
411 n = n->rb_left;
412 else if (cmp > 0)
413 n = n->rb_right;
414 else
415 return &s->sym;
416 }
417
418 return NULL;
419 }
420
421 struct symbol *dso__find_symbol(struct dso *dso,
422 enum map_type type, u64 addr)
423 {
424 return symbols__find(&dso->symbols[type], addr);
425 }
426
427 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
428 {
429 return symbols__first(&dso->symbols[type]);
430 }
431
432 struct symbol *dso__next_symbol(struct symbol *sym)
433 {
434 return symbols__next(sym);
435 }
436
437 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
438 const char *name)
439 {
440 return symbols__find_by_name(&dso->symbol_names[type], name);
441 }
442
443 void dso__sort_by_name(struct dso *dso, enum map_type type)
444 {
445 dso__set_sorted_by_name(dso, type);
446 return symbols__sort_by_name(&dso->symbol_names[type],
447 &dso->symbols[type]);
448 }
449
450 size_t dso__fprintf_symbols_by_name(struct dso *dso,
451 enum map_type type, FILE *fp)
452 {
453 size_t ret = 0;
454 struct rb_node *nd;
455 struct symbol_name_rb_node *pos;
456
457 for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
458 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
459 ret += fprintf(fp, "%s\n", pos->sym.name);
460 }
461
462 return ret;
463 }
464
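/*
 * Parse a /proc/modules style file and call process_module() with each
 * module's bracketed name (e.g. "[ext4]") and load address.
 */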
465 int modules__parse(const char *filename, void *arg,
466 int (*process_module)(void *arg, const char *name,
467 u64 start))
468 {
469 char *line = NULL;
470 size_t n;
471 FILE *file;
472 int err = 0;
473
474 file = fopen(filename, "r");
475 if (file == NULL)
476 return -1;
477
478 while (1) {
479 char name[PATH_MAX];
480 u64 start;
481 char *sep;
482 ssize_t line_len;
483
484 line_len = getline(&line, &n, file);
485 if (line_len < 0) {
486 if (feof(file))
487 break;
488 err = -1;
489 goto out;
490 }
491
492 if (!line) {
493 err = -1;
494 goto out;
495 }
496
497 line[--line_len] = '\0'; /* \n */
498
499 sep = strrchr(line, 'x');
500 if (sep == NULL)
501 continue;
502
503 hex2u64(sep + 1, &start);
504
505 sep = strchr(line, ' ');
506 if (sep == NULL)
507 continue;
508
509 *sep = '\0';
510
511 scnprintf(name, sizeof(name), "[%s]", line);
512
513 err = process_module(arg, name, start);
514 if (err)
515 break;
516 }
517 out:
518 free(line);
519 fclose(file);
520 return err;
521 }
522
523 struct process_kallsyms_args {
524 struct map *map;
525 struct dso *dso;
526 };
527
528 /*
529 * These are symbols in the kernel image, so make sure that
530 * sym is from a kernel DSO.
531 */
532 bool symbol__is_idle(struct symbol *sym)
533 {
534 const char * const idle_symbols[] = {
535 "cpu_idle",
536 "cpu_startup_entry",
537 "intel_idle",
538 "default_idle",
539 "native_safe_halt",
540 "enter_idle",
541 "exit_idle",
542 "mwait_idle",
543 "mwait_idle_with_hints",
544 "poll_idle",
545 "ppc64_runlatch_off",
546 "pseries_dedicated_idle_sleep",
547 NULL
548 };
549
550 int i;
551
552 if (!sym)
553 return false;
554
555 for (i = 0; idle_symbols[i]; i++) {
556 if (!strcmp(idle_symbols[i], sym->name))
557 return true;
558 }
559
560 return false;
561 }
562
563 static int map__process_kallsym_symbol(void *arg, const char *name,
564 char type, u64 start)
565 {
566 struct symbol *sym;
567 struct process_kallsyms_args *a = arg;
568 struct rb_root *root = &a->dso->symbols[a->map->type];
569
570 if (!symbol_type__is_a(type, a->map->type))
571 return 0;
572
573 /*
574 * module symbols are not sorted so we add all
575 * symbols, setting length to 0, and rely on
576 * symbols__fixup_end() to fix it up.
577 */
578 sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
579 if (sym == NULL)
580 return -ENOMEM;
581 /*
582 * We will pass the symbols to the filter later, in
583 * map__split_kallsyms, when we have split the maps per module
584 */
585 symbols__insert(root, sym);
586
587 return 0;
588 }
589
590 /*
591 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
592 * so that we can in the next step set the symbol ->end address and then
593 * call kernel_maps__split_kallsyms.
594 */
595 static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
596 struct map *map)
597 {
598 struct process_kallsyms_args args = { .map = map, .dso = dso, };
599 return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
600 }
601
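/*
 * kcore already provides one map per module, so rebase each kallsyms
 * symbol onto the map containing it, dropping symbols that fall outside
 * every map or that the filter rejects.
 */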
602 static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
603 symbol_filter_t filter)
604 {
605 struct map_groups *kmaps = map__kmap(map)->kmaps;
606 struct map *curr_map;
607 struct symbol *pos;
608 int count = 0, moved = 0;
609 struct rb_root *root = &dso->symbols[map->type];
610 struct rb_node *next = rb_first(root);
611
612 while (next) {
613 char *module;
614
615 pos = rb_entry(next, struct symbol, rb_node);
616 next = rb_next(&pos->rb_node);
617
618 module = strchr(pos->name, '\t');
619 if (module)
620 *module = '\0';
621
622 curr_map = map_groups__find(kmaps, map->type, pos->start);
623
624 if (!curr_map || (filter && filter(curr_map, pos))) {
625 rb_erase(&pos->rb_node, root);
626 symbol__delete(pos);
627 } else {
628 pos->start -= curr_map->start - curr_map->pgoff;
629 if (pos->end)
630 pos->end -= curr_map->start - curr_map->pgoff;
631 if (curr_map != map) {
632 rb_erase(&pos->rb_node, root);
633 symbols__insert(
634 &curr_map->dso->symbols[curr_map->type],
635 pos);
636 ++moved;
637 } else {
638 ++count;
639 }
640 }
641 }
642
643 /* Symbols have been adjusted */
644 dso->adjust_symbols = 1;
645
646 return count + moved;
647 }
648
649 /*
650 * Split the symbols into maps, making sure there are no overlaps, i.e. the
651 * kernel range is broken into several maps, named [kernel].N, as we don't have
652 * the original ELF section names that vmlinux has.
653 */
654 static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
655 symbol_filter_t filter)
656 {
657 struct map_groups *kmaps = map__kmap(map)->kmaps;
658 struct machine *machine = kmaps->machine;
659 struct map *curr_map = map;
660 struct symbol *pos;
661 int count = 0, moved = 0;
662 struct rb_root *root = &dso->symbols[map->type];
663 struct rb_node *next = rb_first(root);
664 int kernel_range = 0;
665
666 while (next) {
667 char *module;
668
669 pos = rb_entry(next, struct symbol, rb_node);
670 next = rb_next(&pos->rb_node);
671
672 module = strchr(pos->name, '\t');
673 if (module) {
674 if (!symbol_conf.use_modules)
675 goto discard_symbol;
676
677 *module++ = '\0';
678
679 if (strcmp(curr_map->dso->short_name, module)) {
680 if (curr_map != map &&
681 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
682 machine__is_default_guest(machine)) {
683 /*
684 * We assume all symbols of a module are
685 * contiguous in kallsyms, so curr_map
686 * points to a module and all its
687 * symbols are in its kmap. Mark it as
688 * loaded.
689 */
690 dso__set_loaded(curr_map->dso,
691 curr_map->type);
692 }
693
694 curr_map = map_groups__find_by_name(kmaps,
695 map->type, module);
696 if (curr_map == NULL) {
697 pr_debug("%s/proc/{kallsyms,modules} "
698 "inconsistency while looking "
699 "for \"%s\" module!\n",
700 machine->root_dir, module);
701 curr_map = map;
702 goto discard_symbol;
703 }
704
705 if (curr_map->dso->loaded &&
706 !machine__is_default_guest(machine))
707 goto discard_symbol;
708 }
709 /*
710 * So that we look just like we get from .ko files,
711 * i.e. not prelinked, relative to map->start.
712 */
713 pos->start = curr_map->map_ip(curr_map, pos->start);
714 pos->end = curr_map->map_ip(curr_map, pos->end);
715 } else if (curr_map != map) {
716 char dso_name[PATH_MAX];
717 struct dso *ndso;
718
719 if (delta) {
720 /* Kernel was relocated at boot time */
721 pos->start -= delta;
722 pos->end -= delta;
723 }
724
725 if (count == 0) {
726 curr_map = map;
727 goto filter_symbol;
728 }
729
730 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
731 snprintf(dso_name, sizeof(dso_name),
732 "[guest.kernel].%d",
733 kernel_range++);
734 else
735 snprintf(dso_name, sizeof(dso_name),
736 "[kernel].%d",
737 kernel_range++);
738
739 ndso = dso__new(dso_name);
740 if (ndso == NULL)
741 return -1;
742
743 ndso->kernel = dso->kernel;
744
745 curr_map = map__new2(pos->start, ndso, map->type);
746 if (curr_map == NULL) {
747 dso__delete(ndso);
748 return -1;
749 }
750
751 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
752 map_groups__insert(kmaps, curr_map);
753 ++kernel_range;
754 } else if (delta) {
755 /* Kernel was relocated at boot time */
756 pos->start -= delta;
757 pos->end -= delta;
758 }
759 filter_symbol:
760 if (filter && filter(curr_map, pos)) {
761 discard_symbol: rb_erase(&pos->rb_node, root);
762 symbol__delete(pos);
763 } else {
764 if (curr_map != map) {
765 rb_erase(&pos->rb_node, root);
766 symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
767 ++moved;
768 } else
769 ++count;
770 }
771 }
772
773 if (curr_map != map &&
774 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
775 machine__is_default_guest(kmaps->machine)) {
776 dso__set_loaded(curr_map->dso, curr_map->type);
777 }
778
779 return count + moved;
780 }
781
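/*
 * When kptr_restrict is in effect, flag restricted files such as
 * /proc/kallsyms so that their (zeroed) addresses are not used.
 */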
782 bool symbol__restricted_filename(const char *filename,
783 const char *restricted_filename)
784 {
785 bool restricted = false;
786
787 if (symbol_conf.kptr_restrict) {
788 char *r = realpath(filename, NULL);
789
790 if (r != NULL) {
791 restricted = strcmp(r, restricted_filename) == 0;
792 free(r);
793 return restricted;
794 }
795 }
796
797 return restricted;
798 }
799
800 struct module_info {
801 struct rb_node rb_node;
802 char *name;
803 u64 start;
804 };
805
806 static void add_module(struct module_info *mi, struct rb_root *modules)
807 {
808 struct rb_node **p = &modules->rb_node;
809 struct rb_node *parent = NULL;
810 struct module_info *m;
811
812 while (*p != NULL) {
813 parent = *p;
814 m = rb_entry(parent, struct module_info, rb_node);
815 if (strcmp(mi->name, m->name) < 0)
816 p = &(*p)->rb_left;
817 else
818 p = &(*p)->rb_right;
819 }
820 rb_link_node(&mi->rb_node, parent, p);
821 rb_insert_color(&mi->rb_node, modules);
822 }
823
824 static void delete_modules(struct rb_root *modules)
825 {
826 struct module_info *mi;
827 struct rb_node *next = rb_first(modules);
828
829 while (next) {
830 mi = rb_entry(next, struct module_info, rb_node);
831 next = rb_next(&mi->rb_node);
832 rb_erase(&mi->rb_node, modules);
833 zfree(&mi->name);
834 free(mi);
835 }
836 }
837
838 static struct module_info *find_module(const char *name,
839 struct rb_root *modules)
840 {
841 struct rb_node *n = modules->rb_node;
842
843 while (n) {
844 struct module_info *m;
845 int cmp;
846
847 m = rb_entry(n, struct module_info, rb_node);
848 cmp = strcmp(name, m->name);
849 if (cmp < 0)
850 n = n->rb_left;
851 else if (cmp > 0)
852 n = n->rb_right;
853 else
854 return m;
855 }
856
857 return NULL;
858 }
859
860 static int __read_proc_modules(void *arg, const char *name, u64 start)
861 {
862 struct rb_root *modules = arg;
863 struct module_info *mi;
864
865 mi = zalloc(sizeof(struct module_info));
866 if (!mi)
867 return -ENOMEM;
868
869 mi->name = strdup(name);
870 mi->start = start;
871
872 if (!mi->name) {
873 free(mi);
874 return -ENOMEM;
875 }
876
877 add_module(mi, modules);
878
879 return 0;
880 }
881
882 static int read_proc_modules(const char *filename, struct rb_root *modules)
883 {
884 if (symbol__restricted_filename(filename, "/proc/modules"))
885 return -1;
886
887 if (modules__parse(filename, modules, __read_proc_modules)) {
888 delete_modules(modules);
889 return -1;
890 }
891
892 return 0;
893 }
894
895 int compare_proc_modules(const char *from, const char *to)
896 {
897 struct rb_root from_modules = RB_ROOT;
898 struct rb_root to_modules = RB_ROOT;
899 struct rb_node *from_node, *to_node;
900 struct module_info *from_m, *to_m;
901 int ret = -1;
902
903 if (read_proc_modules(from, &from_modules))
904 return -1;
905
906 if (read_proc_modules(to, &to_modules))
907 goto out_delete_from;
908
909 from_node = rb_first(&from_modules);
910 to_node = rb_first(&to_modules);
911 while (from_node) {
912 if (!to_node)
913 break;
914
915 from_m = rb_entry(from_node, struct module_info, rb_node);
916 to_m = rb_entry(to_node, struct module_info, rb_node);
917
918 if (from_m->start != to_m->start ||
919 strcmp(from_m->name, to_m->name))
920 break;
921
922 from_node = rb_next(from_node);
923 to_node = rb_next(to_node);
924 }
925
926 if (!from_node && !to_node)
927 ret = 0;
928
929 delete_modules(&to_modules);
930 out_delete_from:
931 delete_modules(&from_modules);
932
933 return ret;
934 }
935
936 static int do_validate_kcore_modules(const char *filename, struct map *map,
937 struct map_groups *kmaps)
938 {
939 struct rb_root modules = RB_ROOT;
940 struct map *old_map;
941 int err;
942
943 err = read_proc_modules(filename, &modules);
944 if (err)
945 return err;
946
947 old_map = map_groups__first(kmaps, map->type);
948 while (old_map) {
949 struct map *next = map_groups__next(old_map);
950 struct module_info *mi;
951
952 if (old_map == map || old_map->start == map->start) {
953 /* The kernel map */
954 old_map = next;
955 continue;
956 }
957
958 /* Module must be in memory at the same address */
959 mi = find_module(old_map->dso->short_name, &modules);
960 if (!mi || mi->start != old_map->start) {
961 err = -EINVAL;
962 goto out;
963 }
964
965 old_map = next;
966 }
967 out:
968 delete_modules(&modules);
969 return err;
970 }
971
972 /*
973 * If kallsyms is referenced by name then we look for filename in the same
974 * directory.
975 */
976 static bool filename_from_kallsyms_filename(char *filename,
977 const char *base_name,
978 const char *kallsyms_filename)
979 {
980 char *name;
981
982 strcpy(filename, kallsyms_filename);
983 name = strrchr(filename, '/');
984 if (!name)
985 return false;
986
987 name += 1;
988
989 if (!strcmp(name, "kallsyms")) {
990 strcpy(name, base_name);
991 return true;
992 }
993
994 return false;
995 }
996
997 static int validate_kcore_modules(const char *kallsyms_filename,
998 struct map *map)
999 {
1000 struct map_groups *kmaps = map__kmap(map)->kmaps;
1001 char modules_filename[PATH_MAX];
1002
1003 if (!filename_from_kallsyms_filename(modules_filename, "modules",
1004 kallsyms_filename))
1005 return -EINVAL;
1006
1007 if (do_validate_kcore_modules(modules_filename, map, kmaps))
1008 return -EINVAL;
1009
1010 return 0;
1011 }
1012
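/*
 * Check that the kernel and modules described by this kallsyms file (and
 * its adjacent modules file) are at the same addresses as the maps we
 * already have.
 */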
1013 static int validate_kcore_addresses(const char *kallsyms_filename,
1014 struct map *map)
1015 {
1016 struct kmap *kmap = map__kmap(map);
1017
1018 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1019 u64 start;
1020
1021 start = kallsyms__get_function_start(kallsyms_filename,
1022 kmap->ref_reloc_sym->name);
1023 if (start != kmap->ref_reloc_sym->addr)
1024 return -EINVAL;
1025 }
1026
1027 return validate_kcore_modules(kallsyms_filename, map);
1028 }
1029
1030 struct kcore_mapfn_data {
1031 struct dso *dso;
1032 enum map_type type;
1033 struct list_head maps;
1034 };
1035
1036 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1037 {
1038 struct kcore_mapfn_data *md = data;
1039 struct map *map;
1040
1041 map = map__new2(start, md->dso, md->type);
1042 if (map == NULL)
1043 return -ENOMEM;
1044
1045 map->end = map->start + len;
1046 map->pgoff = pgoff;
1047
1048 list_add(&map->node, &md->maps);
1049
1050 return 0;
1051 }
1052
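/*
 * Replace the single, guessed kernel map with the real segments read
 * from the kcore file next to this kallsyms, so that object code can
 * later be read at the correct offsets.
 */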
1053 static int dso__load_kcore(struct dso *dso, struct map *map,
1054 const char *kallsyms_filename)
1055 {
1056 struct map_groups *kmaps = map__kmap(map)->kmaps;
1057 struct machine *machine = kmaps->machine;
1058 struct kcore_mapfn_data md;
1059 struct map *old_map, *new_map, *replacement_map = NULL;
1060 bool is_64_bit;
1061 int err, fd;
1062 char kcore_filename[PATH_MAX];
1063 struct symbol *sym;
1064
1065 /* This function requires that the map is the kernel map */
1066 if (map != machine->vmlinux_maps[map->type])
1067 return -EINVAL;
1068
1069 if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1070 kallsyms_filename))
1071 return -EINVAL;
1072
1073 /* Modules and kernel must be present at their original addresses */
1074 if (validate_kcore_addresses(kallsyms_filename, map))
1075 return -EINVAL;
1076
1077 md.dso = dso;
1078 md.type = map->type;
1079 INIT_LIST_HEAD(&md.maps);
1080
1081 fd = open(kcore_filename, O_RDONLY);
1082 if (fd < 0)
1083 return -EINVAL;
1084
1085 /* Read new maps into temporary lists */
1086 err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
1087 &is_64_bit);
1088 if (err)
1089 goto out_err;
1090 dso->is_64_bit = is_64_bit;
1091
1092 if (list_empty(&md.maps)) {
1093 err = -EINVAL;
1094 goto out_err;
1095 }
1096
1097 /* Remove old maps */
1098 old_map = map_groups__first(kmaps, map->type);
1099 while (old_map) {
1100 struct map *next = map_groups__next(old_map);
1101
1102 if (old_map != map)
1103 map_groups__remove(kmaps, old_map);
1104 old_map = next;
1105 }
1106
1107 /* Find the kernel map using the first symbol */
1108 sym = dso__first_symbol(dso, map->type);
1109 list_for_each_entry(new_map, &md.maps, node) {
1110 if (sym && sym->start >= new_map->start &&
1111 sym->start < new_map->end) {
1112 replacement_map = new_map;
1113 break;
1114 }
1115 }
1116
1117 if (!replacement_map)
1118 replacement_map = list_entry(md.maps.next, struct map, node);
1119
1120 /* Add new maps */
1121 while (!list_empty(&md.maps)) {
1122 new_map = list_entry(md.maps.next, struct map, node);
1123 list_del(&new_map->node);
1124 if (new_map == replacement_map) {
1125 map->start = new_map->start;
1126 map->end = new_map->end;
1127 map->pgoff = new_map->pgoff;
1128 map->map_ip = new_map->map_ip;
1129 map->unmap_ip = new_map->unmap_ip;
1130 map__delete(new_map);
1131 /* Ensure maps are correctly ordered */
1132 map_groups__remove(kmaps, map);
1133 map_groups__insert(kmaps, map);
1134 } else {
1135 map_groups__insert(kmaps, new_map);
1136 }
1137 }
1138
1139 /*
1140 * Set the data type and long name so that kcore can be read via
1141 * dso__data_read_addr().
1142 */
1143 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1144 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1145 else
1146 dso->binary_type = DSO_BINARY_TYPE__KCORE;
1147 dso__set_long_name(dso, strdup(kcore_filename), true);
1148
1149 close(fd);
1150
1151 if (map->type == MAP__FUNCTION)
1152 pr_debug("Using %s for kernel object code\n", kcore_filename);
1153 else
1154 pr_debug("Using %s for kernel data\n", kcore_filename);
1155
1156 return 0;
1157
1158 out_err:
1159 while (!list_empty(&md.maps)) {
1160 map = list_entry(md.maps.next, struct map, node);
1161 list_del(&map->node);
1162 map__delete(map);
1163 }
1164 close(fd);
1165 return -EINVAL;
1166 }
1167
1168 /*
1169 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
1170 * delta based on the relocation reference symbol.
1171 */
1172 static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
1173 {
1174 struct kmap *kmap = map__kmap(map);
1175 u64 addr;
1176
1177 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1178 return 0;
1179
1180 addr = kallsyms__get_function_start(filename,
1181 kmap->ref_reloc_sym->name);
1182 if (!addr)
1183 return -1;
1184
1185 *delta = addr - kmap->ref_reloc_sym->addr;
1186 return 0;
1187 }
1188
1189 int dso__load_kallsyms(struct dso *dso, const char *filename,
1190 struct map *map, symbol_filter_t filter)
1191 {
1192 u64 delta = 0;
1193
1194 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1195 return -1;
1196
1197 if (dso__load_all_kallsyms(dso, filename, map) < 0)
1198 return -1;
1199
1200 if (kallsyms__delta(map, filename, &delta))
1201 return -1;
1202
1203 symbols__fixup_duplicate(&dso->symbols[map->type]);
1204 symbols__fixup_end(&dso->symbols[map->type]);
1205
1206 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1207 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1208 else
1209 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1210
1211 if (!dso__load_kcore(dso, map, filename))
1212 return dso__split_kallsyms_for_kcore(dso, map, filter);
1213 else
1214 return dso__split_kallsyms(dso, map, delta, filter);
1215 }
1216
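/*
 * Load a /tmp/perf-<pid>.map style file: one line per JIT'd symbol in
 * the form "<start> <size> <name>", with start and size in hex.
 */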
1217 static int dso__load_perf_map(struct dso *dso, struct map *map,
1218 symbol_filter_t filter)
1219 {
1220 char *line = NULL;
1221 size_t n;
1222 FILE *file;
1223 int nr_syms = 0;
1224
1225 file = fopen(dso->long_name, "r");
1226 if (file == NULL)
1227 goto out_failure;
1228
1229 while (!feof(file)) {
1230 u64 start, size;
1231 struct symbol *sym;
1232 int line_len, len;
1233
1234 line_len = getline(&line, &n, file);
1235 if (line_len < 0)
1236 break;
1237
1238 if (!line)
1239 goto out_failure;
1240
1241 line[--line_len] = '\0'; /* \n */
1242
1243 len = hex2u64(line, &start);
1244
1245 len++;
1246 if (len + 2 >= line_len)
1247 continue;
1248
1249 len += hex2u64(line + len, &size);
1250
1251 len++;
1252 if (len + 2 >= line_len)
1253 continue;
1254
1255 sym = symbol__new(start, size, STB_GLOBAL, line + len);
1256
1257 if (sym == NULL)
1258 goto out_delete_line;
1259
1260 if (filter && filter(map, sym))
1261 symbol__delete(sym);
1262 else {
1263 symbols__insert(&dso->symbols[map->type], sym);
1264 nr_syms++;
1265 }
1266 }
1267
1268 free(line);
1269 fclose(file);
1270
1271 return nr_syms;
1272
1273 out_delete_line:
1274 free(line);
1275 out_failure:
1276 return -1;
1277 }
1278
1279 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1280 enum dso_binary_type type)
1281 {
1282 switch (type) {
1283 case DSO_BINARY_TYPE__JAVA_JIT:
1284 case DSO_BINARY_TYPE__DEBUGLINK:
1285 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1286 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1287 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1288 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1289 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1290 return !kmod && dso->kernel == DSO_TYPE_USER;
1291
1292 case DSO_BINARY_TYPE__KALLSYMS:
1293 case DSO_BINARY_TYPE__VMLINUX:
1294 case DSO_BINARY_TYPE__KCORE:
1295 return dso->kernel == DSO_TYPE_KERNEL;
1296
1297 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1298 case DSO_BINARY_TYPE__GUEST_VMLINUX:
1299 case DSO_BINARY_TYPE__GUEST_KCORE:
1300 return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1301
1302 case DSO_BINARY_TYPE__GUEST_KMODULE:
1303 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1304 /*
1305 * kernel modules know their symtab type - it's set when
1306 * creating a module dso in machine__new_module().
1307 */
1308 return kmod && dso->symtab_type == type;
1309
1310 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1311 return true;
1312
1313 case DSO_BINARY_TYPE__NOT_FOUND:
1314 default:
1315 return false;
1316 }
1317 }
1318
1319 int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1320 {
1321 char *name;
1322 int ret = -1;
1323 u_int i;
1324 struct machine *machine;
1325 char *root_dir = (char *) "";
1326 int ss_pos = 0;
1327 struct symsrc ss_[2];
1328 struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1329 bool kmod;
1330
1331 dso__set_loaded(dso, map->type);
1332
1333 if (dso->kernel == DSO_TYPE_KERNEL)
1334 return dso__load_kernel_sym(dso, map, filter);
1335 else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1336 return dso__load_guest_kernel_sym(dso, map, filter);
1337
1338 if (map->groups && map->groups->machine)
1339 machine = map->groups->machine;
1340 else
1341 machine = NULL;
1342
1343 dso->adjust_symbols = 0;
1344
1345 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
1346 struct stat st;
1347
1348 if (lstat(dso->name, &st) < 0)
1349 return -1;
1350
1351 if (st.st_uid && (st.st_uid != geteuid())) {
1352 pr_warning("File %s not owned by current user or root, "
1353 "ignoring it.\n", dso->name);
1354 return -1;
1355 }
1356
1357 ret = dso__load_perf_map(dso, map, filter);
1358 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1359 DSO_BINARY_TYPE__NOT_FOUND;
1360 return ret;
1361 }
1362
1363 if (machine)
1364 root_dir = machine->root_dir;
1365
1366 name = malloc(PATH_MAX);
1367 if (!name)
1368 return -1;
1369
1370 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1371 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1372
1373 /*
1374 * Iterate over candidate debug images.
1375 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1376 * and/or opd section) for processing.
1377 */
1378 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1379 struct symsrc *ss = &ss_[ss_pos];
1380 bool next_slot = false;
1381
1382 enum dso_binary_type symtab_type = binary_type_symtab[i];
1383
1384 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1385 continue;
1386
1387 if (dso__read_binary_type_filename(dso, symtab_type,
1388 root_dir, name, PATH_MAX))
1389 continue;
1390
1391 /* Name is now the name of the next image to try */
1392 if (symsrc__init(ss, dso, name, symtab_type) < 0)
1393 continue;
1394
1395 if (!syms_ss && symsrc__has_symtab(ss)) {
1396 syms_ss = ss;
1397 next_slot = true;
1398 if (!dso->symsrc_filename)
1399 dso->symsrc_filename = strdup(name);
1400 }
1401
1402 if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1403 runtime_ss = ss;
1404 next_slot = true;
1405 }
1406
1407 if (next_slot) {
1408 ss_pos++;
1409
1410 if (syms_ss && runtime_ss)
1411 break;
1412 } else {
1413 symsrc__destroy(ss);
1414 }
1415
1416 }
1417
1418 if (!runtime_ss && !syms_ss)
1419 goto out_free;
1420
1421 if (runtime_ss && !syms_ss) {
1422 syms_ss = runtime_ss;
1423 }
1424
1425 /* We'll have to hope for the best */
1426 if (!runtime_ss && syms_ss)
1427 runtime_ss = syms_ss;
1428
1429 if (syms_ss)
1430 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
1431 else
1432 ret = -1;
1433
1434 if (ret > 0) {
1435 int nr_plt;
1436
1437 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
1438 if (nr_plt > 0)
1439 ret += nr_plt;
1440 }
1441
1442 for (; ss_pos > 0; ss_pos--)
1443 symsrc__destroy(&ss_[ss_pos - 1]);
1444 out_free:
1445 free(name);
1446 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1447 return 0;
1448 return ret;
1449 }
1450
1451 struct map *map_groups__find_by_name(struct map_groups *mg,
1452 enum map_type type, const char *name)
1453 {
1454 struct rb_node *nd;
1455
1456 for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
1457 struct map *map = rb_entry(nd, struct map, rb_node);
1458
1459 if (map->dso && strcmp(map->dso->short_name, name) == 0)
1460 return map;
1461 }
1462
1463 return NULL;
1464 }
1465
1466 int dso__load_vmlinux(struct dso *dso, struct map *map,
1467 const char *vmlinux, bool vmlinux_allocated,
1468 symbol_filter_t filter)
1469 {
1470 int err = -1;
1471 struct symsrc ss;
1472 char symfs_vmlinux[PATH_MAX];
1473 enum dso_binary_type symtab_type;
1474
1475 if (vmlinux[0] == '/')
1476 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1477 else
1478 symbol__join_symfs(symfs_vmlinux, vmlinux);
1479
1480 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1481 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1482 else
1483 symtab_type = DSO_BINARY_TYPE__VMLINUX;
1484
1485 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1486 return -1;
1487
1488 err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
1489 symsrc__destroy(&ss);
1490
1491 if (err > 0) {
1492 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1493 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1494 else
1495 dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1496 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1497 dso__set_loaded(dso, map->type);
1498 pr_debug("Using %s for symbols\n", symfs_vmlinux);
1499 }
1500
1501 return err;
1502 }
1503
1504 int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1505 symbol_filter_t filter)
1506 {
1507 int i, err = 0;
1508 char *filename;
1509
1510 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1511 vmlinux_path__nr_entries + 1);
1512
1513 filename = dso__build_id_filename(dso, NULL, 0);
1514 if (filename != NULL) {
1515 err = dso__load_vmlinux(dso, map, filename, true, filter);
1516 if (err > 0)
1517 goto out;
1518 free(filename);
1519 }
1520
1521 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1522 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
1523 if (err > 0)
1524 break;
1525 }
1526 out:
1527 return err;
1528 }
1529
1530 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1531 {
1532 char kallsyms_filename[PATH_MAX];
1533 struct dirent *dent;
1534 int ret = -1;
1535 DIR *d;
1536
1537 d = opendir(dir);
1538 if (!d)
1539 return -1;
1540
1541 while (1) {
1542 dent = readdir(d);
1543 if (!dent)
1544 break;
1545 if (dent->d_type != DT_DIR)
1546 continue;
1547 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1548 "%s/%s/kallsyms", dir, dent->d_name);
1549 if (!validate_kcore_addresses(kallsyms_filename, map)) {
1550 strlcpy(dir, kallsyms_filename, dir_sz);
1551 ret = 0;
1552 break;
1553 }
1554 }
1555
1556 closedir(d);
1557
1558 return ret;
1559 }
1560
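/*
 * Choose the best kallsyms source for this kernel dso: the running
 * kernel's /proc/kallsyms when the build-id matches and kcore checks
 * out, otherwise a kallsyms (and kcore) pair from the build-id cache.
 */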
1561 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1562 {
1563 u8 host_build_id[BUILD_ID_SIZE];
1564 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1565 bool is_host = false;
1566 char path[PATH_MAX];
1567
1568 if (!dso->has_build_id) {
1569 /*
1570 * Last resort, if we don't have a build-id and couldn't find
1571 * any vmlinux file, try the running kernel kallsyms table.
1572 */
1573 goto proc_kallsyms;
1574 }
1575
1576 if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1577 sizeof(host_build_id)) == 0)
1578 is_host = dso__build_id_equal(dso, host_build_id);
1579
1580 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1581
1582 scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
1583 sbuild_id);
1584
1585 /* Use /proc/kallsyms if possible */
1586 if (is_host) {
1587 DIR *d;
1588 int fd;
1589
1590 /* If no cached kcore go with /proc/kallsyms */
1591 d = opendir(path);
1592 if (!d)
1593 goto proc_kallsyms;
1594 closedir(d);
1595
1596 /*
1597 * Do not check the build-id cache until we know we cannot use
1598 * /proc/kcore.
1599 */
1600 fd = open("/proc/kcore", O_RDONLY);
1601 if (fd != -1) {
1602 close(fd);
1603 /* If module maps match go with /proc/kallsyms */
1604 if (!validate_kcore_addresses("/proc/kallsyms", map))
1605 goto proc_kallsyms;
1606 }
1607
1608 /* Find kallsyms in build-id cache with kcore */
1609 if (!find_matching_kcore(map, path, sizeof(path)))
1610 return strdup(path);
1611
1612 goto proc_kallsyms;
1613 }
1614
1615 /* Find kallsyms in build-id cache with kcore */
1616 if (!find_matching_kcore(map, path, sizeof(path)))
1617 return strdup(path);
1618
1619 scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
1620 buildid_dir, sbuild_id);
1621
1622 if (access(path, F_OK)) {
1623 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1624 sbuild_id);
1625 return NULL;
1626 }
1627
1628 return strdup(path);
1629
1630 proc_kallsyms:
1631 return strdup("/proc/kallsyms");
1632 }
1633
1634 static int dso__load_kernel_sym(struct dso *dso, struct map *map,
1635 symbol_filter_t filter)
1636 {
1637 int err;
1638 const char *kallsyms_filename = NULL;
1639 char *kallsyms_allocated_filename = NULL;
1640 /*
1641 * Step 1: if the user specified a kallsyms or vmlinux filename, use
1642 * it and only it, reporting errors to the user if it cannot be used.
1643 *
1644 * For instance, try to analyse an ARM perf.data file _without_ a
1645 * build-id, or if the user specifies the wrong path to the right
1646 * vmlinux file, obviously we can't fall back to another vmlinux (an
1647 * x86_64 one, on the machine where analysis is being performed, say),
1648 * or worse, /proc/kallsyms.
1649 *
1650 * If the specified file _has_ a build-id and there is a build-id
1651 * section in the perf.data file, we will still do the expected
1652 * validation in dso__load_vmlinux and will bail out if they don't
1653 * match.
1654 */
1655 if (symbol_conf.kallsyms_name != NULL) {
1656 kallsyms_filename = symbol_conf.kallsyms_name;
1657 goto do_kallsyms;
1658 }
1659
1660 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
1661 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name,
1662 false, filter);
1663 }
1664
1665 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
1666 err = dso__load_vmlinux_path(dso, map, filter);
1667 if (err > 0)
1668 return err;
1669 }
1670
1671 /* do not try local files if a symfs was given */
1672 if (symbol_conf.symfs[0] != 0)
1673 return -1;
1674
1675 kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
1676 if (!kallsyms_allocated_filename)
1677 return -1;
1678
1679 kallsyms_filename = kallsyms_allocated_filename;
1680
1681 do_kallsyms:
1682 err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
1683 if (err > 0)
1684 pr_debug("Using %s for symbols\n", kallsyms_filename);
1685 free(kallsyms_allocated_filename);
1686
1687 if (err > 0 && !dso__is_kcore(dso)) {
1688 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
1689 dso__set_long_name(dso, "[kernel.kallsyms]", false);
1690 map__fixup_start(map);
1691 map__fixup_end(map);
1692 }
1693
1694 return err;
1695 }
1696
1697 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
1698 symbol_filter_t filter)
1699 {
1700 int err;
1701 const char *kallsyms_filename = NULL;
1702 struct machine *machine;
1703 char path[PATH_MAX];
1704
1705 if (!map->groups) {
1706 pr_debug("Guest kernel map has no pointer to groups\n");
1707 return -1;
1708 }
1709 machine = map->groups->machine;
1710
1711 if (machine__is_default_guest(machine)) {
1712 /*
1713 * If the user specified a vmlinux filename, use it and only
1714 * it, reporting errors to the user if it cannot be used.
1715 * Otherwise use the guest kallsyms file given on the command line.
1716 */
1717 if (symbol_conf.default_guest_vmlinux_name != NULL) {
1718 err = dso__load_vmlinux(dso, map,
1719 symbol_conf.default_guest_vmlinux_name,
1720 false, filter);
1721 return err;
1722 }
1723
1724 kallsyms_filename = symbol_conf.default_guest_kallsyms;
1725 if (!kallsyms_filename)
1726 return -1;
1727 } else {
1728 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1729 kallsyms_filename = path;
1730 }
1731
1732 err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
1733 if (err > 0)
1734 pr_debug("Using %s for symbols\n", kallsyms_filename);
1735 if (err > 0 && !dso__is_kcore(dso)) {
1736 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1737 machine__mmap_name(machine, path, sizeof(path));
1738 dso__set_long_name(dso, strdup(path), true);
1739 map__fixup_start(map);
1740 map__fixup_end(map);
1741 }
1742
1743 return err;
1744 }
1745
1746 static void vmlinux_path__exit(void)
1747 {
1748 while (--vmlinux_path__nr_entries >= 0)
1749 zfree(&vmlinux_path[vmlinux_path__nr_entries]);
1750
1751 zfree(&vmlinux_path);
1752 }
1753
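/*
 * Build the default list of vmlinux locations to try: ./vmlinux,
 * /boot/vmlinux and the kernel-release specific paths under /boot,
 * /usr/lib/debug and /lib/modules.
 */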
1754 static int vmlinux_path__init(struct perf_session_env *env)
1755 {
1756 struct utsname uts;
1757 char bf[PATH_MAX];
1758 char *kernel_version;
1759
1760 vmlinux_path = malloc(sizeof(char *) * 6);
1761 if (vmlinux_path == NULL)
1762 return -1;
1763
1764 vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
1765 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1766 goto out_fail;
1767 ++vmlinux_path__nr_entries;
1768 vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
1769 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1770 goto out_fail;
1771 ++vmlinux_path__nr_entries;
1772
1773 /* only try kernel version if no symfs was given */
1774 if (symbol_conf.symfs[0] != 0)
1775 return 0;
1776
1777 if (env) {
1778 kernel_version = env->os_release;
1779 } else {
1780 if (uname(&uts) < 0)
1781 goto out_fail;
1782
1783 kernel_version = uts.release;
1784 }
1785
1786 snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version);
1787 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1788 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1789 goto out_fail;
1790 ++vmlinux_path__nr_entries;
1791 snprintf(bf, sizeof(bf), "/usr/lib/debug/boot/vmlinux-%s",
1792 kernel_version);
1793 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1794 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1795 goto out_fail;
1796 ++vmlinux_path__nr_entries;
1797 snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version);
1798 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1799 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1800 goto out_fail;
1801 ++vmlinux_path__nr_entries;
1802 snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
1803 kernel_version);
1804 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1805 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1806 goto out_fail;
1807 ++vmlinux_path__nr_entries;
1808
1809 return 0;
1810
1811 out_fail:
1812 vmlinux_path__exit();
1813 return -1;
1814 }
1815
1816 int setup_list(struct strlist **list, const char *list_str,
1817 const char *list_name)
1818 {
1819 if (list_str == NULL)
1820 return 0;
1821
1822 *list = strlist__new(true, list_str);
1823 if (!*list) {
1824 pr_err("problems parsing %s list\n", list_name);
1825 return -1;
1826 }
1827 return 0;
1828 }
1829
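/*
 * For non-root users, honour /proc/sys/kernel/kptr_restrict: a non-zero
 * value means /proc/kallsyms will show zeroed addresses.
 */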
1830 static bool symbol__read_kptr_restrict(void)
1831 {
1832 bool value = false;
1833
1834 if (geteuid() != 0) {
1835 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
1836 if (fp != NULL) {
1837 char line[8];
1838
1839 if (fgets(line, sizeof(line), fp) != NULL)
1840 value = atoi(line) != 0;
1841
1842 fclose(fp);
1843 }
1844 }
1845
1846 return value;
1847 }
1848
1849 int symbol__init(struct perf_session_env *env)
1850 {
1851 const char *symfs;
1852
1853 if (symbol_conf.initialized)
1854 return 0;
1855
1856 symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
1857
1858 symbol__elf_init();
1859
1860 if (symbol_conf.sort_by_name)
1861 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
1862 sizeof(struct symbol));
1863
1864 if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
1865 return -1;
1866
1867 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
1868 pr_err("'.' is the only invalid --field-separator argument\n");
1869 return -1;
1870 }
1871
1872 if (setup_list(&symbol_conf.dso_list,
1873 symbol_conf.dso_list_str, "dso") < 0)
1874 return -1;
1875
1876 if (setup_list(&symbol_conf.comm_list,
1877 symbol_conf.comm_list_str, "comm") < 0)
1878 goto out_free_dso_list;
1879
1880 if (setup_list(&symbol_conf.sym_list,
1881 symbol_conf.sym_list_str, "symbol") < 0)
1882 goto out_free_comm_list;
1883
1884 /*
1885 * A path to symbols of "/" is identical to "";
1886 * reset it here for simplicity.
1887 */
1888 symfs = realpath(symbol_conf.symfs, NULL);
1889 if (symfs == NULL)
1890 symfs = symbol_conf.symfs;
1891 if (strcmp(symfs, "/") == 0)
1892 symbol_conf.symfs = "";
1893 if (symfs != symbol_conf.symfs)
1894 free((void *)symfs);
1895
1896 symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
1897
1898 symbol_conf.initialized = true;
1899 return 0;
1900
1901 out_free_comm_list:
1902 strlist__delete(symbol_conf.comm_list);
1903 out_free_dso_list:
1904 strlist__delete(symbol_conf.dso_list);
1905 return -1;
1906 }
1907
1908 void symbol__exit(void)
1909 {
1910 if (!symbol_conf.initialized)
1911 return;
1912 strlist__delete(symbol_conf.sym_list);
1913 strlist__delete(symbol_conf.dso_list);
1914 strlist__delete(symbol_conf.comm_list);
1915 vmlinux_path__exit();
1916 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
1917 symbol_conf.initialized = false;
1918 }
1919