1 // SPDX-License-Identifier: GPL-2.0
2 #include <asm/bug.h>
3 #include <linux/kernel.h>
4 #include <sys/time.h>
5 #include <sys/resource.h>
6 #include <sys/types.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9 #include <errno.h>
10 #include "compress.h"
11 #include "path.h"
12 #include "symbol.h"
13 #include "dso.h"
14 #include "machine.h"
15 #include "auxtrace.h"
16 #include "util.h"
17 #include "debug.h"
18 #include "string2.h"
19 #include "vdso.h"
20
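/*
 * Format strings tried, in order, when resolving a .gnu_debuglink
 * reference below: "%.0s%s" consumes the directory argument with a
 * zero-width conversion, so the debuglink value is first tried as
 * given, then relative to the binary's own directory, then its
 * .debug/ subdirectory, and finally under /usr/lib/debug prefixed
 * with the binary's directory.
 */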
21 static const char * const debuglink_paths[] = {
22 "%.0s%s",
23 "%s/%s",
24 "%s/.debug/%s",
25 "/usr/lib/debug%s/%s"
26 };
27
char dso__symtab_origin(const struct dso *dso)
29 {
30 static const char origin[] = {
31 [DSO_BINARY_TYPE__KALLSYMS] = 'k',
32 [DSO_BINARY_TYPE__VMLINUX] = 'v',
33 [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
34 [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
35 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
36 [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
37 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
38 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
39 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
40 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
41 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
42 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
43 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
44 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
45 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
46 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
47 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
48 };
49
50 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
51 return '!';
52 return origin[dso->symtab_type];
53 }
54
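/*
 * Build the file name to load symbols from for the given binary @type
 * into @filename (of @size bytes).  Illustrative examples, assuming
 * symfs is unset and a (hypothetical) long name of
 * "/usr/lib64/libc-2.26.so":
 *
 *   FEDORA_DEBUGINFO:  /usr/lib/debug/usr/lib64/libc-2.26.so.debug
 *   UBUNTU_DEBUGINFO:  /usr/lib/debug/usr/lib64/libc-2.26.so
 *   BUILDID_DEBUGINFO: /usr/lib/debug/.build-id/<xx>/<rest>.debug
 *
 * Returns 0 when a name was produced, -1 otherwise.
 */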
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
58 {
59 char build_id_hex[SBUILD_ID_SIZE];
60 int ret = 0;
61 size_t len;
62
63 switch (type) {
64 case DSO_BINARY_TYPE__DEBUGLINK:
65 {
66 const char *last_slash;
67 char dso_dir[PATH_MAX];
68 char symfile[PATH_MAX];
69 unsigned int i;
70
71 len = __symbol__join_symfs(filename, size, dso->long_name);
72 last_slash = filename + len;
73 while (last_slash != filename && *last_slash != '/')
74 last_slash--;
75
76 strncpy(dso_dir, filename, last_slash - filename);
77 dso_dir[last_slash-filename] = '\0';
78
79 if (!is_regular_file(filename)) {
80 ret = -1;
81 break;
82 }
83
84 ret = filename__read_debuglink(filename, symfile, PATH_MAX);
85 if (ret)
86 break;
87
88 /* Check predefined locations where debug file might reside */
89 ret = -1;
90 for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
91 snprintf(filename, size,
92 debuglink_paths[i], dso_dir, symfile);
93 if (is_regular_file(filename)) {
94 ret = 0;
95 break;
96 }
97 }
98
99 break;
100 }
101 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
102 if (dso__build_id_filename(dso, filename, size, false) == NULL)
103 ret = -1;
104 break;
105
106 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
107 if (dso__build_id_filename(dso, filename, size, true) == NULL)
108 ret = -1;
109 break;
110
111 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
112 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
113 snprintf(filename + len, size - len, "%s.debug", dso->long_name);
114 break;
115
116 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
117 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
118 snprintf(filename + len, size - len, "%s", dso->long_name);
119 break;
120
121 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
122 {
123 const char *last_slash;
124 size_t dir_size;
125
126 last_slash = dso->long_name + dso->long_name_len;
127 while (last_slash != dso->long_name && *last_slash != '/')
128 last_slash--;
129
130 len = __symbol__join_symfs(filename, size, "");
131 dir_size = last_slash - dso->long_name + 2;
132 if (dir_size > (size - len)) {
133 ret = -1;
134 break;
135 }
136 len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
137 len += scnprintf(filename + len , size - len, ".debug%s",
138 last_slash);
139 break;
140 }
141
142 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
143 if (!dso->has_build_id) {
144 ret = -1;
145 break;
146 }
147
148 build_id__sprintf(dso->build_id,
149 sizeof(dso->build_id),
150 build_id_hex);
151 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
152 snprintf(filename + len, size - len, "%.2s/%s.debug",
153 build_id_hex, build_id_hex + 2);
154 break;
155
156 case DSO_BINARY_TYPE__VMLINUX:
157 case DSO_BINARY_TYPE__GUEST_VMLINUX:
158 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
159 __symbol__join_symfs(filename, size, dso->long_name);
160 break;
161
162 case DSO_BINARY_TYPE__GUEST_KMODULE:
163 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
164 path__join3(filename, size, symbol_conf.symfs,
165 root_dir, dso->long_name);
166 break;
167
168 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
169 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
170 __symbol__join_symfs(filename, size, dso->long_name);
171 break;
172
173 case DSO_BINARY_TYPE__KCORE:
174 case DSO_BINARY_TYPE__GUEST_KCORE:
175 snprintf(filename, size, "%s", dso->long_name);
176 break;
177
178 default:
179 case DSO_BINARY_TYPE__KALLSYMS:
180 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
181 case DSO_BINARY_TYPE__JAVA_JIT:
182 case DSO_BINARY_TYPE__NOT_FOUND:
183 ret = -1;
184 break;
185 }
186
187 return ret;
188 }
189
190 static const struct {
191 const char *fmt;
192 int (*decompress)(const char *input, int output);
193 } compressions[] = {
194 #ifdef HAVE_ZLIB_SUPPORT
195 { "gz", gzip_decompress_to_file },
196 #endif
197 #ifdef HAVE_LZMA_SUPPORT
198 { "xz", lzma_decompress_to_file },
199 #endif
200 { NULL, NULL },
201 };
202
bool is_supported_compression(const char *ext)
204 {
205 unsigned i;
206
207 for (i = 0; compressions[i].fmt; i++) {
208 if (!strcmp(ext, compressions[i].fmt))
209 return true;
210 }
211 return false;
212 }
213
bool is_kernel_module(const char *pathname, int cpumode)
215 {
216 struct kmod_path m;
217 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
218
219 WARN_ONCE(mode != cpumode,
220 "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
221 cpumode);
222
223 switch (mode) {
224 case PERF_RECORD_MISC_USER:
225 case PERF_RECORD_MISC_HYPERVISOR:
226 case PERF_RECORD_MISC_GUEST_USER:
227 return false;
228 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
229 default:
230 if (kmod_path__parse(&m, pathname)) {
231 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
232 pathname);
233 return true;
234 }
235 }
236
237 return m.kmod;
238 }
239
bool decompress_to_file(const char *ext, const char *filename, int output_fd)
241 {
242 unsigned i;
243
244 for (i = 0; compressions[i].fmt; i++) {
245 if (!strcmp(ext, compressions[i].fmt))
246 return !compressions[i].decompress(filename,
247 output_fd);
248 }
249 return false;
250 }
251
bool dso__needs_decompress(struct dso *dso)
253 {
254 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
255 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
256 }
257
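/*
 * Decompress the module file @name into a temporary file.  @tmpbuf
 * must hold a writable copy of the KMOD_DECOMP_NAME mkstemp()
 * template; on success it contains the generated file name and the
 * returned fd refers to the decompressed image.
 */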
static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
259 {
260 int fd = -1;
261 struct kmod_path m;
262
263 if (!dso__needs_decompress(dso))
264 return -1;
265
266 if (kmod_path__parse_ext(&m, dso->long_name))
267 return -1;
268
269 if (!m.comp)
270 goto out;
271
272 fd = mkstemp(tmpbuf);
273 if (fd < 0) {
274 dso->load_errno = errno;
275 goto out;
276 }
277
278 if (!decompress_to_file(m.ext, name, fd)) {
279 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
280 close(fd);
281 fd = -1;
282 }
283
284 out:
285 free(m.ext);
286 return fd;
287 }
288
int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
290 {
291 char tmpbuf[] = KMOD_DECOMP_NAME;
292 int fd;
293
294 fd = decompress_kmodule(dso, name, tmpbuf);
295 unlink(tmpbuf);
296 return fd;
297 }
298
int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
301 {
302 char tmpbuf[] = KMOD_DECOMP_NAME;
303 int fd;
304
305 fd = decompress_kmodule(dso, name, tmpbuf);
306 if (fd < 0) {
307 unlink(tmpbuf);
308 return -1;
309 }
310
311 strncpy(pathname, tmpbuf, len);
312 close(fd);
313 return 0;
314 }
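
/*
 * Note: dso__decompress_kmodule_fd() unlinks the temporary file right
 * away and only hands back the open fd, while dso__decompress_kmodule_path()
 * keeps the file on disk and copies its name into @pathname, leaving
 * the caller responsible for unlinking it.
 */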
315
/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - true if @path contains supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains '.ko' suffix in right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strdup-ed
 *            base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
 *            of the compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
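/*
 * Illustrative example (assuming gzip support is built in): for a
 * hypothetical path "/lib/modules/4.x/kernel/fs/xfs/xfs.ko.gz" the
 * result is @kmod = true, @comp = true, @name = "[xfs]" (with
 * @alloc_name) and @ext = "gz" (with @alloc_ext).
 */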
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
334 {
335 const char *name = strrchr(path, '/');
336 const char *ext = strrchr(path, '.');
337 bool is_simple_name = false;
338
339 memset(m, 0x0, sizeof(*m));
340 name = name ? name + 1 : path;
341
	/*
	 * '.' is also a valid character for module names. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than the '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such names
	 * belong to the kernel itself, not to a kernel module.
	 */
350 if (name[0] == '[') {
351 is_simple_name = true;
352 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
353 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
354 (strncmp(name, "[vdso]", 6) == 0) ||
355 (strncmp(name, "[vdso32]", 8) == 0) ||
356 (strncmp(name, "[vdsox32]", 9) == 0) ||
357 (strncmp(name, "[vsyscall]", 10) == 0)) {
358 m->kmod = false;
359
360 } else
361 m->kmod = true;
362 }
363
364 /* No extension, just return name. */
365 if ((ext == NULL) || is_simple_name) {
366 if (alloc_name) {
367 m->name = strdup(name);
368 return m->name ? 0 : -ENOMEM;
369 }
370 return 0;
371 }
372
373 if (is_supported_compression(ext + 1)) {
374 m->comp = true;
375 ext -= 3;
376 }
377
378 /* Check .ko extension only if there's enough name left. */
379 if (ext > name)
380 m->kmod = !strncmp(ext, ".ko", 3);
381
382 if (alloc_name) {
383 if (m->kmod) {
384 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
385 return -ENOMEM;
386 } else {
387 if (asprintf(&m->name, "%s", name) == -1)
388 return -ENOMEM;
389 }
390
391 strxfrchar(m->name, '-', '_');
392 }
393
394 if (alloc_ext && m->comp) {
395 m->ext = strdup(ext + 4);
396 if (!m->ext) {
397 free((void *) m->name);
398 return -ENOMEM;
399 }
400 }
401
402 return 0;
403 }
404
void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
407 {
408 if (machine__is_host(machine))
409 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
410 else
411 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
412
413 /* _KMODULE_COMP should be next to _KMODULE */
414 if (m->kmod && m->comp)
415 dso->symtab_type++;
416
417 dso__set_short_name(dso, strdup(m->name), true);
418 }
419
420 /*
421 * Global list of open DSOs and the counter.
422 */
423 static LIST_HEAD(dso__data_open);
424 static long dso__data_open_cnt;
425 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
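/*
 * All accesses to the list and the counter above are serialized by
 * dso__data_open_lock; close_first_dso() evicts the oldest opened
 * entry when the fd budget is exhausted.
 */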
426
static void dso__list_add(struct dso *dso)
428 {
429 list_add_tail(&dso->data.open_entry, &dso__data_open);
430 dso__data_open_cnt++;
431 }
432
static void dso__list_del(struct dso *dso)
434 {
435 list_del(&dso->data.open_entry);
436 WARN_ONCE(dso__data_open_cnt <= 0,
437 "DSO data fd counter out of bounds.");
438 dso__data_open_cnt--;
439 }
440
441 static void close_first_dso(void);
442
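/*
 * Open @name read-only; if open() fails with EMFILE, evict the oldest
 * cached dso fd via close_first_dso() and retry.
 */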
static int do_open(char *name)
444 {
445 int fd;
446 char sbuf[STRERR_BUFSIZE];
447
448 do {
449 fd = open(name, O_RDONLY);
450 if (fd >= 0)
451 return fd;
452
453 pr_debug("dso open failed: %s\n",
454 str_error_r(errno, sbuf, sizeof(sbuf)));
455 if (!dso__data_open_cnt || errno != EMFILE)
456 break;
457
458 close_first_dso();
459 } while (1);
460
461 return -1;
462 }
463
static int __open_dso(struct dso *dso, struct machine *machine)
465 {
466 int fd = -EINVAL;
467 char *root_dir = (char *)"";
468 char *name = malloc(PATH_MAX);
469
470 if (!name)
471 return -ENOMEM;
472
473 if (machine)
474 root_dir = machine->root_dir;
475
476 if (dso__read_binary_type_filename(dso, dso->binary_type,
477 root_dir, name, PATH_MAX))
478 goto out;
479
480 if (!is_regular_file(name))
481 goto out;
482
483 if (dso__needs_decompress(dso)) {
484 char newpath[KMOD_DECOMP_LEN];
485 size_t len = sizeof(newpath);
486
487 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
488 fd = -dso->load_errno;
489 goto out;
490 }
491
492 strcpy(name, newpath);
493 }
494
495 fd = do_open(name);
496
497 if (dso__needs_decompress(dso))
498 unlink(name);
499
500 out:
501 free(name);
502 return fd;
503 }
504
505 static void check_data_close(void);
506
/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Opens @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
515 {
516 int fd;
517 struct nscookie nsc;
518
519 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
520 nsinfo__mountns_enter(dso->nsinfo, &nsc);
521 fd = __open_dso(dso, machine);
522 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
523 nsinfo__mountns_exit(&nsc);
524
525 if (fd >= 0) {
526 dso__list_add(dso);
527 /*
528 * Check if we crossed the allowed number
529 * of opened DSOs and close one if needed.
530 */
531 check_data_close();
532 }
533
534 return fd;
535 }
536
static void close_data_fd(struct dso *dso)
538 {
539 if (dso->data.fd >= 0) {
540 close(dso->data.fd);
541 dso->data.fd = -1;
542 dso->data.file_size = 0;
543 dso__list_del(dso);
544 }
545 }
546
/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Closes @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
555 {
556 close_data_fd(dso);
557 }
558
static void close_first_dso(void)
560 {
561 struct dso *dso;
562
563 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
564 close_dso(dso);
565 }
566
static rlim_t get_fd_limit(void)
568 {
569 struct rlimit l;
570 rlim_t limit = 0;
571
572 /* Allow half of the current open fd limit. */
573 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
574 if (l.rlim_cur == RLIM_INFINITY)
575 limit = l.rlim_cur;
576 else
577 limit = l.rlim_cur / 2;
578 } else {
579 pr_err("failed to get fd limit\n");
580 limit = 1;
581 }
582
583 return limit;
584 }
585
586 static rlim_t fd_limit;
587
/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
594 {
595 fd_limit = 0;
596 }
597
static bool may_cache_fd(void)
599 {
600 if (!fd_limit)
601 fd_limit = get_fd_limit();
602
603 if (fd_limit == RLIM_INFINITY)
604 return true;
605
606 return fd_limit > (rlim_t) dso__data_open_cnt;
607 }
608
/*
 * Check and close the LRU dso if we crossed the allowed limit
 * of opened dso file descriptors. The limit is half of the
 * RLIMIT_NOFILE soft limit.
 */
static void check_data_close(void)
615 {
616 bool cache_fd = may_cache_fd();
617
618 if (!cache_fd)
619 close_first_dso();
620 }
621
622 /**
623 * dso__data_close - Close DSO data file
624 * @dso: dso object
625 *
626 * External interface to close @dso's data file descriptor.
627 */
void dso__data_close(struct dso *dso)
629 {
630 pthread_mutex_lock(&dso__data_open_lock);
631 close_dso(dso);
632 pthread_mutex_unlock(&dso__data_open_lock);
633 }
634
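/*
 * Probe the dso's data file: reuse an already open fd if there is one;
 * if a binary_type was already recorded, try just that, otherwise walk
 * the build-id cache and the system path until one of them opens.
 */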
static void try_to_open_dso(struct dso *dso, struct machine *machine)
636 {
637 enum dso_binary_type binary_type_data[] = {
638 DSO_BINARY_TYPE__BUILD_ID_CACHE,
639 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
640 DSO_BINARY_TYPE__NOT_FOUND,
641 };
642 int i = 0;
643
644 if (dso->data.fd >= 0)
645 return;
646
647 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
648 dso->data.fd = open_dso(dso, machine);
649 goto out;
650 }
651
652 do {
653 dso->binary_type = binary_type_data[i++];
654
655 dso->data.fd = open_dso(dso, machine);
656 if (dso->data.fd >= 0)
657 goto out;
658
659 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
660 out:
661 if (dso->data.fd >= 0)
662 dso->data.status = DSO_DATA_STATUS_OK;
663 else
664 dso->data.status = DSO_DATA_STATUS_ERROR;
665 }
666
/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return the file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
677 {
678 if (dso->data.status == DSO_DATA_STATUS_ERROR)
679 return -1;
680
681 if (pthread_mutex_lock(&dso__data_open_lock) < 0)
682 return -1;
683
684 try_to_open_dso(dso, machine);
685
686 if (dso->data.fd < 0)
687 pthread_mutex_unlock(&dso__data_open_lock);
688
689 return dso->data.fd;
690 }
691
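/*
 * Typical pairing (illustrative):
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... use fd ...
 *		dso__data_put_fd(dso);
 *	}
 *
 * dso__data_put_fd() only drops the lock taken by a successful
 * dso__data_get_fd(); it does not close the cached file descriptor.
 */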
void dso__data_put_fd(struct dso *dso __maybe_unused)
693 {
694 pthread_mutex_unlock(&dso__data_open_lock);
695 }
696
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
698 {
699 u32 flag = 1 << by;
700
701 if (dso->data.status_seen & flag)
702 return true;
703
704 dso->data.status_seen |= flag;
705
706 return false;
707 }
708
static void
dso_cache__free(struct dso *dso)
711 {
712 struct rb_root *root = &dso->data.cache;
713 struct rb_node *next = rb_first(root);
714
715 pthread_mutex_lock(&dso->lock);
716 while (next) {
717 struct dso_cache *cache;
718
719 cache = rb_entry(next, struct dso_cache, rb_node);
720 next = rb_next(&cache->rb_node);
721 rb_erase(&cache->rb_node, root);
722 free(cache);
723 }
724 pthread_mutex_unlock(&dso->lock);
725 }
726
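/*
 * The data cache is an rb-tree of up to DSO__DATA_CACHE_SIZE byte
 * chunks keyed by their DSO__DATA_CACHE_MASK aligned file offset;
 * dso_cache__find() returns the chunk covering @offset, if any.
 */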
static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
728 {
729 const struct rb_root *root = &dso->data.cache;
730 struct rb_node * const *p = &root->rb_node;
731 const struct rb_node *parent = NULL;
732 struct dso_cache *cache;
733
734 while (*p != NULL) {
735 u64 end;
736
737 parent = *p;
738 cache = rb_entry(parent, struct dso_cache, rb_node);
739 end = cache->offset + DSO__DATA_CACHE_SIZE;
740
741 if (offset < cache->offset)
742 p = &(*p)->rb_left;
743 else if (offset >= end)
744 p = &(*p)->rb_right;
745 else
746 return cache;
747 }
748
749 return NULL;
750 }
751
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
754 {
755 struct rb_root *root = &dso->data.cache;
756 struct rb_node **p = &root->rb_node;
757 struct rb_node *parent = NULL;
758 struct dso_cache *cache;
759 u64 offset = new->offset;
760
761 pthread_mutex_lock(&dso->lock);
762 while (*p != NULL) {
763 u64 end;
764
765 parent = *p;
766 cache = rb_entry(parent, struct dso_cache, rb_node);
767 end = cache->offset + DSO__DATA_CACHE_SIZE;
768
769 if (offset < cache->offset)
770 p = &(*p)->rb_left;
771 else if (offset >= end)
772 p = &(*p)->rb_right;
773 else
774 goto out;
775 }
776
777 rb_link_node(&new->rb_node, parent, p);
778 rb_insert_color(&new->rb_node, root);
779
780 cache = NULL;
781 out:
782 pthread_mutex_unlock(&dso->lock);
783 return cache;
784 }
785
static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
789 {
790 u64 cache_offset = offset - cache->offset;
791 u64 cache_size = min(cache->size - cache_offset, size);
792
793 memcpy(data, cache->data + cache_offset, cache_size);
794 return cache_size;
795 }
796
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
800 {
801 struct dso_cache *cache;
802 struct dso_cache *old;
803 ssize_t ret;
804
805 do {
806 u64 cache_offset;
807
808 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
809 if (!cache)
810 return -ENOMEM;
811
812 pthread_mutex_lock(&dso__data_open_lock);
813
		/*
		 * dso->data.fd might be closed if another thread opened
		 * another file (dso) due to the open file limit (RLIMIT_NOFILE).
		 */
818 try_to_open_dso(dso, machine);
819
820 if (dso->data.fd < 0) {
821 ret = -errno;
822 dso->data.status = DSO_DATA_STATUS_ERROR;
823 break;
824 }
825
826 cache_offset = offset & DSO__DATA_CACHE_MASK;
827
828 ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
829 if (ret <= 0)
830 break;
831
832 cache->offset = cache_offset;
833 cache->size = ret;
834 } while (0);
835
836 pthread_mutex_unlock(&dso__data_open_lock);
837
838 if (ret > 0) {
839 old = dso_cache__insert(dso, cache);
840 if (old) {
841 /* we lose the race */
842 free(cache);
843 cache = old;
844 }
845
846 ret = dso_cache__memcpy(cache, offset, data, size);
847 }
848
849 if (ret <= 0)
850 free(cache);
851
852 return ret;
853 }
854
static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
857 {
858 struct dso_cache *cache;
859
860 cache = dso_cache__find(dso, offset);
861 if (cache)
862 return dso_cache__memcpy(cache, offset, data, size);
863 else
864 return dso_cache__read(dso, machine, offset, data, size);
865 }
866
/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE sized chunks
 * kept in the rb_tree. Any read of already cached data is served
 * from the cache.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
874 {
875 ssize_t r = 0;
876 u8 *p = data;
877
878 do {
879 ssize_t ret;
880
881 ret = dso_cache_read(dso, machine, offset, p, size);
882 if (ret < 0)
883 return ret;
884
885 /* Reached EOF, return what we have. */
886 if (!ret)
887 break;
888
889 BUG_ON(ret > size);
890
891 r += ret;
892 p += ret;
893 offset += ret;
894 size -= ret;
895
896 } while (size);
897
898 return r;
899 }
900
static int data_file_size(struct dso *dso, struct machine *machine)
902 {
903 int ret = 0;
904 struct stat st;
905 char sbuf[STRERR_BUFSIZE];
906
907 if (dso->data.file_size)
908 return 0;
909
910 if (dso->data.status == DSO_DATA_STATUS_ERROR)
911 return -1;
912
913 pthread_mutex_lock(&dso__data_open_lock);
914
	/*
	 * dso->data.fd might be closed if another thread opened
	 * another file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
919 try_to_open_dso(dso, machine);
920
921 if (dso->data.fd < 0) {
922 ret = -errno;
923 dso->data.status = DSO_DATA_STATUS_ERROR;
924 goto out;
925 }
926
927 if (fstat(dso->data.fd, &st) < 0) {
928 ret = -errno;
929 pr_err("dso cache fstat failed: %s\n",
930 str_error_r(errno, sbuf, sizeof(sbuf)));
931 dso->data.status = DSO_DATA_STATUS_ERROR;
932 goto out;
933 }
934 dso->data.file_size = st.st_size;
935
936 out:
937 pthread_mutex_unlock(&dso__data_open_lock);
938 return ret;
939 }
940
941 /**
942 * dso__data_size - Return dso data size
943 * @dso: dso object
944 * @machine: machine object
945 *
946 * Return: dso data size
947 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
949 {
950 if (data_file_size(dso, machine))
951 return -1;
952
953 /* For now just estimate dso data size is close to file size */
954 return dso->data.file_size;
955 }
956
static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
959 {
960 if (data_file_size(dso, machine))
961 return -1;
962
963 /* Check the offset sanity. */
964 if (offset > dso->data.file_size)
965 return -1;
966
967 if (offset + size < offset)
968 return -1;
969
970 return cached_read(dso, machine, offset, data, size);
971 }
972
973 /**
974 * dso__data_read_offset - Read data from dso file offset
975 * @dso: dso object
976 * @machine: machine object
977 * @offset: file offset
978 * @data: buffer to store data
979 * @size: size of the @data buffer
980 *
981 * External interface to read data from dso file offset. Open
982 * dso data file and use cached_read to get the data.
983 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
986 {
987 if (dso->data.status == DSO_DATA_STATUS_ERROR)
988 return -1;
989
990 return data_read_offset(dso, machine, offset, data, size);
991 }
992
/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: memory map used to translate @addr into a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
1006 {
1007 u64 offset = map->map_ip(map, addr);
1008 return dso__data_read_offset(dso, machine, offset, data, size);
1009 }
1010
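/*
 * dso__data_read_addr() above translates @addr with map->map_ip(),
 * i.e. from the map's virtual address space to a dso file offset,
 * before going through the same cached-read path as
 * dso__data_read_offset().
 */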
struct map *dso__new_map(const char *name)
1012 {
1013 struct map *map = NULL;
1014 struct dso *dso = dso__new(name);
1015
1016 if (dso)
1017 map = map__new2(0, dso, MAP__FUNCTION);
1018
1019 return map;
1020 }
1021
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
1024 {
1025 /*
1026 * The kernel dso could be created by build_id processing.
1027 */
1028 struct dso *dso = machine__findnew_dso(machine, name);
1029
1030 /*
1031 * We need to run this in all cases, since during the build_id
1032 * processing we had no idea this was the kernel dso.
1033 */
1034 if (dso != NULL) {
1035 dso__set_short_name(dso, short_name, false);
1036 dso->kernel = dso_type;
1037 }
1038
1039 return dso;
1040 }
1041
/*
 * Find a matching entry and/or link the current entry to the RB tree.
 * At least one of the dso or name parameters must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
1049 {
1050 struct rb_node **p = &root->rb_node;
1051 struct rb_node *parent = NULL;
1052
1053 if (!name)
1054 name = dso->long_name;
1055 /*
1056 * Find node with the matching name
1057 */
1058 while (*p) {
1059 struct dso *this = rb_entry(*p, struct dso, rb_node);
1060 int rc = strcmp(name, this->long_name);
1061
1062 parent = *p;
1063 if (rc == 0) {
1064 /*
1065 * In case the new DSO is a duplicate of an existing
1066 * one, print a one-time warning & put the new entry
1067 * at the end of the list of duplicates.
1068 */
1069 if (!dso || (dso == this))
1070 return this; /* Find matching dso */
			/*
			 * The core kernel DSOs may have duplicated long names.
			 * In this case, the short name should be different.
			 * Compare the short names to differentiate the DSOs.
			 */
1076 rc = strcmp(dso->short_name, this->short_name);
1077 if (rc == 0) {
1078 pr_err("Duplicated dso name: %s\n", name);
1079 return NULL;
1080 }
1081 }
1082 if (rc < 0)
1083 p = &parent->rb_left;
1084 else
1085 p = &parent->rb_right;
1086 }
1087 if (dso) {
1088 /* Add new node and rebalance tree */
1089 rb_link_node(&dso->rb_node, parent, p);
1090 rb_insert_color(&dso->rb_node, root);
1091 dso->root = root;
1092 }
1093 return NULL;
1094 }
1095
static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
1098 {
1099 return __dso__findlink_by_longname(root, NULL, name);
1100 }
1101
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1103 {
1104 struct rb_root *root = dso->root;
1105
1106 if (name == NULL)
1107 return;
1108
1109 if (dso->long_name_allocated)
1110 free((char *)dso->long_name);
1111
1112 if (root) {
1113 rb_erase(&dso->rb_node, root);
1114 /*
1115 * __dso__findlink_by_longname() isn't guaranteed to add it
1116 * back, so a clean removal is required here.
1117 */
1118 RB_CLEAR_NODE(&dso->rb_node);
1119 dso->root = NULL;
1120 }
1121
1122 dso->long_name = name;
1123 dso->long_name_len = strlen(name);
1124 dso->long_name_allocated = name_allocated;
1125
1126 if (root)
1127 __dso__findlink_by_longname(root, dso, NULL);
1128 }
1129
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1131 {
1132 if (name == NULL)
1133 return;
1134
1135 if (dso->short_name_allocated)
1136 free((char *)dso->short_name);
1137
1138 dso->short_name = name;
1139 dso->short_name_len = strlen(name);
1140 dso->short_name_allocated = name_allocated;
1141 }
1142
static void dso__set_basename(struct dso *dso)
1144 {
1145 /*
1146 * basename() may modify path buffer, so we must pass
1147 * a copy.
1148 */
1149 char *base, *lname = strdup(dso->long_name);
1150
1151 if (!lname)
1152 return;
1153
1154 /*
1155 * basename() may return a pointer to internal
1156 * storage which is reused in subsequent calls
1157 * so copy the result.
1158 */
1159 base = strdup(basename(lname));
1160
1161 free(lname);
1162
1163 if (!base)
1164 return;
1165
1166 dso__set_short_name(dso, base, true);
1167 }
1168
int dso__name_len(const struct dso *dso)
1170 {
1171 if (!dso)
1172 return strlen("[unknown]");
1173 if (verbose > 0)
1174 return dso->long_name_len;
1175
1176 return dso->short_name_len;
1177 }
1178
bool dso__loaded(const struct dso *dso, enum map_type type)
1180 {
1181 return dso->loaded & (1 << type);
1182 }
1183
bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
1185 {
1186 return dso->sorted_by_name & (1 << type);
1187 }
1188
void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
1190 {
1191 dso->sorted_by_name |= (1 << type);
1192 }
1193
struct dso *dso__new(const char *name)
1195 {
1196 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1197
1198 if (dso != NULL) {
1199 int i;
1200 strcpy(dso->name, name);
1201 dso__set_long_name(dso, dso->name, false);
1202 dso__set_short_name(dso, dso->name, false);
1203 for (i = 0; i < MAP__NR_TYPES; ++i)
1204 dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
1205 dso->data.cache = RB_ROOT;
1206 dso->data.fd = -1;
1207 dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1208 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1209 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1210 dso->is_64_bit = (sizeof(void *) == 8);
1211 dso->loaded = 0;
1212 dso->rel = 0;
1213 dso->sorted_by_name = 0;
1214 dso->has_build_id = 0;
1215 dso->has_srcline = 1;
1216 dso->a2l_fails = 1;
1217 dso->kernel = DSO_TYPE_USER;
1218 dso->needs_swap = DSO_SWAP__UNSET;
1219 RB_CLEAR_NODE(&dso->rb_node);
1220 dso->root = NULL;
1221 INIT_LIST_HEAD(&dso->node);
1222 INIT_LIST_HEAD(&dso->data.open_entry);
1223 pthread_mutex_init(&dso->lock, NULL);
1224 refcount_set(&dso->refcnt, 1);
1225 }
1226
1227 return dso;
1228 }
1229
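/*
 * Note: dso__new() allocates the name storage together with the
 * struct (the dso->name array at its end), so the long and short
 * names initially point into the dso itself and are only freed when
 * they were marked as separately allocated.
 */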
void dso__delete(struct dso *dso)
1231 {
1232 int i;
1233
1234 if (!RB_EMPTY_NODE(&dso->rb_node))
1235 pr_err("DSO %s is still in rbtree when being deleted!\n",
1236 dso->long_name);
1237 for (i = 0; i < MAP__NR_TYPES; ++i)
1238 symbols__delete(&dso->symbols[i]);
1239
1240 if (dso->short_name_allocated) {
1241 zfree((char **)&dso->short_name);
1242 dso->short_name_allocated = false;
1243 }
1244
1245 if (dso->long_name_allocated) {
1246 zfree((char **)&dso->long_name);
1247 dso->long_name_allocated = false;
1248 }
1249
1250 dso__data_close(dso);
1251 auxtrace_cache__free(dso->auxtrace_cache);
1252 dso_cache__free(dso);
1253 dso__free_a2l(dso);
1254 zfree(&dso->symsrc_filename);
1255 nsinfo__zput(dso->nsinfo);
1256 pthread_mutex_destroy(&dso->lock);
1257 free(dso);
1258 }
1259
struct dso *dso__get(struct dso *dso)
1261 {
1262 if (dso)
1263 refcount_inc(&dso->refcnt);
1264 return dso;
1265 }
1266
void dso__put(struct dso *dso)
1268 {
1269 if (dso && refcount_dec_and_test(&dso->refcnt))
1270 dso__delete(dso);
1271 }
1272
void dso__set_build_id(struct dso *dso, void *build_id)
1274 {
1275 memcpy(dso->build_id, build_id, sizeof(dso->build_id));
1276 dso->has_build_id = 1;
1277 }
1278
bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1280 {
1281 return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1282 }
1283
void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1285 {
1286 char path[PATH_MAX];
1287
1288 if (machine__is_default_guest(machine))
1289 return;
1290 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1291 if (sysfs__read_build_id(path, dso->build_id,
1292 sizeof(dso->build_id)) == 0)
1293 dso->has_build_id = true;
1294 }
1295
int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
1298 {
1299 char filename[PATH_MAX];
1300 /*
1301 * kernel module short names are of the form "[module]" and
1302 * we need just "module" here.
1303 */
1304 const char *name = dso->short_name + 1;
1305
1306 snprintf(filename, sizeof(filename),
1307 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1308 root_dir, (int)strlen(name) - 1, name);
1309
1310 if (sysfs__read_build_id(filename, dso->build_id,
1311 sizeof(dso->build_id)) == 0)
1312 dso->has_build_id = true;
1313
1314 return 0;
1315 }
1316
bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1318 {
1319 bool have_build_id = false;
1320 struct dso *pos;
1321 struct nscookie nsc;
1322
1323 list_for_each_entry(pos, head, node) {
1324 if (with_hits && !pos->hit && !dso__is_vdso(pos))
1325 continue;
1326 if (pos->has_build_id) {
1327 have_build_id = true;
1328 continue;
1329 }
1330 nsinfo__mountns_enter(pos->nsinfo, &nsc);
1331 if (filename__read_build_id(pos->long_name, pos->build_id,
1332 sizeof(pos->build_id)) > 0) {
1333 have_build_id = true;
1334 pos->has_build_id = true;
1335 }
1336 nsinfo__mountns_exit(&nsc);
1337 }
1338
1339 return have_build_id;
1340 }
1341
void __dsos__add(struct dsos *dsos, struct dso *dso)
1343 {
1344 list_add_tail(&dso->node, &dsos->head);
1345 __dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, so grab a reference. Memory can
	 * later be garbage collected by looking for LRU dso instances in
	 * the list with refcount_read(&dso->refcnt) == 1, i.e. with no
	 * references anywhere besides the one for the list itself: under
	 * the list lock, remove the dso from the list and then dso__put()
	 * it, which will probably be the last reference and thus call
	 * dso__delete(), end of life.
	 *
	 * Alternatively, at the end of the 'struct machine' lifetime, all
	 * 'struct dso' instances are removed from the list in dsos__exit(),
	 * if they have no other reference from some other data structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
1366 dso__get(dso);
1367 }
1368
void dsos__add(struct dsos *dsos, struct dso *dso)
1370 {
1371 pthread_rwlock_wrlock(&dsos->lock);
1372 __dsos__add(dsos, dso);
1373 pthread_rwlock_unlock(&dsos->lock);
1374 }
1375
struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1377 {
1378 struct dso *pos;
1379
1380 if (cmp_short) {
1381 list_for_each_entry(pos, &dsos->head, node)
1382 if (strcmp(pos->short_name, name) == 0)
1383 return pos;
1384 return NULL;
1385 }
1386 return __dso__find_by_longname(&dsos->root, name);
1387 }
1388
struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1390 {
1391 struct dso *dso;
1392 pthread_rwlock_rdlock(&dsos->lock);
1393 dso = __dsos__find(dsos, name, cmp_short);
1394 pthread_rwlock_unlock(&dsos->lock);
1395 return dso;
1396 }
1397
struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
1399 {
1400 struct dso *dso = dso__new(name);
1401
1402 if (dso != NULL) {
1403 __dsos__add(dsos, dso);
1404 dso__set_basename(dso);
		/* Put dso here because __dsos__add already got it */
1406 dso__put(dso);
1407 }
1408 return dso;
1409 }
1410
struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1412 {
1413 struct dso *dso = __dsos__find(dsos, name, false);
1414
1415 return dso ? dso : __dsos__addnew(dsos, name);
1416 }
1417
struct dso *dsos__findnew(struct dsos *dsos, const char *name)
1419 {
1420 struct dso *dso;
1421 pthread_rwlock_wrlock(&dsos->lock);
1422 dso = dso__get(__dsos__findnew(dsos, name));
1423 pthread_rwlock_unlock(&dsos->lock);
1424 return dso;
1425 }
1426
size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
			       bool (skip)(struct dso *dso, int parm), int parm)
1429 {
1430 struct dso *pos;
1431 size_t ret = 0;
1432
1433 list_for_each_entry(pos, head, node) {
1434 if (skip && skip(pos, parm))
1435 continue;
1436 ret += dso__fprintf_buildid(pos, fp);
1437 ret += fprintf(fp, " %s\n", pos->long_name);
1438 }
1439 return ret;
1440 }
1441
size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1443 {
1444 struct dso *pos;
1445 size_t ret = 0;
1446
1447 list_for_each_entry(pos, head, node) {
1448 int i;
1449 for (i = 0; i < MAP__NR_TYPES; ++i)
1450 ret += dso__fprintf(pos, i, fp);
1451 }
1452
1453 return ret;
1454 }
1455
size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1457 {
1458 char sbuild_id[SBUILD_ID_SIZE];
1459
1460 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1461 return fprintf(fp, "%s", sbuild_id);
1462 }
1463
size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
1465 {
1466 struct rb_node *nd;
1467 size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1468
1469 if (dso->short_name != dso->long_name)
1470 ret += fprintf(fp, "%s, ", dso->long_name);
1471 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
1472 dso__loaded(dso, type) ? "" : "NOT ");
1473 ret += dso__fprintf_buildid(dso, fp);
1474 ret += fprintf(fp, ")\n");
1475 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
1476 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1477 ret += symbol__fprintf(pos, fp);
1478 }
1479
1480 return ret;
1481 }
1482
enum dso_type dso__type(struct dso *dso, struct machine *machine)
1484 {
1485 int fd;
1486 enum dso_type type = DSO__TYPE_UNKNOWN;
1487
1488 fd = dso__data_get_fd(dso, machine);
1489 if (fd >= 0) {
1490 type = dso__type_fd(fd);
1491 dso__data_put_fd(dso);
1492 }
1493
1494 return type;
1495 }
1496
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1498 {
1499 int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as the enum dso_load_errno.
	 */
1503 static const char *dso_load__error_str[] = {
1504 "Internal tools/perf/ library error",
1505 "Invalid ELF file",
1506 "Can not read build id",
1507 "Mismatching build id",
1508 "Decompression failure",
1509 };
1510
1511 BUG_ON(buflen == 0);
1512
1513 if (errnum >= 0) {
1514 const char *err = str_error_r(errnum, buf, buflen);
1515
1516 if (err != buf)
1517 scnprintf(buf, buflen, "%s", err);
1518
1519 return 0;
1520 }
1521
1522 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1523 return -1;
1524
1525 idx = errnum - __DSO_LOAD_ERRNO__START;
1526 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1527 return 0;
1528 }
1529