1 #include <asm/bug.h>
2 #include <sys/time.h>
3 #include <sys/resource.h>
4 #include "symbol.h"
5 #include "dso.h"
6 #include "machine.h"
7 #include "auxtrace.h"
8 #include "util.h"
9 #include "debug.h"
10
11 char dso__symtab_origin(const struct dso *dso)
12 {
13 static const char origin[] = {
14 [DSO_BINARY_TYPE__KALLSYMS] = 'k',
15 [DSO_BINARY_TYPE__VMLINUX] = 'v',
16 [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
17 [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
18 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
19 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
20 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
21 [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
22 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
23 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
24 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
25 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
26 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
27 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
28 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
29 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
30 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
31 };
32
33 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
34 return '!';
35 return origin[dso->symtab_type];
36 }
37
38 int dso__read_binary_type_filename(const struct dso *dso,
39 enum dso_binary_type type,
40 char *root_dir, char *filename, size_t size)
41 {
42 char build_id_hex[BUILD_ID_SIZE * 2 + 1];
43 int ret = 0;
44 size_t len;
45
46 switch (type) {
47 case DSO_BINARY_TYPE__DEBUGLINK: {
48 char *debuglink;
49
50 len = __symbol__join_symfs(filename, size, dso->long_name);
51 debuglink = filename + len;
52 while (debuglink != filename && *debuglink != '/')
53 debuglink--;
54 if (*debuglink == '/')
55 debuglink++;
56 ret = filename__read_debuglink(filename, debuglink,
57 size - (debuglink - filename));
58 }
59 break;
60 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
61 /* skip the locally configured cache if a symfs is given */
62 if (symbol_conf.symfs[0] ||
63 (dso__build_id_filename(dso, filename, size) == NULL))
64 ret = -1;
65 break;
66
67 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
68 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
69 snprintf(filename + len, size - len, "%s.debug", dso->long_name);
70 break;
71
72 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
73 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
74 snprintf(filename + len, size - len, "%s", dso->long_name);
75 break;
76
77 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
78 /*
79 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
80 * /usr/lib/debug/lib when it is expected to be in
81 * /usr/lib/debug/usr/lib
82 */
83 if (strlen(dso->long_name) < 9 ||
84 strncmp(dso->long_name, "/usr/lib/", 9)) {
85 ret = -1;
86 break;
87 }
88 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
89 snprintf(filename + len, size - len, "%s", dso->long_name + 4);
90 break;
91
92 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
93 {
94 const char *last_slash;
95 size_t dir_size;
96
97 last_slash = dso->long_name + dso->long_name_len;
98 while (last_slash != dso->long_name && *last_slash != '/')
99 last_slash--;
100
101 len = __symbol__join_symfs(filename, size, "");
102 dir_size = last_slash - dso->long_name + 2;
103 if (dir_size > (size - len)) {
104 ret = -1;
105 break;
106 }
107 len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
108 len += scnprintf(filename + len , size - len, ".debug%s",
109 last_slash);
110 break;
111 }
112
113 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
114 if (!dso->has_build_id) {
115 ret = -1;
116 break;
117 }
118
119 build_id__sprintf(dso->build_id,
120 sizeof(dso->build_id),
121 build_id_hex);
122 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
123 snprintf(filename + len, size - len, "%.2s/%s.debug",
124 build_id_hex, build_id_hex + 2);
125 break;
126
127 case DSO_BINARY_TYPE__VMLINUX:
128 case DSO_BINARY_TYPE__GUEST_VMLINUX:
129 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
130 __symbol__join_symfs(filename, size, dso->long_name);
131 break;
132
133 case DSO_BINARY_TYPE__GUEST_KMODULE:
134 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
135 path__join3(filename, size, symbol_conf.symfs,
136 root_dir, dso->long_name);
137 break;
138
139 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
140 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
141 __symbol__join_symfs(filename, size, dso->long_name);
142 break;
143
144 case DSO_BINARY_TYPE__KCORE:
145 case DSO_BINARY_TYPE__GUEST_KCORE:
146 snprintf(filename, size, "%s", dso->long_name);
147 break;
148
149 default:
150 case DSO_BINARY_TYPE__KALLSYMS:
151 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
152 case DSO_BINARY_TYPE__JAVA_JIT:
153 case DSO_BINARY_TYPE__NOT_FOUND:
154 ret = -1;
155 break;
156 }
157
158 return ret;
159 }
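
/*
 * Illustrative sketch, not part of the original file: one way a caller
 * might ask for the build-id debuginfo path of a dso. The empty
 * root_dir and the PATH_MAX-sized buffer are assumptions of this
 * example, not requirements of the API.
 */
static void __maybe_unused read_binary_type_filename_example(struct dso *dso)
{
	char filename[PATH_MAX];

	if (!dso__read_binary_type_filename(dso, DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
					    (char *)"", filename, sizeof(filename)))
		pr_debug("debuginfo candidate: %s\n", filename);
}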
160
161 static const struct {
162 const char *fmt;
163 int (*decompress)(const char *input, int output);
164 } compressions[] = {
165 #ifdef HAVE_ZLIB_SUPPORT
166 { "gz", gzip_decompress_to_file },
167 #endif
168 #ifdef HAVE_LZMA_SUPPORT
169 { "xz", lzma_decompress_to_file },
170 #endif
171 { NULL, NULL },
172 };
173
174 bool is_supported_compression(const char *ext)
175 {
176 unsigned i;
177
178 for (i = 0; compressions[i].fmt; i++) {
179 if (!strcmp(ext, compressions[i].fmt))
180 return true;
181 }
182 return false;
183 }
184
185 bool is_kernel_module(const char *pathname, int cpumode)
186 {
187 struct kmod_path m;
188 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
189
190 WARN_ONCE(mode != cpumode,
191 "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
192 cpumode);
193
194 switch (mode) {
195 case PERF_RECORD_MISC_USER:
196 case PERF_RECORD_MISC_HYPERVISOR:
197 case PERF_RECORD_MISC_GUEST_USER:
198 return false;
199 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
200 default:
201 if (kmod_path__parse(&m, pathname)) {
202 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
203 pathname);
204 return true;
205 }
206 }
207
208 return m.kmod;
209 }
210
211 bool decompress_to_file(const char *ext, const char *filename, int output_fd)
212 {
213 unsigned i;
214
215 for (i = 0; compressions[i].fmt; i++) {
216 if (!strcmp(ext, compressions[i].fmt))
217 return !compressions[i].decompress(filename,
218 output_fd);
219 }
220 return false;
221 }
222
223 bool dso__needs_decompress(struct dso *dso)
224 {
225 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
226 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
227 }
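
/*
 * Illustrative sketch, not part of the original file: how the helpers
 * above are meant to combine. @ext and @tmp_fd are assumed to be
 * provided by a hypothetical caller (e.g. "gz" and an already-open
 * temporary file descriptor).
 */
static bool __maybe_unused decompress_kmodule_example(struct dso *dso,
						      const char *ext, int tmp_fd)
{
	if (!dso__needs_decompress(dso) || !is_supported_compression(ext))
		return false;

	/* On success the decompressed module image is written to tmp_fd. */
	return decompress_to_file(ext, dso->long_name, tmp_fd);
}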
228
229 /*
230 * Parses the kernel module specified in @path and updates
231 * the @m argument as follows:
232 *
233 * @comp - true if @path contains a supported compression suffix,
234 * false otherwise
235 * @kmod - true if @path contains the '.ko' suffix in the right position,
236 * false otherwise
237 * @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
238 * base name of the kernel module without suffixes, otherwise the
239 * strdup-ed base name of @path
240 * @ext - if (@alloc_ext && @comp) is true, it contains the strdup-ed
241 * compression suffix
242 *
243 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
244 */
245 int __kmod_path__parse(struct kmod_path *m, const char *path,
246 bool alloc_name, bool alloc_ext)
247 {
248 const char *name = strrchr(path, '/');
249 const char *ext = strrchr(path, '.');
250 bool is_simple_name = false;
251
252 memset(m, 0x0, sizeof(*m));
253 name = name ? name + 1 : path;
254
255 /*
256 * '.' is also a valid character for a module name. For example:
257 * [aaa.bbb] is a valid module name. '[' should have higher
258 * priority than the '.ko' suffix.
259 *
260 * The kernel names come from machine__mmap_name. Such names
261 * belong to the kernel itself, not to kernel modules.
262 */
263 if (name[0] == '[') {
264 is_simple_name = true;
265 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
266 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
267 (strncmp(name, "[vdso]", 6) == 0) ||
268 (strncmp(name, "[vdso32]", 8) == 0) ||
269 (strncmp(name, "[vdsox32]", 9) == 0) ||
270 (strncmp(name, "[vsyscall]", 10) == 0)) {
271 m->kmod = false;
272
273 } else
274 m->kmod = true;
275 }
276
277 /* No extension, just return name. */
278 if ((ext == NULL) || is_simple_name) {
279 if (alloc_name) {
280 m->name = strdup(name);
281 return m->name ? 0 : -ENOMEM;
282 }
283 return 0;
284 }
285
286 if (is_supported_compression(ext + 1)) {
287 m->comp = true;
288 ext -= 3;
289 }
290
291 /* Check .ko extension only if there's enough name left. */
292 if (ext > name)
293 m->kmod = !strncmp(ext, ".ko", 3);
294
295 if (alloc_name) {
296 if (m->kmod) {
297 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
298 return -ENOMEM;
299 } else {
300 if (asprintf(&m->name, "%s", name) == -1)
301 return -ENOMEM;
302 }
303
304 strxfrchar(m->name, '-', '_');
305 }
306
307 if (alloc_ext && m->comp) {
308 m->ext = strdup(ext + 4);
309 if (!m->ext) {
310 free((void *) m->name);
311 return -ENOMEM;
312 }
313 }
314
315 return 0;
316 }
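
/*
 * Illustrative sketch, not part of the original file: parsing a
 * compressed kernel module path with __kmod_path__parse(). The path
 * below is hypothetical.
 */
static void __maybe_unused kmod_path__parse_example(void)
{
	struct kmod_path m;

	/* Expect kmod=true, comp=true, name="[i915]", ext="gz". */
	if (__kmod_path__parse(&m, "/lib/modules/4.2.0/kernel/i915.ko.gz",
			       true, true))
		return;

	pr_debug("kmod %d comp %d name %s ext %s\n",
		 m.kmod, m.comp, m.name, m.ext);

	free((void *)m.name);
	free((void *)m.ext);
}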
317
318 /*
319 * Global list of open DSOs and the counter.
320 */
321 static LIST_HEAD(dso__data_open);
322 static long dso__data_open_cnt;
323 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
324
325 static void dso__list_add(struct dso *dso)
326 {
327 list_add_tail(&dso->data.open_entry, &dso__data_open);
328 dso__data_open_cnt++;
329 }
330
331 static void dso__list_del(struct dso *dso)
332 {
333 list_del(&dso->data.open_entry);
334 WARN_ONCE(dso__data_open_cnt <= 0,
335 "DSO data fd counter out of bounds.");
336 dso__data_open_cnt--;
337 }
338
339 static void close_first_dso(void);
340
341 static int do_open(char *name)
342 {
343 int fd;
344 char sbuf[STRERR_BUFSIZE];
345
346 do {
347 fd = open(name, O_RDONLY);
348 if (fd >= 0)
349 return fd;
350
351 pr_debug("dso open failed: %s\n",
352 strerror_r(errno, sbuf, sizeof(sbuf)));
353 if (!dso__data_open_cnt || errno != EMFILE)
354 break;
355
356 close_first_dso();
357 } while (1);
358
359 return -1;
360 }
361
362 static int __open_dso(struct dso *dso, struct machine *machine)
363 {
364 int fd;
365 char *root_dir = (char *)"";
366 char *name = malloc(PATH_MAX);
367
368 if (!name)
369 return -ENOMEM;
370
371 if (machine)
372 root_dir = machine->root_dir;
373
374 if (dso__read_binary_type_filename(dso, dso->binary_type,
375 root_dir, name, PATH_MAX)) {
376 free(name);
377 return -EINVAL;
378 }
379
380 fd = do_open(name);
381 free(name);
382 return fd;
383 }
384
385 static void check_data_close(void);
386
387 /**
388 * open_dso - Open DSO data file
389 * @dso: dso object
390 *
391 * Opens @dso's data file descriptor and updates the
392 * list/count of open DSO objects.
393 */
394 static int open_dso(struct dso *dso, struct machine *machine)
395 {
396 int fd = __open_dso(dso, machine);
397
398 if (fd >= 0) {
399 dso__list_add(dso);
400 /*
401 * Check if we crossed the allowed number
402 * of opened DSOs and close one if needed.
403 */
404 check_data_close();
405 }
406
407 return fd;
408 }
409
410 static void close_data_fd(struct dso *dso)
411 {
412 if (dso->data.fd >= 0) {
413 close(dso->data.fd);
414 dso->data.fd = -1;
415 dso->data.file_size = 0;
416 dso__list_del(dso);
417 }
418 }
419
420 /**
421 * close_dso - Close DSO data file
422 * @dso: dso object
423 *
424 * Closes @dso's data file descriptor and updates the
425 * list/count of open DSO objects.
426 */
427 static void close_dso(struct dso *dso)
428 {
429 close_data_fd(dso);
430 }
431
432 static void close_first_dso(void)
433 {
434 struct dso *dso;
435
436 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
437 close_dso(dso);
438 }
439
440 static rlim_t get_fd_limit(void)
441 {
442 struct rlimit l;
443 rlim_t limit = 0;
444
445 /* Allow half of the current open fd limit. */
446 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
447 if (l.rlim_cur == RLIM_INFINITY)
448 limit = l.rlim_cur;
449 else
450 limit = l.rlim_cur / 2;
451 } else {
452 pr_err("failed to get fd limit\n");
453 limit = 1;
454 }
455
456 return limit;
457 }
458
459 static bool may_cache_fd(void)
460 {
461 static rlim_t limit;
462
463 if (!limit)
464 limit = get_fd_limit();
465
466 if (limit == RLIM_INFINITY)
467 return true;
468
469 return limit > (rlim_t) dso__data_open_cnt;
470 }
471
472 /*
473 * Check and close the LRU dso if we crossed the allowed limit
474 * for open dso file descriptors. The limit is half of the
475 * RLIMIT_NOFILE limit on open files.
476 */
477 static void check_data_close(void)
478 {
479 bool cache_fd = may_cache_fd();
480
481 if (!cache_fd)
482 close_first_dso();
483 }
484
485 /**
486 * dso__data_close - Close DSO data file
487 * @dso: dso object
488 *
489 * External interface to close @dso's data file descriptor.
490 */
491 void dso__data_close(struct dso *dso)
492 {
493 pthread_mutex_lock(&dso__data_open_lock);
494 close_dso(dso);
495 pthread_mutex_unlock(&dso__data_open_lock);
496 }
497
498 static void try_to_open_dso(struct dso *dso, struct machine *machine)
499 {
500 enum dso_binary_type binary_type_data[] = {
501 DSO_BINARY_TYPE__BUILD_ID_CACHE,
502 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
503 DSO_BINARY_TYPE__NOT_FOUND,
504 };
505 int i = 0;
506
507 if (dso->data.fd >= 0)
508 return;
509
510 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
511 dso->data.fd = open_dso(dso, machine);
512 goto out;
513 }
514
515 do {
516 dso->binary_type = binary_type_data[i++];
517
518 dso->data.fd = open_dso(dso, machine);
519 if (dso->data.fd >= 0)
520 goto out;
521
522 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
523 out:
524 if (dso->data.fd >= 0)
525 dso->data.status = DSO_DATA_STATUS_OK;
526 else
527 dso->data.status = DSO_DATA_STATUS_ERROR;
528 }
529
530 /**
531 * dso__data_get_fd - Get dso's data file descriptor
532 * @dso: dso object
533 * @machine: machine object
534 *
535 * External interface to find the dso's file, open it and
536 * return its file descriptor. It must be paired with
537 * dso__data_put_fd() if it returns a non-negative value.
538 */
539 int dso__data_get_fd(struct dso *dso, struct machine *machine)
540 {
541 if (dso->data.status == DSO_DATA_STATUS_ERROR)
542 return -1;
543
544 if (pthread_mutex_lock(&dso__data_open_lock) < 0)
545 return -1;
546
547 try_to_open_dso(dso, machine);
548
549 if (dso->data.fd < 0)
550 pthread_mutex_unlock(&dso__data_open_lock);
551
552 return dso->data.fd;
553 }
554
555 void dso__data_put_fd(struct dso *dso __maybe_unused)
556 {
557 pthread_mutex_unlock(&dso__data_open_lock);
558 }
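
/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing of dso__data_get_fd()/dso__data_put_fd(). The open lock is
 * held between the two calls, so the critical section should stay
 * short.
 */
static void __maybe_unused dso__data_fd_example(struct dso *dso,
						struct machine *machine)
{
	int fd = dso__data_get_fd(dso, machine);

	if (fd >= 0) {
		/* ... use fd, e.g. pread() the ELF header ... */
		dso__data_put_fd(dso);
	}
}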
559
560 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
561 {
562 u32 flag = 1 << by;
563
564 if (dso->data.status_seen & flag)
565 return true;
566
567 dso->data.status_seen |= flag;
568
569 return false;
570 }
571
572 static void
573 dso_cache__free(struct dso *dso)
574 {
575 struct rb_root *root = &dso->data.cache;
576 struct rb_node *next = rb_first(root);
577
578 pthread_mutex_lock(&dso->lock);
579 while (next) {
580 struct dso_cache *cache;
581
582 cache = rb_entry(next, struct dso_cache, rb_node);
583 next = rb_next(&cache->rb_node);
584 rb_erase(&cache->rb_node, root);
585 free(cache);
586 }
587 pthread_mutex_unlock(&dso->lock);
588 }
589
590 static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
591 {
592 const struct rb_root *root = &dso->data.cache;
593 struct rb_node * const *p = &root->rb_node;
594 const struct rb_node *parent = NULL;
595 struct dso_cache *cache;
596
597 while (*p != NULL) {
598 u64 end;
599
600 parent = *p;
601 cache = rb_entry(parent, struct dso_cache, rb_node);
602 end = cache->offset + DSO__DATA_CACHE_SIZE;
603
604 if (offset < cache->offset)
605 p = &(*p)->rb_left;
606 else if (offset >= end)
607 p = &(*p)->rb_right;
608 else
609 return cache;
610 }
611
612 return NULL;
613 }
614
615 static struct dso_cache *
616 dso_cache__insert(struct dso *dso, struct dso_cache *new)
617 {
618 struct rb_root *root = &dso->data.cache;
619 struct rb_node **p = &root->rb_node;
620 struct rb_node *parent = NULL;
621 struct dso_cache *cache;
622 u64 offset = new->offset;
623
624 pthread_mutex_lock(&dso->lock);
625 while (*p != NULL) {
626 u64 end;
627
628 parent = *p;
629 cache = rb_entry(parent, struct dso_cache, rb_node);
630 end = cache->offset + DSO__DATA_CACHE_SIZE;
631
632 if (offset < cache->offset)
633 p = &(*p)->rb_left;
634 else if (offset >= end)
635 p = &(*p)->rb_right;
636 else
637 goto out;
638 }
639
640 rb_link_node(&new->rb_node, parent, p);
641 rb_insert_color(&new->rb_node, root);
642
643 cache = NULL;
644 out:
645 pthread_mutex_unlock(&dso->lock);
646 return cache;
647 }
648
649 static ssize_t
650 dso_cache__memcpy(struct dso_cache *cache, u64 offset,
651 u8 *data, u64 size)
652 {
653 u64 cache_offset = offset - cache->offset;
654 u64 cache_size = min(cache->size - cache_offset, size);
655
656 memcpy(data, cache->data + cache_offset, cache_size);
657 return cache_size;
658 }
659
660 static ssize_t
661 dso_cache__read(struct dso *dso, struct machine *machine,
662 u64 offset, u8 *data, ssize_t size)
663 {
664 struct dso_cache *cache;
665 struct dso_cache *old;
666 ssize_t ret;
667
668 do {
669 u64 cache_offset;
670
671 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
672 if (!cache)
673 return -ENOMEM;
674
675 pthread_mutex_lock(&dso__data_open_lock);
676
677 /*
678 * dso->data.fd might be closed if another thread opened another
679 * file (dso) due to the open file limit (RLIMIT_NOFILE).
680 */
681 try_to_open_dso(dso, machine);
682
683 if (dso->data.fd < 0) {
684 ret = -errno;
685 dso->data.status = DSO_DATA_STATUS_ERROR;
686 break;
687 }
688
689 cache_offset = offset & DSO__DATA_CACHE_MASK;
690
691 ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
692 if (ret <= 0)
693 break;
694
695 cache->offset = cache_offset;
696 cache->size = ret;
697 } while (0);
698
699 pthread_mutex_unlock(&dso__data_open_lock);
700
701 if (ret > 0) {
702 old = dso_cache__insert(dso, cache);
703 if (old) {
704 /* we lose the race */
705 free(cache);
706 cache = old;
707 }
708
709 ret = dso_cache__memcpy(cache, offset, data, size);
710 }
711
712 if (ret <= 0)
713 free(cache);
714
715 return ret;
716 }
717
718 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
719 u64 offset, u8 *data, ssize_t size)
720 {
721 struct dso_cache *cache;
722
723 cache = dso_cache__find(dso, offset);
724 if (cache)
725 return dso_cache__memcpy(cache, offset, data, size);
726 else
727 return dso_cache__read(dso, machine, offset, data, size);
728 }
729
730 /*
731 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
732 * kept in the rb_tree. Any read of already cached data is served
733 * from the cache.
734 */
735 static ssize_t cached_read(struct dso *dso, struct machine *machine,
736 u64 offset, u8 *data, ssize_t size)
737 {
738 ssize_t r = 0;
739 u8 *p = data;
740
741 do {
742 ssize_t ret;
743
744 ret = dso_cache_read(dso, machine, offset, p, size);
745 if (ret < 0)
746 return ret;
747
748 /* Reached EOF, return what we have. */
749 if (!ret)
750 break;
751
752 BUG_ON(ret > size);
753
754 r += ret;
755 p += ret;
756 offset += ret;
757 size -= ret;
758
759 } while (size);
760
761 return r;
762 }
763
764 static int data_file_size(struct dso *dso, struct machine *machine)
765 {
766 int ret = 0;
767 struct stat st;
768 char sbuf[STRERR_BUFSIZE];
769
770 if (dso->data.file_size)
771 return 0;
772
773 if (dso->data.status == DSO_DATA_STATUS_ERROR)
774 return -1;
775
776 pthread_mutex_lock(&dso__data_open_lock);
777
778 /*
779 * dso->data.fd might be closed if another thread opened another
780 * file (dso) due to the open file limit (RLIMIT_NOFILE).
781 */
782 try_to_open_dso(dso, machine);
783
784 if (dso->data.fd < 0) {
785 ret = -errno;
786 dso->data.status = DSO_DATA_STATUS_ERROR;
787 goto out;
788 }
789
790 if (fstat(dso->data.fd, &st) < 0) {
791 ret = -errno;
792 pr_err("dso cache fstat failed: %s\n",
793 strerror_r(errno, sbuf, sizeof(sbuf)));
794 dso->data.status = DSO_DATA_STATUS_ERROR;
795 goto out;
796 }
797 dso->data.file_size = st.st_size;
798
799 out:
800 pthread_mutex_unlock(&dso__data_open_lock);
801 return ret;
802 }
803
804 /**
805 * dso__data_size - Return dso data size
806 * @dso: dso object
807 * @machine: machine object
808 *
809 * Return: dso data size
810 */
811 off_t dso__data_size(struct dso *dso, struct machine *machine)
812 {
813 if (data_file_size(dso, machine))
814 return -1;
815
816 /* For now just assume that the dso data size is close to the file size */
817 return dso->data.file_size;
818 }
819
820 static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
821 u64 offset, u8 *data, ssize_t size)
822 {
823 if (data_file_size(dso, machine))
824 return -1;
825
826 /* Check the offset sanity. */
827 if (offset > dso->data.file_size)
828 return -1;
829
830 if (offset + size < offset)
831 return -1;
832
833 return cached_read(dso, machine, offset, data, size);
834 }
835
836 /**
837 * dso__data_read_offset - Read data from dso file offset
838 * @dso: dso object
839 * @machine: machine object
840 * @offset: file offset
841 * @data: buffer to store data
842 * @size: size of the @data buffer
843 *
844 * External interface to read data from a dso file offset. Opens
845 * the dso data file and uses cached_read() to get the data.
846 */
847 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
848 u64 offset, u8 *data, ssize_t size)
849 {
850 if (dso->data.status == DSO_DATA_STATUS_ERROR)
851 return -1;
852
853 return data_read_offset(dso, machine, offset, data, size);
854 }
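
/*
 * Illustrative sketch, not part of the original file: reading a small
 * chunk of DSO data at file offset 0. The buffer size is arbitrary;
 * the call transparently opens the file and fills the dso data cache.
 */
static ssize_t __maybe_unused dso__data_read_offset_example(struct dso *dso,
							     struct machine *machine)
{
	u8 buf[64];

	return dso__data_read_offset(dso, machine, 0, buf, sizeof(buf));
}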
855
856 /**
857 * dso__data_read_addr - Read data from dso address
858 * @dso: dso object
859 * @machine: machine object
860 * @addr: virtual memory address
861 * @data: buffer to store data
862 * @size: size of the @data buffer
863 *
864 * External interface to read data from dso address.
865 */
866 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
867 struct machine *machine, u64 addr,
868 u8 *data, ssize_t size)
869 {
870 u64 offset = map->map_ip(map, addr);
871 return dso__data_read_offset(dso, machine, offset, data, size);
872 }
873
874 struct map *dso__new_map(const char *name)
875 {
876 struct map *map = NULL;
877 struct dso *dso = dso__new(name);
878
879 if (dso)
880 map = map__new2(0, dso, MAP__FUNCTION);
881
882 return map;
883 }
884
885 struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
886 const char *short_name, int dso_type)
887 {
888 /*
889 * The kernel dso could be created by build_id processing.
890 */
891 struct dso *dso = machine__findnew_dso(machine, name);
892
893 /*
894 * We need to run this in all cases, since during the build_id
895 * processing we had no idea this was the kernel dso.
896 */
897 if (dso != NULL) {
898 dso__set_short_name(dso, short_name, false);
899 dso->kernel = dso_type;
900 }
901
902 return dso;
903 }
904
905 /*
906 * Find a matching entry and/or link the current entry into the RB tree.
907 * At least one of the dso or name parameters must be non-NULL or the
908 * function will not work.
909 */
910 static struct dso *__dso__findlink_by_longname(struct rb_root *root,
911 struct dso *dso, const char *name)
912 {
913 struct rb_node **p = &root->rb_node;
914 struct rb_node *parent = NULL;
915
916 if (!name)
917 name = dso->long_name;
918 /*
919 * Find node with the matching name
920 */
921 while (*p) {
922 struct dso *this = rb_entry(*p, struct dso, rb_node);
923 int rc = strcmp(name, this->long_name);
924
925 parent = *p;
926 if (rc == 0) {
927 /*
928 * In case the new DSO is a duplicate of an existing
929 * one, print a one-time warning and put the new entry
930 * at the end of the list of duplicates.
931 */
932 if (!dso || (dso == this))
933 return this; /* Find matching dso */
934 /*
935 * The core kernel DSOs may have duplicated long names.
936 * In this case, the short names should be different.
937 * Compare the short names to differentiate the DSOs.
938 */
939 rc = strcmp(dso->short_name, this->short_name);
940 if (rc == 0) {
941 pr_err("Duplicated dso name: %s\n", name);
942 return NULL;
943 }
944 }
945 if (rc < 0)
946 p = &parent->rb_left;
947 else
948 p = &parent->rb_right;
949 }
950 if (dso) {
951 /* Add new node and rebalance tree */
952 rb_link_node(&dso->rb_node, parent, p);
953 rb_insert_color(&dso->rb_node, root);
954 dso->root = root;
955 }
956 return NULL;
957 }
958
959 static inline struct dso *__dso__find_by_longname(struct rb_root *root,
960 const char *name)
961 {
962 return __dso__findlink_by_longname(root, NULL, name);
963 }
964
965 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
966 {
967 struct rb_root *root = dso->root;
968
969 if (name == NULL)
970 return;
971
972 if (dso->long_name_allocated)
973 free((char *)dso->long_name);
974
975 if (root) {
976 rb_erase(&dso->rb_node, root);
977 /*
978 * __dso__findlink_by_longname() isn't guaranteed to add it
979 * back, so a clean removal is required here.
980 */
981 RB_CLEAR_NODE(&dso->rb_node);
982 dso->root = NULL;
983 }
984
985 dso->long_name = name;
986 dso->long_name_len = strlen(name);
987 dso->long_name_allocated = name_allocated;
988
989 if (root)
990 __dso__findlink_by_longname(root, dso, NULL);
991 }
992
993 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
994 {
995 if (name == NULL)
996 return;
997
998 if (dso->short_name_allocated)
999 free((char *)dso->short_name);
1000
1001 dso->short_name = name;
1002 dso->short_name_len = strlen(name);
1003 dso->short_name_allocated = name_allocated;
1004 }
1005
1006 static void dso__set_basename(struct dso *dso)
1007 {
1008 /*
1009 * basename() may modify the path buffer, so we must pass
1010 * a copy.
1011 */
1012 char *base, *lname = strdup(dso->long_name);
1013
1014 if (!lname)
1015 return;
1016
1017 /*
1018 * basename() may return a pointer to internal
1019 * storage which is reused in subsequent calls,
1020 * so copy the result.
1021 */
1022 base = strdup(basename(lname));
1023
1024 free(lname);
1025
1026 if (!base)
1027 return;
1028
1029 dso__set_short_name(dso, base, true);
1030 }
1031
1032 int dso__name_len(const struct dso *dso)
1033 {
1034 if (!dso)
1035 return strlen("[unknown]");
1036 if (verbose)
1037 return dso->long_name_len;
1038
1039 return dso->short_name_len;
1040 }
1041
1042 bool dso__loaded(const struct dso *dso, enum map_type type)
1043 {
1044 return dso->loaded & (1 << type);
1045 }
1046
1047 bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
1048 {
1049 return dso->sorted_by_name & (1 << type);
1050 }
1051
1052 void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
1053 {
1054 dso->sorted_by_name |= (1 << type);
1055 }
1056
1057 struct dso *dso__new(const char *name)
1058 {
1059 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1060
1061 if (dso != NULL) {
1062 int i;
1063 strcpy(dso->name, name);
1064 dso__set_long_name(dso, dso->name, false);
1065 dso__set_short_name(dso, dso->name, false);
1066 for (i = 0; i < MAP__NR_TYPES; ++i)
1067 dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
1068 dso->data.cache = RB_ROOT;
1069 dso->data.fd = -1;
1070 dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1071 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1072 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1073 dso->is_64_bit = (sizeof(void *) == 8);
1074 dso->loaded = 0;
1075 dso->rel = 0;
1076 dso->sorted_by_name = 0;
1077 dso->has_build_id = 0;
1078 dso->has_srcline = 1;
1079 dso->a2l_fails = 1;
1080 dso->kernel = DSO_TYPE_USER;
1081 dso->needs_swap = DSO_SWAP__UNSET;
1082 RB_CLEAR_NODE(&dso->rb_node);
1083 dso->root = NULL;
1084 INIT_LIST_HEAD(&dso->node);
1085 INIT_LIST_HEAD(&dso->data.open_entry);
1086 pthread_mutex_init(&dso->lock, NULL);
1087 atomic_set(&dso->refcnt, 1);
1088 }
1089
1090 return dso;
1091 }
1092
1093 void dso__delete(struct dso *dso)
1094 {
1095 int i;
1096
1097 if (!RB_EMPTY_NODE(&dso->rb_node))
1098 pr_err("DSO %s is still in rbtree when being deleted!\n",
1099 dso->long_name);
1100 for (i = 0; i < MAP__NR_TYPES; ++i)
1101 symbols__delete(&dso->symbols[i]);
1102
1103 if (dso->short_name_allocated) {
1104 zfree((char **)&dso->short_name);
1105 dso->short_name_allocated = false;
1106 }
1107
1108 if (dso->long_name_allocated) {
1109 zfree((char **)&dso->long_name);
1110 dso->long_name_allocated = false;
1111 }
1112
1113 dso__data_close(dso);
1114 auxtrace_cache__free(dso->auxtrace_cache);
1115 dso_cache__free(dso);
1116 dso__free_a2l(dso);
1117 zfree(&dso->symsrc_filename);
1118 pthread_mutex_destroy(&dso->lock);
1119 free(dso);
1120 }
1121
1122 struct dso *dso__get(struct dso *dso)
1123 {
1124 if (dso)
1125 atomic_inc(&dso->refcnt);
1126 return dso;
1127 }
1128
1129 void dso__put(struct dso *dso)
1130 {
1131 if (dso && atomic_dec_and_test(&dso->refcnt))
1132 dso__delete(dso);
1133 }
1134
1135 void dso__set_build_id(struct dso *dso, void *build_id)
1136 {
1137 memcpy(dso->build_id, build_id, sizeof(dso->build_id));
1138 dso->has_build_id = 1;
1139 }
1140
1141 bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1142 {
1143 return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1144 }
1145
1146 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1147 {
1148 char path[PATH_MAX];
1149
1150 if (machine__is_default_guest(machine))
1151 return;
1152 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1153 if (sysfs__read_build_id(path, dso->build_id,
1154 sizeof(dso->build_id)) == 0)
1155 dso->has_build_id = true;
1156 }
1157
1158 int dso__kernel_module_get_build_id(struct dso *dso,
1159 const char *root_dir)
1160 {
1161 char filename[PATH_MAX];
1162 /*
1163 * kernel module short names are of the form "[module]" and
1164 * we need just "module" here.
1165 */
1166 const char *name = dso->short_name + 1;
1167
1168 snprintf(filename, sizeof(filename),
1169 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1170 root_dir, (int)strlen(name) - 1, name);
1171
1172 if (sysfs__read_build_id(filename, dso->build_id,
1173 sizeof(dso->build_id)) == 0)
1174 dso->has_build_id = true;
1175
1176 return 0;
1177 }
1178
1179 bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1180 {
1181 bool have_build_id = false;
1182 struct dso *pos;
1183
1184 list_for_each_entry(pos, head, node) {
1185 if (with_hits && !pos->hit)
1186 continue;
1187 if (pos->has_build_id) {
1188 have_build_id = true;
1189 continue;
1190 }
1191 if (filename__read_build_id(pos->long_name, pos->build_id,
1192 sizeof(pos->build_id)) > 0) {
1193 have_build_id = true;
1194 pos->has_build_id = true;
1195 }
1196 }
1197
1198 return have_build_id;
1199 }
1200
1201 void __dsos__add(struct dsos *dsos, struct dso *dso)
1202 {
1203 list_add_tail(&dso->node, &dsos->head);
1204 __dso__findlink_by_longname(&dsos->root, dso, NULL);
1205 /*
1206 * It is now in the linked list, so grab a reference. Garbage collect it
1207 * when memory is needed by looking at LRU dso instances in the list with
1208 * atomic_read(&dso->refcnt) == 1, i.e. with no references anywhere
1209 * besides the one for the list. Then, under a lock for the list, remove
1210 * it from the list and do a dso__put(), which will probably be the last
1211 * reference and will then call dso__delete(): end of life.
1212 *
1213 * That, or at the end of the 'struct machine' lifetime, when all
1214 * 'struct dso' instances will be removed from the list, in
1215 * dsos__exit(), if they have no other reference from some other data
1216 * structure.
1217 *
1218 * E.g.: after processing a 'perf.data' file and storing references
1219 * to objects instantiated while processing events, we will have
1220 * references to the 'thread', 'map', 'dso' structs all from 'struct
1221 * hist_entry' instances, but we may not need anything not referenced,
1222 * so we might as well call machines__exit()/machines__delete() and
1223 * garbage collect it.
1224 */
1225 dso__get(dso);
1226 }
1227
1228 void dsos__add(struct dsos *dsos, struct dso *dso)
1229 {
1230 pthread_rwlock_wrlock(&dsos->lock);
1231 __dsos__add(dsos, dso);
1232 pthread_rwlock_unlock(&dsos->lock);
1233 }
1234
1235 struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1236 {
1237 struct dso *pos;
1238
1239 if (cmp_short) {
1240 list_for_each_entry(pos, &dsos->head, node)
1241 if (strcmp(pos->short_name, name) == 0)
1242 return pos;
1243 return NULL;
1244 }
1245 return __dso__find_by_longname(&dsos->root, name);
1246 }
1247
1248 struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1249 {
1250 struct dso *dso;
1251 pthread_rwlock_rdlock(&dsos->lock);
1252 dso = __dsos__find(dsos, name, cmp_short);
1253 pthread_rwlock_unlock(&dsos->lock);
1254 return dso;
1255 }
1256
1257 struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
1258 {
1259 struct dso *dso = dso__new(name);
1260
1261 if (dso != NULL) {
1262 __dsos__add(dsos, dso);
1263 dso__set_basename(dso);
1264 }
1265 return dso;
1266 }
1267
1268 struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1269 {
1270 struct dso *dso = __dsos__find(dsos, name, false);
1271
1272 return dso ? dso : __dsos__addnew(dsos, name);
1273 }
1274
1275 struct dso *dsos__findnew(struct dsos *dsos, const char *name)
1276 {
1277 struct dso *dso;
1278 pthread_rwlock_wrlock(&dsos->lock);
1279 dso = dso__get(__dsos__findnew(dsos, name));
1280 pthread_rwlock_unlock(&dsos->lock);
1281 return dso;
1282 }
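
/*
 * Illustrative sketch, not part of the original file: the reference
 * counting contract described in __dsos__add() -- dsos__findnew()
 * returns a dso with its refcount bumped, so the caller must drop it
 * with dso__put() when done. The path is just an example.
 */
static void __maybe_unused dsos__findnew_example(struct dsos *dsos)
{
	struct dso *dso = dsos__findnew(dsos, "/usr/lib64/libc-2.21.so");

	if (dso) {
		/* ... use dso ... */
		dso__put(dso);
	}
}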
1283
1284 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1285 bool (skip)(struct dso *dso, int parm), int parm)
1286 {
1287 struct dso *pos;
1288 size_t ret = 0;
1289
1290 list_for_each_entry(pos, head, node) {
1291 if (skip && skip(pos, parm))
1292 continue;
1293 ret += dso__fprintf_buildid(pos, fp);
1294 ret += fprintf(fp, " %s\n", pos->long_name);
1295 }
1296 return ret;
1297 }
1298
1299 size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1300 {
1301 struct dso *pos;
1302 size_t ret = 0;
1303
1304 list_for_each_entry(pos, head, node) {
1305 int i;
1306 for (i = 0; i < MAP__NR_TYPES; ++i)
1307 ret += dso__fprintf(pos, i, fp);
1308 }
1309
1310 return ret;
1311 }
1312
1313 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1314 {
1315 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1316
1317 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1318 return fprintf(fp, "%s", sbuild_id);
1319 }
1320
1321 size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
1322 {
1323 struct rb_node *nd;
1324 size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1325
1326 if (dso->short_name != dso->long_name)
1327 ret += fprintf(fp, "%s, ", dso->long_name);
1328 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
1329 dso__loaded(dso, type) ? "" : "NOT ");
1330 ret += dso__fprintf_buildid(dso, fp);
1331 ret += fprintf(fp, ")\n");
1332 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
1333 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1334 ret += symbol__fprintf(pos, fp);
1335 }
1336
1337 return ret;
1338 }
1339
1340 enum dso_type dso__type(struct dso *dso, struct machine *machine)
1341 {
1342 int fd;
1343 enum dso_type type = DSO__TYPE_UNKNOWN;
1344
1345 fd = dso__data_get_fd(dso, machine);
1346 if (fd >= 0) {
1347 type = dso__type_fd(fd);
1348 dso__data_put_fd(dso);
1349 }
1350
1351 return type;
1352 }
1353
1354 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1355 {
1356 int idx, errnum = dso->load_errno;
1357 /*
1358 * This must have the same ordering as enum dso_load_errno.
1359 */
1360 static const char *dso_load__error_str[] = {
1361 "Internal tools/perf/ library error",
1362 "Invalid ELF file",
1363 "Can not read build id",
1364 "Mismatching build id",
1365 "Decompression failure",
1366 };
1367
1368 BUG_ON(buflen == 0);
1369
1370 if (errnum >= 0) {
1371 const char *err = strerror_r(errnum, buf, buflen);
1372
1373 if (err != buf)
1374 scnprintf(buf, buflen, "%s", err);
1375
1376 return 0;
1377 }
1378
1379 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1380 return -1;
1381
1382 idx = errnum - __DSO_LOAD_ERRNO__START;
1383 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1384 return 0;
1385 }
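
/*
 * Illustrative sketch, not part of the original file: turning a failed
 * dso load (dso->load_errno) into a human readable message.
 */
static void __maybe_unused dso__strerror_load_example(struct dso *dso)
{
	char msg[STRERR_BUFSIZE];

	if (!dso__strerror_load(dso, msg, sizeof(msg)))
		pr_debug("failed to load %s: %s\n", dso->long_name, msg);
}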
1386