1 #include "util.h"
2 #include <sys/types.h>
3 #include <byteswap.h>
4 #include <unistd.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <linux/list.h>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <sys/utsname.h>
11
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "header.h"
15 #include "../perf.h"
16 #include "trace-event.h"
17 #include "session.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "cpumap.h"
21 #include "pmu.h"
22 #include "vdso.h"
23 #include "strbuf.h"
24 #include "build-id.h"
25 #include "data.h"
26
27 /*
28 * magic2 = "PERFILE2"
29  * must be stored as a numerical value so that the endianness
30  * determines its memory layout. That way we are able to
31  * detect the endianness when reading the perf.data file
32  * back.
33  *
34  * We also check for the legacy (PERFFILE) format.
35 */
36 static const char *__perf_magic1 = "PERFFILE";
37 static const u64 __perf_magic2 = 0x32454c4946524550ULL;
38 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
39
40 #define PERF_MAGIC __perf_magic2
41
42 struct perf_file_attr {
43 struct perf_event_attr attr;
44 struct perf_file_section ids;
45 };
46
47 void perf_header__set_feat(struct perf_header *header, int feat)
48 {
49 set_bit(feat, header->adds_features);
50 }
51
52 void perf_header__clear_feat(struct perf_header *header, int feat)
53 {
54 clear_bit(feat, header->adds_features);
55 }
56
57 bool perf_header__has_feat(const struct perf_header *header, int feat)
58 {
59 return test_bit(feat, header->adds_features);
60 }
61
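/*
 * Write the whole buffer to fd, retrying on short writes.
 * Returns 0 on success or -errno on write failure.
 */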
62 static int do_write(int fd, const void *buf, size_t size)
63 {
64 while (size) {
65 int ret = write(fd, buf, size);
66
67 if (ret < 0)
68 return -errno;
69
70 size -= ret;
71 buf += ret;
72 }
73
74 return 0;
75 }
76
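/*
 * Write 'count' bytes from 'bf', then zero padding up to a total of
 * 'count_aligned' bytes.
 */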
77 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
78 {
79 static const char zero_buf[NAME_ALIGN];
80 int err = do_write(fd, bf, count);
81
82 if (!err)
83 err = do_write(fd, zero_buf, count_aligned - count);
84
85 return err;
86 }
87
88 #define string_size(str) \
89 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
90
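/*
 * Write a string as a u32 length (the NUL-terminated length rounded up to
 * NAME_ALIGN) followed by the string itself, zero-padded to that length.
 */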
91 static int do_write_string(int fd, const char *str)
92 {
93 u32 len, olen;
94 int ret;
95
96 olen = strlen(str) + 1;
97 len = PERF_ALIGN(olen, NAME_ALIGN);
98
99 /* write len, incl. \0 */
100 ret = do_write(fd, &len, sizeof(len));
101 if (ret < 0)
102 return ret;
103
104 return write_padded(fd, str, olen, len);
105 }
106
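/*
 * Read back a string written by do_write_string(): a u32 length
 * (byte-swapped if needed) followed by the zero-padded string.
 * Returns a malloc'd buffer or NULL on error.
 */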
107 static char *do_read_string(int fd, struct perf_header *ph)
108 {
109 ssize_t sz, ret;
110 u32 len;
111 char *buf;
112
113 sz = readn(fd, &len, sizeof(len));
114 if (sz < (ssize_t)sizeof(len))
115 return NULL;
116
117 if (ph->needs_swap)
118 len = bswap_32(len);
119
120 buf = malloc(len);
121 if (!buf)
122 return NULL;
123
124 ret = readn(fd, buf, len);
125 if (ret == (ssize_t)len) {
126 /*
127 * strings are padded by zeroes
128 * thus the actual strlen of buf
129 * may be less than len
130 */
131 return buf;
132 }
133
134 free(buf);
135 return NULL;
136 }
137
138 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
139 struct perf_evlist *evlist)
140 {
141 return read_tracing_data(fd, &evlist->entries);
142 }
143
144
145 static int write_build_id(int fd, struct perf_header *h,
146 struct perf_evlist *evlist __maybe_unused)
147 {
148 struct perf_session *session;
149 int err;
150
151 session = container_of(h, struct perf_session, header);
152
153 if (!perf_session__read_build_ids(session, true))
154 return -1;
155
156 err = perf_session__write_buildid_table(session, fd);
157 if (err < 0) {
158 pr_debug("failed to write buildid table\n");
159 return err;
160 }
161 perf_session__cache_build_ids(session);
162
163 return 0;
164 }
165
166 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
167 struct perf_evlist *evlist __maybe_unused)
168 {
169 struct utsname uts;
170 int ret;
171
172 ret = uname(&uts);
173 if (ret < 0)
174 return -1;
175
176 return do_write_string(fd, uts.nodename);
177 }
178
179 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
180 struct perf_evlist *evlist __maybe_unused)
181 {
182 struct utsname uts;
183 int ret;
184
185 ret = uname(&uts);
186 if (ret < 0)
187 return -1;
188
189 return do_write_string(fd, uts.release);
190 }
191
192 static int write_arch(int fd, struct perf_header *h __maybe_unused,
193 struct perf_evlist *evlist __maybe_unused)
194 {
195 struct utsname uts;
196 int ret;
197
198 ret = uname(&uts);
199 if (ret < 0)
200 return -1;
201
202 return do_write_string(fd, uts.machine);
203 }
204
205 static int write_version(int fd, struct perf_header *h __maybe_unused,
206 struct perf_evlist *evlist __maybe_unused)
207 {
208 return do_write_string(fd, perf_version_string);
209 }
210
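/*
 * Find the first /proc/cpuinfo line starting with 'cpuinfo_proc', strip
 * the "<field>: " prefix, squash repeated whitespace in the value and
 * write the result out as a header string.
 */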
211 static int __write_cpudesc(int fd, const char *cpuinfo_proc)
212 {
213 FILE *file;
214 char *buf = NULL;
215 char *s, *p;
216 const char *search = cpuinfo_proc;
217 size_t len = 0;
218 int ret = -1;
219
220 if (!search)
221 return -1;
222
223 file = fopen("/proc/cpuinfo", "r");
224 if (!file)
225 return -1;
226
227 while (getline(&buf, &len, file) > 0) {
228 ret = strncmp(buf, search, strlen(search));
229 if (!ret)
230 break;
231 }
232
233 if (ret) {
234 ret = -1;
235 goto done;
236 }
237
238 s = buf;
239
240 p = strchr(buf, ':');
241 if (p && *(p+1) == ' ' && *(p+2))
242 s = p + 2;
243 p = strchr(s, '\n');
244 if (p)
245 *p = '\0';
246
247 /* squash extra space characters (branding string) */
248 p = s;
249 while (*p) {
250 if (isspace(*p)) {
251 char *r = p + 1;
252 char *q = r;
253 *p = ' ';
254 while (*q && isspace(*q))
255 q++;
256 if (q != (p+1))
257 while ((*r++ = *q++));
258 }
259 p++;
260 }
261 ret = do_write_string(fd, s);
262 done:
263 free(buf);
264 fclose(file);
265 return ret;
266 }
267
268 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
269 struct perf_evlist *evlist __maybe_unused)
270 {
271 #ifndef CPUINFO_PROC
272 #define CPUINFO_PROC {"model name", }
273 #endif
274 const char *cpuinfo_procs[] = CPUINFO_PROC;
275 unsigned int i;
276
277 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
278 int ret;
279 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
280 if (ret >= 0)
281 return ret;
282 }
283 return -1;
284 }
285
286
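/*
 * Write the number of configured CPUs followed by the number of online
 * CPUs, both as u32.
 */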
287 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
288 struct perf_evlist *evlist __maybe_unused)
289 {
290 long nr;
291 u32 nrc, nra;
292 int ret;
293
294 nr = sysconf(_SC_NPROCESSORS_CONF);
295 if (nr < 0)
296 return -1;
297
298 nrc = (u32)(nr & UINT_MAX);
299
300 nr = sysconf(_SC_NPROCESSORS_ONLN);
301 if (nr < 0)
302 return -1;
303
304 nra = (u32)(nr & UINT_MAX);
305
306 ret = do_write(fd, &nrc, sizeof(nrc));
307 if (ret < 0)
308 return ret;
309
310 return do_write(fd, &nra, sizeof(nra));
311 }
312
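/*
 * Layout written for HEADER_EVENT_DESC:
 *   u32 nr_events, u32 attr_size, then per event:
 *   struct perf_event_attr, u32 nr_ids, name string, u64 ids[nr_ids]
 */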
313 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
314 struct perf_evlist *evlist)
315 {
316 struct perf_evsel *evsel;
317 u32 nre, nri, sz;
318 int ret;
319
320 nre = evlist->nr_entries;
321
322 /*
323 * write number of events
324 */
325 ret = do_write(fd, &nre, sizeof(nre));
326 if (ret < 0)
327 return ret;
328
329 /*
330 * size of perf_event_attr struct
331 */
332 sz = (u32)sizeof(evsel->attr);
333 ret = do_write(fd, &sz, sizeof(sz));
334 if (ret < 0)
335 return ret;
336
337 evlist__for_each(evlist, evsel) {
338 ret = do_write(fd, &evsel->attr, sz);
339 if (ret < 0)
340 return ret;
341 /*
342 	 * write the number of unique ids for this event;
343 	 * there is one id per instance of an event
344 	 *
345 	 * copy into an nri to be independent of the
346 	 * type of evsel->ids
347 */
348 nri = evsel->ids;
349 ret = do_write(fd, &nri, sizeof(nri));
350 if (ret < 0)
351 return ret;
352
353 /*
354 * write event string as passed on cmdline
355 */
356 ret = do_write_string(fd, perf_evsel__name(evsel));
357 if (ret < 0)
358 return ret;
359 /*
360 * write unique ids for this event
361 */
362 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
363 if (ret < 0)
364 return ret;
365 }
366 return 0;
367 }
368
369 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
370 struct perf_evlist *evlist __maybe_unused)
371 {
372 char buf[MAXPATHLEN];
373 char proc[32];
374 u32 n;
375 int i, ret;
376
377 /*
378 	 * actual path to the perf binary
379 */
380 sprintf(proc, "/proc/%d/exe", getpid());
381 ret = readlink(proc, buf, sizeof(buf));
382 if (ret <= 0)
383 return -1;
384
385 /* readlink() does not add null termination */
386 buf[ret] = '\0';
387
388 /* account for binary path */
389 n = perf_env.nr_cmdline + 1;
390
391 ret = do_write(fd, &n, sizeof(n));
392 if (ret < 0)
393 return ret;
394
395 ret = do_write_string(fd, buf);
396 if (ret < 0)
397 return ret;
398
399 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
400 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
401 if (ret < 0)
402 return ret;
403 }
404 return 0;
405 }
406
407 #define CORE_SIB_FMT \
408 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
409 #define THRD_SIB_FMT \
410 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
411
412 struct cpu_topo {
413 u32 cpu_nr;
414 u32 core_sib;
415 u32 thread_sib;
416 char **core_siblings;
417 char **thread_siblings;
418 };
419
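/*
 * Read the core_siblings_list and thread_siblings_list for 'cpu' from
 * sysfs and add each to 'tp' if it is not already recorded.
 */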
420 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
421 {
422 FILE *fp;
423 char filename[MAXPATHLEN];
424 char *buf = NULL, *p;
425 size_t len = 0;
426 ssize_t sret;
427 u32 i = 0;
428 int ret = -1;
429
430 sprintf(filename, CORE_SIB_FMT, cpu);
431 fp = fopen(filename, "r");
432 if (!fp)
433 goto try_threads;
434
435 sret = getline(&buf, &len, fp);
436 fclose(fp);
437 if (sret <= 0)
438 goto try_threads;
439
440 p = strchr(buf, '\n');
441 if (p)
442 *p = '\0';
443
444 for (i = 0; i < tp->core_sib; i++) {
445 if (!strcmp(buf, tp->core_siblings[i]))
446 break;
447 }
448 if (i == tp->core_sib) {
449 tp->core_siblings[i] = buf;
450 tp->core_sib++;
451 buf = NULL;
452 len = 0;
453 }
454 ret = 0;
455
456 try_threads:
457 sprintf(filename, THRD_SIB_FMT, cpu);
458 fp = fopen(filename, "r");
459 if (!fp)
460 goto done;
461
462 if (getline(&buf, &len, fp) <= 0)
463 goto done;
464
465 p = strchr(buf, '\n');
466 if (p)
467 *p = '\0';
468
469 for (i = 0; i < tp->thread_sib; i++) {
470 if (!strcmp(buf, tp->thread_siblings[i]))
471 break;
472 }
473 if (i == tp->thread_sib) {
474 tp->thread_siblings[i] = buf;
475 tp->thread_sib++;
476 buf = NULL;
477 }
478 ret = 0;
479 done:
480 	if (fp)
481 fclose(fp);
482 free(buf);
483 return ret;
484 }
485
486 static void free_cpu_topo(struct cpu_topo *tp)
487 {
488 u32 i;
489
490 if (!tp)
491 return;
492
493 for (i = 0 ; i < tp->core_sib; i++)
494 zfree(&tp->core_siblings[i]);
495
496 for (i = 0 ; i < tp->thread_sib; i++)
497 zfree(&tp->thread_siblings[i]);
498
499 free(tp);
500 }
501
502 static struct cpu_topo *build_cpu_topology(void)
503 {
504 struct cpu_topo *tp;
505 void *addr;
506 u32 nr, i;
507 size_t sz;
508 long ncpus;
509 int ret = -1;
510
511 ncpus = sysconf(_SC_NPROCESSORS_CONF);
512 if (ncpus < 0)
513 return NULL;
514
515 nr = (u32)(ncpus & UINT_MAX);
516
517 sz = nr * sizeof(char *);
518
519 addr = calloc(1, sizeof(*tp) + 2 * sz);
520 if (!addr)
521 return NULL;
522
523 tp = addr;
524 tp->cpu_nr = nr;
525 addr += sizeof(*tp);
526 tp->core_siblings = addr;
527 addr += sz;
528 tp->thread_siblings = addr;
529
530 for (i = 0; i < nr; i++) {
531 ret = build_cpu_topo(tp, i);
532 if (ret < 0)
533 break;
534 }
535 if (ret) {
536 free_cpu_topo(tp);
537 tp = NULL;
538 }
539 return tp;
540 }
541
542 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
543 struct perf_evlist *evlist __maybe_unused)
544 {
545 struct cpu_topo *tp;
546 u32 i;
547 int ret, j;
548
549 tp = build_cpu_topology();
550 if (!tp)
551 return -1;
552
553 ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
554 if (ret < 0)
555 goto done;
556
557 for (i = 0; i < tp->core_sib; i++) {
558 ret = do_write_string(fd, tp->core_siblings[i]);
559 if (ret < 0)
560 goto done;
561 }
562 ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
563 if (ret < 0)
564 goto done;
565
566 for (i = 0; i < tp->thread_sib; i++) {
567 ret = do_write_string(fd, tp->thread_siblings[i]);
568 if (ret < 0)
569 break;
570 }
571
572 ret = perf_env__read_cpu_topology_map(&perf_env);
573 if (ret < 0)
574 goto done;
575
576 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
577 ret = do_write(fd, &perf_env.cpu[j].core_id,
578 sizeof(perf_env.cpu[j].core_id));
579 if (ret < 0)
580 return ret;
581 ret = do_write(fd, &perf_env.cpu[j].socket_id,
582 sizeof(perf_env.cpu[j].socket_id));
583 if (ret < 0)
584 return ret;
585 }
586 done:
587 free_cpu_topo(tp);
588 return ret;
589 }
590
591
592
593 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
594 struct perf_evlist *evlist __maybe_unused)
595 {
596 char *buf = NULL;
597 FILE *fp;
598 size_t len = 0;
599 int ret = -1, n;
600 uint64_t mem;
601
602 fp = fopen("/proc/meminfo", "r");
603 if (!fp)
604 return -1;
605
606 while (getline(&buf, &len, fp) > 0) {
607 ret = strncmp(buf, "MemTotal:", 9);
608 if (!ret)
609 break;
610 }
611 if (!ret) {
612 n = sscanf(buf, "%*s %"PRIu64, &mem);
613 if (n == 1)
614 ret = do_write(fd, &mem, sizeof(mem));
615 } else
616 ret = -1;
617 free(buf);
618 fclose(fp);
619 return ret;
620 }
621
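/*
 * Write one NUMA node entry: MemTotal and MemFree from the node's
 * meminfo (u64, in kB) followed by the node's cpulist string.
 */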
622 static int write_topo_node(int fd, int node)
623 {
624 char str[MAXPATHLEN];
625 char field[32];
626 char *buf = NULL, *p;
627 size_t len = 0;
628 FILE *fp;
629 u64 mem_total, mem_free, mem;
630 int ret = -1;
631
632 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
633 fp = fopen(str, "r");
634 if (!fp)
635 return -1;
636
637 while (getline(&buf, &len, fp) > 0) {
638 /* skip over invalid lines */
639 if (!strchr(buf, ':'))
640 continue;
641 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
642 goto done;
643 if (!strcmp(field, "MemTotal:"))
644 mem_total = mem;
645 if (!strcmp(field, "MemFree:"))
646 mem_free = mem;
647 }
648
649 fclose(fp);
650 fp = NULL;
651
652 ret = do_write(fd, &mem_total, sizeof(u64));
653 if (ret)
654 goto done;
655
656 ret = do_write(fd, &mem_free, sizeof(u64));
657 if (ret)
658 goto done;
659
660 ret = -1;
661 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
662
663 fp = fopen(str, "r");
664 if (!fp)
665 goto done;
666
667 if (getline(&buf, &len, fp) <= 0)
668 goto done;
669
670 p = strchr(buf, '\n');
671 if (p)
672 *p = '\0';
673
674 ret = do_write_string(fd, buf);
675 done:
676 free(buf);
677 if (fp)
678 fclose(fp);
679 return ret;
680 }
681
682 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
683 struct perf_evlist *evlist __maybe_unused)
684 {
685 char *buf = NULL;
686 size_t len = 0;
687 FILE *fp;
688 struct cpu_map *node_map = NULL;
689 char *c;
690 u32 nr, i, j;
691 int ret = -1;
692
693 fp = fopen("/sys/devices/system/node/online", "r");
694 if (!fp)
695 return -1;
696
697 if (getline(&buf, &len, fp) <= 0)
698 goto done;
699
700 c = strchr(buf, '\n');
701 if (c)
702 *c = '\0';
703
704 node_map = cpu_map__new(buf);
705 if (!node_map)
706 goto done;
707
708 nr = (u32)node_map->nr;
709
710 ret = do_write(fd, &nr, sizeof(nr));
711 if (ret < 0)
712 goto done;
713
714 for (i = 0; i < nr; i++) {
715 j = (u32)node_map->map[i];
716 ret = do_write(fd, &j, sizeof(j));
717 if (ret < 0)
718 break;
719
720 ret = write_topo_node(fd, i);
721 if (ret < 0)
722 break;
723 }
724 done:
725 free(buf);
726 fclose(fp);
727 free(node_map);
728 return ret;
729 }
730
731 /*
732 * File format:
733 *
734 * struct pmu_mappings {
735 * u32 pmu_num;
736 * struct pmu_map {
737 * u32 type;
738 * char name[];
739 * }[pmu_num];
740 * };
741 */
742
743 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
744 struct perf_evlist *evlist __maybe_unused)
745 {
746 struct perf_pmu *pmu = NULL;
747 off_t offset = lseek(fd, 0, SEEK_CUR);
748 __u32 pmu_num = 0;
749 int ret;
750
751 /* write real pmu_num later */
752 ret = do_write(fd, &pmu_num, sizeof(pmu_num));
753 if (ret < 0)
754 return ret;
755
756 while ((pmu = perf_pmu__scan(pmu))) {
757 if (!pmu->name)
758 continue;
759 pmu_num++;
760
761 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
762 if (ret < 0)
763 return ret;
764
765 ret = do_write_string(fd, pmu->name);
766 if (ret < 0)
767 return ret;
768 }
769
770 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
771 /* discard all */
772 lseek(fd, offset, SEEK_SET);
773 return -1;
774 }
775
776 return 0;
777 }
778
779 /*
780 * File format:
781 *
782 * struct group_descs {
783 * u32 nr_groups;
784 * struct group_desc {
785 * char name[];
786 * u32 leader_idx;
787 * u32 nr_members;
788 * }[nr_groups];
789 * };
790 */
791 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
792 struct perf_evlist *evlist)
793 {
794 u32 nr_groups = evlist->nr_groups;
795 struct perf_evsel *evsel;
796 int ret;
797
798 ret = do_write(fd, &nr_groups, sizeof(nr_groups));
799 if (ret < 0)
800 return ret;
801
802 evlist__for_each(evlist, evsel) {
803 if (perf_evsel__is_group_leader(evsel) &&
804 evsel->nr_members > 1) {
805 const char *name = evsel->group_name ?: "{anon_group}";
806 u32 leader_idx = evsel->idx;
807 u32 nr_members = evsel->nr_members;
808
809 ret = do_write_string(fd, name);
810 if (ret < 0)
811 return ret;
812
813 ret = do_write(fd, &leader_idx, sizeof(leader_idx));
814 if (ret < 0)
815 return ret;
816
817 ret = do_write(fd, &nr_members, sizeof(nr_members));
818 if (ret < 0)
819 return ret;
820 }
821 }
822 return 0;
823 }
824
825 /*
826 * default get_cpuid(): nothing gets recorded
827 * actual implementation must be in arch/$(ARCH)/util/header.c
828 */
829 int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
830 size_t sz __maybe_unused)
831 {
832 return -1;
833 }
834
835 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
836 struct perf_evlist *evlist __maybe_unused)
837 {
838 char buffer[64];
839 int ret;
840
841 ret = get_cpuid(buffer, sizeof(buffer));
842 if (!ret)
843 goto write_it;
844
845 return -1;
846 write_it:
847 return do_write_string(fd, buffer);
848 }
849
850 static int write_branch_stack(int fd __maybe_unused,
851 struct perf_header *h __maybe_unused,
852 struct perf_evlist *evlist __maybe_unused)
853 {
854 return 0;
855 }
856
857 static int write_auxtrace(int fd, struct perf_header *h,
858 struct perf_evlist *evlist __maybe_unused)
859 {
860 struct perf_session *session;
861 int err;
862
863 session = container_of(h, struct perf_session, header);
864
865 err = auxtrace_index__write(fd, &session->auxtrace_index);
866 if (err < 0)
867 pr_err("Failed to write auxtrace index\n");
868 return err;
869 }
870
871 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
872 FILE *fp)
873 {
874 fprintf(fp, "# hostname : %s\n", ph->env.hostname);
875 }
876
877 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
878 FILE *fp)
879 {
880 fprintf(fp, "# os release : %s\n", ph->env.os_release);
881 }
882
883 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
884 {
885 fprintf(fp, "# arch : %s\n", ph->env.arch);
886 }
887
888 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
889 FILE *fp)
890 {
891 fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
892 }
893
894 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
895 FILE *fp)
896 {
897 fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
898 fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
899 }
900
901 static void print_version(struct perf_header *ph, int fd __maybe_unused,
902 FILE *fp)
903 {
904 fprintf(fp, "# perf version : %s\n", ph->env.version);
905 }
906
907 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
908 FILE *fp)
909 {
910 int nr, i;
911
912 nr = ph->env.nr_cmdline;
913
914 fprintf(fp, "# cmdline : ");
915
916 for (i = 0; i < nr; i++)
917 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
918 fputc('\n', fp);
919 }
920
921 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
922 FILE *fp)
923 {
924 int nr, i;
925 char *str;
926 int cpu_nr = ph->env.nr_cpus_online;
927
928 nr = ph->env.nr_sibling_cores;
929 str = ph->env.sibling_cores;
930
931 for (i = 0; i < nr; i++) {
932 fprintf(fp, "# sibling cores : %s\n", str);
933 str += strlen(str) + 1;
934 }
935
936 nr = ph->env.nr_sibling_threads;
937 str = ph->env.sibling_threads;
938
939 for (i = 0; i < nr; i++) {
940 fprintf(fp, "# sibling threads : %s\n", str);
941 str += strlen(str) + 1;
942 }
943
944 if (ph->env.cpu != NULL) {
945 for (i = 0; i < cpu_nr; i++)
946 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
947 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
948 } else
949 fprintf(fp, "# Core ID and Socket ID information is not available\n");
950 }
951
952 static void free_event_desc(struct perf_evsel *events)
953 {
954 struct perf_evsel *evsel;
955
956 if (!events)
957 return;
958
959 for (evsel = events; evsel->attr.size; evsel++) {
960 zfree(&evsel->name);
961 zfree(&evsel->id);
962 }
963
964 free(events);
965 }
966
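/*
 * Read the HEADER_EVENT_DESC section back into an array of struct
 * perf_evsel terminated by an entry with attr.size == 0, byte-swapping
 * the on-file data as needed.
 */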
967 static struct perf_evsel *
968 read_event_desc(struct perf_header *ph, int fd)
969 {
970 struct perf_evsel *evsel, *events = NULL;
971 u64 *id;
972 void *buf = NULL;
973 u32 nre, sz, nr, i, j;
974 ssize_t ret;
975 size_t msz;
976
977 /* number of events */
978 ret = readn(fd, &nre, sizeof(nre));
979 if (ret != (ssize_t)sizeof(nre))
980 goto error;
981
982 if (ph->needs_swap)
983 nre = bswap_32(nre);
984
985 ret = readn(fd, &sz, sizeof(sz));
986 if (ret != (ssize_t)sizeof(sz))
987 goto error;
988
989 if (ph->needs_swap)
990 sz = bswap_32(sz);
991
992 /* buffer to hold on file attr struct */
993 buf = malloc(sz);
994 if (!buf)
995 goto error;
996
997 /* the last event terminates with evsel->attr.size == 0: */
998 events = calloc(nre + 1, sizeof(*events));
999 if (!events)
1000 goto error;
1001
1002 msz = sizeof(evsel->attr);
1003 if (sz < msz)
1004 msz = sz;
1005
1006 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1007 evsel->idx = i;
1008
1009 /*
1010 * must read entire on-file attr struct to
1011 * sync up with layout.
1012 */
1013 ret = readn(fd, buf, sz);
1014 if (ret != (ssize_t)sz)
1015 goto error;
1016
1017 if (ph->needs_swap)
1018 perf_event__attr_swap(buf);
1019
1020 memcpy(&evsel->attr, buf, msz);
1021
1022 ret = readn(fd, &nr, sizeof(nr));
1023 if (ret != (ssize_t)sizeof(nr))
1024 goto error;
1025
1026 if (ph->needs_swap) {
1027 nr = bswap_32(nr);
1028 evsel->needs_swap = true;
1029 }
1030
1031 evsel->name = do_read_string(fd, ph);
1032
1033 if (!nr)
1034 continue;
1035
1036 id = calloc(nr, sizeof(*id));
1037 if (!id)
1038 goto error;
1039 evsel->ids = nr;
1040 evsel->id = id;
1041
1042 for (j = 0 ; j < nr; j++) {
1043 ret = readn(fd, id, sizeof(*id));
1044 if (ret != (ssize_t)sizeof(*id))
1045 goto error;
1046 if (ph->needs_swap)
1047 *id = bswap_64(*id);
1048 id++;
1049 }
1050 }
1051 out:
1052 free(buf);
1053 return events;
1054 error:
1055 free_event_desc(events);
1056 events = NULL;
1057 goto out;
1058 }
1059
1060 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1061 void *priv __attribute__((unused)))
1062 {
1063 return fprintf(fp, ", %s = %s", name, val);
1064 }
1065
1066 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1067 {
1068 struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1069 u32 j;
1070 u64 *id;
1071
1072 if (!events) {
1073 fprintf(fp, "# event desc: not available or unable to read\n");
1074 return;
1075 }
1076
1077 for (evsel = events; evsel->attr.size; evsel++) {
1078 fprintf(fp, "# event : name = %s, ", evsel->name);
1079
1080 if (evsel->ids) {
1081 fprintf(fp, ", id = {");
1082 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1083 if (j)
1084 fputc(',', fp);
1085 fprintf(fp, " %"PRIu64, *id);
1086 }
1087 fprintf(fp, " }");
1088 }
1089
1090 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1091
1092 fputc('\n', fp);
1093 }
1094
1095 free_event_desc(events);
1096 }
1097
1098 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1099 FILE *fp)
1100 {
1101 fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1102 }
1103
1104 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1105 FILE *fp)
1106 {
1107 u32 nr, c, i;
1108 char *str, *tmp;
1109 uint64_t mem_total, mem_free;
1110
1111 /* nr nodes */
1112 nr = ph->env.nr_numa_nodes;
1113 str = ph->env.numa_nodes;
1114
1115 for (i = 0; i < nr; i++) {
1116 /* node number */
1117 c = strtoul(str, &tmp, 0);
1118 if (*tmp != ':')
1119 goto error;
1120
1121 str = tmp + 1;
1122 mem_total = strtoull(str, &tmp, 0);
1123 if (*tmp != ':')
1124 goto error;
1125
1126 str = tmp + 1;
1127 mem_free = strtoull(str, &tmp, 0);
1128 if (*tmp != ':')
1129 goto error;
1130
1131 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1132 " free = %"PRIu64" kB\n",
1133 c, mem_total, mem_free);
1134
1135 str = tmp + 1;
1136 fprintf(fp, "# node%u cpu list : %s\n", c, str);
1137
1138 str += strlen(str) + 1;
1139 }
1140 return;
1141 error:
1142 fprintf(fp, "# numa topology : not available\n");
1143 }
1144
1145 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1146 {
1147 fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1148 }
1149
1150 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1151 int fd __maybe_unused, FILE *fp)
1152 {
1153 fprintf(fp, "# contains samples with branch stack\n");
1154 }
1155
1156 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1157 int fd __maybe_unused, FILE *fp)
1158 {
1159 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1160 }
1161
1162 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1163 FILE *fp)
1164 {
1165 const char *delimiter = "# pmu mappings: ";
1166 char *str, *tmp;
1167 u32 pmu_num;
1168 u32 type;
1169
1170 pmu_num = ph->env.nr_pmu_mappings;
1171 if (!pmu_num) {
1172 fprintf(fp, "# pmu mappings: not available\n");
1173 return;
1174 }
1175
1176 str = ph->env.pmu_mappings;
1177
1178 while (pmu_num) {
1179 type = strtoul(str, &tmp, 0);
1180 if (*tmp != ':')
1181 goto error;
1182
1183 str = tmp + 1;
1184 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1185
1186 delimiter = ", ";
1187 str += strlen(str) + 1;
1188 pmu_num--;
1189 }
1190
1191 fprintf(fp, "\n");
1192
1193 if (!pmu_num)
1194 return;
1195 error:
1196 fprintf(fp, "# pmu mappings: unable to read\n");
1197 }
1198
1199 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1200 FILE *fp)
1201 {
1202 struct perf_session *session;
1203 struct perf_evsel *evsel;
1204 u32 nr = 0;
1205
1206 session = container_of(ph, struct perf_session, header);
1207
1208 evlist__for_each(session->evlist, evsel) {
1209 if (perf_evsel__is_group_leader(evsel) &&
1210 evsel->nr_members > 1) {
1211 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1212 perf_evsel__name(evsel));
1213
1214 nr = evsel->nr_members - 1;
1215 } else if (nr) {
1216 fprintf(fp, ",%s", perf_evsel__name(evsel));
1217
1218 if (--nr == 0)
1219 fprintf(fp, "}\n");
1220 }
1221 }
1222 }
1223
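/*
 * Record the build id carried by 'bev' on the dso for 'filename' in the
 * machine identified by bev->pid, classifying the dso as kernel, guest
 * kernel or user based on the cpumode in the event header.
 */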
1224 static int __event_process_build_id(struct build_id_event *bev,
1225 char *filename,
1226 struct perf_session *session)
1227 {
1228 int err = -1;
1229 struct machine *machine;
1230 u16 cpumode;
1231 struct dso *dso;
1232 enum dso_kernel_type dso_type;
1233
1234 machine = perf_session__findnew_machine(session, bev->pid);
1235 if (!machine)
1236 goto out;
1237
1238 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1239
1240 switch (cpumode) {
1241 case PERF_RECORD_MISC_KERNEL:
1242 dso_type = DSO_TYPE_KERNEL;
1243 break;
1244 case PERF_RECORD_MISC_GUEST_KERNEL:
1245 dso_type = DSO_TYPE_GUEST_KERNEL;
1246 break;
1247 case PERF_RECORD_MISC_USER:
1248 case PERF_RECORD_MISC_GUEST_USER:
1249 dso_type = DSO_TYPE_USER;
1250 break;
1251 default:
1252 goto out;
1253 }
1254
1255 dso = machine__findnew_dso(machine, filename);
1256 if (dso != NULL) {
1257 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1258
1259 dso__set_build_id(dso, &bev->build_id);
1260
1261 if (dso_type != DSO_TYPE_USER) {
1262 struct kmod_path m = { .name = NULL, };
1263
1264 if (!kmod_path__parse_name(&m, filename) && m.kmod)
1265 dso__set_short_name(dso, strdup(m.name), true);
1266 else
1267 dso->kernel = dso_type;
1268
1269 free(m.name);
1270 }
1271
1272 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1273 sbuild_id);
1274 pr_debug("build id event received for %s: %s\n",
1275 dso->long_name, sbuild_id);
1276 dso__put(dso);
1277 }
1278
1279 err = 0;
1280 out:
1281 return err;
1282 }
1283
1284 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1285 int input, u64 offset, u64 size)
1286 {
1287 struct perf_session *session = container_of(header, struct perf_session, header);
1288 struct {
1289 struct perf_event_header header;
1290 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1291 char filename[0];
1292 } old_bev;
1293 struct build_id_event bev;
1294 char filename[PATH_MAX];
1295 u64 limit = offset + size;
1296
1297 while (offset < limit) {
1298 ssize_t len;
1299
1300 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1301 return -1;
1302
1303 if (header->needs_swap)
1304 perf_event_header__bswap(&old_bev.header);
1305
1306 len = old_bev.header.size - sizeof(old_bev);
1307 if (readn(input, filename, len) != len)
1308 return -1;
1309
1310 bev.header = old_bev.header;
1311
1312 /*
1313 		 * As the pid is the missing value, we need to fill
1314 		 * it in properly. The header.misc value gives us a nice hint.
1315 */
1316 bev.pid = HOST_KERNEL_ID;
1317 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1318 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1319 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1320
1321 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1322 __event_process_build_id(&bev, filename, session);
1323
1324 offset += bev.header.size;
1325 }
1326
1327 return 0;
1328 }
1329
1330 static int perf_header__read_build_ids(struct perf_header *header,
1331 int input, u64 offset, u64 size)
1332 {
1333 struct perf_session *session = container_of(header, struct perf_session, header);
1334 struct build_id_event bev;
1335 char filename[PATH_MAX];
1336 u64 limit = offset + size, orig_offset = offset;
1337 int err = -1;
1338
1339 while (offset < limit) {
1340 ssize_t len;
1341
1342 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1343 goto out;
1344
1345 if (header->needs_swap)
1346 perf_event_header__bswap(&bev.header);
1347
1348 len = bev.header.size - sizeof(bev);
1349 if (readn(input, filename, len) != len)
1350 goto out;
1351 /*
1352 * The a1645ce1 changeset:
1353 *
1354 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1355 *
1356 * Added a field to struct build_id_event that broke the file
1357 * format.
1358 *
1359 * Since the kernel build-id is the first entry, process the
1360 * table using the old format if the well known
1361 * '[kernel.kallsyms]' string for the kernel build-id has the
1362 * first 4 characters chopped off (where the pid_t sits).
1363 */
1364 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1365 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1366 return -1;
1367 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1368 }
1369
1370 __event_process_build_id(&bev, filename, session);
1371
1372 offset += bev.header.size;
1373 }
1374 err = 0;
1375 out:
1376 return err;
1377 }
1378
1379 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1380 struct perf_header *ph __maybe_unused,
1381 int fd, void *data)
1382 {
1383 ssize_t ret = trace_report(fd, data, false);
1384 return ret < 0 ? -1 : 0;
1385 }
1386
1387 static int process_build_id(struct perf_file_section *section,
1388 struct perf_header *ph, int fd,
1389 void *data __maybe_unused)
1390 {
1391 if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1392 pr_debug("Failed to read buildids, continuing...\n");
1393 return 0;
1394 }
1395
1396 static int process_hostname(struct perf_file_section *section __maybe_unused,
1397 struct perf_header *ph, int fd,
1398 void *data __maybe_unused)
1399 {
1400 ph->env.hostname = do_read_string(fd, ph);
1401 return ph->env.hostname ? 0 : -ENOMEM;
1402 }
1403
1404 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1405 struct perf_header *ph, int fd,
1406 void *data __maybe_unused)
1407 {
1408 ph->env.os_release = do_read_string(fd, ph);
1409 return ph->env.os_release ? 0 : -ENOMEM;
1410 }
1411
1412 static int process_version(struct perf_file_section *section __maybe_unused,
1413 struct perf_header *ph, int fd,
1414 void *data __maybe_unused)
1415 {
1416 ph->env.version = do_read_string(fd, ph);
1417 return ph->env.version ? 0 : -ENOMEM;
1418 }
1419
1420 static int process_arch(struct perf_file_section *section __maybe_unused,
1421 struct perf_header *ph, int fd,
1422 void *data __maybe_unused)
1423 {
1424 ph->env.arch = do_read_string(fd, ph);
1425 return ph->env.arch ? 0 : -ENOMEM;
1426 }
1427
1428 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1429 struct perf_header *ph, int fd,
1430 void *data __maybe_unused)
1431 {
1432 ssize_t ret;
1433 u32 nr;
1434
1435 ret = readn(fd, &nr, sizeof(nr));
1436 if (ret != sizeof(nr))
1437 return -1;
1438
1439 if (ph->needs_swap)
1440 nr = bswap_32(nr);
1441
1442 ph->env.nr_cpus_avail = nr;
1443
1444 ret = readn(fd, &nr, sizeof(nr));
1445 if (ret != sizeof(nr))
1446 return -1;
1447
1448 if (ph->needs_swap)
1449 nr = bswap_32(nr);
1450
1451 ph->env.nr_cpus_online = nr;
1452 return 0;
1453 }
1454
1455 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1456 struct perf_header *ph, int fd,
1457 void *data __maybe_unused)
1458 {
1459 ph->env.cpu_desc = do_read_string(fd, ph);
1460 return ph->env.cpu_desc ? 0 : -ENOMEM;
1461 }
1462
1463 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1464 struct perf_header *ph, int fd,
1465 void *data __maybe_unused)
1466 {
1467 ph->env.cpuid = do_read_string(fd, ph);
1468 return ph->env.cpuid ? 0 : -ENOMEM;
1469 }
1470
1471 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1472 struct perf_header *ph, int fd,
1473 void *data __maybe_unused)
1474 {
1475 uint64_t mem;
1476 ssize_t ret;
1477
1478 ret = readn(fd, &mem, sizeof(mem));
1479 if (ret != sizeof(mem))
1480 return -1;
1481
1482 if (ph->needs_swap)
1483 mem = bswap_64(mem);
1484
1485 ph->env.total_mem = mem;
1486 return 0;
1487 }
1488
1489 static struct perf_evsel *
1490 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1491 {
1492 struct perf_evsel *evsel;
1493
1494 evlist__for_each(evlist, evsel) {
1495 if (evsel->idx == idx)
1496 return evsel;
1497 }
1498
1499 return NULL;
1500 }
1501
1502 static void
1503 perf_evlist__set_event_name(struct perf_evlist *evlist,
1504 struct perf_evsel *event)
1505 {
1506 struct perf_evsel *evsel;
1507
1508 if (!event->name)
1509 return;
1510
1511 evsel = perf_evlist__find_by_index(evlist, event->idx);
1512 if (!evsel)
1513 return;
1514
1515 if (evsel->name)
1516 return;
1517
1518 evsel->name = strdup(event->name);
1519 }
1520
1521 static int
1522 process_event_desc(struct perf_file_section *section __maybe_unused,
1523 struct perf_header *header, int fd,
1524 void *data __maybe_unused)
1525 {
1526 struct perf_session *session;
1527 struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1528
1529 if (!events)
1530 return 0;
1531
1532 session = container_of(header, struct perf_session, header);
1533 for (evsel = events; evsel->attr.size; evsel++)
1534 perf_evlist__set_event_name(session->evlist, evsel);
1535
1536 free_event_desc(events);
1537
1538 return 0;
1539 }
1540
1541 static int process_cmdline(struct perf_file_section *section,
1542 struct perf_header *ph, int fd,
1543 void *data __maybe_unused)
1544 {
1545 ssize_t ret;
1546 char *str, *cmdline = NULL, **argv = NULL;
1547 u32 nr, i, len = 0;
1548
1549 ret = readn(fd, &nr, sizeof(nr));
1550 if (ret != sizeof(nr))
1551 return -1;
1552
1553 if (ph->needs_swap)
1554 nr = bswap_32(nr);
1555
1556 ph->env.nr_cmdline = nr;
1557
1558 cmdline = zalloc(section->size + nr + 1);
1559 if (!cmdline)
1560 return -1;
1561
1562 argv = zalloc(sizeof(char *) * (nr + 1));
1563 if (!argv)
1564 goto error;
1565
1566 for (i = 0; i < nr; i++) {
1567 str = do_read_string(fd, ph);
1568 if (!str)
1569 goto error;
1570
1571 argv[i] = cmdline + len;
1572 memcpy(argv[i], str, strlen(str) + 1);
1573 len += strlen(str) + 1;
1574 free(str);
1575 }
1576 ph->env.cmdline = cmdline;
1577 ph->env.cmdline_argv = (const char **) argv;
1578 return 0;
1579
1580 error:
1581 free(argv);
1582 free(cmdline);
1583 return -1;
1584 }
1585
1586 static int process_cpu_topology(struct perf_file_section *section,
1587 struct perf_header *ph, int fd,
1588 void *data __maybe_unused)
1589 {
1590 ssize_t ret;
1591 u32 nr, i;
1592 char *str;
1593 struct strbuf sb;
1594 int cpu_nr = ph->env.nr_cpus_online;
1595 u64 size = 0;
1596
1597 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1598 if (!ph->env.cpu)
1599 return -1;
1600
1601 ret = readn(fd, &nr, sizeof(nr));
1602 if (ret != sizeof(nr))
1603 goto free_cpu;
1604
1605 if (ph->needs_swap)
1606 nr = bswap_32(nr);
1607
1608 ph->env.nr_sibling_cores = nr;
1609 size += sizeof(u32);
1610 strbuf_init(&sb, 128);
1611
1612 for (i = 0; i < nr; i++) {
1613 str = do_read_string(fd, ph);
1614 if (!str)
1615 goto error;
1616
1617 /* include a NULL character at the end */
1618 strbuf_add(&sb, str, strlen(str) + 1);
1619 size += string_size(str);
1620 free(str);
1621 }
1622 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1623
1624 ret = readn(fd, &nr, sizeof(nr));
1625 if (ret != sizeof(nr))
1626 return -1;
1627
1628 if (ph->needs_swap)
1629 nr = bswap_32(nr);
1630
1631 ph->env.nr_sibling_threads = nr;
1632 size += sizeof(u32);
1633
1634 for (i = 0; i < nr; i++) {
1635 str = do_read_string(fd, ph);
1636 if (!str)
1637 goto error;
1638
1639 /* include a NULL character at the end */
1640 strbuf_add(&sb, str, strlen(str) + 1);
1641 size += string_size(str);
1642 free(str);
1643 }
1644 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1645
1646 /*
1647 * The header may be from old perf,
1648 * which doesn't include core id and socket id information.
1649 */
1650 if (section->size <= size) {
1651 zfree(&ph->env.cpu);
1652 return 0;
1653 }
1654
1655 for (i = 0; i < (u32)cpu_nr; i++) {
1656 ret = readn(fd, &nr, sizeof(nr));
1657 if (ret != sizeof(nr))
1658 goto free_cpu;
1659
1660 if (ph->needs_swap)
1661 nr = bswap_32(nr);
1662
1663 if (nr > (u32)cpu_nr) {
1664 pr_debug("core_id number is too big."
1665 "You may need to upgrade the perf tool.\n");
1666 goto free_cpu;
1667 }
1668 ph->env.cpu[i].core_id = nr;
1669
1670 ret = readn(fd, &nr, sizeof(nr));
1671 if (ret != sizeof(nr))
1672 goto free_cpu;
1673
1674 if (ph->needs_swap)
1675 nr = bswap_32(nr);
1676
1677 if (nr > (u32)cpu_nr) {
1678 pr_debug("socket_id number is too big."
1679 "You may need to upgrade the perf tool.\n");
1680 goto free_cpu;
1681 }
1682
1683 ph->env.cpu[i].socket_id = nr;
1684 }
1685
1686 return 0;
1687
1688 error:
1689 strbuf_release(&sb);
1690 free_cpu:
1691 zfree(&ph->env.cpu);
1692 return -1;
1693 }
1694
1695 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1696 struct perf_header *ph, int fd,
1697 void *data __maybe_unused)
1698 {
1699 ssize_t ret;
1700 u32 nr, node, i;
1701 char *str;
1702 uint64_t mem_total, mem_free;
1703 struct strbuf sb;
1704
1705 /* nr nodes */
1706 ret = readn(fd, &nr, sizeof(nr));
1707 if (ret != sizeof(nr))
1708 goto error;
1709
1710 if (ph->needs_swap)
1711 nr = bswap_32(nr);
1712
1713 ph->env.nr_numa_nodes = nr;
1714 strbuf_init(&sb, 256);
1715
1716 for (i = 0; i < nr; i++) {
1717 /* node number */
1718 ret = readn(fd, &node, sizeof(node));
1719 if (ret != sizeof(node))
1720 goto error;
1721
1722 ret = readn(fd, &mem_total, sizeof(u64));
1723 if (ret != sizeof(u64))
1724 goto error;
1725
1726 ret = readn(fd, &mem_free, sizeof(u64));
1727 if (ret != sizeof(u64))
1728 goto error;
1729
1730 if (ph->needs_swap) {
1731 node = bswap_32(node);
1732 mem_total = bswap_64(mem_total);
1733 mem_free = bswap_64(mem_free);
1734 }
1735
1736 strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
1737 node, mem_total, mem_free);
1738
1739 str = do_read_string(fd, ph);
1740 if (!str)
1741 goto error;
1742
1743 /* include a NULL character at the end */
1744 strbuf_add(&sb, str, strlen(str) + 1);
1745 free(str);
1746 }
1747 ph->env.numa_nodes = strbuf_detach(&sb, NULL);
1748 return 0;
1749
1750 error:
1751 strbuf_release(&sb);
1752 return -1;
1753 }
1754
1755 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1756 struct perf_header *ph, int fd,
1757 void *data __maybe_unused)
1758 {
1759 ssize_t ret;
1760 char *name;
1761 u32 pmu_num;
1762 u32 type;
1763 struct strbuf sb;
1764
1765 ret = readn(fd, &pmu_num, sizeof(pmu_num));
1766 if (ret != sizeof(pmu_num))
1767 return -1;
1768
1769 if (ph->needs_swap)
1770 pmu_num = bswap_32(pmu_num);
1771
1772 if (!pmu_num) {
1773 pr_debug("pmu mappings not available\n");
1774 return 0;
1775 }
1776
1777 ph->env.nr_pmu_mappings = pmu_num;
1778 strbuf_init(&sb, 128);
1779
1780 while (pmu_num) {
1781 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1782 goto error;
1783 if (ph->needs_swap)
1784 type = bswap_32(type);
1785
1786 name = do_read_string(fd, ph);
1787 if (!name)
1788 goto error;
1789
1790 strbuf_addf(&sb, "%u:%s", type, name);
1791 /* include a NULL character at the end */
1792 strbuf_add(&sb, "", 1);
1793
1794 if (!strcmp(name, "msr"))
1795 ph->env.msr_pmu_type = type;
1796
1797 free(name);
1798 pmu_num--;
1799 }
1800 ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
1801 return 0;
1802
1803 error:
1804 strbuf_release(&sb);
1805 return -1;
1806 }
1807
1808 static int process_group_desc(struct perf_file_section *section __maybe_unused,
1809 struct perf_header *ph, int fd,
1810 void *data __maybe_unused)
1811 {
1812 size_t ret = -1;
1813 u32 i, nr, nr_groups;
1814 struct perf_session *session;
1815 struct perf_evsel *evsel, *leader = NULL;
1816 struct group_desc {
1817 char *name;
1818 u32 leader_idx;
1819 u32 nr_members;
1820 } *desc;
1821
1822 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
1823 return -1;
1824
1825 if (ph->needs_swap)
1826 nr_groups = bswap_32(nr_groups);
1827
1828 ph->env.nr_groups = nr_groups;
1829 if (!nr_groups) {
1830 pr_debug("group desc not available\n");
1831 return 0;
1832 }
1833
1834 desc = calloc(nr_groups, sizeof(*desc));
1835 if (!desc)
1836 return -1;
1837
1838 for (i = 0; i < nr_groups; i++) {
1839 desc[i].name = do_read_string(fd, ph);
1840 if (!desc[i].name)
1841 goto out_free;
1842
1843 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
1844 goto out_free;
1845
1846 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
1847 goto out_free;
1848
1849 if (ph->needs_swap) {
1850 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
1851 desc[i].nr_members = bswap_32(desc[i].nr_members);
1852 }
1853 }
1854
1855 /*
1856 * Rebuild group relationship based on the group_desc
1857 */
1858 session = container_of(ph, struct perf_session, header);
1859 session->evlist->nr_groups = nr_groups;
1860
1861 i = nr = 0;
1862 evlist__for_each(session->evlist, evsel) {
1863 if (evsel->idx == (int) desc[i].leader_idx) {
1864 evsel->leader = evsel;
1865 /* {anon_group} is a dummy name */
1866 if (strcmp(desc[i].name, "{anon_group}")) {
1867 evsel->group_name = desc[i].name;
1868 desc[i].name = NULL;
1869 }
1870 evsel->nr_members = desc[i].nr_members;
1871
1872 if (i >= nr_groups || nr > 0) {
1873 pr_debug("invalid group desc\n");
1874 goto out_free;
1875 }
1876
1877 leader = evsel;
1878 nr = evsel->nr_members - 1;
1879 i++;
1880 } else if (nr) {
1881 /* This is a group member */
1882 evsel->leader = leader;
1883
1884 nr--;
1885 }
1886 }
1887
1888 if (i != nr_groups || nr != 0) {
1889 pr_debug("invalid group desc\n");
1890 goto out_free;
1891 }
1892
1893 ret = 0;
1894 out_free:
1895 for (i = 0; i < nr_groups; i++)
1896 zfree(&desc[i].name);
1897 free(desc);
1898
1899 return ret;
1900 }
1901
1902 static int process_auxtrace(struct perf_file_section *section,
1903 struct perf_header *ph, int fd,
1904 void *data __maybe_unused)
1905 {
1906 struct perf_session *session;
1907 int err;
1908
1909 session = container_of(ph, struct perf_session, header);
1910
1911 err = auxtrace_index__process(fd, section->size, session,
1912 ph->needs_swap);
1913 if (err < 0)
1914 pr_err("Failed to process auxtrace index\n");
1915 return err;
1916 }
1917
1918 struct feature_ops {
1919 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
1920 void (*print)(struct perf_header *h, int fd, FILE *fp);
1921 int (*process)(struct perf_file_section *section,
1922 struct perf_header *h, int fd, void *data);
1923 const char *name;
1924 bool full_only;
1925 };
1926
1927 #define FEAT_OPA(n, func) \
1928 [n] = { .name = #n, .write = write_##func, .print = print_##func }
1929 #define FEAT_OPP(n, func) \
1930 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
1931 .process = process_##func }
1932 #define FEAT_OPF(n, func) \
1933 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
1934 .process = process_##func, .full_only = true }
1935
1936 /* feature_ops not implemented: */
1937 #define print_tracing_data NULL
1938 #define print_build_id NULL
1939
1940 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
1941 FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
1942 FEAT_OPP(HEADER_BUILD_ID, build_id),
1943 FEAT_OPP(HEADER_HOSTNAME, hostname),
1944 FEAT_OPP(HEADER_OSRELEASE, osrelease),
1945 FEAT_OPP(HEADER_VERSION, version),
1946 FEAT_OPP(HEADER_ARCH, arch),
1947 FEAT_OPP(HEADER_NRCPUS, nrcpus),
1948 FEAT_OPP(HEADER_CPUDESC, cpudesc),
1949 FEAT_OPP(HEADER_CPUID, cpuid),
1950 FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
1951 FEAT_OPP(HEADER_EVENT_DESC, event_desc),
1952 FEAT_OPP(HEADER_CMDLINE, cmdline),
1953 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
1954 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
1955 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
1956 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
1957 FEAT_OPP(HEADER_GROUP_DESC, group_desc),
1958 FEAT_OPP(HEADER_AUXTRACE, auxtrace),
1959 };
1960
1961 struct header_print_data {
1962 FILE *fp;
1963 bool full; /* extended list of headers */
1964 };
1965
1966 static int perf_file_section__fprintf_info(struct perf_file_section *section,
1967 struct perf_header *ph,
1968 int feat, int fd, void *data)
1969 {
1970 struct header_print_data *hd = data;
1971
1972 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
1973 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
1974 "%d, continuing...\n", section->offset, feat);
1975 return 0;
1976 }
1977 if (feat >= HEADER_LAST_FEATURE) {
1978 pr_warning("unknown feature %d\n", feat);
1979 return 0;
1980 }
1981 if (!feat_ops[feat].print)
1982 return 0;
1983
1984 if (!feat_ops[feat].full_only || hd->full)
1985 feat_ops[feat].print(ph, fd, hd->fp);
1986 else
1987 fprintf(hd->fp, "# %s info available, use -I to display\n",
1988 feat_ops[feat].name);
1989
1990 return 0;
1991 }
1992
1993 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
1994 {
1995 struct header_print_data hd;
1996 struct perf_header *header = &session->header;
1997 int fd = perf_data_file__fd(session->file);
1998 hd.fp = fp;
1999 hd.full = full;
2000
2001 perf_header__process_sections(header, fd, &hd,
2002 perf_file_section__fprintf_info);
2003 return 0;
2004 }
2005
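/*
 * Write a single feature section if it is set in the header, recording
 * its offset and size in *p. On failure the file offset is rewound and
 * -1 returned so the caller can drop the feature.
 */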
2006 static int do_write_feat(int fd, struct perf_header *h, int type,
2007 struct perf_file_section **p,
2008 struct perf_evlist *evlist)
2009 {
2010 int err;
2011 int ret = 0;
2012
2013 if (perf_header__has_feat(h, type)) {
2014 if (!feat_ops[type].write)
2015 return -1;
2016
2017 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2018
2019 err = feat_ops[type].write(fd, h, evlist);
2020 if (err < 0) {
2021 pr_debug("failed to write feature %d\n", type);
2022
2023 /* undo anything written */
2024 lseek(fd, (*p)->offset, SEEK_SET);
2025
2026 return -1;
2027 }
2028 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2029 (*p)++;
2030 }
2031 return ret;
2032 }
2033
2034 static int perf_header__adds_write(struct perf_header *header,
2035 struct perf_evlist *evlist, int fd)
2036 {
2037 int nr_sections;
2038 struct perf_file_section *feat_sec, *p;
2039 int sec_size;
2040 u64 sec_start;
2041 int feat;
2042 int err;
2043
2044 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2045 if (!nr_sections)
2046 return 0;
2047
2048 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2049 if (feat_sec == NULL)
2050 return -ENOMEM;
2051
2052 sec_size = sizeof(*feat_sec) * nr_sections;
2053
2054 sec_start = header->feat_offset;
2055 lseek(fd, sec_start + sec_size, SEEK_SET);
2056
2057 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2058 if (do_write_feat(fd, header, feat, &p, evlist))
2059 perf_header__clear_feat(header, feat);
2060 }
2061
2062 lseek(fd, sec_start, SEEK_SET);
2063 /*
2064 	 * may write more than needed due to dropped features, but
2065 	 * this is okay, the reader will skip the missing entries
2066 */
2067 err = do_write(fd, feat_sec, sec_size);
2068 if (err < 0)
2069 pr_debug("failed to write feature section\n");
2070 free(feat_sec);
2071 return err;
2072 }
2073
2074 int perf_header__write_pipe(int fd)
2075 {
2076 struct perf_pipe_file_header f_header;
2077 int err;
2078
2079 f_header = (struct perf_pipe_file_header){
2080 .magic = PERF_MAGIC,
2081 .size = sizeof(f_header),
2082 };
2083
2084 err = do_write(fd, &f_header, sizeof(f_header));
2085 if (err < 0) {
2086 pr_debug("failed to write perf pipe header\n");
2087 return err;
2088 }
2089
2090 return 0;
2091 }
2092
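/*
 * Write the perf.data file header: the per-event ids, the attribute
 * table, then (when 'at_exit' is set) the feature sections, and finally
 * the struct perf_file_header itself at offset 0.
 */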
2093 int perf_session__write_header(struct perf_session *session,
2094 struct perf_evlist *evlist,
2095 int fd, bool at_exit)
2096 {
2097 struct perf_file_header f_header;
2098 struct perf_file_attr f_attr;
2099 struct perf_header *header = &session->header;
2100 struct perf_evsel *evsel;
2101 u64 attr_offset;
2102 int err;
2103
2104 lseek(fd, sizeof(f_header), SEEK_SET);
2105
2106 evlist__for_each(session->evlist, evsel) {
2107 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2108 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2109 if (err < 0) {
2110 pr_debug("failed to write perf header\n");
2111 return err;
2112 }
2113 }
2114
2115 attr_offset = lseek(fd, 0, SEEK_CUR);
2116
2117 evlist__for_each(evlist, evsel) {
2118 f_attr = (struct perf_file_attr){
2119 .attr = evsel->attr,
2120 .ids = {
2121 .offset = evsel->id_offset,
2122 .size = evsel->ids * sizeof(u64),
2123 }
2124 };
2125 err = do_write(fd, &f_attr, sizeof(f_attr));
2126 if (err < 0) {
2127 pr_debug("failed to write perf header attribute\n");
2128 return err;
2129 }
2130 }
2131
2132 if (!header->data_offset)
2133 header->data_offset = lseek(fd, 0, SEEK_CUR);
2134 header->feat_offset = header->data_offset + header->data_size;
2135
2136 if (at_exit) {
2137 err = perf_header__adds_write(header, evlist, fd);
2138 if (err < 0)
2139 return err;
2140 }
2141
2142 f_header = (struct perf_file_header){
2143 .magic = PERF_MAGIC,
2144 .size = sizeof(f_header),
2145 .attr_size = sizeof(f_attr),
2146 .attrs = {
2147 .offset = attr_offset,
2148 .size = evlist->nr_entries * sizeof(f_attr),
2149 },
2150 .data = {
2151 .offset = header->data_offset,
2152 .size = header->data_size,
2153 },
2154 /* event_types is ignored, store zeros */
2155 };
2156
2157 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2158
2159 lseek(fd, 0, SEEK_SET);
2160 err = do_write(fd, &f_header, sizeof(f_header));
2161 if (err < 0) {
2162 pr_debug("failed to write perf header\n");
2163 return err;
2164 }
2165 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2166
2167 return 0;
2168 }
2169
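/* Read @size bytes into @buf, byte-swapping 64-bit words if the file needs it. */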
2170 static int perf_header__getbuffer64(struct perf_header *header,
2171 int fd, void *buf, size_t size)
2172 {
2173 if (readn(fd, buf, size) <= 0)
2174 return -1;
2175
2176 if (header->needs_swap)
2177 mem_bswap_64(buf, size);
2178
2179 return 0;
2180 }
2181
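/*
 * Read the feature section table from header->feat_offset and invoke
 * @process once per feature bit set in the header, in feature-bit order.
 */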
2182 int perf_header__process_sections(struct perf_header *header, int fd,
2183 void *data,
2184 int (*process)(struct perf_file_section *section,
2185 struct perf_header *ph,
2186 int feat, int fd, void *data))
2187 {
2188 struct perf_file_section *feat_sec, *sec;
2189 int nr_sections;
2190 int sec_size;
2191 int feat;
2192 int err;
2193
2194 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2195 if (!nr_sections)
2196 return 0;
2197
2198 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2199 if (!feat_sec)
2200 return -1;
2201
2202 sec_size = sizeof(*feat_sec) * nr_sections;
2203
2204 lseek(fd, header->feat_offset, SEEK_SET);
2205
2206 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2207 if (err < 0)
2208 goto out_free;
2209
2210 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2211 err = process(sec++, header, feat, fd, data);
2212 if (err < 0)
2213 goto out_free;
2214 }
2215 err = 0;
2216 out_free:
2217 free(feat_sec);
2218 return err;
2219 }
2220
2221 static const int attr_file_abi_sizes[] = {
2222 [0] = PERF_ATTR_SIZE_VER0,
2223 [1] = PERF_ATTR_SIZE_VER1,
2224 [2] = PERF_ATTR_SIZE_VER2,
2225 [3] = PERF_ATTR_SIZE_VER3,
2226 [4] = PERF_ATTR_SIZE_VER4,
2227 0,
2228 };
2229
2230 /*
2231  * In the legacy file format the magic number does not encode endianness;
2232  * hdr_sz was used for that instead. But given that hdr_sz varies with the
2233  * ABI revision, we need to try every known size in both byte orders to
2234  * detect the endianness.
2235  */
2236 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2237 {
2238 uint64_t ref_size, attr_size;
2239 int i;
2240
2241 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2242 ref_size = attr_file_abi_sizes[i]
2243 + sizeof(struct perf_file_section);
2244 if (hdr_sz != ref_size) {
2245 attr_size = bswap_64(hdr_sz);
2246 if (attr_size != ref_size)
2247 continue;
2248
2249 ph->needs_swap = true;
2250 }
2251 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2252 i,
2253 ph->needs_swap);
2254 return 0;
2255 }
2256 /* could not determine endianness */
2257 return -1;
2258 }
2259
2260 #define PERF_PIPE_HDR_VER0 16
2261
2262 static const size_t attr_pipe_abi_sizes[] = {
2263 [0] = PERF_PIPE_HDR_VER0,
2264 0,
2265 };
2266
2267 /*
2268  * In the legacy pipe format, there is an implicit assumption that the
2269  * endianness of the host recording the samples and of the host parsing
2270  * them is the same. This is not always the case, since the pipe output may
2271  * be redirected into a file and analyzed on a different machine, possibly
2272  * with a different endianness and perf_event ABI revision in the perf tool.
2273  */
2274 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2275 {
2276 u64 attr_size;
2277 int i;
2278
2279 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2280 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2281 attr_size = bswap_64(hdr_sz);
2282 if (attr_size != hdr_sz)
2283 continue;
2284
2285 ph->needs_swap = true;
2286 }
2287 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2288 return 0;
2289 }
2290 return -1;
2291 }
2292
2293 bool is_perf_magic(u64 magic)
2294 {
2295 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2296 || magic == __perf_magic2
2297 || magic == __perf_magic2_sw)
2298 return true;
2299
2300 return false;
2301 }
2302
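/*
 * Determine the header version and whether byte-swapping is needed. A legacy
 * magic falls back to probing the known header sizes; otherwise the 64-bit
 * magic itself tells us whether the file was written with the opposite
 * endianness.
 */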
2303 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2304 bool is_pipe, struct perf_header *ph)
2305 {
2306 int ret;
2307
2308 /* check for legacy format */
2309 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2310 if (ret == 0) {
2311 ph->version = PERF_HEADER_VERSION_1;
2312 pr_debug("legacy perf.data format\n");
2313 if (is_pipe)
2314 return try_all_pipe_abis(hdr_sz, ph);
2315
2316 return try_all_file_abis(hdr_sz, ph);
2317 }
2318 /*
2319 * the new magic number serves two purposes:
2320 * - unique number to identify actual perf.data files
2321 * - encode endianness of file
2322 */
2323 ph->version = PERF_HEADER_VERSION_2;
2324
2325 /* check magic number with one endianness */
2326 if (magic == __perf_magic2)
2327 return 0;
2328
2329 /* check magic number with opposite endianness */
2330 if (magic != __perf_magic2_sw)
2331 return -1;
2332
2333 ph->needs_swap = true;
2334
2335 return 0;
2336 }
2337
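/*
 * Read and validate the on-disk file header, byte-swapping the fixed part
 * and the feature bitmap as needed, and seed @ph with the data and feature
 * section offsets.
 */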
2338 int perf_file_header__read(struct perf_file_header *header,
2339 struct perf_header *ph, int fd)
2340 {
2341 ssize_t ret;
2342
2343 lseek(fd, 0, SEEK_SET);
2344
2345 ret = readn(fd, header, sizeof(*header));
2346 if (ret <= 0)
2347 return -1;
2348
2349 if (check_magic_endian(header->magic,
2350 header->attr_size, false, ph) < 0) {
2351 pr_debug("magic/endian check failed\n");
2352 return -1;
2353 }
2354
2355 if (ph->needs_swap) {
2356 mem_bswap_64(header, offsetof(struct perf_file_header,
2357 adds_features));
2358 }
2359
2360 if (header->size != sizeof(*header)) {
2361 /* Support the previous format */
2362 if (header->size == offsetof(typeof(*header), adds_features))
2363 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2364 else
2365 return -1;
2366 } else if (ph->needs_swap) {
2367 /*
2368 * feature bitmap is declared as an array of unsigned longs --
2369 * not good since its size can differ between the host that
2370 * generated the data file and the host analyzing the file.
2371 *
2372 * We need to handle endianness, but we don't know the size of
2373 * the unsigned long where the file was generated. Take a best
2374 * guess at determining it: try 64-bit swap first (ie., file
2375 * created on a 64-bit host), and check if the hostname feature
2376 * bit is set (this feature bit is forced on as of fbe96f2).
2377 	 * If the bit is not set, undo the 64-bit swap and try a 32-bit
2378 	 * swap. If the hostname bit is still not set (e.g., older data
2379 	 * file), punt and fall back to the original behavior --
2380 * clearing all feature bits and setting buildid.
2381 */
2382 mem_bswap_64(&header->adds_features,
2383 BITS_TO_U64(HEADER_FEAT_BITS));
2384
2385 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2386 /* unswap as u64 */
2387 mem_bswap_64(&header->adds_features,
2388 BITS_TO_U64(HEADER_FEAT_BITS));
2389
2390 /* unswap as u32 */
2391 mem_bswap_32(&header->adds_features,
2392 BITS_TO_U32(HEADER_FEAT_BITS));
2393 }
2394
2395 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2396 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2397 set_bit(HEADER_BUILD_ID, header->adds_features);
2398 }
2399 }
2400
2401 memcpy(&ph->adds_features, &header->adds_features,
2402 sizeof(ph->adds_features));
2403
2404 ph->data_offset = header->data.offset;
2405 ph->data_size = header->data.size;
2406 ph->feat_offset = header->data.offset + header->data.size;
2407 return 0;
2408 }
2409
2410 static int perf_file_section__process(struct perf_file_section *section,
2411 struct perf_header *ph,
2412 int feat, int fd, void *data)
2413 {
2414 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2415 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2416 "%d, continuing...\n", section->offset, feat);
2417 return 0;
2418 }
2419
2420 if (feat >= HEADER_LAST_FEATURE) {
2421 pr_debug("unknown feature %d, continuing...\n", feat);
2422 return 0;
2423 }
2424
2425 if (!feat_ops[feat].process)
2426 return 0;
2427
2428 return feat_ops[feat].process(section, ph, fd, data);
2429 }
2430
2431 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2432 struct perf_header *ph, int fd,
2433 bool repipe)
2434 {
2435 ssize_t ret;
2436
2437 ret = readn(fd, header, sizeof(*header));
2438 if (ret <= 0)
2439 return -1;
2440
2441 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2442 pr_debug("endian/magic failed\n");
2443 return -1;
2444 }
2445
2446 if (ph->needs_swap)
2447 header->size = bswap_64(header->size);
2448
2449 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2450 return -1;
2451
2452 return 0;
2453 }
2454
2455 static int perf_header__read_pipe(struct perf_session *session)
2456 {
2457 struct perf_header *header = &session->header;
2458 struct perf_pipe_file_header f_header;
2459
2460 if (perf_file_header__read_pipe(&f_header, header,
2461 perf_data_file__fd(session->file),
2462 session->repipe) < 0) {
2463 pr_debug("incompatible file format\n");
2464 return -EINVAL;
2465 }
2466
2467 return 0;
2468 }
2469
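/*
 * Read a single perf_file_attr. Only PERF_ATTR_SIZE_VER0 bytes are
 * guaranteed to be present; the remainder is read according to the on-file
 * attr->size, and files written by a newer ABI than ours are rejected. The
 * trailing perf_file_section locates the ids, which are read by the caller.
 */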
2470 static int read_attr(int fd, struct perf_header *ph,
2471 struct perf_file_attr *f_attr)
2472 {
2473 struct perf_event_attr *attr = &f_attr->attr;
2474 size_t sz, left;
2475 size_t our_sz = sizeof(f_attr->attr);
2476 ssize_t ret;
2477
2478 memset(f_attr, 0, sizeof(*f_attr));
2479
2480 /* read minimal guaranteed structure */
2481 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2482 if (ret <= 0) {
2483 pr_debug("cannot read %d bytes of header attr\n",
2484 PERF_ATTR_SIZE_VER0);
2485 return -1;
2486 }
2487
2488 /* on file perf_event_attr size */
2489 sz = attr->size;
2490
2491 if (ph->needs_swap)
2492 sz = bswap_32(sz);
2493
2494 if (sz == 0) {
2495 /* assume ABI0 */
2496 sz = PERF_ATTR_SIZE_VER0;
2497 } else if (sz > our_sz) {
2498 pr_debug("file uses a more recent and unsupported ABI"
2499 " (%zu bytes extra)\n", sz - our_sz);
2500 return -1;
2501 }
2502 /* what we have not yet read and that we know about */
2503 left = sz - PERF_ATTR_SIZE_VER0;
2504 if (left) {
2505 void *ptr = attr;
2506 ptr += PERF_ATTR_SIZE_VER0;
2507
2508 ret = readn(fd, ptr, left);
2509 }
2510 /* read perf_file_section, ids are read in caller */
2511 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2512
2513 return ret <= 0 ? -1 : 0;
2514 }
2515
2516 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2517 struct pevent *pevent)
2518 {
2519 struct event_format *event;
2520 char bf[128];
2521
2522 /* already prepared */
2523 if (evsel->tp_format)
2524 return 0;
2525
2526 if (pevent == NULL) {
2527 pr_debug("broken or missing trace data\n");
2528 return -1;
2529 }
2530
2531 event = pevent_find_event(pevent, evsel->attr.config);
2532 if (event == NULL)
2533 return -1;
2534
2535 if (!evsel->name) {
2536 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2537 evsel->name = strdup(bf);
2538 if (evsel->name == NULL)
2539 return -1;
2540 }
2541
2542 evsel->tp_format = event;
2543 return 0;
2544 }
2545
2546 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2547 struct pevent *pevent)
2548 {
2549 struct perf_evsel *pos;
2550
2551 evlist__for_each(evlist, pos) {
2552 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2553 perf_evsel__prepare_tracepoint_event(pos, pevent))
2554 return -1;
2555 }
2556
2557 return 0;
2558 }
2559
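/*
 * Read the file (or pipe) header, build the evlist from the recorded
 * attributes and their sample ids, then process the feature sections and
 * hook up tracepoint formats from the tracing data.
 */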
2560 int perf_session__read_header(struct perf_session *session)
2561 {
2562 struct perf_data_file *file = session->file;
2563 struct perf_header *header = &session->header;
2564 struct perf_file_header f_header;
2565 struct perf_file_attr f_attr;
2566 u64 f_id;
2567 int nr_attrs, nr_ids, i, j;
2568 int fd = perf_data_file__fd(file);
2569
2570 session->evlist = perf_evlist__new();
2571 if (session->evlist == NULL)
2572 return -ENOMEM;
2573
2574 session->evlist->env = &header->env;
2575 session->machines.host.env = &header->env;
2576 if (perf_data_file__is_pipe(file))
2577 return perf_header__read_pipe(session);
2578
2579 if (perf_file_header__read(&f_header, header, fd) < 0)
2580 return -EINVAL;
2581
2582 /*
2583 * Sanity check that perf.data was written cleanly; data size is
2584 * initialized to 0 and updated only if the on_exit function is run.
2585 * If data size is still 0 then the file contains only partial
2586 	 * information. Just warn the user and process as much of it as possible.
2587 */
2588 if (f_header.data.size == 0) {
2589 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2590 "Was the 'perf record' command properly terminated?\n",
2591 file->path);
2592 }
2593
2594 if (f_header.attr_size == 0) {
2595 pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
2596 "Was the 'perf record' command properly terminated?\n",
2597 file->path);
2598 return -EINVAL;
2599 }
2600
2601 nr_attrs = f_header.attrs.size / f_header.attr_size;
2602 lseek(fd, f_header.attrs.offset, SEEK_SET);
2603
2604 for (i = 0; i < nr_attrs; i++) {
2605 struct perf_evsel *evsel;
2606 off_t tmp;
2607
2608 if (read_attr(fd, header, &f_attr) < 0)
2609 goto out_errno;
2610
2611 if (header->needs_swap) {
2612 f_attr.ids.size = bswap_64(f_attr.ids.size);
2613 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2614 perf_event__attr_swap(&f_attr.attr);
2615 }
2616
2617 tmp = lseek(fd, 0, SEEK_CUR);
2618 evsel = perf_evsel__new(&f_attr.attr);
2619
2620 if (evsel == NULL)
2621 goto out_delete_evlist;
2622
2623 evsel->needs_swap = header->needs_swap;
2624 /*
2625 * Do it before so that if perf_evsel__alloc_id fails, this
2626 * entry gets purged too at perf_evlist__delete().
2627 */
2628 perf_evlist__add(session->evlist, evsel);
2629
2630 nr_ids = f_attr.ids.size / sizeof(u64);
2631 /*
2632 * We don't have the cpu and thread maps on the header, so
2633 * for allocating the perf_sample_id table we fake 1 cpu and
2634 		 * nr_ids threads.
2635 */
2636 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2637 goto out_delete_evlist;
2638
2639 lseek(fd, f_attr.ids.offset, SEEK_SET);
2640
2641 for (j = 0; j < nr_ids; j++) {
2642 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2643 goto out_errno;
2644
2645 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2646 }
2647
2648 lseek(fd, tmp, SEEK_SET);
2649 }
2650
2651 symbol_conf.nr_events = nr_attrs;
2652
2653 perf_header__process_sections(header, fd, &session->tevent,
2654 perf_file_section__process);
2655
2656 if (perf_evlist__prepare_tracepoint_events(session->evlist,
2657 session->tevent.pevent))
2658 goto out_delete_evlist;
2659
2660 return 0;
2661 out_errno:
2662 return -errno;
2663
2664 out_delete_evlist:
2665 perf_evlist__delete(session->evlist);
2666 session->evlist = NULL;
2667 return -ENOMEM;
2668 }
2669
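/*
 * Build a PERF_RECORD_HEADER_ATTR event carrying @attr followed by @ids
 * sample ids and hand it to @process. Fails with -E2BIG if the event would
 * not fit in the 16-bit header size field.
 */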
2670 int perf_event__synthesize_attr(struct perf_tool *tool,
2671 struct perf_event_attr *attr, u32 ids, u64 *id,
2672 perf_event__handler_t process)
2673 {
2674 union perf_event *ev;
2675 size_t size;
2676 int err;
2677
2678 size = sizeof(struct perf_event_attr);
2679 size = PERF_ALIGN(size, sizeof(u64));
2680 size += sizeof(struct perf_event_header);
2681 size += ids * sizeof(u64);
2682
2683 ev = zalloc(size);
2684
2685 if (ev == NULL)
2686 return -ENOMEM;
2687
2688 ev->attr.attr = *attr;
2689 memcpy(ev->attr.id, id, ids * sizeof(u64));
2690
2691 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2692 ev->attr.header.size = (u16)size;
2693
2694 if (ev->attr.header.size == size)
2695 err = process(tool, ev, NULL, NULL);
2696 else
2697 err = -E2BIG;
2698
2699 free(ev);
2700
2701 return err;
2702 }
2703
2704 int perf_event__synthesize_attrs(struct perf_tool *tool,
2705 struct perf_session *session,
2706 perf_event__handler_t process)
2707 {
2708 struct perf_evsel *evsel;
2709 int err = 0;
2710
2711 evlist__for_each(session->evlist, evsel) {
2712 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
2713 evsel->id, process);
2714 if (err) {
2715 pr_debug("failed to create perf header attribute\n");
2716 return err;
2717 }
2718 }
2719
2720 return err;
2721 }
2722
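/*
 * Receiving side of perf_event__synthesize_attr(): recreate the evsel from
 * the attribute carried in the event and register its sample ids.
 */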
2723 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
2724 union perf_event *event,
2725 struct perf_evlist **pevlist)
2726 {
2727 u32 i, ids, n_ids;
2728 struct perf_evsel *evsel;
2729 struct perf_evlist *evlist = *pevlist;
2730
2731 if (evlist == NULL) {
2732 *pevlist = evlist = perf_evlist__new();
2733 if (evlist == NULL)
2734 return -ENOMEM;
2735 }
2736
2737 evsel = perf_evsel__new(&event->attr.attr);
2738 if (evsel == NULL)
2739 return -ENOMEM;
2740
2741 perf_evlist__add(evlist, evsel);
2742
2743 ids = event->header.size;
2744 ids -= (void *)&event->attr.id - (void *)event;
2745 n_ids = ids / sizeof(u64);
2746 /*
2747 * We don't have the cpu and thread maps on the header, so
2748 * for allocating the perf_sample_id table we fake 1 cpu and
2749 	 * n_ids threads.
2750 */
2751 if (perf_evsel__alloc_id(evsel, 1, n_ids))
2752 return -ENOMEM;
2753
2754 for (i = 0; i < n_ids; i++) {
2755 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
2756 }
2757
2758 symbol_conf.nr_events = evlist->nr_entries;
2759
2760 return 0;
2761 }
2762
2763 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
2764 struct perf_evlist *evlist,
2765 perf_event__handler_t process)
2766 {
2767 union perf_event ev;
2768 struct tracing_data *tdata;
2769 ssize_t size = 0, aligned_size = 0, padding;
2770 int err __maybe_unused = 0;
2771
2772 /*
2773 * We are going to store the size of the data followed
2774 * by the data contents. Since the fd descriptor is a pipe,
2775 * we cannot seek back to store the size of the data once
2776 * we know it. Instead we:
2777 *
2778 * - write the tracing data to the temp file
2779 * - get/write the data size to pipe
2780 * - write the tracing data from the temp file
2781 * to the pipe
2782 */
2783 tdata = tracing_data_get(&evlist->entries, fd, true);
2784 if (!tdata)
2785 return -1;
2786
2787 memset(&ev, 0, sizeof(ev));
2788
2789 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2790 size = tdata->size;
2791 aligned_size = PERF_ALIGN(size, sizeof(u64));
2792 padding = aligned_size - size;
2793 ev.tracing_data.header.size = sizeof(ev.tracing_data);
2794 ev.tracing_data.size = aligned_size;
2795
2796 process(tool, &ev, NULL, NULL);
2797
2798 /*
2799 * The put function will copy all the tracing data
2800 * stored in temp file to the pipe.
2801 */
2802 tracing_data_put(tdata);
2803
2804 write_padded(fd, NULL, 0, padding);
2805
2806 return aligned_size;
2807 }
2808
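/*
 * Counterpart of perf_event__synthesize_tracing_data(): parse the tracing
 * data that follows the event on the stream, consuming (and, when repiping,
 * forwarding) the alignment padding.
 */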
2809 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
2810 union perf_event *event,
2811 struct perf_session *session)
2812 {
2813 ssize_t size_read, padding, size = event->tracing_data.size;
2814 int fd = perf_data_file__fd(session->file);
2815 off_t offset = lseek(fd, 0, SEEK_CUR);
2816 char buf[BUFSIZ];
2817
2818 /* setup for reading amidst mmap */
2819 lseek(fd, offset + sizeof(struct tracing_data_event),
2820 SEEK_SET);
2821
2822 size_read = trace_report(fd, &session->tevent,
2823 session->repipe);
2824 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
2825
2826 if (readn(fd, buf, padding) < 0) {
2827 pr_err("%s: reading input file", __func__);
2828 return -1;
2829 }
2830 if (session->repipe) {
2831 int retw = write(STDOUT_FILENO, buf, padding);
2832 if (retw <= 0 || retw != padding) {
2833 pr_err("%s: repiping tracing data padding", __func__);
2834 return -1;
2835 }
2836 }
2837
2838 if (size_read + padding != size) {
2839 pr_err("%s: tracing data size mismatch", __func__);
2840 return -1;
2841 }
2842
2843 perf_evlist__prepare_tracepoint_events(session->evlist,
2844 session->tevent.pevent);
2845
2846 return size_read + padding;
2847 }
2848
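/*
 * Emit a PERF_RECORD_HEADER_BUILD_ID event for @pos (only if the DSO was
 * actually hit), carrying its build id and long name.
 */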
2849 int perf_event__synthesize_build_id(struct perf_tool *tool,
2850 struct dso *pos, u16 misc,
2851 perf_event__handler_t process,
2852 struct machine *machine)
2853 {
2854 union perf_event ev;
2855 size_t len;
2856 int err = 0;
2857
2858 if (!pos->hit)
2859 return err;
2860
2861 memset(&ev, 0, sizeof(ev));
2862
2863 len = pos->long_name_len + 1;
2864 len = PERF_ALIGN(len, NAME_ALIGN);
2865 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
2866 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2867 ev.build_id.header.misc = misc;
2868 ev.build_id.pid = machine->pid;
2869 ev.build_id.header.size = sizeof(ev.build_id) + len;
2870 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2871
2872 err = process(tool, &ev, NULL, machine);
2873
2874 return err;
2875 }
2876
2877 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
2878 union perf_event *event,
2879 struct perf_session *session)
2880 {
2881 __event_process_build_id(&event->build_id,
2882 event->build_id.filename,
2883 session);
2884 return 0;
2885 }
2886