1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include "util/debug.h"
4 #include "util/dso.h"
5 #include "util/event.h"
6 #include "util/evlist.h"
7 #include "util/machine.h"
8 #include "util/map.h"
9 #include "util/map_symbol.h"
10 #include "util/branch.h"
11 #include "util/memswap.h"
12 #include "util/namespaces.h"
13 #include "util/session.h"
14 #include "util/stat.h"
15 #include "util/symbol.h"
16 #include "util/synthetic-events.h"
17 #include "util/target.h"
18 #include "util/time-utils.h"
19 #include "util/cgroup.h"
20 #include <linux/bitops.h>
21 #include <linux/kernel.h>
22 #include <linux/string.h>
23 #include <linux/zalloc.h>
24 #include <linux/perf_event.h>
25 #include <asm/bug.h>
26 #include <perf/evsel.h>
27 #include <internal/cpumap.h>
28 #include <perf/cpumap.h>
29 #include <internal/lib.h> // page_size
30 #include <internal/threadmap.h>
31 #include <perf/threadmap.h>
32 #include <symbol/kallsyms.h>
33 #include <dirent.h>
34 #include <errno.h>
35 #include <inttypes.h>
36 #include <stdio.h>
37 #include <string.h>
38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
39 #include <api/fs/fs.h>
40 #include <api/io.h>
41 #include <sys/types.h>
42 #include <sys/stat.h>
43 #include <fcntl.h>
44 #include <unistd.h>
45 
46 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
47 
48 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
49 
50 int perf_tool__process_synth_event(struct perf_tool *tool,
51 				   union perf_event *event,
52 				   struct machine *machine,
53 				   perf_event__handler_t process)
54 {
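	/*
	 * Synthesized events carry a placeholder sample: only cpumode is
	 * taken from the event header; everything else is unknown (-1) and
	 * the period is a nominal 1.
	 */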
55 	struct perf_sample synth_sample = {
56 		.pid	   = -1,
57 		.tid	   = -1,
58 		.time	   = -1,
59 		.stream_id = -1,
60 		.cpu	   = -1,
61 		.period	   = 1,
62 		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
63 	};
64 
65 	return process(tool, event, &synth_sample, machine);
66 };
67 
68 /*
69  * Assumes that the first 4095 bytes of /proc/pid/status contain
70  * the comm, tgid and ppid.
71  */
72 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
73 				    pid_t *tgid, pid_t *ppid)
74 {
75 	char bf[4096];
76 	int fd;
77 	size_t size = 0;
78 	ssize_t n;
79 	char *name, *tgids, *ppids;
80 
81 	*tgid = -1;
82 	*ppid = -1;
83 
84 	snprintf(bf, sizeof(bf), "/proc/%d/status", pid);
85 
86 	fd = open(bf, O_RDONLY);
87 	if (fd < 0) {
88 		pr_debug("couldn't open %s\n", bf);
89 		return -1;
90 	}
91 
92 	n = read(fd, bf, sizeof(bf) - 1);
93 	close(fd);
94 	if (n <= 0) {
95 		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
96 			   pid);
97 		return -1;
98 	}
99 	bf[n] = '\0';
100 
101 	name = strstr(bf, "Name:");
102 	tgids = strstr(bf, "Tgid:");
103 	ppids = strstr(bf, "PPid:");
104 
105 	if (name) {
106 		char *nl;
107 
108 		name = skip_spaces(name + 5);  /* strlen("Name:") */
109 		nl = strchr(name, '\n');
110 		if (nl)
111 			*nl = '\0';
112 
113 		size = strlen(name);
114 		if (size >= len)
115 			size = len - 1;
116 		memcpy(comm, name, size);
117 		comm[size] = '\0';
118 	} else {
119 		pr_debug("Name: string not found for pid %d\n", pid);
120 	}
121 
122 	if (tgids) {
123 		tgids += 5;  /* strlen("Tgid:") */
124 		*tgid = atoi(tgids);
125 	} else {
126 		pr_debug("Tgid: string not found for pid %d\n", pid);
127 	}
128 
129 	if (ppids) {
130 		ppids += 5;  /* strlen("PPid:") */
131 		*ppid = atoi(ppids);
132 	} else {
133 		pr_debug("PPid: string not found for pid %d\n", pid);
134 	}
135 
136 	return 0;
137 }
138 
139 static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
140 				    struct machine *machine,
141 				    pid_t *tgid, pid_t *ppid)
142 {
143 	size_t size;
144 
145 	*ppid = -1;
146 
147 	memset(&event->comm, 0, sizeof(event->comm));
148 
149 	if (machine__is_host(machine)) {
150 		if (perf_event__get_comm_ids(pid, event->comm.comm,
151 					     sizeof(event->comm.comm),
152 					     tgid, ppid) != 0) {
153 			return -1;
154 		}
155 	} else {
156 		*tgid = machine->pid;
157 	}
158 
159 	if (*tgid < 0)
160 		return -1;
161 
162 	event->comm.pid = *tgid;
163 	event->comm.header.type = PERF_RECORD_COMM;
164 
165 	size = strlen(event->comm.comm) + 1;
166 	size = PERF_ALIGN(size, sizeof(u64));
167 	memset(event->comm.comm + size, 0, machine->id_hdr_size);
168 	event->comm.header.size = (sizeof(event->comm) -
169 				(sizeof(event->comm.comm) - size) +
170 				machine->id_hdr_size);
171 	event->comm.tid = pid;
172 
173 	return 0;
174 }
175 
176 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
177 					 union perf_event *event, pid_t pid,
178 					 perf_event__handler_t process,
179 					 struct machine *machine)
180 {
181 	pid_t tgid, ppid;
182 
183 	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
184 		return -1;
185 
186 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
187 		return -1;
188 
189 	return tgid;
190 }
191 
192 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
193 					 struct perf_ns_link_info *ns_link_info)
194 {
195 	struct stat64 st;
196 	char proc_ns[128];
197 
198 	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
199 	if (stat64(proc_ns, &st) == 0) {
200 		ns_link_info->dev = st.st_dev;
201 		ns_link_info->ino = st.st_ino;
202 	}
203 }
204 
205 int perf_event__synthesize_namespaces(struct perf_tool *tool,
206 				      union perf_event *event,
207 				      pid_t pid, pid_t tgid,
208 				      perf_event__handler_t process,
209 				      struct machine *machine)
210 {
211 	u32 idx;
212 	struct perf_ns_link_info *ns_link_info;
213 
214 	if (!tool || !tool->namespace_events)
215 		return 0;
216 
217 	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
218 	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
219 	       machine->id_hdr_size));
220 
221 	event->namespaces.pid = tgid;
222 	event->namespaces.tid = pid;
223 
224 	event->namespaces.nr_namespaces = NR_NAMESPACES;
225 
226 	ns_link_info = event->namespaces.link_info;
227 
228 	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
229 		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
230 					     &ns_link_info[idx]);
231 
232 	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
233 
234 	event->namespaces.header.size = (sizeof(event->namespaces) +
235 			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
236 			machine->id_hdr_size);
237 
238 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
239 		return -1;
240 
241 	return 0;
242 }
243 
244 static int perf_event__synthesize_fork(struct perf_tool *tool,
245 				       union perf_event *event,
246 				       pid_t pid, pid_t tgid, pid_t ppid,
247 				       perf_event__handler_t process,
248 				       struct machine *machine)
249 {
250 	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
251 
252 	/*
253 	 * For the main thread, set the parent to the ppid from the status
254 	 * file. For other threads, set the parent pid to the main thread,
255 	 * i.e. assume the main thread spawns all threads in a process.
256 	 */
257 	if (tgid == pid) {
258 		event->fork.ppid = ppid;
259 		event->fork.ptid = ppid;
260 	} else {
261 		event->fork.ppid = tgid;
262 		event->fork.ptid = tgid;
263 	}
264 	event->fork.pid  = tgid;
265 	event->fork.tid  = pid;
266 	event->fork.header.type = PERF_RECORD_FORK;
267 	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
268 
269 	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
270 
271 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
272 		return -1;
273 
274 	return 0;
275 }
276 
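/*
 * Parse one line of /proc/<pid>/maps, e.g.:
 *   00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 * Returns false on malformed input or read error.
 */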
277 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
278 				u32 *prot, u32 *flags, __u64 *offset,
279 				u32 *maj, u32 *min,
280 				__u64 *inode,
281 				ssize_t pathname_size, char *pathname)
282 {
283 	__u64 temp;
284 	int ch;
285 	char *start_pathname = pathname;
286 
287 	if (io__get_hex(io, start) != '-')
288 		return false;
289 	if (io__get_hex(io, end) != ' ')
290 		return false;
291 
292 	/* map protection and flags bits */
293 	*prot = 0;
294 	ch = io__get_char(io);
295 	if (ch == 'r')
296 		*prot |= PROT_READ;
297 	else if (ch != '-')
298 		return false;
299 	ch = io__get_char(io);
300 	if (ch == 'w')
301 		*prot |= PROT_WRITE;
302 	else if (ch != '-')
303 		return false;
304 	ch = io__get_char(io);
305 	if (ch == 'x')
306 		*prot |= PROT_EXEC;
307 	else if (ch != '-')
308 		return false;
309 	ch = io__get_char(io);
310 	if (ch == 's')
311 		*flags = MAP_SHARED;
312 	else if (ch == 'p')
313 		*flags = MAP_PRIVATE;
314 	else
315 		return false;
316 	if (io__get_char(io) != ' ')
317 		return false;
318 
319 	if (io__get_hex(io, offset) != ' ')
320 		return false;
321 
322 	if (io__get_hex(io, &temp) != ':')
323 		return false;
324 	*maj = temp;
325 	if (io__get_hex(io, &temp) != ' ')
326 		return false;
327 	*min = temp;
328 
329 	ch = io__get_dec(io, inode);
330 	if (ch != ' ') {
331 		*pathname = '\0';
332 		return ch == '\n';
333 	}
334 	do {
335 		ch = io__get_char(io);
336 	} while (ch == ' ');
337 	while (true) {
338 		if (ch < 0)
339 			return false;
340 		if (ch == '\0' || ch == '\n' ||
341 		    (pathname + 1 - start_pathname) >= pathname_size) {
342 			*pathname = '\0';
343 			return true;
344 		}
345 		*pathname++ = ch;
346 		ch = io__get_char(io);
347 	}
348 }
349 
350 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
351 				       union perf_event *event,
352 				       pid_t pid, pid_t tgid,
353 				       perf_event__handler_t process,
354 				       struct machine *machine,
355 				       bool mmap_data)
356 {
357 	unsigned long long t;
358 	char bf[BUFSIZ];
359 	struct io io;
360 	bool truncation = false;
361 	unsigned long long timeout = proc_map_timeout * 1000000ULL;
362 	int rc = 0;
363 	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
364 	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
365 
366 	if (machine__is_default_guest(machine))
367 		return 0;
368 
369 	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
370 		machine->root_dir, pid, pid);
371 
372 	io.fd = open(bf, O_RDONLY, 0);
373 	if (io.fd < 0) {
374 		/*
375 		 * We raced with a task exiting - just return:
376 		 */
377 		pr_debug("couldn't open %s\n", bf);
378 		return -1;
379 	}
380 	io__init(&io, io.fd, bf, sizeof(bf));
381 
382 	event->header.type = PERF_RECORD_MMAP2;
383 	t = rdclock();
384 
385 	while (!io.eof) {
386 		static const char anonstr[] = "//anon";
387 		size_t size, aligned_size;
388 
389 		/* ensure null termination since stack will be reused. */
390 		event->mmap2.filename[0] = '\0';
391 
392 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
393 		if (!read_proc_maps_line(&io,
394 					&event->mmap2.start,
395 					&event->mmap2.len,
396 					&event->mmap2.prot,
397 					&event->mmap2.flags,
398 					&event->mmap2.pgoff,
399 					&event->mmap2.maj,
400 					&event->mmap2.min,
401 					&event->mmap2.ino,
402 					sizeof(event->mmap2.filename),
403 					event->mmap2.filename))
404 			continue;
405 
406 		if ((rdclock() - t) > timeout) {
407 			pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
408 				   "You may want to increase "
409 				   "the time limit with --proc-map-timeout\n",
410 				   machine->root_dir, pid, pid);
411 			truncation = true;
412 			goto out;
413 		}
414 
415 		event->mmap2.ino_generation = 0;
416 
417 		/*
418 		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
419 		 */
420 		if (machine__is_host(machine))
421 			event->header.misc = PERF_RECORD_MISC_USER;
422 		else
423 			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
424 
425 		if ((event->mmap2.prot & PROT_EXEC) == 0) {
426 			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
427 				continue;
428 
429 			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
430 		}
431 
432 out:
433 		if (truncation)
434 			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
435 
436 		if (!strcmp(event->mmap2.filename, ""))
437 			strcpy(event->mmap2.filename, anonstr);
438 
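		/*
		 * Report mappings backed by a hugetlbfs file as anonymous
		 * and flag them with MAP_HUGETLB.
		 */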
439 		if (hugetlbfs_mnt_len &&
440 		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
441 			     hugetlbfs_mnt_len)) {
442 			strcpy(event->mmap2.filename, anonstr);
443 			event->mmap2.flags |= MAP_HUGETLB;
444 		}
445 
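		/*
		 * Trim the event to the u64-aligned length of the filename
		 * actually used, then append the sample id header padding.
		 */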
446 		size = strlen(event->mmap2.filename) + 1;
447 		aligned_size = PERF_ALIGN(size, sizeof(u64));
448 		event->mmap2.len -= event->mmap2.start;
449 		event->mmap2.header.size = (sizeof(event->mmap2) -
450 					(sizeof(event->mmap2.filename) - aligned_size));
451 		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
452 			(aligned_size - size));
453 		event->mmap2.header.size += machine->id_hdr_size;
454 		event->mmap2.pid = tgid;
455 		event->mmap2.tid = pid;
456 
457 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
458 			rc = -1;
459 			break;
460 		}
461 
462 		if (truncation)
463 			break;
464 	}
465 
466 	close(io.fd);
467 	return rc;
468 }
469 
470 #ifdef HAVE_FILE_HANDLE
471 static int perf_event__synthesize_cgroup(struct perf_tool *tool,
472 					 union perf_event *event,
473 					 char *path, size_t mount_len,
474 					 perf_event__handler_t process,
475 					 struct machine *machine)
476 {
477 	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
478 	size_t path_len = strlen(path) - mount_len + 1;
479 	struct {
480 		struct file_handle fh;
481 		uint64_t cgroup_id;
482 	} handle;
483 	int mount_id;
484 
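	/* zero-pad the relative path up to the next u64 boundary */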
485 	while (path_len % sizeof(u64))
486 		path[mount_len + path_len++] = '\0';
487 
488 	memset(&event->cgroup, 0, event_size);
489 
490 	event->cgroup.header.type = PERF_RECORD_CGROUP;
491 	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
492 
493 	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
494 	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
495 		pr_debug("name_to_handle_at failed: %s\n", path);
496 		return -1;
497 	}
498 
499 	event->cgroup.id = handle.cgroup_id;
500 	strncpy(event->cgroup.path, path + mount_len, path_len);
501 	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
502 
503 	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
504 		pr_debug("process synth event failed\n");
505 		return -1;
506 	}
507 
508 	return 0;
509 }
510 
511 static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
512 					union perf_event *event,
513 					char *path, size_t mount_len,
514 					perf_event__handler_t process,
515 					struct machine *machine)
516 {
517 	size_t pos = strlen(path);
518 	DIR *d;
519 	struct dirent *dent;
520 	int ret = 0;
521 
522 	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
523 					  process, machine) < 0)
524 		return -1;
525 
526 	d = opendir(path);
527 	if (d == NULL) {
528 		pr_debug("failed to open directory: %s\n", path);
529 		return -1;
530 	}
531 
532 	while ((dent = readdir(d)) != NULL) {
533 		if (dent->d_type != DT_DIR)
534 			continue;
535 		if (!strcmp(dent->d_name, ".") ||
536 		    !strcmp(dent->d_name, ".."))
537 			continue;
538 
539 		/* any sane path should be less than PATH_MAX */
540 		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
541 			continue;
542 
543 		if (path[pos - 1] != '/')
544 			strcat(path, "/");
545 		strcat(path, dent->d_name);
546 
547 		ret = perf_event__walk_cgroup_tree(tool, event, path,
548 						   mount_len, process, machine);
549 		if (ret < 0)
550 			break;
551 
552 		path[pos] = '\0';
553 	}
554 
555 	closedir(d);
556 	return ret;
557 }
558 
559 int perf_event__synthesize_cgroups(struct perf_tool *tool,
560 				   perf_event__handler_t process,
561 				   struct machine *machine)
562 {
563 	union perf_event event;
564 	char cgrp_root[PATH_MAX];
565 	size_t mount_len;  /* length of mount point in the path */
566 
567 	if (!tool || !tool->cgroup_events)
568 		return 0;
569 
570 	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
571 		pr_debug("cannot find cgroup mount point\n");
572 		return -1;
573 	}
574 
575 	mount_len = strlen(cgrp_root);
576 	/* make sure the path starts with a slash (after mount point) */
577 	strcat(cgrp_root, "/");
578 
579 	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
580 					 process, machine) < 0)
581 		return -1;
582 
583 	return 0;
584 }
585 #else
586 int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
587 				   perf_event__handler_t process __maybe_unused,
588 				   struct machine *machine __maybe_unused)
589 {
590 	return -1;
591 }
592 #endif
593 
594 int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
595 				   struct machine *machine)
596 {
597 	int rc = 0;
598 	struct map *pos;
599 	struct maps *maps = machine__kernel_maps(machine);
600 	union perf_event *event = zalloc((sizeof(event->mmap) +
601 					  machine->id_hdr_size));
602 	if (event == NULL) {
603 		pr_debug("Not enough memory synthesizing mmap event "
604 			 "for kernel modules\n");
605 		return -1;
606 	}
607 
608 	event->header.type = PERF_RECORD_MMAP;
609 
610 	/*
611 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
612 	 * __perf_event_mmap
613 	 */
614 	if (machine__is_host(machine))
615 		event->header.misc = PERF_RECORD_MISC_KERNEL;
616 	else
617 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
618 
619 	maps__for_each_entry(maps, pos) {
620 		size_t size;
621 
622 		if (!__map__is_kmodule(pos))
623 			continue;
624 
625 		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
626 		event->mmap.header.type = PERF_RECORD_MMAP;
627 		event->mmap.header.size = (sizeof(event->mmap) -
628 				        (sizeof(event->mmap.filename) - size));
629 		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
630 		event->mmap.header.size += machine->id_hdr_size;
631 		event->mmap.start = pos->start;
632 		event->mmap.len   = pos->end - pos->start;
633 		event->mmap.pid   = machine->pid;
634 
635 		memcpy(event->mmap.filename, pos->dso->long_name,
636 		       pos->dso->long_name_len + 1);
637 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
638 			rc = -1;
639 			break;
640 		}
641 	}
642 
643 	free(event);
644 	return rc;
645 }
646 
647 static int __event__synthesize_thread(union perf_event *comm_event,
648 				      union perf_event *mmap_event,
649 				      union perf_event *fork_event,
650 				      union perf_event *namespaces_event,
651 				      pid_t pid, int full, perf_event__handler_t process,
652 				      struct perf_tool *tool, struct machine *machine, bool mmap_data)
653 {
654 	char filename[PATH_MAX];
655 	DIR *tasks;
656 	struct dirent *dirent;
657 	pid_t tgid, ppid;
658 	int rc = 0;
659 
660 	/* special case: only send one comm event using passed in pid */
661 	if (!full) {
662 		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
663 						   process, machine);
664 
665 		if (tgid == -1)
666 			return -1;
667 
668 		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
669 						      tgid, process, machine) < 0)
670 			return -1;
671 
672 		/*
673 		 * send mmap only for thread group leader
674 		 * see thread__init_maps()
675 		 */
676 		if (pid == tgid &&
677 		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
678 						       process, machine, mmap_data))
679 			return -1;
680 
681 		return 0;
682 	}
683 
684 	if (machine__is_default_guest(machine))
685 		return 0;
686 
687 	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
688 		 machine->root_dir, pid);
689 
690 	tasks = opendir(filename);
691 	if (tasks == NULL) {
692 		pr_debug("couldn't open %s\n", filename);
693 		return 0;
694 	}
695 
696 	while ((dirent = readdir(tasks)) != NULL) {
697 		char *end;
698 		pid_t _pid;
699 
700 		_pid = strtol(dirent->d_name, &end, 10);
701 		if (*end)
702 			continue;
703 
704 		rc = -1;
705 		if (perf_event__prepare_comm(comm_event, _pid, machine,
706 					     &tgid, &ppid) != 0)
707 			break;
708 
709 		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
710 						ppid, process, machine) < 0)
711 			break;
712 
713 		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
714 						      tgid, process, machine) < 0)
715 			break;
716 
717 		/*
718 		 * Send the prepared comm event
719 		 */
720 		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
721 			break;
722 
723 		rc = 0;
724 		if (_pid == pid) {
725 			/* process the parent's maps too */
726 			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
727 						process, machine, mmap_data);
728 			if (rc)
729 				break;
730 		}
731 	}
732 
733 	closedir(tasks);
734 	return rc;
735 }
736 
737 int perf_event__synthesize_thread_map(struct perf_tool *tool,
738 				      struct perf_thread_map *threads,
739 				      perf_event__handler_t process,
740 				      struct machine *machine,
741 				      bool mmap_data)
742 {
743 	union perf_event *comm_event, *mmap_event, *fork_event;
744 	union perf_event *namespaces_event;
745 	int err = -1, thread, j;
746 
747 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
748 	if (comm_event == NULL)
749 		goto out;
750 
751 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
752 	if (mmap_event == NULL)
753 		goto out_free_comm;
754 
755 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
756 	if (fork_event == NULL)
757 		goto out_free_mmap;
758 
759 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
760 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
761 				  machine->id_hdr_size);
762 	if (namespaces_event == NULL)
763 		goto out_free_fork;
764 
765 	err = 0;
766 	for (thread = 0; thread < threads->nr; ++thread) {
767 		if (__event__synthesize_thread(comm_event, mmap_event,
768 					       fork_event, namespaces_event,
769 					       perf_thread_map__pid(threads, thread), 0,
770 					       process, tool, machine,
771 					       mmap_data)) {
772 			err = -1;
773 			break;
774 		}
775 
776 		/*
777 		 * comm.pid is set to thread group id by
778 		 * perf_event__synthesize_comm
779 		 */
780 		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
781 			bool need_leader = true;
782 
783 			/* is thread group leader in thread_map? */
784 			for (j = 0; j < threads->nr; ++j) {
785 				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
786 					need_leader = false;
787 					break;
788 				}
789 			}
790 
791 			/* if not, generate events for it */
792 			if (need_leader &&
793 			    __event__synthesize_thread(comm_event, mmap_event,
794 						       fork_event, namespaces_event,
795 						       comm_event->comm.pid, 0,
796 						       process, tool, machine,
797 						       mmap_data)) {
798 				err = -1;
799 				break;
800 			}
801 		}
802 	}
803 	free(namespaces_event);
804 out_free_fork:
805 	free(fork_event);
806 out_free_mmap:
807 	free(mmap_event);
808 out_free_comm:
809 	free(comm_event);
810 out:
811 	return err;
812 }
813 
814 static int __perf_event__synthesize_threads(struct perf_tool *tool,
815 					    perf_event__handler_t process,
816 					    struct machine *machine,
817 					    bool mmap_data,
818 					    struct dirent **dirent,
819 					    int start,
820 					    int num)
821 {
822 	union perf_event *comm_event, *mmap_event, *fork_event;
823 	union perf_event *namespaces_event;
824 	int err = -1;
825 	char *end;
826 	pid_t pid;
827 	int i;
828 
829 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
830 	if (comm_event == NULL)
831 		goto out;
832 
833 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
834 	if (mmap_event == NULL)
835 		goto out_free_comm;
836 
837 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
838 	if (fork_event == NULL)
839 		goto out_free_mmap;
840 
841 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
842 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
843 				  machine->id_hdr_size);
844 	if (namespaces_event == NULL)
845 		goto out_free_fork;
846 
847 	for (i = start; i < start + num; i++) {
848 		if (!isdigit(dirent[i]->d_name[0]))
849 			continue;
850 
851 		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
852 		/* only interested in proper numerical dirents */
853 		if (*end)
854 			continue;
855 		/*
856 		 * We may race with exiting thread, so don't stop just because
857 		 * one thread couldn't be synthesized.
858 		 */
859 		__event__synthesize_thread(comm_event, mmap_event, fork_event,
860 					   namespaces_event, pid, 1, process,
861 					   tool, machine, mmap_data);
862 	}
863 	err = 0;
864 
865 	free(namespaces_event);
866 out_free_fork:
867 	free(fork_event);
868 out_free_mmap:
869 	free(mmap_event);
870 out_free_comm:
871 	free(comm_event);
872 out:
873 	return err;
874 }
875 
876 struct synthesize_threads_arg {
877 	struct perf_tool *tool;
878 	perf_event__handler_t process;
879 	struct machine *machine;
880 	bool mmap_data;
881 	struct dirent **dirent;
882 	int num;
883 	int start;
884 };
885 
886 static void *synthesize_threads_worker(void *arg)
887 {
888 	struct synthesize_threads_arg *args = arg;
889 
890 	__perf_event__synthesize_threads(args->tool, args->process,
891 					 args->machine, args->mmap_data,
892 					 args->dirent,
893 					 args->start, args->num);
894 	return NULL;
895 }
896 
897 int perf_event__synthesize_threads(struct perf_tool *tool,
898 				   perf_event__handler_t process,
899 				   struct machine *machine,
900 				   bool mmap_data,
901 				   unsigned int nr_threads_synthesize)
902 {
903 	struct synthesize_threads_arg *args = NULL;
904 	pthread_t *synthesize_threads = NULL;
905 	char proc_path[PATH_MAX];
906 	struct dirent **dirent;
907 	int num_per_thread;
908 	int m, n, i, j;
909 	int thread_nr;
910 	int base = 0;
911 	int err = -1;
912 
913 
914 	if (machine__is_default_guest(machine))
915 		return 0;
916 
917 	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
918 	n = scandir(proc_path, &dirent, 0, alphasort);
919 	if (n < 0)
920 		return err;
921 
922 	if (nr_threads_synthesize == UINT_MAX)
923 		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
924 	else
925 		thread_nr = nr_threads_synthesize;
926 
927 	if (thread_nr <= 1) {
928 		err = __perf_event__synthesize_threads(tool, process,
929 						       machine, mmap_data,
930 						       dirent, base, n);
931 		goto free_dirent;
932 	}
933 	if (thread_nr > n)
934 		thread_nr = n;
935 
936 	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
937 	if (synthesize_threads == NULL)
938 		goto free_dirent;
939 
940 	args = calloc(thread_nr, sizeof(*args));
941 	if (args == NULL)
942 		goto free_threads;
943 
944 	num_per_thread = n / thread_nr;
945 	m = n % thread_nr;
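	/*
	 * Distribute the n /proc entries across thread_nr workers: the
	 * first m workers take num_per_thread + 1 entries each, the rest
	 * take num_per_thread each.
	 */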
946 	for (i = 0; i < thread_nr; i++) {
947 		args[i].tool = tool;
948 		args[i].process = process;
949 		args[i].machine = machine;
950 		args[i].mmap_data = mmap_data;
951 		args[i].dirent = dirent;
952 	}
953 	for (i = 0; i < m; i++) {
954 		args[i].num = num_per_thread + 1;
955 		args[i].start = i * args[i].num;
956 	}
957 	if (i != 0)
958 		base = args[i-1].start + args[i-1].num;
959 	for (j = i; j < thread_nr; j++) {
960 		args[j].num = num_per_thread;
961 		args[j].start = base + (j - i) * args[i].num;
962 	}
963 
964 	for (i = 0; i < thread_nr; i++) {
965 		if (pthread_create(&synthesize_threads[i], NULL,
966 				   synthesize_threads_worker, &args[i]))
967 			goto out_join;
968 	}
969 	err = 0;
970 out_join:
971 	for (i = 0; i < thread_nr; i++)
972 		pthread_join(synthesize_threads[i], NULL);
973 	free(args);
974 free_threads:
975 	free(synthesize_threads);
976 free_dirent:
977 	for (i = 0; i < n; i++)
978 		zfree(&dirent[i]);
979 	free(dirent);
980 
981 	return err;
982 }
983 
984 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
985 					      perf_event__handler_t process __maybe_unused,
986 					      struct machine *machine __maybe_unused)
987 {
988 	return 0;
989 }
990 
991 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
992 						perf_event__handler_t process,
993 						struct machine *machine)
994 {
995 	size_t size;
996 	struct map *map = machine__kernel_map(machine);
997 	struct kmap *kmap;
998 	int err;
999 	union perf_event *event;
1000 
1001 	if (map == NULL)
1002 		return -1;
1003 
1004 	kmap = map__kmap(map);
1005 	if (!kmap->ref_reloc_sym)
1006 		return -1;
1007 
1008 	/*
1009 	 * We should get this from /sys/kernel/sections/.text, but until that is
1010 	 * available use this approach, and once it is, keep this as a fallback
1011 	 * for older kernels.
1012 	 */
1013 	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
1014 	if (event == NULL) {
1015 		pr_debug("Not enough memory synthesizing mmap event "
1016 			 "for the kernel map\n");
1017 		return -1;
1018 	}
1019 
1020 	if (machine__is_host(machine)) {
1021 		/*
1022 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
1023 		 * see kernel/perf_event.c __perf_event_mmap
1024 		 */
1025 		event->header.misc = PERF_RECORD_MISC_KERNEL;
1026 	} else {
1027 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
1028 	}
1029 
1030 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
1031 			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1032 	size = PERF_ALIGN(size, sizeof(u64));
1033 	event->mmap.header.type = PERF_RECORD_MMAP;
1034 	event->mmap.header.size = (sizeof(event->mmap) -
1035 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
1036 	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
1037 	event->mmap.start = map->start;
1038 	event->mmap.len   = map->end - event->mmap.start;
1039 	event->mmap.pid   = machine->pid;
1040 
1041 	err = perf_tool__process_synth_event(tool, event, machine, process);
1042 	free(event);
1043 
1044 	return err;
1045 }
1046 
1047 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1048 				       perf_event__handler_t process,
1049 				       struct machine *machine)
1050 {
1051 	int err;
1052 
1053 	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
1054 	if (err < 0)
1055 		return err;
1056 
1057 	return perf_event__synthesize_extra_kmaps(tool, process, machine);
1058 }
1059 
1060 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
1061 				      struct perf_thread_map *threads,
1062 				      perf_event__handler_t process,
1063 				      struct machine *machine)
1064 {
1065 	union perf_event *event;
1066 	int i, err, size;
1067 
1068 	size  = sizeof(event->thread_map);
1069 	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
1070 
1071 	event = zalloc(size);
1072 	if (!event)
1073 		return -ENOMEM;
1074 
1075 	event->header.type = PERF_RECORD_THREAD_MAP;
1076 	event->header.size = size;
1077 	event->thread_map.nr = threads->nr;
1078 
1079 	for (i = 0; i < threads->nr; i++) {
1080 		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
1081 		char *comm = perf_thread_map__comm(threads, i);
1082 
1083 		if (!comm)
1084 			comm = (char *) "";
1085 
1086 		entry->pid = perf_thread_map__pid(threads, i);
1087 		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1088 	}
1089 
1090 	err = process(tool, event, NULL, machine);
1091 
1092 	free(event);
1093 	return err;
1094 }
1095 
1096 static void synthesize_cpus(struct cpu_map_entries *cpus,
1097 			    struct perf_cpu_map *map)
1098 {
1099 	int i;
1100 
1101 	cpus->nr = map->nr;
1102 
1103 	for (i = 0; i < map->nr; i++)
1104 		cpus->cpu[i] = map->map[i];
1105 }
1106 
1107 static void synthesize_mask(struct perf_record_record_cpu_map *mask,
1108 			    struct perf_cpu_map *map, int max)
1109 {
1110 	int i;
1111 
1112 	mask->nr = BITS_TO_LONGS(max);
1113 	mask->long_size = sizeof(long);
1114 
1115 	for (i = 0; i < map->nr; i++)
1116 		set_bit(map->map[i], mask->mask);
1117 }
1118 
1119 static size_t cpus_size(struct perf_cpu_map *map)
1120 {
1121 	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1122 }
1123 
1124 static size_t mask_size(struct perf_cpu_map *map, int *max)
1125 {
1126 	int i;
1127 
1128 	*max = 0;
1129 
1130 	for (i = 0; i < map->nr; i++) {
1131 		/* bit position of the cpu is + 1 */
1132 		int bit = map->map[i] + 1;
1133 
1134 		if (bit > *max)
1135 			*max = bit;
1136 	}
1137 
1138 	return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
1139 }
1140 
1141 void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
1142 {
1143 	size_t size_cpus, size_mask;
1144 	bool is_dummy = perf_cpu_map__empty(map);
1145 
1146 	/*
1147 	 * Both array and mask data have variable size based
1148 	 * on the number of cpus and their actual values.
1149 	 * The size of the 'struct perf_record_cpu_map_data' is:
1150 	 *
1151 	 *   array = size of 'struct cpu_map_entries' +
1152 	 *           number of cpus * sizeof(u64)
1153 	 *
1154 	 *   mask  = size of 'struct perf_record_record_cpu_map' +
1155 	 *           maximum cpu bit converted to size of longs
1156 	 *
1157 	 * and finally + the size of 'struct perf_record_cpu_map_data'.
1158 	 */
1159 	size_cpus = cpus_size(map);
1160 	size_mask = mask_size(map, max);
1161 
1162 	if (is_dummy || (size_cpus < size_mask)) {
1163 		*size += size_cpus;
1164 		*type  = PERF_CPU_MAP__CPUS;
1165 	} else {
1166 		*size += size_mask;
1167 		*type  = PERF_CPU_MAP__MASK;
1168 	}
1169 
1170 	*size += sizeof(struct perf_record_cpu_map_data);
1171 	*size = PERF_ALIGN(*size, sizeof(u64));
1172 	return zalloc(*size);
1173 }
1174 
1175 void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
1176 			      u16 type, int max)
1177 {
1178 	data->type = type;
1179 
1180 	switch (type) {
1181 	case PERF_CPU_MAP__CPUS:
1182 		synthesize_cpus((struct cpu_map_entries *) data->data, map);
1183 		break;
1184 	case PERF_CPU_MAP__MASK:
1185 		synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
1186 	default:
1187 		break;
1188 	}
1189 }
1190 
1191 static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
1192 {
1193 	size_t size = sizeof(struct perf_record_cpu_map);
1194 	struct perf_record_cpu_map *event;
1195 	int max;
1196 	u16 type;
1197 
1198 	event = cpu_map_data__alloc(map, &size, &type, &max);
1199 	if (!event)
1200 		return NULL;
1201 
1202 	event->header.type = PERF_RECORD_CPU_MAP;
1203 	event->header.size = size;
1204 	event->data.type   = type;
1205 
1206 	cpu_map_data__synthesize(&event->data, map, type, max);
1207 	return event;
1208 }
1209 
1210 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1211 				   struct perf_cpu_map *map,
1212 				   perf_event__handler_t process,
1213 				   struct machine *machine)
1214 {
1215 	struct perf_record_cpu_map *event;
1216 	int err;
1217 
1218 	event = cpu_map_event__new(map);
1219 	if (!event)
1220 		return -ENOMEM;
1221 
1222 	err = process(tool, (union perf_event *) event, NULL, machine);
1223 
1224 	free(event);
1225 	return err;
1226 }
1227 
1228 int perf_event__synthesize_stat_config(struct perf_tool *tool,
1229 				       struct perf_stat_config *config,
1230 				       perf_event__handler_t process,
1231 				       struct machine *machine)
1232 {
1233 	struct perf_record_stat_config *event;
1234 	int size, i = 0, err;
1235 
1236 	size  = sizeof(*event);
1237 	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1238 
1239 	event = zalloc(size);
1240 	if (!event)
1241 		return -ENOMEM;
1242 
1243 	event->header.type = PERF_RECORD_STAT_CONFIG;
1244 	event->header.size = size;
1245 	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1246 
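	/* append one config term and advance the entry index */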
1247 #define ADD(__term, __val)					\
1248 	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
1249 	event->data[i].val = __val;				\
1250 	i++;
1251 
1252 	ADD(AGGR_MODE,	config->aggr_mode)
1253 	ADD(INTERVAL,	config->interval)
1254 	ADD(SCALE,	config->scale)
1255 
1256 	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1257 		  "stat config terms unbalanced\n");
1258 #undef ADD
1259 
1260 	err = process(tool, (union perf_event *) event, NULL, machine);
1261 
1262 	free(event);
1263 	return err;
1264 }
1265 
1266 int perf_event__synthesize_stat(struct perf_tool *tool,
1267 				u32 cpu, u32 thread, u64 id,
1268 				struct perf_counts_values *count,
1269 				perf_event__handler_t process,
1270 				struct machine *machine)
1271 {
1272 	struct perf_record_stat event;
1273 
1274 	event.header.type = PERF_RECORD_STAT;
1275 	event.header.size = sizeof(event);
1276 	event.header.misc = 0;
1277 
1278 	event.id        = id;
1279 	event.cpu       = cpu;
1280 	event.thread    = thread;
1281 	event.val       = count->val;
1282 	event.ena       = count->ena;
1283 	event.run       = count->run;
1284 
1285 	return process(tool, (union perf_event *) &event, NULL, machine);
1286 }
1287 
1288 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1289 				      u64 evtime, u64 type,
1290 				      perf_event__handler_t process,
1291 				      struct machine *machine)
1292 {
1293 	struct perf_record_stat_round event;
1294 
1295 	event.header.type = PERF_RECORD_STAT_ROUND;
1296 	event.header.size = sizeof(event);
1297 	event.header.misc = 0;
1298 
1299 	event.time = evtime;
1300 	event.type = type;
1301 
1302 	return process(tool, (union perf_event *) &event, NULL, machine);
1303 }
1304 
1305 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1306 {
1307 	size_t sz, result = sizeof(struct perf_record_sample);
1308 
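	/*
	 * Accumulate, in the kernel's sample layout order, the size each
	 * selected sample_type field will occupy.
	 */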
1309 	if (type & PERF_SAMPLE_IDENTIFIER)
1310 		result += sizeof(u64);
1311 
1312 	if (type & PERF_SAMPLE_IP)
1313 		result += sizeof(u64);
1314 
1315 	if (type & PERF_SAMPLE_TID)
1316 		result += sizeof(u64);
1317 
1318 	if (type & PERF_SAMPLE_TIME)
1319 		result += sizeof(u64);
1320 
1321 	if (type & PERF_SAMPLE_ADDR)
1322 		result += sizeof(u64);
1323 
1324 	if (type & PERF_SAMPLE_ID)
1325 		result += sizeof(u64);
1326 
1327 	if (type & PERF_SAMPLE_STREAM_ID)
1328 		result += sizeof(u64);
1329 
1330 	if (type & PERF_SAMPLE_CPU)
1331 		result += sizeof(u64);
1332 
1333 	if (type & PERF_SAMPLE_PERIOD)
1334 		result += sizeof(u64);
1335 
1336 	if (type & PERF_SAMPLE_READ) {
1337 		result += sizeof(u64);
1338 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1339 			result += sizeof(u64);
1340 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1341 			result += sizeof(u64);
1342 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1343 		if (read_format & PERF_FORMAT_GROUP) {
1344 			sz = sample->read.group.nr *
1345 			     sizeof(struct sample_read_value);
1346 			result += sz;
1347 		} else {
1348 			result += sizeof(u64);
1349 		}
1350 	}
1351 
1352 	if (type & PERF_SAMPLE_CALLCHAIN) {
1353 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1354 		result += sz;
1355 	}
1356 
1357 	if (type & PERF_SAMPLE_RAW) {
1358 		result += sizeof(u32);
1359 		result += sample->raw_size;
1360 	}
1361 
1362 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1363 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1364 		/* nr, hw_idx */
1365 		sz += 2 * sizeof(u64);
1366 		result += sz;
1367 	}
1368 
1369 	if (type & PERF_SAMPLE_REGS_USER) {
1370 		if (sample->user_regs.abi) {
1371 			result += sizeof(u64);
1372 			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1373 			result += sz;
1374 		} else {
1375 			result += sizeof(u64);
1376 		}
1377 	}
1378 
1379 	if (type & PERF_SAMPLE_STACK_USER) {
1380 		sz = sample->user_stack.size;
1381 		result += sizeof(u64);
1382 		if (sz) {
1383 			result += sz;
1384 			result += sizeof(u64);
1385 		}
1386 	}
1387 
1388 	if (type & PERF_SAMPLE_WEIGHT)
1389 		result += sizeof(u64);
1390 
1391 	if (type & PERF_SAMPLE_DATA_SRC)
1392 		result += sizeof(u64);
1393 
1394 	if (type & PERF_SAMPLE_TRANSACTION)
1395 		result += sizeof(u64);
1396 
1397 	if (type & PERF_SAMPLE_REGS_INTR) {
1398 		if (sample->intr_regs.abi) {
1399 			result += sizeof(u64);
1400 			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1401 			result += sz;
1402 		} else {
1403 			result += sizeof(u64);
1404 		}
1405 	}
1406 
1407 	if (type & PERF_SAMPLE_PHYS_ADDR)
1408 		result += sizeof(u64);
1409 
1410 	if (type & PERF_SAMPLE_CGROUP)
1411 		result += sizeof(u64);
1412 
1413 	if (type & PERF_SAMPLE_AUX) {
1414 		result += sizeof(u64);
1415 		result += sample->aux_sample.size;
1416 	}
1417 
1418 	return result;
1419 }
1420 
1421 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1422 				  const struct perf_sample *sample)
1423 {
1424 	__u64 *array;
1425 	size_t sz;
1426 	/*
1427 	 * used for cross-endian analysis. See git commit 65014ab3
1428 	 * for why this goofiness is needed.
1429 	 */
1430 	union u64_swap u;
1431 
1432 	array = event->sample.array;
1433 
1434 	if (type & PERF_SAMPLE_IDENTIFIER) {
1435 		*array = sample->id;
1436 		array++;
1437 	}
1438 
1439 	if (type & PERF_SAMPLE_IP) {
1440 		*array = sample->ip;
1441 		array++;
1442 	}
1443 
1444 	if (type & PERF_SAMPLE_TID) {
1445 		u.val32[0] = sample->pid;
1446 		u.val32[1] = sample->tid;
1447 		*array = u.val64;
1448 		array++;
1449 	}
1450 
1451 	if (type & PERF_SAMPLE_TIME) {
1452 		*array = sample->time;
1453 		array++;
1454 	}
1455 
1456 	if (type & PERF_SAMPLE_ADDR) {
1457 		*array = sample->addr;
1458 		array++;
1459 	}
1460 
1461 	if (type & PERF_SAMPLE_ID) {
1462 		*array = sample->id;
1463 		array++;
1464 	}
1465 
1466 	if (type & PERF_SAMPLE_STREAM_ID) {
1467 		*array = sample->stream_id;
1468 		array++;
1469 	}
1470 
1471 	if (type & PERF_SAMPLE_CPU) {
1472 		u.val32[0] = sample->cpu;
1473 		u.val32[1] = 0;
1474 		*array = u.val64;
1475 		array++;
1476 	}
1477 
1478 	if (type & PERF_SAMPLE_PERIOD) {
1479 		*array = sample->period;
1480 		array++;
1481 	}
1482 
1483 	if (type & PERF_SAMPLE_READ) {
1484 		if (read_format & PERF_FORMAT_GROUP)
1485 			*array = sample->read.group.nr;
1486 		else
1487 			*array = sample->read.one.value;
1488 		array++;
1489 
1490 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1491 			*array = sample->read.time_enabled;
1492 			array++;
1493 		}
1494 
1495 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1496 			*array = sample->read.time_running;
1497 			array++;
1498 		}
1499 
1500 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1501 		if (read_format & PERF_FORMAT_GROUP) {
1502 			sz = sample->read.group.nr *
1503 			     sizeof(struct sample_read_value);
1504 			memcpy(array, sample->read.group.values, sz);
1505 			array = (void *)array + sz;
1506 		} else {
1507 			*array = sample->read.one.id;
1508 			array++;
1509 		}
1510 	}
1511 
1512 	if (type & PERF_SAMPLE_CALLCHAIN) {
1513 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1514 		memcpy(array, sample->callchain, sz);
1515 		array = (void *)array + sz;
1516 	}
1517 
1518 	if (type & PERF_SAMPLE_RAW) {
1519 		u.val32[0] = sample->raw_size;
1520 		*array = u.val64;
1521 		array = (void *)array + sizeof(u32);
1522 
1523 		memcpy(array, sample->raw_data, sample->raw_size);
1524 		array = (void *)array + sample->raw_size;
1525 	}
1526 
1527 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1528 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1529 		/* nr, hw_idx */
1530 		sz += 2 * sizeof(u64);
1531 		memcpy(array, sample->branch_stack, sz);
1532 		array = (void *)array + sz;
1533 	}
1534 
1535 	if (type & PERF_SAMPLE_REGS_USER) {
1536 		if (sample->user_regs.abi) {
1537 			*array++ = sample->user_regs.abi;
1538 			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1539 			memcpy(array, sample->user_regs.regs, sz);
1540 			array = (void *)array + sz;
1541 		} else {
1542 			*array++ = 0;
1543 		}
1544 	}
1545 
1546 	if (type & PERF_SAMPLE_STACK_USER) {
1547 		sz = sample->user_stack.size;
1548 		*array++ = sz;
1549 		if (sz) {
1550 			memcpy(array, sample->user_stack.data, sz);
1551 			array = (void *)array + sz;
1552 			*array++ = sz;
1553 		}
1554 	}
1555 
1556 	if (type & PERF_SAMPLE_WEIGHT) {
1557 		*array = sample->weight;
1558 		array++;
1559 	}
1560 
1561 	if (type & PERF_SAMPLE_DATA_SRC) {
1562 		*array = sample->data_src;
1563 		array++;
1564 	}
1565 
1566 	if (type & PERF_SAMPLE_TRANSACTION) {
1567 		*array = sample->transaction;
1568 		array++;
1569 	}
1570 
1571 	if (type & PERF_SAMPLE_REGS_INTR) {
1572 		if (sample->intr_regs.abi) {
1573 			*array++ = sample->intr_regs.abi;
1574 			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1575 			memcpy(array, sample->intr_regs.regs, sz);
1576 			array = (void *)array + sz;
1577 		} else {
1578 			*array++ = 0;
1579 		}
1580 	}
1581 
1582 	if (type & PERF_SAMPLE_PHYS_ADDR) {
1583 		*array = sample->phys_addr;
1584 		array++;
1585 	}
1586 
1587 	if (type & PERF_SAMPLE_CGROUP) {
1588 		*array = sample->cgroup;
1589 		array++;
1590 	}
1591 
1592 	if (type & PERF_SAMPLE_AUX) {
1593 		sz = sample->aux_sample.size;
1594 		*array++ = sz;
1595 		memcpy(array, sample->aux_sample.data, sz);
1596 		array = (void *)array + sz;
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1603 				    struct evlist *evlist, struct machine *machine)
1604 {
1605 	union perf_event *ev;
1606 	struct evsel *evsel;
1607 	size_t nr = 0, i = 0, sz, max_nr, n;
1608 	int err;
1609 
1610 	pr_debug2("Synthesizing id index\n");
1611 
1612 	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1613 		 sizeof(struct id_index_entry);
1614 
1615 	evlist__for_each_entry(evlist, evsel)
1616 		nr += evsel->core.ids;
1617 
1618 	n = nr > max_nr ? max_nr : nr;
1619 	sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1620 	ev = zalloc(sz);
1621 	if (!ev)
1622 		return -ENOMEM;
1623 
1624 	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1625 	ev->id_index.header.size = sz;
1626 	ev->id_index.nr = n;
1627 
1628 	evlist__for_each_entry(evlist, evsel) {
1629 		u32 j;
1630 
1631 		for (j = 0; j < evsel->core.ids; j++) {
1632 			struct id_index_entry *e;
1633 			struct perf_sample_id *sid;
1634 
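			/*
			 * The buffer holds at most n entries; when it fills
			 * up, emit it via process() and start refilling it
			 * for the remaining ids.
			 */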
1635 			if (i >= n) {
1636 				err = process(tool, ev, NULL, machine);
1637 				if (err)
1638 					goto out_err;
1639 				nr -= n;
1640 				i = 0;
1641 			}
1642 
1643 			e = &ev->id_index.entries[i++];
1644 
1645 			e->id = evsel->core.id[j];
1646 
1647 			sid = perf_evlist__id2sid(evlist, e->id);
1648 			if (!sid) {
1649 				free(ev);
1650 				return -ENOENT;
1651 			}
1652 
1653 			e->idx = sid->idx;
1654 			e->cpu = sid->cpu;
1655 			e->tid = sid->tid;
1656 		}
1657 	}
1658 
1659 	sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1660 	ev->id_index.header.size = sz;
1661 	ev->id_index.nr = nr;
1662 
1663 	err = process(tool, ev, NULL, machine);
1664 out_err:
1665 	free(ev);
1666 
1667 	return err;
1668 }
1669 
1670 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1671 				  struct target *target, struct perf_thread_map *threads,
1672 				  perf_event__handler_t process, bool data_mmap,
1673 				  unsigned int nr_threads_synthesize)
1674 {
1675 	if (target__has_task(target))
1676 		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1677 	else if (target__has_cpu(target))
1678 		return perf_event__synthesize_threads(tool, process,
1679 						      machine, data_mmap,
1680 						      nr_threads_synthesize);
1681 	/* command specified */
1682 	return 0;
1683 }
1684 
1685 int machine__synthesize_threads(struct machine *machine, struct target *target,
1686 				struct perf_thread_map *threads, bool data_mmap,
1687 				unsigned int nr_threads_synthesize)
1688 {
1689 	return __machine__synthesize_threads(machine, NULL, target, threads,
1690 					     perf_event__process, data_mmap,
1691 					     nr_threads_synthesize);
1692 }
1693 
1694 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1695 {
1696 	struct perf_record_event_update *ev;
1697 
1698 	size += sizeof(*ev);
1699 	size  = PERF_ALIGN(size, sizeof(u64));
1700 
1701 	ev = zalloc(size);
1702 	if (ev) {
1703 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
1704 		ev->header.size = (u16)size;
1705 		ev->type	= type;
1706 		ev->id		= id;
1707 	}
1708 	return ev;
1709 }
1710 
1711 int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1712 					     perf_event__handler_t process)
1713 {
1714 	size_t size = strlen(evsel->unit);
1715 	struct perf_record_event_update *ev;
1716 	int err;
1717 
1718 	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1719 	if (ev == NULL)
1720 		return -ENOMEM;
1721 
1722 	strlcpy(ev->data, evsel->unit, size + 1);
1723 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1724 	free(ev);
1725 	return err;
1726 }
1727 
1728 int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1729 					      perf_event__handler_t process)
1730 {
1731 	struct perf_record_event_update *ev;
1732 	struct perf_record_event_update_scale *ev_data;
1733 	int err;
1734 
1735 	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1736 	if (ev == NULL)
1737 		return -ENOMEM;
1738 
1739 	ev_data = (struct perf_record_event_update_scale *)ev->data;
1740 	ev_data->scale = evsel->scale;
1741 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1742 	free(ev);
1743 	return err;
1744 }
1745 
1746 int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1747 					     perf_event__handler_t process)
1748 {
1749 	struct perf_record_event_update *ev;
1750 	size_t len = strlen(evsel->name);
1751 	int err;
1752 
1753 	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1754 	if (ev == NULL)
1755 		return -ENOMEM;
1756 
1757 	strlcpy(ev->data, evsel->name, len + 1);
1758 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1759 	free(ev);
1760 	return err;
1761 }
1762 
1763 int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1764 					     perf_event__handler_t process)
1765 {
1766 	size_t size = sizeof(struct perf_record_event_update);
1767 	struct perf_record_event_update *ev;
1768 	int max, err;
1769 	u16 type;
1770 
1771 	if (!evsel->core.own_cpus)
1772 		return 0;
1773 
1774 	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1775 	if (!ev)
1776 		return -ENOMEM;
1777 
1778 	ev->header.type = PERF_RECORD_EVENT_UPDATE;
1779 	ev->header.size = (u16)size;
1780 	ev->type	= PERF_EVENT_UPDATE__CPUS;
1781 	ev->id		= evsel->core.id[0];
1782 
1783 	cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1784 				 evsel->core.own_cpus, type, max);
1785 
1786 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1787 	free(ev);
1788 	return err;
1789 }
1790 
1791 int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1792 				 perf_event__handler_t process)
1793 {
1794 	struct evsel *evsel;
1795 	int err = 0;
1796 
1797 	evlist__for_each_entry(evlist, evsel) {
1798 		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1799 						  evsel->core.id, process);
1800 		if (err) {
1801 			pr_debug("failed to create perf header attribute\n");
1802 			return err;
1803 		}
1804 	}
1805 
1806 	return err;
1807 }
1808 
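/*
 * Only synthesize unit/scale updates when the evsel actually defines them:
 * a non-empty unit string or a scaling factor different from 1.
 */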
static bool has_unit(struct evsel *evsel)
{
	return evsel->unit && *evsel->unit;
}

static bool has_scale(struct evsel *evsel)
{
	return evsel->scale != 1;
}

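/*
 * Synthesize the event-update records (unit, scale, CPU map and, for pipe
 * output, name) that are not carried by the attr event itself, for every
 * supported evsel in the list.
 */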
int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
				      perf_event__handler_t process, bool is_pipe)
{
	struct evsel *evsel;
	int err;

	/*
	 * Synthesize the additional event details not carried within the
	 * attr event: unit, scale and name.
	 */
	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel->supported)
			continue;

		/*
		 * Synthesize unit and scale only if they are defined.
		 */
		if (has_unit(evsel)) {
			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(evsel)) {
			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
				return err;
			}
		}

		if (evsel->core.own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}

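/*
 * Synthesize a single PERF_RECORD_HEADER_ATTR event: the perf_event_attr
 * (padded to a u64 boundary) followed by the array of @ids sample IDs.
 * Fails with -E2BIG if the resulting record does not fit in the 16-bit
 * header size field.
 */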
int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
				u32 ids, u64 *id, perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = zalloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}

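/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event on a pipe: emit a
 * header announcing the (u64-aligned) size of the tracing data, then let
 * tracing_data_put() stream the data itself, followed by alignment padding.
 * Returns the aligned size on success.
 */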
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the output fd is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	err = process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	if (err)
		return err;

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}

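/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for a DSO that was hit,
 * carrying its build ID and long name (padded to NAME_ALIGN), and hand it
 * to @process.
 */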
int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
				    perf_event__handler_t process, struct machine *machine)
{
	union perf_event ev;
	size_t len;

	if (!pos->hit)
		return 0;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	return process(tool, &ev, NULL, machine);
}

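/*
 * Synthesize the setup events for stat data: optionally the attr events,
 * then the extra attr updates, the thread map, the CPU map and the stat
 * config, in that order.
 */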
int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
{
	int err;

	if (attrs) {
		err = perf_event__synthesize_attrs(tool, evlist, process);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}
	}

	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
	if (err < 0) {
		pr_err("Couldn't synthesize extra attrs.\n");
		return err;
	}

	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize config.\n");
		return err;
	}

	return 0;
}

extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];

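/*
 * Synthesize one PERF_RECORD_HEADER_FEATURE event for each feature bit set
 * in the session header whose feat_ops entry is marked for synthesis, then
 * a closing HEADER_LAST_FEATURE record.
 */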
int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
				    struct evlist *evlist, perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct perf_record_header_feature *fe;
	struct feat_fd ff;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header %d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id     = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}