// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/string2.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"
#include "util/util.h"
#include "util/tsc.h"

#include <internal/lib.h>

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/hash.h>
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <inttypes.h>

struct guest_event {
	struct perf_sample sample;
	union perf_event *event;
	char event_buf[PERF_SAMPLE_MAX_SIZE];
};

struct guest_id {
	/* hlist_node must be first, see free_hlist() */
	struct hlist_node node;
	u64 id;
	u64 host_id;
	u32 vcpu;
};

struct guest_tid {
	/* hlist_node must be first, see free_hlist() */
	struct hlist_node node;
	/* Thread ID of QEMU thread */
	u32 tid;
	u32 vcpu;
};

struct guest_vcpu {
	/* Current host CPU */
	u32 cpu;
	/* Thread ID of QEMU thread */
	u32 tid;
};

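/*
 * State for injecting events from a guest perf.data file (--guest-data) into
 * the host session: the guest file is processed into a temporary file first,
 * and its events are then interleaved with the host events.
 */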
struct guest_session {
	char *perf_data_file;
	u32 machine_pid;
	u64 time_offset;
	double time_scale;
	struct perf_tool tool;
	struct perf_data data;
	struct perf_session *session;
	char *tmp_file_name;
	int tmp_fd;
	struct perf_tsc_conversion host_tc;
	struct perf_tsc_conversion guest_tc;
	bool copy_kcore_dir;
	bool have_tc;
	bool fetched;
	bool ready;
	u16 dflt_id_hdr_size;
	u64 dflt_id;
	u64 highest_id;
	/* Array of guest_vcpu */
	struct guest_vcpu *vcpu;
	size_t vcpu_cnt;
	/* Hash table for guest_id */
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
	/* Hash table for guest_tid */
	struct hlist_head tids[PERF_EVLIST__HLIST_SIZE];
	/* Place to stash next guest event */
	struct guest_event ev;
};

struct perf_inject {
	struct perf_tool tool;
	struct perf_session *session;
	bool build_ids;
	bool build_id_all;
	bool sched_stat;
	bool have_auxtrace;
	bool strip;
	bool jit_mode;
	bool in_place_update;
	bool in_place_update_dry_run;
	bool is_pipe;
	bool copy_kcore_dir;
	const char *input_name;
	struct perf_data output;
	u64 bytes_written;
	u64 aux_id;
	struct list_head samples;
	struct itrace_synth_opts itrace_synth_opts;
	char event_copy[PERF_SAMPLE_MAX_SIZE];
	struct perf_file_section secs[HEADER_FEAT_BITS];
	struct guest_session guest_session;
	struct strlist *known_build_ids;
};

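/*
 * A sched_switch event saved by perf_inject__sched_switch(), keyed by thread
 * ID so that a later sched_stat event for the same thread can be replayed as
 * a sample (see perf_inject__sched_stat()).
 */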
struct event_entry {
	struct list_head node;
	u32 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags);

static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused,
					const char *str __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, session->data,
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

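/*
 * When samples are being synthesized from AUX area data (--itrace), the raw
 * AUX data embedded in a sample is no longer needed: copy the sample into
 * inject->event_copy with the aux_sample payload cut out and its recorded
 * size zeroed.
 */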
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);

static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container. Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nsinfo__clear_need_setns(nnsi);
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		mutex_lock(&dso->lock);
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
		mutex_unlock(&dso->lock);
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			  event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		perf_event__repipe(tool, event, sample, machine);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			  event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				     event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}

static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	perf_event__repipe_synth(session->tool, event);

	return perf_event__process_tracing_data(session, event);
}

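/*
 * Read the build ID from the DSO file itself. If that fails and the DSO was
 * mapped in another mount namespace, retry with the path prefixed by that
 * namespace's root (filename_with_chroot()).
 */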
static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	mutex_lock(&dso->lock);
	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	else if (dso->nsinfo) {
		char *new_name;

		new_name = filename_with_chroot(dso->nsinfo->pid,
						dso->long_name);
		if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
			dso->has_build_id = true;
		free(new_name);
	}
	nsinfo__mountns_exit(&nsc);
	mutex_unlock(&dso->lock);

	return dso->has_build_id ? 0 : -1;
}

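/*
 * --known-build-ids takes "<build-id> <path>" pairs. Parse them into a
 * strlist, dropping entries that lack a path or whose build ID is not an
 * even-length string of hex digits shorter than SBUILD_ID_SIZE.
 */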
static struct strlist *perf_inject__parse_known_build_ids(
	const char *known_build_ids_string)
{
	struct str_node *pos, *tmp;
	struct strlist *known_build_ids;
	int bid_len;

	known_build_ids = strlist__new(known_build_ids_string, NULL);
	if (known_build_ids == NULL)
		return NULL;
	strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
		const char *build_id, *dso_name;

		build_id = skip_spaces(pos->s);
		dso_name = strchr(build_id, ' ');
		if (dso_name == NULL) {
			strlist__remove(known_build_ids, pos);
			continue;
		}
		bid_len = dso_name - pos->s;
		dso_name = skip_spaces(dso_name);
		if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
			strlist__remove(known_build_ids, pos);
			continue;
		}
		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
			if (!isxdigit(build_id[2 * ix]) ||
			    !isxdigit(build_id[2 * ix + 1])) {
				strlist__remove(known_build_ids, pos);
				break;
			}
		}
	}
	return known_build_ids;
}

static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
					       struct dso *dso)
{
	struct str_node *pos;
	int bid_len;

	strlist__for_each_entry(pos, inject->known_build_ids) {
		const char *build_id, *dso_name;

		build_id = skip_spaces(pos->s);
		dso_name = strchr(build_id, ' ');
		bid_len = dso_name - pos->s;
		dso_name = skip_spaces(dso_name);
		if (strcmp(dso->long_name, dso_name))
			continue;
		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
			dso->bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
					     hex(build_id[2 * ix + 1]));
		}
		dso->bid.size = bid_len / 2;
		dso->has_build_id = 1;
		return true;
	}
	return false;
}

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (inject->known_build_ids != NULL &&
	    perf_inject__lookup_known_build_id(inject, dso))
		return 1;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
					      perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
			       struct perf_sample *sample,
			       struct evsel *evsel __maybe_unused,
			       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					     sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

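/*
 * For a sched_stat event, find the sched_switch event previously saved for
 * the same thread and re-emit it as a sample carrying the stat event's
 * period and timestamp.
 */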
static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
{
	if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
		return NULL;
	return &gs->vcpu[vcpu];
}

static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
{
	ssize_t ret = writen(gs->tmp_fd, buf, sz);

	return ret < 0 ? ret : 0;
}

static int guest_session__repipe(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct guest_session *gs = container_of(tool, struct guest_session, tool);

	return guest_session__output_bytes(gs, event, event->header.size);
}

static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
{
	struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
	int hash;

	if (!guest_tid)
		return -ENOMEM;

	guest_tid->tid = tid;
	guest_tid->vcpu = vcpu;
	hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&guest_tid->node, &gs->tids[hash]);

	return 0;
}

static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
				 union perf_event *event,
				 u64 offset __maybe_unused, void *data)
{
	struct guest_session *gs = data;
	unsigned int vcpu;
	struct guest_vcpu *guest_vcpu;
	int ret;

	if (event->header.type != PERF_RECORD_COMM ||
	    event->comm.pid != gs->machine_pid)
		return 0;

	/*
	 * The QEMU option -name debug-threads=on causes thread names to be
	 * formatted as below, although it is not an ABI. Also libvirt seems
	 * to use this by default. Here we rely on it to tell us which thread
	 * is which VCPU.
	 */
	ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
	if (ret <= 0)
		return ret;
	pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
		 event->comm.tid, event->comm.comm, vcpu);
	if (vcpu > INT_MAX) {
		pr_err("Invalid VCPU %u\n", vcpu);
		return -EINVAL;
	}
	guest_vcpu = guest_session__vcpu(gs, vcpu);
	if (!guest_vcpu)
		return -ENOMEM;
	if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
		pr_err("Fatal error: Two threads found with the same VCPU\n");
		return -EINVAL;
	}
	guest_vcpu->tid = event->comm.tid;

	return guest_session__map_tid(gs, event->comm.tid, vcpu);
}

static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
{
	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 host_peek_vm_comms_cb, gs);
}

static bool evlist__is_id_used(struct evlist *evlist, u64 id)
{
	return evlist__id2sid(evlist, id);
}

static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
{
	do {
		gs->highest_id += 1;
	} while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));

	return gs->highest_id;
}

static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
{
	struct guest_id *guest_id = zalloc(sizeof(*guest_id));
	int hash;

	if (!guest_id)
		return -ENOMEM;

	guest_id->id = id;
	guest_id->host_id = host_id;
	guest_id->vcpu = vcpu;
	hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&guest_id->node, &gs->heads[hash]);

	return 0;
}

static u64 evlist__find_highest_id(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 highest_id = 1;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			u64 id = evsel->core.id[j];

			if (id > highest_id)
				highest_id = id;
		}
	}

	return highest_id;
}

static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
{
	struct evlist *evlist = gs->session->evlist;
	struct evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			struct perf_sample_id *sid;
			u64 host_id;
			u64 id;

			id = evsel->core.id[j];
			sid = evlist__id2sid(evlist, id);
			if (!sid || sid->cpu.cpu == -1)
				continue;
			host_id = guest_session__allocate_new_id(gs, host_evlist);
			ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
{
	struct hlist_head *head;
	struct guest_id *guest_id;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &gs->heads[hash];

	hlist_for_each_entry(guest_id, head, node)
		if (guest_id->id == id)
			return guest_id;

	return NULL;
}

static int process_attr(struct perf_tool *tool, union perf_event *event,
			struct perf_sample *sample __maybe_unused,
			struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	return perf_event__process_attr(tool, event, &inject->session->evlist);
}

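/*
 * Synthesize a host attr event for a guest evsel, using the remapped host IDs
 * and setting exclude_host/!exclude_guest, then fix up each new host sample
 * ID to carry the QEMU thread, machine PID and VCPU it belongs to.
 */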
static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	struct perf_event_attr attr = evsel->core.attr;
	u64 *id_array;
	u32 *vcpu_array;
	int ret = -ENOMEM;
	u32 i;

	id_array = calloc(evsel->core.ids, sizeof(*id_array));
	if (!id_array)
		return -ENOMEM;

	vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
	if (!vcpu_array)
		goto out;

	for (i = 0; i < evsel->core.ids; i++) {
		u64 id = evsel->core.id[i];
		struct guest_id *guest_id = guest_session__lookup_id(gs, id);

		if (!guest_id) {
			pr_err("Failed to find guest id %"PRIu64"\n", id);
			ret = -EINVAL;
			goto out;
		}
		id_array[i] = guest_id->host_id;
		vcpu_array[i] = guest_id->vcpu;
	}

	attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
	attr.exclude_host = 1;
	attr.exclude_guest = 0;

	ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
					  id_array, process_attr);
	if (ret)
		pr_err("Failed to add guest attr.\n");

	for (i = 0; i < evsel->core.ids; i++) {
		struct perf_sample_id *sid;
		u32 vcpu = vcpu_array[i];

		sid = evlist__id2sid(inject->session->evlist, id_array[i]);
		/* Guest event is per-thread from the host point of view */
		sid->cpu.cpu = -1;
		sid->tid = gs->vcpu[vcpu].tid;
		sid->machine_pid = gs->machine_pid;
		sid->vcpu.cpu = vcpu;
	}
out:
	free(vcpu_array);
	free(id_array);
	return ret;
}

static int guest_session__add_attrs(struct guest_session *gs)
{
	struct evlist *evlist = gs->session->evlist;
	struct evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = guest_session__add_attr(gs, evsel);
		if (ret)
			return ret;
	}

	return 0;
}

static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
{
	struct perf_session *session = inject->session;
	struct evlist *evlist = session->evlist;
	struct machine *machine = &session->machines.host;
	size_t from = evlist->core.nr_entries - new_cnt;

	return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
						 evlist, machine, from);
}

static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
{
	struct hlist_head *head;
	struct guest_tid *guest_tid;
	int hash;

	hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
	head = &gs->tids[hash];

	hlist_for_each_entry(guest_tid, head, node)
		if (guest_tid->tid == tid)
			return guest_tid;

	return NULL;
}

static bool dso__is_in_kernel_space(struct dso *dso)
{
	if (dso__is_vdso(dso))
		return false;

	return dso__is_kcore(dso) ||
	       dso->kernel ||
	       is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
}

static u64 evlist__first_id(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.ids)
			return evsel->core.id[0];
	}
	return 0;
}

static int process_build_id(struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	return perf_event__process_build_id(inject->session, event);
}

static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
{
	struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
	u8 cpumode = dso__is_in_kernel_space(dso) ?
		     PERF_RECORD_MISC_GUEST_KERNEL :
		     PERF_RECORD_MISC_GUEST_USER;

	if (!machine)
		return -ENOMEM;

	dso->hit = 1;

	return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
					       process_build_id, machine);
}

static int guest_session__add_build_ids(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	struct machine *machine = &gs->session->machines.host;
	struct dso *dso;
	int ret;

	/* Build IDs will be put in the Build ID feature section */
	perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);

	dsos__for_each_with_build_id(dso, &machine->dsos.head) {
		ret = synthesize_build_id(inject, dso, gs->machine_pid);
		if (ret)
			return ret;
	}

	return 0;
}

static int guest_session__ksymbol_event(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample __maybe_unused,
					struct machine *machine __maybe_unused)
{
	struct guest_session *gs = container_of(tool, struct guest_session, tool);

	/* Only support out-of-line i.e. no BPF support */
	if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
		return 0;

	return guest_session__output_bytes(gs, event, event->header.size);
}

static int guest_session__start(struct guest_session *gs, const char *name, bool force)
{
	char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
	struct perf_session *session;
	int ret;

	/* Only these events will be injected */
	gs->tool.mmap = guest_session__repipe;
	gs->tool.mmap2 = guest_session__repipe;
	gs->tool.comm = guest_session__repipe;
	gs->tool.fork = guest_session__repipe;
	gs->tool.exit = guest_session__repipe;
	gs->tool.lost = guest_session__repipe;
	gs->tool.context_switch = guest_session__repipe;
	gs->tool.ksymbol = guest_session__ksymbol_event;
	gs->tool.text_poke = guest_session__repipe;
	/*
	 * Processing a build ID creates a struct dso with that build ID. Later,
	 * all guest dsos are iterated and the build IDs processed into the host
	 * session where they will be output to the Build ID feature section
	 * when the perf.data file header is written.
	 */
	gs->tool.build_id = perf_event__process_build_id;
	/* Process the id index to know what VCPU an ID belongs to */
	gs->tool.id_index = perf_event__process_id_index;

	gs->tool.ordered_events = true;
	gs->tool.ordering_requires_timestamps = true;

	gs->data.path = name;
	gs->data.force = force;
	gs->data.mode = PERF_DATA_MODE_READ;

	session = perf_session__new(&gs->data, &gs->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);
	gs->session = session;

	/*
	 * Initial events have zero'd ID samples. Get default ID sample size
	 * used for removing them.
	 */
	gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
	/* And default ID for adding back a host-compatible ID sample */
	gs->dflt_id = evlist__first_id(session->evlist);
	if (!gs->dflt_id) {
		pr_err("Guest data has no sample IDs");
		return -EINVAL;
	}

	/* Temporary file for guest events */
	gs->tmp_file_name = strdup(tmp_file_name);
	if (!gs->tmp_file_name)
		return -ENOMEM;
	gs->tmp_fd = mkstemp(gs->tmp_file_name);
	if (gs->tmp_fd < 0)
		return -errno;

	if (zstd_init(&gs->session->zstd_data, 0) < 0)
		pr_warning("Guest session decompression initialization failed.\n");

	/*
	 * perf does not support processing 2 sessions simultaneously, so output
	 * guest events to a temporary file.
	 */
	ret = perf_session__process_events(gs->session);
	if (ret)
		return ret;

	if (lseek(gs->tmp_fd, 0, SEEK_SET))
		return -errno;

	return 0;
}

/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
{
	struct hlist_node *pos, *n;
	size_t i;

	for (i = 0; i < hlist_sz; ++i) {
		hlist_for_each_safe(pos, n, &heads[i]) {
			hlist_del(pos);
			free(pos);
		}
	}
}

static void guest_session__exit(struct guest_session *gs)
{
	if (gs->session) {
		perf_session__delete(gs->session);
		free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
		free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
	}
	if (gs->tmp_file_name) {
		if (gs->tmp_fd >= 0)
			close(gs->tmp_fd);
		unlink(gs->tmp_file_name);
		free(gs->tmp_file_name);
	}
	free(gs->vcpu);
	free(gs->perf_data_file);
}

static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
{
	tc->time_shift = time_conv->time_shift;
	tc->time_mult = time_conv->time_mult;
	tc->time_zero = time_conv->time_zero;
	tc->time_cycles = time_conv->time_cycles;
	tc->time_mask = time_conv->time_mask;
	tc->cap_user_time_zero = time_conv->cap_user_time_zero;
	tc->cap_user_time_short = time_conv->cap_user_time_short;
}

static void guest_session__get_tc(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);

	get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
	get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
}

static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
{
	u64 tsc;

	if (!guest_time) {
		*host_time = 0;
		return;
	}

	if (gs->guest_tc.cap_user_time_zero)
		tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
	else
		tsc = guest_time;

	/*
	 * This is the correct order of operations for x86 if the TSC Offset and
	 * Multiplier values are used.
	 */
	tsc -= gs->time_offset;
	tsc /= gs->time_scale;

	if (gs->host_tc.cap_user_time_zero)
		*host_time = tsc_to_perf_time(tsc, &gs->host_tc);
	else
		*host_time = tsc;
}

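/*
 * Read the next event from the temporary guest file into gs->ev, parse its
 * sample and convert its timestamp from guest time to host time. A zero
 * header size is left in place to signal EOF to the caller.
 */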
static int guest_session__fetch(struct guest_session *gs)
{
	void *buf = gs->ev.event_buf;
	struct perf_event_header *hdr = buf;
	size_t hdr_sz = sizeof(*hdr);
	ssize_t ret;

	ret = readn(gs->tmp_fd, buf, hdr_sz);
	if (ret < 0)
		return ret;

	if (!ret) {
		/* Zero size means EOF */
		hdr->size = 0;
		return 0;
	}

	buf += hdr_sz;

	ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
	if (ret < 0)
		return ret;

	gs->ev.event = (union perf_event *)gs->ev.event_buf;
	gs->ev.sample.time = 0;

	if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
		pr_err("Unexpected type fetching guest event");
		return 0;
	}

	ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
	if (ret) {
		pr_err("Parse failed fetching guest event");
		return ret;
	}

	if (!gs->have_tc) {
		guest_session__get_tc(gs);
		gs->have_tc = true;
	}

	guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);

	return 0;
}

static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
				    const struct perf_sample *sample)
{
	struct evsel *evsel;
	void *array;
	int ret;

	evsel = evlist__id2evsel(evlist, sample->id);
	array = ev;

	if (!evsel) {
		pr_err("No evsel for id %"PRIu64"\n", sample->id);
		return -EINVAL;
	}

	array += ev->header.size;
	ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
	if (ret < 0)
		return ret;

	if (ret & 7) {
		pr_err("Bad id sample size %d\n", ret);
		return -EINVAL;
	}

	ev->header.size += ret;

	return 0;
}

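/*
 * Inject guest events with timestamps up to and including 'timestamp': switch
 * the cpumode to its guest equivalent, strip the guest ID sample, remap the
 * sample ID and CPU to their host values, and append a new host ID sample
 * before writing the event out.
 */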
static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	int ret;

	if (!gs->ready)
		return 0;

	while (1) {
		struct perf_sample *sample;
		struct guest_id *guest_id;
		union perf_event *ev;
		u16 id_hdr_size;
		u8 cpumode;
		u64 id;

		if (!gs->fetched) {
			ret = guest_session__fetch(gs);
			if (ret)
				return ret;
			gs->fetched = true;
		}

		ev = gs->ev.event;
		sample = &gs->ev.sample;

		if (!ev->header.size)
			return 0; /* EOF */

		if (sample->time > timestamp)
			return 0;

		/* Change cpumode to guest */
		cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
		if (cpumode & PERF_RECORD_MISC_USER)
			cpumode = PERF_RECORD_MISC_GUEST_USER;
		else
			cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
		ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
		ev->header.misc |= cpumode;

		id = sample->id;
		if (!id) {
			id = gs->dflt_id;
			id_hdr_size = gs->dflt_id_hdr_size;
		} else {
			struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);

			id_hdr_size = evsel__id_hdr_size(evsel);
		}

		if (id_hdr_size & 7) {
			pr_err("Bad id_hdr_size %u\n", id_hdr_size);
			return -EINVAL;
		}

		if (ev->header.size & 7) {
			pr_err("Bad event size %u\n", ev->header.size);
			return -EINVAL;
		}

		/* Remove guest id sample */
		ev->header.size -= id_hdr_size;

		if (ev->header.size & 7) {
			pr_err("Bad raw event size %u\n", ev->header.size);
			return -EINVAL;
		}

		guest_id = guest_session__lookup_id(gs, id);
		if (!guest_id) {
			pr_err("Guest event with unknown id %llu\n",
			       (unsigned long long)id);
			return -EINVAL;
		}

		/* Change to host ID to avoid conflicting ID values */
		sample->id = guest_id->host_id;
		sample->stream_id = guest_id->host_id;

		if (sample->cpu != (u32)-1) {
			if (sample->cpu >= gs->vcpu_cnt) {
				pr_err("Guest event with unknown VCPU %u\n",
				       sample->cpu);
				return -EINVAL;
			}
			/* Change to host CPU instead of guest VCPU */
			sample->cpu = gs->vcpu[sample->cpu].cpu;
		}

		/* New id sample with new ID and CPU */
		ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
		if (ret)
			return ret;

		if (ev->header.size & 7) {
			pr_err("Bad new event size %u\n", ev->header.size);
			return -EINVAL;
		}

		gs->fetched = false;

		ret = output_bytes(inject, ev, ev->header.size);
		if (ret)
			return ret;
	}
}

static int guest_session__flush_events(struct guest_session *gs)
{
	return guest_session__inject_events(gs, -1);
}

static int host__repipe(struct perf_tool *tool,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	int ret;

	ret = guest_session__inject_events(&inject->guest_session, sample->time);
	if (ret)
		return ret;

	return perf_event__repipe(tool, event, sample, machine);
}

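/*
 * After the host header has been processed: find the QEMU VCPU threads, map
 * guest sample IDs to newly allocated host IDs, synthesize the corresponding
 * attr and id_index events and the guest build IDs, then start injecting
 * guest events, beginning with those that have no timestamp.
 */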
static int host__finished_init(struct perf_session *session, union perf_event *event)
{
	struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
	struct guest_session *gs = &inject->guest_session;
	int ret;

	/*
	 * Peek through host COMM events to find QEMU threads and the VCPU they
	 * are running.
	 */
	ret = host_peek_vm_comms(session, gs);
	if (ret)
		return ret;

	if (!gs->vcpu_cnt) {
		pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
		return -EINVAL;
	}

	/*
	 * Allocate new (unused) host sample IDs and map them to the guest IDs.
	 */
	gs->highest_id = evlist__find_highest_id(session->evlist);
	ret = guest_session__map_ids(gs, session->evlist);
	if (ret)
		return ret;

	ret = guest_session__add_attrs(gs);
	if (ret)
		return ret;

	ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
	if (ret) {
		pr_err("Failed to synthesize id_index\n");
		return ret;
	}

	ret = guest_session__add_build_ids(gs);
	if (ret) {
		pr_err("Failed to add guest build IDs\n");
		return ret;
	}

	gs->ready = true;

	ret = guest_session__inject_events(gs, 0);
	if (ret)
		return ret;

	return perf_event__repipe_op2_synth(session, event);
}

/*
 * Obey finished-round ordering. The FINISHED_ROUND event is first processed
 * which flushes host events to file up until the last flush time. Then inject
 * guest events up to the same time. Finally write out the FINISHED_ROUND event
 * itself.
 */
static int host__finished_round(struct perf_tool *tool,
				union perf_event *event,
				struct ordered_events *oe)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	int ret = perf_event__process_finished_round(tool, event, oe);
	u64 timestamp = ordered_events__last_flush_time(oe);

	if (ret)
		return ret;

	ret = guest_session__inject_events(&inject->guest_session, timestamp);
	if (ret)
		return ret;

	return perf_event__repipe_oe_synth(tool, event, oe);
}

static int host__context_switch(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	struct guest_session *gs = &inject->guest_session;
	u32 pid = event->context_switch.next_prev_pid;
	u32 tid = event->context_switch.next_prev_tid;
	struct guest_tid *guest_tid;
	u32 vcpu;

	if (out || pid != gs->machine_pid)
		goto out;

	guest_tid = guest_session__lookup_tid(gs, tid);
	if (!guest_tid)
		goto out;

	if (sample->cpu == (u32)-1) {
		pr_err("Switch event does not have CPU\n");
		return -EINVAL;
	}

	vcpu = guest_tid->vcpu;
	if (vcpu >= gs->vcpu_cnt)
		return -EINVAL;

	/* Guest is switching in, record which CPU the VCPU is now running on */
	gs->vcpu[vcpu].cpu = sample->cpu;
out:
	return host__repipe(tool, event, sample, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}

parse_guest_data(const struct option * opt,const char * str,int unset)1745 static int parse_guest_data(const struct option *opt, const char *str, int unset)
1746 {
1747 struct perf_inject *inject = opt->value;
1748 struct guest_session *gs = &inject->guest_session;
1749 char *tok;
1750 char *s;
1751
1752 if (unset)
1753 return 0;
1754
1755 if (!str)
1756 goto bad_args;
1757
1758 s = strdup(str);
1759 if (!s)
1760 return -ENOMEM;
1761
1762 gs->perf_data_file = strsep(&s, ",");
1763 if (!gs->perf_data_file)
1764 goto bad_args;
1765
1766 gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
1767 if (gs->copy_kcore_dir)
1768 inject->output.is_dir = true;
1769
1770 tok = strsep(&s, ",");
1771 if (!tok)
1772 goto bad_args;
1773 gs->machine_pid = strtoul(tok, NULL, 0);
1774 if (!inject->guest_session.machine_pid)
1775 goto bad_args;
1776
1777 gs->time_scale = 1;
1778
1779 tok = strsep(&s, ",");
1780 if (!tok)
1781 goto out;
1782 gs->time_offset = strtoull(tok, NULL, 0);
1783
1784 tok = strsep(&s, ",");
1785 if (!tok)
1786 goto out;
1787 gs->time_scale = strtod(tok, NULL);
1788 if (!gs->time_scale)
1789 goto bad_args;
1790 out:
1791 return 0;
1792
1793 bad_args:
1794 pr_err("--guest-data option requires guest perf.data file name, "
1795 "guest machine PID, and optionally guest timestamp offset, "
1796 "and guest timestamp scale factor, separated by commas.\n");
1797 return -1;
1798 }
1799
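/*
 * Record the offset and size of each feature section in the input file so
 * that unchanged sections can be copied verbatim to the output file later.
 */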
1800 static int save_section_info_cb(struct perf_file_section *section,
1801 struct perf_header *ph __maybe_unused,
1802 int feat, int fd __maybe_unused, void *data)
1803 {
1804 struct perf_inject *inject = data;
1805
1806 inject->secs[feat] = *section;
1807 return 0;
1808 }
1809
1810 static int save_section_info(struct perf_inject *inject)
1811 {
1812 struct perf_header *header = &inject->session->header;
1813 int fd = perf_data__fd(inject->session->data);
1814
1815 return perf_header__process_sections(header, fd, inject, save_section_info_cb);
1816 }
1817
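/* Return true for feature sections that are copied unchanged to the output */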
1818 static bool keep_feat(int feat)
1819 {
1820 switch (feat) {
1821 /* Keep original information that describes the machine or software */
1822 case HEADER_TRACING_DATA:
1823 case HEADER_HOSTNAME:
1824 case HEADER_OSRELEASE:
1825 case HEADER_VERSION:
1826 case HEADER_ARCH:
1827 case HEADER_NRCPUS:
1828 case HEADER_CPUDESC:
1829 case HEADER_CPUID:
1830 case HEADER_TOTAL_MEM:
1831 case HEADER_CPU_TOPOLOGY:
1832 case HEADER_NUMA_TOPOLOGY:
1833 case HEADER_PMU_MAPPINGS:
1834 case HEADER_CACHE:
1835 case HEADER_MEM_TOPOLOGY:
1836 case HEADER_CLOCKID:
1837 case HEADER_BPF_PROG_INFO:
1838 case HEADER_BPF_BTF:
1839 case HEADER_CPU_PMU_CAPS:
1840 case HEADER_CLOCK_DATA:
1841 case HEADER_HYBRID_TOPOLOGY:
1842 case HEADER_PMU_CAPS:
1843 return true;
1844 /* Information that can be updated */
1845 case HEADER_BUILD_ID:
1846 case HEADER_CMDLINE:
1847 case HEADER_EVENT_DESC:
1848 case HEADER_BRANCH_STACK:
1849 case HEADER_GROUP_DESC:
1850 case HEADER_AUXTRACE:
1851 case HEADER_STAT:
1852 case HEADER_SAMPLE_TIME:
1853 case HEADER_DIR_FORMAT:
1854 case HEADER_COMPRESSED:
1855 default:
1856 return false;
1857 }
1858 }
1859
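/* Read exactly sz bytes from fd at offset offs */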
1860 static int read_file(int fd, u64 offs, void *buf, size_t sz)
1861 {
1862 ssize_t ret = preadn(fd, buf, sz, offs);
1863
1864 if (ret < 0)
1865 return -errno;
1866 if ((size_t)ret != sz)
1867 return -EINVAL;
1868 return 0;
1869 }
1870
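/* Copy one feature section verbatim from the input file via the feature writer */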
1871 static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
1872 {
1873 int fd = perf_data__fd(inject->session->data);
1874 u64 offs = inject->secs[feat].offset;
1875 size_t sz = inject->secs[feat].size;
1876 void *buf = malloc(sz);
1877 int ret;
1878
1879 if (!buf)
1880 return -ENOMEM;
1881
1882 ret = read_file(fd, offs, buf, sz);
1883 if (ret)
1884 goto out_free;
1885
1886 ret = fw->write(fw, buf, sz);
1887 out_free:
1888 free(buf);
1889 return ret;
1890 }
1891
1892 struct inject_fc {
1893 struct feat_copier fc;
1894 struct perf_inject *inject;
1895 };
1896
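/*
 * Header-write callback: copy the original feature section when it exists and
 * keep_feat() says it is unchanged. Returns 1 if copied, 0 to have the feature
 * rewritten, or a negative error code.
 */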
1897 static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
1898 {
1899 struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
1900 struct perf_inject *inject = inj_fc->inject;
1901 int ret;
1902
1903 if (!inject->secs[feat].offset ||
1904 !keep_feat(feat))
1905 return 0;
1906
1907 ret = feat_copy(inject, feat, fw);
1908 if (ret < 0)
1909 return ret;
1910
1911 return 1; /* Feature section copied */
1912 }
1913
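/* Copy kcore_dir* from the input data directory into the output directory */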
1914 static int copy_kcore_dir(struct perf_inject *inject)
1915 {
1916 char *cmd;
1917 int ret;
1918
1919 ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
1920 inject->input_name, inject->output.path);
1921 if (ret < 0)
1922 return ret;
1923 pr_debug("%s\n", cmd);
1924 ret = system(cmd);
1925 free(cmd);
1926 return ret;
1927 }
1928
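/* Copy the guest's kcore_dir into the output directory as kcore_dir__<machine PID> */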
1929 static int guest_session__copy_kcore_dir(struct guest_session *gs)
1930 {
1931 struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1932 char *cmd;
1933 int ret;
1934
1935 ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
1936 gs->perf_data_file, inject->output.path, gs->machine_pid);
1937 if (ret < 0)
1938 return ret;
1939 pr_debug("%s\n", cmd);
1940 ret = system(cmd);
1941 free(cmd);
1942 return ret;
1943 }
1944
1945 static int output_fd(struct perf_inject *inject)
1946 {
1947 return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
1948 }
1949
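/*
 * Set up the tool callbacks according to the requested injection (build IDs,
 * sched_stat, itrace, VM time correlation or guest data), process all events,
 * then rewrite the header and, where requested, copy kcore directories.
 */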
1950 static int __cmd_inject(struct perf_inject *inject)
1951 {
1952 int ret = -EINVAL;
1953 struct guest_session *gs = &inject->guest_session;
1954 struct perf_session *session = inject->session;
1955 int fd = output_fd(inject);
1956 u64 output_data_offset;
1957
1958 signal(SIGINT, sig_handler);
1959
1960 if (inject->build_ids || inject->sched_stat ||
1961 inject->itrace_synth_opts.set || inject->build_id_all) {
1962 inject->tool.mmap = perf_event__repipe_mmap;
1963 inject->tool.mmap2 = perf_event__repipe_mmap2;
1964 inject->tool.fork = perf_event__repipe_fork;
1965 inject->tool.tracing_data = perf_event__repipe_tracing_data;
1966 }
1967
1968 output_data_offset = perf_session__data_offset(session->evlist);
1969
1970 if (inject->build_id_all) {
1971 inject->tool.mmap = perf_event__repipe_buildid_mmap;
1972 inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
1973 } else if (inject->build_ids) {
1974 inject->tool.sample = perf_event__inject_buildid;
1975 } else if (inject->sched_stat) {
1976 struct evsel *evsel;
1977
1978 evlist__for_each_entry(session->evlist, evsel) {
1979 const char *name = evsel__name(evsel);
1980
1981 if (!strcmp(name, "sched:sched_switch")) {
1982 if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
1983 return -EINVAL;
1984
1985 evsel->handler = perf_inject__sched_switch;
1986 } else if (!strcmp(name, "sched:sched_process_exit"))
1987 evsel->handler = perf_inject__sched_process_exit;
1988 else if (!strncmp(name, "sched:sched_stat_", 17))
1989 evsel->handler = perf_inject__sched_stat;
1990 }
1991 } else if (inject->itrace_synth_opts.vm_time_correlation) {
1992 session->itrace_synth_opts = &inject->itrace_synth_opts;
1993 memset(&inject->tool, 0, sizeof(inject->tool));
1994 inject->tool.id_index = perf_event__process_id_index;
1995 inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
1996 inject->tool.auxtrace = perf_event__process_auxtrace;
1997 inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
1998 inject->tool.ordered_events = true;
1999 inject->tool.ordering_requires_timestamps = true;
2000 } else if (inject->itrace_synth_opts.set) {
2001 session->itrace_synth_opts = &inject->itrace_synth_opts;
2002 inject->itrace_synth_opts.inject = true;
2003 inject->tool.comm = perf_event__repipe_comm;
2004 inject->tool.namespaces = perf_event__repipe_namespaces;
2005 inject->tool.exit = perf_event__repipe_exit;
2006 inject->tool.id_index = perf_event__process_id_index;
2007 inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
2008 inject->tool.auxtrace = perf_event__process_auxtrace;
2009 inject->tool.aux = perf_event__drop_aux;
2010 inject->tool.itrace_start = perf_event__drop_aux;
2011 inject->tool.aux_output_hw_id = perf_event__drop_aux;
2012 inject->tool.ordered_events = true;
2013 inject->tool.ordering_requires_timestamps = true;
2014 /* Allow space in the header for new attributes */
2015 output_data_offset = roundup(8192 + session->header.data_offset, 4096);
2016 if (inject->strip)
2017 strip_init(inject);
2018 } else if (gs->perf_data_file) {
2019 char *name = gs->perf_data_file;
2020
2021 /*
2022 * Not strictly necessary, but keep these events in order wrt
2023 * guest events.
2024 */
2025 inject->tool.mmap = host__repipe;
2026 inject->tool.mmap2 = host__repipe;
2027 inject->tool.comm = host__repipe;
2028 inject->tool.fork = host__repipe;
2029 inject->tool.exit = host__repipe;
2030 inject->tool.lost = host__repipe;
2031 inject->tool.context_switch = host__repipe;
2032 inject->tool.ksymbol = host__repipe;
2033 inject->tool.text_poke = host__repipe;
2034 /*
2035 * Once the host session has initialized, set up sample ID
2036 * mapping and feed in guest attrs, build IDs and initial
2037 * events.
2038 */
2039 inject->tool.finished_init = host__finished_init;
2040 /* Obey finished round ordering */
2041 inject->tool.finished_round = host__finished_round,
2042 /* Keep track of which CPU a VCPU is runnng on */
2043 inject->tool.context_switch = host__context_switch;
2044 /*
2045 * Must order events to be able to obey finished round
2046 * ordering.
2047 */
2048 inject->tool.ordered_events = true;
2049 inject->tool.ordering_requires_timestamps = true;
2050 /* Set up a separate session to process guest perf.data file */
2051 ret = guest_session__start(gs, name, session->data->force);
2052 if (ret) {
2053 pr_err("Failed to process %s, error %d\n", name, ret);
2054 return ret;
2055 }
2056 /* Allow space in the header for guest attributes */
2057 output_data_offset += gs->session->header.data_offset;
2058 output_data_offset = roundup(output_data_offset, 4096);
2059 }
2060
2061 if (!inject->itrace_synth_opts.set)
2062 auxtrace_index__free(&session->auxtrace_index);
2063
2064 if (!inject->is_pipe && !inject->in_place_update)
2065 lseek(fd, output_data_offset, SEEK_SET);
2066
2067 ret = perf_session__process_events(session);
2068 if (ret)
2069 return ret;
2070
2071 if (gs->session) {
2072 /*
2073 * Remaining guest events have later timestamps. Flush them
2074 * out to file.
2075 */
2076 ret = guest_session__flush_events(gs);
2077 if (ret) {
2078 pr_err("Failed to flush guest events\n");
2079 return ret;
2080 }
2081 }
2082
2083 if (!inject->is_pipe && !inject->in_place_update) {
2084 struct inject_fc inj_fc = {
2085 .fc.copy = feat_copy_cb,
2086 .inject = inject,
2087 };
2088
2089 if (inject->build_ids)
2090 perf_header__set_feat(&session->header,
2091 HEADER_BUILD_ID);
2092 /*
2093 * Keep all buildids when there is unprocessed AUX data because
2094 * it is not known which ones the AUX trace hits.
2095 */
2096 if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
2097 inject->have_auxtrace && !inject->itrace_synth_opts.set)
2098 dsos__hit_all(session);
2099 /*
2100 * The AUX areas have been removed and replaced with
2101 * synthesized hardware events, so clear the feature flag.
2102 */
2103 if (inject->itrace_synth_opts.set) {
2104 perf_header__clear_feat(&session->header,
2105 HEADER_AUXTRACE);
2106 if (inject->itrace_synth_opts.last_branch ||
2107 inject->itrace_synth_opts.add_last_branch)
2108 perf_header__set_feat(&session->header,
2109 HEADER_BRANCH_STACK);
2110 }
2111 session->header.data_offset = output_data_offset;
2112 session->header.data_size = inject->bytes_written;
2113 perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);
2114
2115 if (inject->copy_kcore_dir) {
2116 ret = copy_kcore_dir(inject);
2117 if (ret) {
2118 pr_err("Failed to copy kcore\n");
2119 return ret;
2120 }
2121 }
2122 if (gs->copy_kcore_dir) {
2123 ret = guest_session__copy_kcore_dir(gs);
2124 if (ret) {
2125 pr_err("Failed to copy guest kcore\n");
2126 return ret;
2127 }
2128 }
2129 }
2130
2131 return ret;
2132 }
2133
2134 int cmd_inject(int argc, const char **argv)
2135 {
2136 struct perf_inject inject = {
2137 .tool = {
2138 .sample = perf_event__repipe_sample,
2139 .read = perf_event__repipe_sample,
2140 .mmap = perf_event__repipe,
2141 .mmap2 = perf_event__repipe,
2142 .comm = perf_event__repipe,
2143 .namespaces = perf_event__repipe,
2144 .cgroup = perf_event__repipe,
2145 .fork = perf_event__repipe,
2146 .exit = perf_event__repipe,
2147 .lost = perf_event__repipe,
2148 .lost_samples = perf_event__repipe,
2149 .aux = perf_event__repipe,
2150 .itrace_start = perf_event__repipe,
2151 .aux_output_hw_id = perf_event__repipe,
2152 .context_switch = perf_event__repipe,
2153 .throttle = perf_event__repipe,
2154 .unthrottle = perf_event__repipe,
2155 .ksymbol = perf_event__repipe,
2156 .bpf = perf_event__repipe,
2157 .text_poke = perf_event__repipe,
2158 .attr = perf_event__repipe_attr,
2159 .event_update = perf_event__repipe_event_update,
2160 .tracing_data = perf_event__repipe_op2_synth,
2161 .finished_round = perf_event__repipe_oe_synth,
2162 .build_id = perf_event__repipe_op2_synth,
2163 .id_index = perf_event__repipe_op2_synth,
2164 .auxtrace_info = perf_event__repipe_op2_synth,
2165 .auxtrace_error = perf_event__repipe_op2_synth,
2166 .time_conv = perf_event__repipe_op2_synth,
2167 .thread_map = perf_event__repipe_op2_synth,
2168 .cpu_map = perf_event__repipe_op2_synth,
2169 .stat_config = perf_event__repipe_op2_synth,
2170 .stat = perf_event__repipe_op2_synth,
2171 .stat_round = perf_event__repipe_op2_synth,
2172 .feature = perf_event__repipe_op2_synth,
2173 .finished_init = perf_event__repipe_op2_synth,
2174 .compressed = perf_event__repipe_op4_synth,
2175 .auxtrace = perf_event__repipe_auxtrace,
2176 },
2177 .input_name = "-",
2178 .samples = LIST_HEAD_INIT(inject.samples),
2179 .output = {
2180 .path = "-",
2181 .mode = PERF_DATA_MODE_WRITE,
2182 .use_stdio = true,
2183 },
2184 };
2185 struct perf_data data = {
2186 .mode = PERF_DATA_MODE_READ,
2187 .use_stdio = true,
2188 };
2189 int ret;
2190 bool repipe = true;
2191 const char *known_build_ids = NULL;
2192
2193 struct option options[] = {
2194 OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
2195 "Inject build-ids into the output stream"),
2196 OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
2197 "Inject build-ids of all DSOs into the output stream"),
2198 OPT_STRING(0, "known-build-ids", &known_build_ids,
2199 "buildid path [,buildid path...]",
2200 "build-ids to use for given paths"),
2201 OPT_STRING('i', "input", &inject.input_name, "file",
2202 "input file name"),
2203 OPT_STRING('o', "output", &inject.output.path, "file",
2204 "output file name"),
2205 OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
2206 "Merge sched-stat and sched-switch for getting events "
2207 "where and how long tasks slept"),
2208 #ifdef HAVE_JITDUMP
2209 OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
2210 #endif
2211 OPT_INCR('v', "verbose", &verbose,
2212 "be more verbose (show build ids, etc)"),
2213 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
2214 "file", "vmlinux pathname"),
2215 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
2216 "don't load vmlinux even if found"),
2217 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
2218 "kallsyms pathname"),
2219 OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
2220 OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
2221 NULL, "opts", "Instruction Tracing options\n"
2222 ITRACE_HELP,
2223 itrace_parse_synth_opts),
2224 OPT_BOOLEAN(0, "strip", &inject.strip,
2225 "strip non-synthesized events (use with --itrace)"),
2226 OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
2227 "correlate time between VM guests and the host",
2228 parse_vm_time_correlation),
2229 OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
2230 "inject events from a guest perf.data file",
2231 parse_guest_data),
2232 OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
2233 "guest mount directory under which every guest os"
2234 " instance has a subdir"),
2235 OPT_END()
2236 };
2237 const char * const inject_usage[] = {
2238 "perf inject [<options>]",
2239 NULL
2240 };
2241 #ifndef HAVE_JITDUMP
2242 set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
2243 #endif
2244 argc = parse_options(argc, argv, options, inject_usage, 0);
2245
2246 /*
2247 * Any (unrecognized) arguments left?
2248 */
2249 if (argc)
2250 usage_with_options(inject_usage, options);
2251
2252 if (inject.strip && !inject.itrace_synth_opts.set) {
2253 pr_err("--strip option requires --itrace option\n");
2254 return -1;
2255 }
2256
2257 if (symbol__validate_sym_arguments())
2258 return -1;
2259
2260 if (inject.in_place_update) {
2261 if (!strcmp(inject.input_name, "-")) {
2262 pr_err("Input file name required for in-place updating\n");
2263 return -1;
2264 }
2265 if (strcmp(inject.output.path, "-")) {
2266 pr_err("Output file name must not be specified for in-place updating\n");
2267 return -1;
2268 }
2269 if (!data.force && !inject.in_place_update_dry_run) {
2270 pr_err("The input file would be updated in place, "
2271 "the --force option is required.\n");
2272 return -1;
2273 }
2274 if (!inject.in_place_update_dry_run)
2275 data.in_place_update = true;
2276 } else {
2277 if (strcmp(inject.output.path, "-") && !inject.strip &&
2278 has_kcore_dir(inject.input_name)) {
2279 inject.output.is_dir = true;
2280 inject.copy_kcore_dir = true;
2281 }
2282 if (perf_data__open(&inject.output)) {
2283 perror("failed to create output file");
2284 return -1;
2285 }
2286 }
2287
2288 data.path = inject.input_name;
2289 if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
2290 inject.is_pipe = true;
2291 /*
2292 * Do not repipe header when input is a regular file
2293 * since either it can rewrite the header at the end
2294 * or write a new pipe header.
2295 */
2296 if (strcmp(inject.input_name, "-"))
2297 repipe = false;
2298 }
2299
2300 inject.session = __perf_session__new(&data, repipe,
2301 output_fd(&inject),
2302 &inject.tool);
2303 if (IS_ERR(inject.session)) {
2304 ret = PTR_ERR(inject.session);
2305 goto out_close_output;
2306 }
2307
2308 if (zstd_init(&(inject.session->zstd_data), 0) < 0)
2309 pr_warning("Decompression initialization failed.\n");
2310
2311 /* Save original section info before feature bits change */
2312 ret = save_section_info(&inject);
2313 if (ret)
2314 goto out_delete;
2315
2316 if (!data.is_pipe && inject.output.is_pipe) {
2317 ret = perf_header__write_pipe(perf_data__fd(&inject.output));
2318 if (ret < 0) {
2319 pr_err("Couldn't write a new pipe header.\n");
2320 goto out_delete;
2321 }
2322
2323 ret = perf_event__synthesize_for_pipe(&inject.tool,
2324 inject.session,
2325 &inject.output,
2326 perf_event__repipe);
2327 if (ret < 0)
2328 goto out_delete;
2329 }
2330
2331 if (inject.build_ids && !inject.build_id_all) {
2332 /*
2333 * Order events to make sure the mmap records are
2334 * processed correctly, especially mmaps of jitted
2335 * code. We cannot generate the buildid hit list and
2336 * inject the jit mmaps at the same time for now.
2337 */
2338 inject.tool.ordered_events = true;
2339 inject.tool.ordering_requires_timestamps = true;
2340 if (known_build_ids != NULL) {
2341 inject.known_build_ids =
2342 perf_inject__parse_known_build_ids(known_build_ids);
2343
2344 if (inject.known_build_ids == NULL) {
2345 pr_err("Couldn't parse known build ids.\n");
2346 goto out_delete;
2347 }
2348 }
2349 }
2350
2351 if (inject.sched_stat) {
2352 inject.tool.ordered_events = true;
2353 }
2354
2355 #ifdef HAVE_JITDUMP
2356 if (inject.jit_mode) {
2357 inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
2358 inject.tool.mmap = perf_event__jit_repipe_mmap;
2359 inject.tool.ordered_events = true;
2360 inject.tool.ordering_requires_timestamps = true;
2361 /*
2362 * JIT MMAP injection injects all MMAP events in one go, so it
2363 * does not obey finished_round semantics.
2364 */
2365 inject.tool.finished_round = perf_event__drop_oe;
2366 }
2367 #endif
2368 ret = symbol__init(&inject.session->header.env);
2369 if (ret < 0)
2370 goto out_delete;
2371
2372 ret = __cmd_inject(&inject);
2373
2374 guest_session__exit(&inject.guest_session);
2375
2376 out_delete:
2377 strlist__delete(inject.known_build_ids);
2378 zstd_fini(&(inject.session->zstd_data));
2379 perf_session__delete(inject.session);
2380 out_close_output:
2381 if (!inject.in_place_update)
2382 perf_data__close(&inject.output);
2383 free(inject.itrace_synth_opts.vm_tm_corr_args);
2384 return ret;
2385 }
2386