// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
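
/*
 * Illustrative invocations (assumed typical usage; see perf-inject(1) for
 * the authoritative list of options and examples):
 *
 *   # inject build-ids into a piped event stream
 *   perf record -o - -- command | perf inject -b | perf report -i -
 *
 *   # merge jitdump files and their synthesized mmaps into a new perf.data
 *   perf inject --jit -i perf.data -o perf.data.jitted
 *
 *   # decode an AUX area trace into regular samples
 *   perf inject --itrace -i perf.data -o perf.data.new
 */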
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <errno.h>
#include <signal.h>

struct perf_inject {
	struct perf_tool	tool;
	struct perf_session	*session;
	bool			build_ids;
	bool			build_id_all;
	bool			sched_stat;
	bool			have_auxtrace;
	bool			strip;
	bool			jit_mode;
	bool			in_place_update;
	bool			in_place_update_dry_run;
	bool			is_pipe;
	const char		*input_name;
	struct perf_data	output;
	u64			bytes_written;	/* payload bytes written; becomes header data_size */
	u64			aux_id;		/* sample id of the first AUX event seen */
	struct list_head	samples;	/* sched_switch events saved for --sched-stat */
	struct itrace_synth_opts itrace_synth_opts;
	char			event_copy[PERF_SAMPLE_MAX_SIZE];	/* scratch buffer for trimmed samples */
};

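/*
 * A sched_switch event saved for --sched-stat processing: entries are queued
 * on perf_inject::samples and matched by tid when a sched_stat_* or
 * sched_process_exit event for the same task arrives.
 */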
struct event_entry {
	struct list_head node;
	u32		 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags);

static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, session->data,
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

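/*
 * Make a copy of the given sample with its PERF_SAMPLE_AUX data removed:
 * the bytes before and after the AUX data are copied into event_copy and
 * the aux_sample size field (the u64 immediately preceding the data) is
 * zeroed, so the trimmed sample can be repiped as-is.
 */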
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);

static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container.  Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nnsi->need_setns = false;
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			  event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		perf_event__repipe(tool, event, sample, machine);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			  event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				     event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}

static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	int err;

	perf_event__repipe_synth(session->tool, event);
	err = perf_event__process_tracing_data(session, event);

	return err;
}

static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	nsinfo__mountns_exit(&nsc);

	return dso->has_build_id ? 0 : -1;
}

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags)
{
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
					      perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
			       struct perf_sample *sample,
			       struct evsel *evsel __maybe_unused,
			       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					     sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

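/*
 * --sched-stat: when a sched_stat_* sample arrives, look up the most
 * recently saved sched_switch event for the same task, re-synthesize it
 * with the period and timestamp of the sched_stat sample, and emit that
 * instead, so reports show where and for how long the task slept.
 */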
static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}

static int output_fd(struct perf_inject *inject)
{
	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
}

static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	int fd = output_fd(inject);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (!inject->is_pipe && !inject->in_place_update) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample = perf_event__repipe_sample,
			.read = perf_event__repipe_sample,
			.mmap = perf_event__repipe,
			.mmap2 = perf_event__repipe,
			.comm = perf_event__repipe,
			.namespaces = perf_event__repipe,
			.cgroup = perf_event__repipe,
			.fork = perf_event__repipe,
			.exit = perf_event__repipe,
			.lost = perf_event__repipe,
			.lost_samples = perf_event__repipe,
			.aux = perf_event__repipe,
			.itrace_start = perf_event__repipe,
			.context_switch = perf_event__repipe,
			.throttle = perf_event__repipe,
			.unthrottle = perf_event__repipe,
			.ksymbol = perf_event__repipe,
			.bpf = perf_event__repipe,
			.text_poke = perf_event__repipe,
			.attr = perf_event__repipe_attr,
			.event_update = perf_event__repipe_event_update,
			.tracing_data = perf_event__repipe_op2_synth,
			.finished_round = perf_event__repipe_oe_synth,
			.build_id = perf_event__repipe_op2_synth,
			.id_index = perf_event__repipe_op2_synth,
			.auxtrace_info = perf_event__repipe_op2_synth,
			.auxtrace_error = perf_event__repipe_op2_synth,
			.time_conv = perf_event__repipe_op2_synth,
			.thread_map = perf_event__repipe_op2_synth,
			.cpu_map = perf_event__repipe_op2_synth,
			.stat_config = perf_event__repipe_op2_synth,
			.stat = perf_event__repipe_op2_synth,
			.stat_round = perf_event__repipe_op2_synth,
			.feature = perf_event__repipe_op2_synth,
			.compressed = perf_event__repipe_op4_synth,
			.auxtrace = perf_event__repipe_auxtrace,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;
	bool repipe = true;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			    "Inject build-ids of all DSOs into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch for getting events "
			    "where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options\n"
				    ITRACE_HELP,
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
				    "correlate time between VM guests and the host",
				    parse_vm_time_correlation),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
			       "the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else if (perf_data__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	data.path = inject.input_name;
	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
		inject.is_pipe = true;
		/*
		 * Do not repipe header when input is a regular file
		 * since either it can rewrite the header at the end
		 * or write a new pipe header.
		 */
		if (strcmp(inject.input_name, "-"))
			repipe = false;
	}

	inject.session = __perf_session__new(&data, repipe,
					     output_fd(&inject),
					     &inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	if (!data.is_pipe && inject.output.is_pipe) {
		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
		if (ret < 0) {
			pr_err("Couldn't write a new pipe header.\n");
			goto out_delete;
		}

		ret = perf_event__synthesize_for_pipe(&inject.tool,
						      inject.session,
						      &inject.output,
						      perf_event__repipe);
		if (ret < 0)
			goto out_delete;
	}

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Order events to make sure the mmap records are applied
		 * correctly, especially in the presence of jitted code mmaps.
		 * We cannot generate the buildid hit list and inject the jit
		 * mmaps at the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}

	if (inject.sched_stat) {
		inject.tool.ordered_events = true;
	}

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	if (!inject.in_place_update)
		perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	return ret;
}