// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <errno.h>
#include <signal.h>

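/*
 * State shared by all handlers below: the tool callbacks, the session being
 * read, the output data file, the command line options and the bookkeeping
 * (bytes written, saved sched_switch events, AUX id, event copy buffer).
 */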
struct perf_inject {
	struct perf_tool tool;
	struct perf_session *session;
	bool build_ids;
	bool build_id_all;
	bool sched_stat;
	bool have_auxtrace;
	bool strip;
	bool jit_mode;
	const char *input_name;
	struct perf_data output;
	u64 bytes_written;
	u64 aux_id;
	struct list_head samples;
	struct itrace_synth_opts itrace_synth_opts;
	char event_copy[PERF_SAMPLE_MAX_SIZE];
};

struct event_entry {
	struct list_head node;
	u32 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
		struct machine *machine, u8 cpumode, u32 flags);

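/* Write raw bytes to the output file and account for them in bytes_written. */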
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

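/*
 * Pass an event through to the output unchanged. This is the default
 * behaviour for most event types; the more specialised handlers below do
 * their extra work and then fall back to repiping.
 */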
static int perf_event__repipe_synth(struct perf_tool *tool,
		union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
			tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
		union perf_event *event,
		struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
		union perf_event *event __maybe_unused,
		struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
		union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
		union perf_event *event,
		u64 data __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
		union perf_event *event,
		struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
			tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->output.is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
		union perf_event *event,
		struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

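/*
 * Repipe an AUXTRACE event together with the AUX area data that follows it.
 * When reading from a pipe, or without a single mmap of the input file, the
 * data is copied through a bounce buffer; otherwise the event and its payload
 * are written out in one go. For file output the auxtrace index is updated
 * with the new offset.
 */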
static s64 perf_event__repipe_auxtrace(struct perf_session *session,
		union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
			tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
				event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data__fd(session->data),
				event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
		union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample __maybe_unused,
		struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample __maybe_unused,
		struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample,
		struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

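/*
 * Strip the AUX area data out of a sample carrying PERF_SAMPLE_AUX: a trimmed
 * copy is built in inject->event_copy with the aux_sample size field zeroed.
 * On any size inconsistency the original event is returned untouched.
 */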
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
		union perf_event *event,
		struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct evsel *evsel,
		struct machine *machine);

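/*
 * Default sample handler: let a per-evsel handler take over if one was
 * installed (e.g. for --sched-stat), otherwise mark the DSO as hit for
 * build-id processing, cut the AUX area data when --itrace is in use, and
 * repipe the sample.
 */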
static int perf_event__repipe_sample(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct evsel *evsel,
		struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
			tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			event->mmap.filename, event->mmap.pid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

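/*
 * Look up (or create) the DSO for a map event, taking the thread's mount
 * namespace into account. vdso maps always live on the host, so setns is
 * disabled for them.
 */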
static struct dso *findnew_dso(int pid, int tid, const char *filename,
		struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container. Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nnsi->need_setns = false;
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

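/*
 * Used for --buildid-all: inject a build-id event for the mapped DSO the
 * first time it is seen, without waiting for a sample to hit it. The MMAP2
 * variant further below does the same using the dso_id carried by the event.
 */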
static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
		dso__put(dso);
	}

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			event->mmap2.filename, event->mmap2.pid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				event->mmap2.flags);
		dso__put(dso);
	}

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}

static int perf_event__repipe_fork(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
		union perf_event *event)
{
	int err;

	perf_event__repipe_synth(session->tool, event);
	err = perf_event__process_tracing_data(session, event);

	return err;
}

static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	nsinfo__mountns_exit(&nsc);

	return dso->has_build_id ? 0 : -1;
}

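/*
 * Synthesize and emit a build-id event for the given DSO. Anonymous,
 * huge-page and other non-file mappings are skipped since they have no
 * backing file to read a build-id from.
 */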
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
		struct machine *machine, u8 cpumode, u32 flags)
{
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
			perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

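/*
 * -b/--build-ids sample handler: resolve the sample's ip to a map and, the
 * first time a DSO is hit, inject a build-id event for it. The sample itself
 * is always repiped.
 */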
int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
		struct perf_sample *sample,
		struct evsel *evsel __maybe_unused,
		struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
				event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

static int perf_inject__sched_process_exit(struct perf_tool *tool,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample,
		struct evsel *evsel __maybe_unused,
		struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct evsel *evsel,
		struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
				"Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

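/*
 * --sched-stat: for each sched_stat_* sample, find the sched_switch event
 * saved for the same task and re-emit it with the stat sample's period and
 * timestamp, so the report shows where and for how long the task slept.
 */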
static int perf_inject__sched_stat(struct perf_tool *tool,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample,
		struct evsel *evsel,
		struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
			evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
				name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		union perf_event *event __maybe_unused,
		struct perf_sample *sample __maybe_unused,
		struct evsel *evsel __maybe_unused,
		struct machine *machine __maybe_unused)
{
	return 0;
}

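/*
 * --strip: drop everything that is not re-synthesized by --itrace, i.e. all
 * original samples and context switch events.
 */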
static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

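/*
 * Wire up the handlers for the selected mode, process the session and, when
 * writing to a file, rewrite the header to reflect the new data offset, data
 * size and feature bits.
 */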
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	struct perf_data *data_out = &inject->output;
	int fd = perf_data__fd(data_out);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!data_out->is_pipe)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (!data_out->is_pipe) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
					HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

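/*
 * Entry point for 'perf inject': parse the options, open the output file,
 * create the session for the input and run __cmd_inject().
 */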
int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample = perf_event__repipe_sample,
			.read = perf_event__repipe_sample,
			.mmap = perf_event__repipe,
			.mmap2 = perf_event__repipe,
			.comm = perf_event__repipe,
			.namespaces = perf_event__repipe,
			.cgroup = perf_event__repipe,
			.fork = perf_event__repipe,
			.exit = perf_event__repipe,
			.lost = perf_event__repipe,
			.lost_samples = perf_event__repipe,
			.aux = perf_event__repipe,
			.itrace_start = perf_event__repipe,
			.context_switch = perf_event__repipe,
			.throttle = perf_event__repipe,
			.unthrottle = perf_event__repipe,
			.ksymbol = perf_event__repipe,
			.bpf = perf_event__repipe,
			.text_poke = perf_event__repipe,
			.attr = perf_event__repipe_attr,
			.event_update = perf_event__repipe_event_update,
			.tracing_data = perf_event__repipe_op2_synth,
			.finished_round = perf_event__repipe_oe_synth,
			.build_id = perf_event__repipe_op2_synth,
			.id_index = perf_event__repipe_op2_synth,
			.auxtrace_info = perf_event__repipe_op2_synth,
			.auxtrace_error = perf_event__repipe_op2_synth,
			.time_conv = perf_event__repipe_op2_synth,
			.thread_map = perf_event__repipe_op2_synth,
			.cpu_map = perf_event__repipe_op2_synth,
			.stat_config = perf_event__repipe_op2_synth,
			.stat = perf_event__repipe_op2_synth,
			.stat_round = perf_event__repipe_op2_synth,
			.feature = perf_event__repipe_op2_synth,
			.compressed = perf_event__repipe_op4_synth,
			.auxtrace = perf_event__repipe_auxtrace,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
	};
	int ret;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			"Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			"Inject build-ids of all DSOs into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			"input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			"output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			"Merge sched-stat and sched-switch for getting events "
			"where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			"be more verbose (show build ids, etc)"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			"kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
			NULL, "opts", "Instruction Tracing options\n"
			ITRACE_HELP,
			itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			"strip non-synthesized events (use with --itrace)"),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (perf_data__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	data.path = inject.input_name;
	inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Process events in order to make sure the mmap records are
		 * handled correctly, especially for jitted code mmaps. We
		 * cannot generate the buildid hit list and inject the jit
		 * mmaps at the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}

	if (inject.sched_stat) {
		inject.tool.ordered_events = true;
	}

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	perf_data__close(&inject.output);
	return ret;
}