/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "perf.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"

#include <subcmd/parse-options.h>

#include <linux/list.h>

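/*
 * Per-invocation state for 'perf inject': the perf_tool callbacks, the input
 * session, the output data file, the option flags, and the running count of
 * bytes written, which is used to finalize the output header.
 */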
struct perf_inject {
	struct perf_tool	tool;
	struct perf_session	*session;
	bool			build_ids;
	bool			sched_stat;
	bool			have_auxtrace;
	bool			strip;
	bool			jit_mode;
	const char		*input_name;
	struct perf_data_file	output;
	u64			bytes_written;
	u64			aux_id;
	struct list_head	samples;
	struct itrace_synth_opts itrace_synth_opts;
};

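/*
 * A sched_switch event saved verbatim for --sched-stat processing, keyed by
 * the tid it was recorded for.
 */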
struct event_entry {
	struct list_head node;
	u32		 tid;
	union perf_event event[0];
};

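/*
 * Write raw bytes to the output data file and account them in bytes_written,
 * which later becomes the data size recorded in the output header.
 */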
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data_file__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_tool *tool,
					union perf_event *event,
					struct perf_session *session
					__maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

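/*
 * Process an attr event so the evlist stays in sync, then repipe it only when
 * the output is a pipe; for file output the attributes are written later via
 * perf_session__write_header().
 */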
static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->output.is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

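/*
 * AUX area data is not part of the event payload itself: copy_bytes() streams
 * it from the input fd to the output in 4 KiB chunks.
 */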
#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

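/*
 * Repipe an AUXTRACE event together with its trailing AUX area data.  For
 * file output the current output offset is recorded in the auxtrace index.
 * The positive return value tells the session layer how many extra bytes to
 * skip in the input stream.
 */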
static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_session *session)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data_file__is_pipe(session->file) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data_file__fd(session->file),
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_session *session __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

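/*
 * Drop AUX and ITRACE_START events but remember the sample id they carry, so
 * that the corresponding evsel can be deleted once its events have been
 * replaced by synthesized ones.
 */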
static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct perf_evsel *evsel,
			      struct machine *machine);

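/*
 * Dispatch a sample to the per-evsel handler if one was installed (e.g. for
 * --sched-stat or --strip); otherwise mark the DSO as hit for build-id
 * collection and repipe the event unchanged.
 */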
static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct perf_evsel *evsel,
				     struct machine *machine)
{
	if (evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, sample->pid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, sample->pid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_session *session)
{
	int err;

	perf_event__repipe_synth(tool, event);
	err = perf_event__process_tracing_data(tool, event, session);

	return err;
}

static int perf_event__repipe_id_index(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_session *session)
{
	int err;

	perf_event__repipe_synth(tool, event);
	err = perf_event__process_id_index(tool, event, session);

	return err;
}

static int dso__read_build_id(struct dso *dso)
{
	if (dso->has_build_id)
		return 0;

	if (filename__read_build_id(dso->long_name, dso->build_id,
				    sizeof(dso->build_id)) > 0) {
		dso->has_build_id = true;
		return 0;
	}

	return -1;
}

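/*
 * Synthesize a build-id event for a DSO that was hit by a sample and repipe
 * it into the output stream.
 */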
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine)
{
	u16 misc = PERF_RECORD_MISC_USER;
	int err;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	if (dso->kernel)
		misc = PERF_RECORD_MISC_KERNEL;

	err = perf_event__synthesize_build_id(tool, dso, misc, perf_event__repipe,
					      machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

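/*
 * --build-ids sample handler: resolve the sample ip to a map and, on the
 * first hit of each DSO, load it and inject a build-id event before repiping
 * the original sample.
 */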
static int perf_event__inject_buildid(struct perf_tool *tool,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_evsel *evsel __maybe_unused,
				      struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al);

	if (al.map != NULL) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			if (map__load(al.map) >= 0) {
				dso__inject_build_id(al.map->dso, tool, machine);
				/*
				 * If this fails, too bad, let the other side
				 * account this as unresolved.
				 */
			} else {
#ifdef HAVE_LIBELF_SUPPORT
				pr_warning("no symbols found in %s, maybe "
					   "install a debug package?\n",
					   al.map->dso->long_name);
#endif
			}
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct perf_evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

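/*
 * --sched-stat: save a copy of each sched_switch sample, keyed by tid and
 * replacing any previous one for the same task, so a later sched_stat_*
 * sample can be merged with it.
 */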
static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct perf_evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

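/*
 * --sched-stat: when a sched_stat_* sample arrives, look up the saved
 * sched_switch event for that pid, graft the stat sample's period and
 * timestamp onto it, re-synthesize the sample and repipe it, so the output
 * shows where and for how long the task slept.
 */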
static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct perf_evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = perf_evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	perf_evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
				      evsel->attr.read_format, &sample_sw,
				      false);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int perf_evsel__check_stype(struct perf_evsel *evsel,
				   u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->attr;
	const char *name = perf_evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct perf_evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

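/*
 * --strip: drop context switch events and install a handler that discards
 * every sample, so that only synthesized events remain in the output.
 */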
static void strip_init(struct perf_inject *inject)
{
	struct perf_evlist *evlist = inject->session->evlist;
	struct perf_evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

static bool has_tracking(struct perf_evsel *evsel)
{
	return evsel->attr.mmap || evsel->attr.mmap2 || evsel->attr.comm ||
	       evsel->attr.task;
}

#define COMPAT_MASK (PERF_SAMPLE_ID | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
		     PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)

/*
 * In order that the perf.data file is parsable, tracking events like MMAP need
 * their selected event to exist, except if there is only 1 selected event left
 * and it has a compatible sample type.
 */
static bool ok_to_remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel_to_remove)
{
	struct perf_evsel *evsel;
	int cnt = 0;
	bool ok = false;

	if (!has_tracking(evsel_to_remove))
		return true;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->handler != drop_sample) {
			cnt += 1;
			if ((evsel->attr.sample_type & COMPAT_MASK) ==
			    (evsel_to_remove->attr.sample_type & COMPAT_MASK))
				ok = true;
		}
	}

	return ok && cnt == 1;
}

static void strip_fini(struct perf_inject *inject)
{
	struct perf_evlist *evlist = inject->session->evlist;
	struct perf_evsel *evsel, *tmp;

	/* Remove non-synthesized evsels if possible */
	evlist__for_each_entry_safe(evlist, tmp, evsel) {
		if (evsel->handler == drop_sample &&
		    ok_to_remove(evlist, evsel)) {
			pr_debug("Deleting %s\n", perf_evsel__name(evsel));
			perf_evlist__remove(evlist, evsel);
			perf_evsel__delete(evsel);
		}
	}
}

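/*
 * Main processing: wire up the tool callbacks according to the selected
 * options, run the session through them, and for file output rewrite the
 * header to reflect what was actually written.
 */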
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	struct perf_data_file *file_out = &inject->output;
	int fd = perf_data_file__fd(file_out);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct perf_evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = perf_evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__repipe_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = 4096;
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!file_out->is_pipe)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);

	if (!file_out->is_pipe) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all build-ids when there is unprocessed AUX data
		 * because it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag and
		 * remove the evsel.
		 */
		if (inject->itrace_synth_opts.set) {
			struct perf_evsel *evsel;

			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
			evsel = perf_evlist__id2evsel_strict(session->evlist,
							     inject->aux_id);
			if (evsel) {
				pr_debug("Deleting %s\n",
					 perf_evsel__name(evsel));
				perf_evlist__remove(session->evlist, evsel);
				perf_evsel__delete(evsel);
			}
			if (inject->strip)
				strip_fini(inject);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
{
	struct perf_inject inject = {
		.tool = {
			.sample		= perf_event__repipe_sample,
			.mmap		= perf_event__repipe,
			.mmap2		= perf_event__repipe,
			.comm		= perf_event__repipe,
			.fork		= perf_event__repipe,
			.exit		= perf_event__repipe,
			.lost		= perf_event__repipe,
			.lost_samples	= perf_event__repipe,
			.aux		= perf_event__repipe,
			.itrace_start	= perf_event__repipe,
			.context_switch	= perf_event__repipe,
			.read		= perf_event__repipe_sample,
			.throttle	= perf_event__repipe,
			.unthrottle	= perf_event__repipe,
			.attr		= perf_event__repipe_attr,
			.tracing_data	= perf_event__repipe_op2_synth,
			.auxtrace_info	= perf_event__repipe_op2_synth,
			.auxtrace	= perf_event__repipe_auxtrace,
			.auxtrace_error	= perf_event__repipe_op2_synth,
			.time_conv	= perf_event__repipe_op2_synth,
			.finished_round	= perf_event__repipe_oe_synth,
			.build_id	= perf_event__repipe_op2_synth,
			.id_index	= perf_event__repipe_op2_synth,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
		},
	};
	struct perf_data_file file = {
		.mode = PERF_DATA_MODE_READ,
	};
	int ret;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch for getting events "
			    "where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options",
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (perf_data_file__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	inject.tool.ordered_events = inject.sched_stat;

	file.path = inject.input_name;
	inject.session = perf_session__new(&file, true, &inject.tool);
	if (inject.session == NULL)
		return -1;

	if (inject.build_ids) {
		/*
		 * Use ordered events to make sure the mmap records are
		 * processed in the right order, which matters especially for
		 * jitted code mmaps.  We cannot generate the build-id hit
		 * list and inject the jit mmaps at the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}
#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	perf_session__delete(inject.session);
	return ret;
}