1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * CTF writing support via babeltrace.
4 *
5 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
6 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7 */
8
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/zalloc.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include <traceevent/event-parse.h>
23 #include "asm/bug.h"
24 #include "data-convert.h"
25 #include "session.h"
26 #include "debug.h"
27 #include "tool.h"
28 #include "evlist.h"
29 #include "evsel.h"
30 #include "machine.h"
31 #include "config.h"
32 #include <linux/ctype.h>
33 #include <linux/err.h>
34 #include <linux/time64.h>
35 #include "util.h"
36 #include "clockid.h"
37
38 #define pr_N(n, fmt, ...) \
39 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
40
41 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
42 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
43
44 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
45
46 struct evsel_priv {
47 struct bt_ctf_event_class *event_class;
48 };
49
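/*
 * Upper bound on per-CPU streams; used as a fallback when the perf.data
 * header does not record the number of available CPUs (see setup_streams()).
 */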
50 #define MAX_CPUS 4096
51
52 struct ctf_stream {
53 struct bt_ctf_stream *stream;
54 int cpu;
55 u32 count;
56 };
57
58 struct ctf_writer {
59 /* writer primitives */
60 struct bt_ctf_writer *writer;
61 struct ctf_stream **stream;
62 int stream_cnt;
63 struct bt_ctf_stream_class *stream_class;
64 struct bt_ctf_clock *clock;
65
66 /* data types */
67 union {
68 struct {
69 struct bt_ctf_field_type *s64;
70 struct bt_ctf_field_type *u64;
71 struct bt_ctf_field_type *s32;
72 struct bt_ctf_field_type *u32;
73 struct bt_ctf_field_type *string;
74 struct bt_ctf_field_type *u32_hex;
75 struct bt_ctf_field_type *u64_hex;
76 };
77 struct bt_ctf_field_type *array[6];
78 } data;
79 struct bt_ctf_event_class *comm_class;
80 struct bt_ctf_event_class *exit_class;
81 struct bt_ctf_event_class *fork_class;
82 struct bt_ctf_event_class *mmap_class;
83 struct bt_ctf_event_class *mmap2_class;
84 };
85
86 struct convert {
87 struct perf_tool tool;
88 struct ctf_writer writer;
89
90 u64 events_size;
91 u64 events_count;
92 u64 non_sample_count;
93
94 /* Ordered events configured queue size. */
95 u64 queue_size;
96 };
97
98 static int value_set(struct bt_ctf_field_type *type,
99 struct bt_ctf_event *event,
100 const char *name, u64 val)
101 {
102 struct bt_ctf_field *field;
103 bool sign = bt_ctf_field_type_integer_get_signed(type);
104 int ret;
105
106 field = bt_ctf_field_create(type);
107 if (!field) {
108 pr_err("failed to create a field %s\n", name);
109 return -1;
110 }
111
112 if (sign) {
113 ret = bt_ctf_field_signed_integer_set_value(field, val);
114 if (ret) {
115 pr_err("failed to set field value %s\n", name);
116 goto err;
117 }
118 } else {
119 ret = bt_ctf_field_unsigned_integer_set_value(field, val);
120 if (ret) {
121 pr_err("failed to set field value %s\n", name);
122 goto err;
123 }
124 }
125
126 ret = bt_ctf_event_set_payload(event, name, field);
127 if (ret) {
128 pr_err("failed to set payload %s\n", name);
129 goto err;
130 }
131
132 pr2(" SET [%s = %" PRIu64 "]\n", name, val);
133
134 err:
135 bt_ctf_field_put(field);
136 return ret;
137 }
138
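/*
 * Generate thin typed wrappers (value_set_u32(), value_set_s64(), ...)
 * around value_set(), each picking the matching field type from cw->data.
 */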
139 #define __FUNC_VALUE_SET(_name, _val_type) \
140 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
141 struct bt_ctf_event *event, \
142 const char *name, \
143 _val_type val) \
144 { \
145 struct bt_ctf_field_type *type = cw->data._name; \
146 return value_set(type, event, name, (u64) val); \
147 }
148
149 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
150
151 FUNC_VALUE_SET(s32)
152 FUNC_VALUE_SET(u32)
153 FUNC_VALUE_SET(s64)
154 FUNC_VALUE_SET(u64)
155 __FUNC_VALUE_SET(u64_hex, u64)
156
157 static int string_set_value(struct bt_ctf_field *field, const char *string);
158 static __maybe_unused int
159 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
160 const char *name, const char *string)
161 {
162 struct bt_ctf_field_type *type = cw->data.string;
163 struct bt_ctf_field *field;
164 int ret = 0;
165
166 field = bt_ctf_field_create(type);
167 if (!field) {
168 pr_err("failed to create a field %s\n", name);
169 return -1;
170 }
171
172 ret = string_set_value(field, string);
173 if (ret) {
174 pr_err("failed to set value %s\n", name);
175 goto err_put_field;
176 }
177
178 ret = bt_ctf_event_set_payload(event, name, field);
179 if (ret)
180 pr_err("failed to set payload %s\n", name);
181
182 err_put_field:
183 bt_ctf_field_put(field);
184 return ret;
185 }
186
187 static struct bt_ctf_field_type*
188 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
189 {
190 unsigned long flags = field->flags;
191
192 if (flags & TEP_FIELD_IS_STRING)
193 return cw->data.string;
194
195 if (!(flags & TEP_FIELD_IS_SIGNED)) {
196 /* unsigned longs are mostly pointers */
197 if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
198 return cw->data.u64_hex;
199 }
200
201 if (flags & TEP_FIELD_IS_SIGNED) {
202 if (field->size == 8)
203 return cw->data.s64;
204 else
205 return cw->data.s32;
206 }
207
208 if (field->size == 8)
209 return cw->data.u64;
210 else
211 return cw->data.u32;
212 }
213
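/*
 * tep_read_number() hands back the raw field bytes as an unsigned long long;
 * values narrower than 64 bits must be sign-extended here before they are
 * passed to the signed babeltrace setter.
 */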
214 static unsigned long long adjust_signedness(unsigned long long value_int, int size)
215 {
216 unsigned long long value_mask;
217
218 /*
219 * value_mask = (1 << (size * 8 - 1)) - 1.
220 * The masks are written out explicitly for readability.
221 */
222 switch (size) {
223 case 1:
224 value_mask = 0x7fULL;
225 break;
226 case 2:
227 value_mask = 0x7fffULL;
228 break;
229 case 4:
230 value_mask = 0x7fffffffULL;
231 break;
232 case 8:
233 /*
234 * For a 64-bit value, return it as-is. There is no
235 * need to fill the high bits.
236 */
237 /* Fall through */
238 default:
239 /* BUG! */
240 return value_int;
241 }
242
243 /* If it is a positive value, don't adjust. */
244 if ((value_int & (~0ULL - value_mask)) == 0)
245 return value_int;
246
247 /* Fill the upper bits of value_int with 1s to make it a negative long long. */
248 return (value_int & value_mask) | ~value_mask;
249 }
250
251 static int string_set_value(struct bt_ctf_field *field, const char *string)
252 {
253 char *buffer = NULL;
254 size_t len = strlen(string), i, p;
255 int err;
256
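/*
 * Escape non-printable bytes as "\xNN". The scratch buffer is allocated
 * lazily on the first non-printable byte; in the worst case every remaining
 * byte expands to four characters, plus the already-copied prefix and a NUL.
 */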
257 for (i = p = 0; i < len; i++, p++) {
258 if (isprint(string[i])) {
259 if (!buffer)
260 continue;
261 buffer[p] = string[i];
262 } else {
263 char numstr[5];
264
265 snprintf(numstr, sizeof(numstr), "\\x%02x",
266 (unsigned int)(string[i]) & 0xff);
267
268 if (!buffer) {
269 buffer = zalloc(i + (len - i) * 4 + 2);
270 if (!buffer) {
271 pr_err("failed to set unprintable string '%s'\n", string);
272 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
273 }
274 if (i > 0)
275 strncpy(buffer, string, i);
276 }
277 memcpy(buffer + p, numstr, 4);
278 p += 3;
279 }
280 }
281
282 if (!buffer)
283 return bt_ctf_field_string_set_value(field, string);
284 err = bt_ctf_field_string_set_value(field, buffer);
285 free(buffer);
286 return err;
287 }
288
289 static int add_tracepoint_field_value(struct ctf_writer *cw,
290 struct bt_ctf_event_class *event_class,
291 struct bt_ctf_event *event,
292 struct perf_sample *sample,
293 struct tep_format_field *fmtf)
294 {
295 struct bt_ctf_field_type *type;
296 struct bt_ctf_field *array_field;
297 struct bt_ctf_field *field;
298 const char *name = fmtf->name;
299 void *data = sample->raw_data;
300 unsigned long flags = fmtf->flags;
301 unsigned int n_items;
302 unsigned int i;
303 unsigned int offset;
304 unsigned int len;
305 int ret;
306
307 name = fmtf->alias;
308 offset = fmtf->offset;
309 len = fmtf->size;
310 if (flags & TEP_FIELD_IS_STRING)
311 flags &= ~TEP_FIELD_IS_ARRAY;
312
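/*
 * Dynamic fields store a 32-bit descriptor at the field offset: the low
 * 16 bits hold the payload offset and the high 16 bits its length.
 */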
313 if (flags & TEP_FIELD_IS_DYNAMIC) {
314 unsigned long long tmp_val;
315
316 tmp_val = tep_read_number(fmtf->event->tep,
317 data + offset, len);
318 offset = tmp_val;
319 len = offset >> 16;
320 offset &= 0xffff;
321 }
322
323 if (flags & TEP_FIELD_IS_ARRAY) {
324
325 type = bt_ctf_event_class_get_field_by_name(
326 event_class, name);
327 array_field = bt_ctf_field_create(type);
328 bt_ctf_field_type_put(type);
329 if (!array_field) {
330 pr_err("Failed to create array type %s\n", name);
331 return -1;
332 }
333
334 len = fmtf->size / fmtf->arraylen;
335 n_items = fmtf->arraylen;
336 } else {
337 n_items = 1;
338 array_field = NULL;
339 }
340
341 type = get_tracepoint_field_type(cw, fmtf);
342
343 for (i = 0; i < n_items; i++) {
344 if (flags & TEP_FIELD_IS_ARRAY)
345 field = bt_ctf_field_array_get_field(array_field, i);
346 else
347 field = bt_ctf_field_create(type);
348
349 if (!field) {
350 pr_err("failed to create a field %s\n", name);
351 return -1;
352 }
353
354 if (flags & TEP_FIELD_IS_STRING)
355 ret = string_set_value(field, data + offset + i * len);
356 else {
357 unsigned long long value_int;
358
359 value_int = tep_read_number(
360 fmtf->event->tep,
361 data + offset + i * len, len);
362
363 if (!(flags & TEP_FIELD_IS_SIGNED))
364 ret = bt_ctf_field_unsigned_integer_set_value(
365 field, value_int);
366 else
367 ret = bt_ctf_field_signed_integer_set_value(
368 field, adjust_signedness(value_int, len));
369 }
370
371 if (ret) {
372 pr_err("failed to set file value %s\n", name);
373 goto err_put_field;
374 }
375 if (!(flags & TEP_FIELD_IS_ARRAY)) {
376 ret = bt_ctf_event_set_payload(event, name, field);
377 if (ret) {
378 pr_err("failed to set payload %s\n", name);
379 goto err_put_field;
380 }
381 }
382 bt_ctf_field_put(field);
383 }
384 if (flags & TEP_FIELD_IS_ARRAY) {
385 ret = bt_ctf_event_set_payload(event, name, array_field);
386 if (ret) {
387 pr_err("Failed add payload array %s\n", name);
388 return -1;
389 }
390 bt_ctf_field_put(array_field);
391 }
392 return 0;
393
394 err_put_field:
395 bt_ctf_field_put(field);
396 return -1;
397 }
398
399 static int add_tracepoint_fields_values(struct ctf_writer *cw,
400 struct bt_ctf_event_class *event_class,
401 struct bt_ctf_event *event,
402 struct tep_format_field *fields,
403 struct perf_sample *sample)
404 {
405 struct tep_format_field *field;
406 int ret;
407
408 for (field = fields; field; field = field->next) {
409 ret = add_tracepoint_field_value(cw, event_class, event, sample,
410 field);
411 if (ret)
412 return -1;
413 }
414 return 0;
415 }
416
417 static int add_tracepoint_values(struct ctf_writer *cw,
418 struct bt_ctf_event_class *event_class,
419 struct bt_ctf_event *event,
420 struct evsel *evsel,
421 struct perf_sample *sample)
422 {
423 struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
424 struct tep_format_field *fields = evsel->tp_format->format.fields;
425 int ret;
426
427 ret = add_tracepoint_fields_values(cw, event_class, event,
428 common_fields, sample);
429 if (!ret)
430 ret = add_tracepoint_fields_values(cw, event_class, event,
431 fields, sample);
432
433 return ret;
434 }
435
436 static int
437 add_bpf_output_values(struct bt_ctf_event_class *event_class,
438 struct bt_ctf_event *event,
439 struct perf_sample *sample)
440 {
441 struct bt_ctf_field_type *len_type, *seq_type;
442 struct bt_ctf_field *len_field, *seq_field;
443 unsigned int raw_size = sample->raw_size;
444 unsigned int nr_elements = raw_size / sizeof(u32);
445 unsigned int i;
446 int ret;
447
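/*
 * BPF output data is emitted as a sequence of u32 words ("raw_data")
 * whose element count is carried in the "raw_len" field.
 */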
448 if (nr_elements * sizeof(u32) != raw_size)
449 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
450 raw_size, nr_elements * sizeof(u32) - raw_size);
451
452 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
453 len_field = bt_ctf_field_create(len_type);
454 if (!len_field) {
455 pr_err("failed to create 'raw_len' for bpf output event\n");
456 ret = -1;
457 goto put_len_type;
458 }
459
460 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
461 if (ret) {
462 pr_err("failed to set field value for raw_len\n");
463 goto put_len_field;
464 }
465 ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
466 if (ret) {
467 pr_err("failed to set payload to raw_len\n");
468 goto put_len_field;
469 }
470
471 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
472 seq_field = bt_ctf_field_create(seq_type);
473 if (!seq_field) {
474 pr_err("failed to create 'raw_data' for bpf output event\n");
475 ret = -1;
476 goto put_seq_type;
477 }
478
479 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
480 if (ret) {
481 pr_err("failed to set length of 'raw_data'\n");
482 goto put_seq_field;
483 }
484
485 for (i = 0; i < nr_elements; i++) {
486 struct bt_ctf_field *elem_field =
487 bt_ctf_field_sequence_get_field(seq_field, i);
488
489 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
490 ((u32 *)(sample->raw_data))[i]);
491
492 bt_ctf_field_put(elem_field);
493 if (ret) {
494 pr_err("failed to set raw_data[%d]\n", i);
495 goto put_seq_field;
496 }
497 }
498
499 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
500 if (ret)
501 pr_err("failed to set payload for raw_data\n");
502
503 put_seq_field:
504 bt_ctf_field_put(seq_field);
505 put_seq_type:
506 bt_ctf_field_type_put(seq_type);
507 put_len_field:
508 bt_ctf_field_put(len_field);
509 put_len_type:
510 bt_ctf_field_type_put(len_type);
511 return ret;
512 }
513
514 static int
515 add_callchain_output_values(struct bt_ctf_event_class *event_class,
516 struct bt_ctf_event *event,
517 struct ip_callchain *callchain)
518 {
519 struct bt_ctf_field_type *len_type, *seq_type;
520 struct bt_ctf_field *len_field, *seq_field;
521 unsigned int nr_elements = callchain->nr;
522 unsigned int i;
523 int ret;
524
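/*
 * The callchain is emitted as a sequence of u64 addresses ("perf_callchain")
 * with its length carried in "perf_callchain_size".
 */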
525 len_type = bt_ctf_event_class_get_field_by_name(
526 event_class, "perf_callchain_size");
527 len_field = bt_ctf_field_create(len_type);
528 if (!len_field) {
529 pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
530 ret = -1;
531 goto put_len_type;
532 }
533
534 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
535 if (ret) {
536 pr_err("failed to set field value for perf_callchain_size\n");
537 goto put_len_field;
538 }
539 ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
540 if (ret) {
541 pr_err("failed to set payload to perf_callchain_size\n");
542 goto put_len_field;
543 }
544
545 seq_type = bt_ctf_event_class_get_field_by_name(
546 event_class, "perf_callchain");
547 seq_field = bt_ctf_field_create(seq_type);
548 if (!seq_field) {
549 pr_err("failed to create 'perf_callchain' for callchain output event\n");
550 ret = -1;
551 goto put_seq_type;
552 }
553
554 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
555 if (ret) {
556 pr_err("failed to set length of 'perf_callchain'\n");
557 goto put_seq_field;
558 }
559
560 for (i = 0; i < nr_elements; i++) {
561 struct bt_ctf_field *elem_field =
562 bt_ctf_field_sequence_get_field(seq_field, i);
563
564 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
565 ((u64 *)(callchain->ips))[i]);
566
567 bt_ctf_field_put(elem_field);
568 if (ret) {
569 pr_err("failed to set callchain[%d]\n", i);
570 goto put_seq_field;
571 }
572 }
573
574 ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
575 if (ret)
576 pr_err("failed to set payload for raw_data\n");
577
578 put_seq_field:
579 bt_ctf_field_put(seq_field);
580 put_seq_type:
581 bt_ctf_field_type_put(seq_type);
582 put_len_field:
583 bt_ctf_field_put(len_field);
584 put_len_type:
585 bt_ctf_field_type_put(len_type);
586 return ret;
587 }
588
589 static int add_generic_values(struct ctf_writer *cw,
590 struct bt_ctf_event *event,
591 struct evsel *evsel,
592 struct perf_sample *sample)
593 {
594 u64 type = evsel->core.attr.sample_type;
595 int ret;
596
597 /*
598 * missing:
599 * PERF_SAMPLE_TIME - not needed as we have it in
600 * ctf event header
601 * PERF_SAMPLE_READ - TODO
602 * PERF_SAMPLE_RAW - tracepoint fields are handled separately
603 * PERF_SAMPLE_BRANCH_STACK - TODO
604 * PERF_SAMPLE_REGS_USER - TODO
605 * PERF_SAMPLE_STACK_USER - TODO
606 */
607
608 if (type & PERF_SAMPLE_IP) {
609 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
610 if (ret)
611 return -1;
612 }
613
614 if (type & PERF_SAMPLE_TID) {
615 ret = value_set_s32(cw, event, "perf_tid", sample->tid);
616 if (ret)
617 return -1;
618
619 ret = value_set_s32(cw, event, "perf_pid", sample->pid);
620 if (ret)
621 return -1;
622 }
623
624 if ((type & PERF_SAMPLE_ID) ||
625 (type & PERF_SAMPLE_IDENTIFIER)) {
626 ret = value_set_u64(cw, event, "perf_id", sample->id);
627 if (ret)
628 return -1;
629 }
630
631 if (type & PERF_SAMPLE_STREAM_ID) {
632 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
633 if (ret)
634 return -1;
635 }
636
637 if (type & PERF_SAMPLE_PERIOD) {
638 ret = value_set_u64(cw, event, "perf_period", sample->period);
639 if (ret)
640 return -1;
641 }
642
643 if (type & PERF_SAMPLE_WEIGHT) {
644 ret = value_set_u64(cw, event, "perf_weight", sample->weight);
645 if (ret)
646 return -1;
647 }
648
649 if (type & PERF_SAMPLE_DATA_SRC) {
650 ret = value_set_u64(cw, event, "perf_data_src",
651 sample->data_src);
652 if (ret)
653 return -1;
654 }
655
656 if (type & PERF_SAMPLE_TRANSACTION) {
657 ret = value_set_u64(cw, event, "perf_transaction",
658 sample->transaction);
659 if (ret)
660 return -1;
661 }
662
663 return 0;
664 }
665
666 static int ctf_stream__flush(struct ctf_stream *cs)
667 {
668 int err = 0;
669
670 if (cs) {
671 err = bt_ctf_stream_flush(cs->stream);
672 if (err)
673 pr_err("CTF stream %d flush failed\n", cs->cpu);
674
675 pr("Flush stream for cpu %d (%u samples)\n",
676 cs->cpu, cs->count);
677
678 cs->count = 0;
679 }
680
681 return err;
682 }
683
684 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
685 {
686 struct ctf_stream *cs;
687 struct bt_ctf_field *pkt_ctx = NULL;
688 struct bt_ctf_field *cpu_field = NULL;
689 struct bt_ctf_stream *stream = NULL;
690 int ret;
691
692 cs = zalloc(sizeof(*cs));
693 if (!cs) {
694 pr_err("Failed to allocate ctf stream\n");
695 return NULL;
696 }
697
698 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
699 if (!stream) {
700 pr_err("Failed to create CTF stream\n");
701 goto out;
702 }
703
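/*
 * Fill in the "cpu_id" packet context field (added in ctf_writer__init())
 * so each per-CPU stream carries its CPU number.
 */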
704 pkt_ctx = bt_ctf_stream_get_packet_context(stream);
705 if (!pkt_ctx) {
706 pr_err("Failed to obtain packet context\n");
707 goto out;
708 }
709
710 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
711 bt_ctf_field_put(pkt_ctx);
712 if (!cpu_field) {
713 pr_err("Failed to obtain cpu field\n");
714 goto out;
715 }
716
717 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
718 if (ret) {
719 pr_err("Failed to update CPU number\n");
720 goto out;
721 }
722
723 bt_ctf_field_put(cpu_field);
724
725 cs->cpu = cpu;
726 cs->stream = stream;
727 return cs;
728
729 out:
730 if (cpu_field)
731 bt_ctf_field_put(cpu_field);
732 if (stream)
733 bt_ctf_stream_put(stream);
734
735 free(cs);
736 return NULL;
737 }
738
739 static void ctf_stream__delete(struct ctf_stream *cs)
740 {
741 if (cs) {
742 bt_ctf_stream_put(cs->stream);
743 free(cs);
744 }
745 }
746
747 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
748 {
749 struct ctf_stream *cs = cw->stream[cpu];
750
751 if (!cs) {
752 cs = ctf_stream__create(cw, cpu);
753 cw->stream[cpu] = cs;
754 }
755
756 return cs;
757 }
758
759 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
760 struct evsel *evsel)
761 {
762 int cpu = 0;
763
764 if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
765 cpu = sample->cpu;
766
767 if (cpu >= cw->stream_cnt) {
768 pr_err("Event was recorded for CPU %d, limit is at %d.\n",
769 cpu, cw->stream_cnt);
770 cpu = 0;
771 }
772
773 return cpu;
774 }
775
776 #define STREAM_FLUSH_COUNT 100000
777
778 /*
779 * Currently we have no way to determine when to
780 * flush a stream other than keeping track of the
781 * number of events and checking it against a
782 * threshold.
783 */
784 static bool is_flush_needed(struct ctf_stream *cs)
785 {
786 return cs->count >= STREAM_FLUSH_COUNT;
787 }
788
789 static int process_sample_event(struct perf_tool *tool,
790 union perf_event *_event,
791 struct perf_sample *sample,
792 struct evsel *evsel,
793 struct machine *machine __maybe_unused)
794 {
795 struct convert *c = container_of(tool, struct convert, tool);
796 struct evsel_priv *priv = evsel->priv;
797 struct ctf_writer *cw = &c->writer;
798 struct ctf_stream *cs;
799 struct bt_ctf_event_class *event_class;
800 struct bt_ctf_event *event;
801 int ret;
802 unsigned long type = evsel->core.attr.sample_type;
803
804 if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
805 return 0;
806
807 event_class = priv->event_class;
808
809 /* update stats */
810 c->events_count++;
811 c->events_size += _event->header.size;
812
813 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
814
815 event = bt_ctf_event_create(event_class);
816 if (!event) {
817 pr_err("Failed to create an CTF event\n");
818 return -1;
819 }
820
821 bt_ctf_clock_set_time(cw->clock, sample->time);
822
823 ret = add_generic_values(cw, event, evsel, sample);
824 if (ret)
825 return -1;
826
827 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
828 ret = add_tracepoint_values(cw, event_class, event,
829 evsel, sample);
830 if (ret)
831 return -1;
832 }
833
834 if (type & PERF_SAMPLE_CALLCHAIN) {
835 ret = add_callchain_output_values(event_class,
836 event, sample->callchain);
837 if (ret)
838 return -1;
839 }
840
841 if (evsel__is_bpf_output(evsel)) {
842 ret = add_bpf_output_values(event_class, event, sample);
843 if (ret)
844 return -1;
845 }
846
847 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
848 if (cs) {
849 if (is_flush_needed(cs))
850 ctf_stream__flush(cs);
851
852 cs->count++;
853 bt_ctf_stream_append_event(cs->stream, event);
854 }
855
856 bt_ctf_event_put(event);
857 return cs ? 0 : -1;
858 }
859
860 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \
861 do { \
862 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
863 if (ret) \
864 return -1; \
865 } while(0)
866
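/*
 * Generate a process_<name>_event() callback that mirrors the non-sample
 * event into the CTF stream and then hands it on to the stock perf handler.
 */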
867 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \
868 static int process_##_name##_event(struct perf_tool *tool, \
869 union perf_event *_event, \
870 struct perf_sample *sample, \
871 struct machine *machine) \
872 { \
873 struct convert *c = container_of(tool, struct convert, tool);\
874 struct ctf_writer *cw = &c->writer; \
875 struct bt_ctf_event_class *event_class = cw->_name##_class;\
876 struct bt_ctf_event *event; \
877 struct ctf_stream *cs; \
878 int ret; \
879 \
880 c->non_sample_count++; \
881 c->events_size += _event->header.size; \
882 event = bt_ctf_event_create(event_class); \
883 if (!event) { \
884 pr_err("Failed to create an CTF event\n"); \
885 return -1; \
886 } \
887 \
888 bt_ctf_clock_set_time(cw->clock, sample->time); \
889 body \
890 cs = ctf_stream(cw, 0); \
891 if (cs) { \
892 if (is_flush_needed(cs)) \
893 ctf_stream__flush(cs); \
894 \
895 cs->count++; \
896 bt_ctf_stream_append_event(cs->stream, event); \
897 } \
898 bt_ctf_event_put(event); \
899 \
900 return perf_event__process_##_name(tool, _event, sample, machine);\
901 }
902
903 __FUNC_PROCESS_NON_SAMPLE(comm,
904 __NON_SAMPLE_SET_FIELD(comm, u32, pid);
905 __NON_SAMPLE_SET_FIELD(comm, u32, tid);
906 __NON_SAMPLE_SET_FIELD(comm, string, comm);
907 )
908 __FUNC_PROCESS_NON_SAMPLE(fork,
909 __NON_SAMPLE_SET_FIELD(fork, u32, pid);
910 __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
911 __NON_SAMPLE_SET_FIELD(fork, u32, tid);
912 __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
913 __NON_SAMPLE_SET_FIELD(fork, u64, time);
914 )
915
916 __FUNC_PROCESS_NON_SAMPLE(exit,
917 __NON_SAMPLE_SET_FIELD(fork, u32, pid);
918 __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
919 __NON_SAMPLE_SET_FIELD(fork, u32, tid);
920 __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
921 __NON_SAMPLE_SET_FIELD(fork, u64, time);
922 )
923 __FUNC_PROCESS_NON_SAMPLE(mmap,
924 __NON_SAMPLE_SET_FIELD(mmap, u32, pid);
925 __NON_SAMPLE_SET_FIELD(mmap, u32, tid);
926 __NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
927 __NON_SAMPLE_SET_FIELD(mmap, string, filename);
928 )
929 __FUNC_PROCESS_NON_SAMPLE(mmap2,
930 __NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
931 __NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
932 __NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
933 __NON_SAMPLE_SET_FIELD(mmap2, string, filename);
934 )
935 #undef __NON_SAMPLE_SET_FIELD
936 #undef __FUNC_PROCESS_NON_SAMPLE
937
938 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
939 static char *change_name(char *name, char *orig_name, int dup)
940 {
941 char *new_name = NULL;
942 size_t len;
943
944 if (!name)
945 name = orig_name;
946
947 if (dup >= 10)
948 goto out;
949 /*
950 * Add '_' prefix to potential keyword. According to
951 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
952 * further CTF spec updating may require us to use '$'.
953 */
954 if (dup < 0)
955 len = strlen(name) + sizeof("_");
956 else
957 len = strlen(orig_name) + sizeof("_dupl_X");
958
959 new_name = malloc(len);
960 if (!new_name)
961 goto out;
962
963 if (dup < 0)
964 snprintf(new_name, len, "_%s", name);
965 else
966 snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
967
968 out:
969 if (name != orig_name)
970 free(name);
971 return new_name;
972 }
973
974 static int event_class_add_field(struct bt_ctf_event_class *event_class,
975 struct bt_ctf_field_type *type,
976 struct tep_format_field *field)
977 {
978 struct bt_ctf_field_type *t = NULL;
979 char *name;
980 int dup = 1;
981 int ret;
982
983 /* alias was already assigned */
984 if (field->alias != field->name)
985 return bt_ctf_event_class_add_field(event_class, type,
986 (char *)field->alias);
987
988 name = field->name;
989
990 /* If 'name' is a keyword, add a prefix. */
991 if (bt_ctf_validate_identifier(name))
992 name = change_name(name, field->name, -1);
993
994 if (!name) {
995 pr_err("Failed to fix invalid identifier.");
996 return -1;
997 }
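/* Resolve clashes with fields already in the class by appending a _dupl_N suffix. */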
998 while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
999 bt_ctf_field_type_put(t);
1000 name = change_name(name, field->name, dup++);
1001 if (!name) {
1002 pr_err("Failed to create dup name for '%s'\n", field->name);
1003 return -1;
1004 }
1005 }
1006
1007 ret = bt_ctf_event_class_add_field(event_class, type, name);
1008 if (!ret)
1009 field->alias = name;
1010
1011 return ret;
1012 }
1013
1014 static int add_tracepoint_fields_types(struct ctf_writer *cw,
1015 struct tep_format_field *fields,
1016 struct bt_ctf_event_class *event_class)
1017 {
1018 struct tep_format_field *field;
1019 int ret;
1020
1021 for (field = fields; field; field = field->next) {
1022 struct bt_ctf_field_type *type;
1023 unsigned long flags = field->flags;
1024
1025 pr2(" field '%s'\n", field->name);
1026
1027 type = get_tracepoint_field_type(cw, field);
1028 if (!type)
1029 return -1;
1030
1031 /*
1032 * A string is an array of chars. For this we use the string
1033 * type and don't care that it is an array. What we don't
1034 * support is an array of strings.
1035 */
1036 if (flags & TEP_FIELD_IS_STRING)
1037 flags &= ~TEP_FIELD_IS_ARRAY;
1038
1039 if (flags & TEP_FIELD_IS_ARRAY)
1040 type = bt_ctf_field_type_array_create(type, field->arraylen);
1041
1042 ret = event_class_add_field(event_class, type, field);
1043
1044 if (flags & TEP_FIELD_IS_ARRAY)
1045 bt_ctf_field_type_put(type);
1046
1047 if (ret) {
1048 pr_err("Failed to add field '%s': %d\n",
1049 field->name, ret);
1050 return -1;
1051 }
1052 }
1053
1054 return 0;
1055 }
1056
1057 static int add_tracepoint_types(struct ctf_writer *cw,
1058 struct evsel *evsel,
1059 struct bt_ctf_event_class *class)
1060 {
1061 struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
1062 struct tep_format_field *fields = evsel->tp_format->format.fields;
1063 int ret;
1064
1065 ret = add_tracepoint_fields_types(cw, common_fields, class);
1066 if (!ret)
1067 ret = add_tracepoint_fields_types(cw, fields, class);
1068
1069 return ret;
1070 }
1071
1072 static int add_bpf_output_types(struct ctf_writer *cw,
1073 struct bt_ctf_event_class *class)
1074 {
1075 struct bt_ctf_field_type *len_type = cw->data.u32;
1076 struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1077 struct bt_ctf_field_type *seq_type;
1078 int ret;
1079
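/* "raw_data" is a sequence type whose length is taken from the "raw_len" field. */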
1080 ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1081 if (ret)
1082 return ret;
1083
1084 seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1085 if (!seq_type)
1086 return -1;
1087
1088 return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1089 }
1090
1091 static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1092 struct bt_ctf_event_class *event_class)
1093 {
1094 u64 type = evsel->core.attr.sample_type;
1095
1096 /*
1097 * missing:
1098 * PERF_SAMPLE_TIME - not needed as we have it in
1099 * ctf event header
1100 * PERF_SAMPLE_READ - TODO
1101 * PERF_SAMPLE_CALLCHAIN - TODO
1102 * PERF_SAMPLE_RAW - tracepoint fields and BPF output
1103 * are handled separately
1104 * PERF_SAMPLE_BRANCH_STACK - TODO
1105 * PERF_SAMPLE_REGS_USER - TODO
1106 * PERF_SAMPLE_STACK_USER - TODO
1107 */
1108
1109 #define ADD_FIELD(cl, t, n) \
1110 do { \
1111 pr2(" field '%s'\n", n); \
1112 if (bt_ctf_event_class_add_field(cl, t, n)) { \
1113 pr_err("Failed to add field '%s';\n", n); \
1114 return -1; \
1115 } \
1116 } while (0)
1117
1118 if (type & PERF_SAMPLE_IP)
1119 ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1120
1121 if (type & PERF_SAMPLE_TID) {
1122 ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1123 ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1124 }
1125
1126 if ((type & PERF_SAMPLE_ID) ||
1127 (type & PERF_SAMPLE_IDENTIFIER))
1128 ADD_FIELD(event_class, cw->data.u64, "perf_id");
1129
1130 if (type & PERF_SAMPLE_STREAM_ID)
1131 ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1132
1133 if (type & PERF_SAMPLE_PERIOD)
1134 ADD_FIELD(event_class, cw->data.u64, "perf_period");
1135
1136 if (type & PERF_SAMPLE_WEIGHT)
1137 ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1138
1139 if (type & PERF_SAMPLE_DATA_SRC)
1140 ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1141
1142 if (type & PERF_SAMPLE_TRANSACTION)
1143 ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1144
1145 if (type & PERF_SAMPLE_CALLCHAIN) {
1146 ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1147 ADD_FIELD(event_class,
1148 bt_ctf_field_type_sequence_create(
1149 cw->data.u64_hex, "perf_callchain_size"),
1150 "perf_callchain");
1151 }
1152
1153 #undef ADD_FIELD
1154 return 0;
1155 }
1156
1157 static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1158 {
1159 struct bt_ctf_event_class *event_class;
1160 struct evsel_priv *priv;
1161 const char *name = evsel__name(evsel);
1162 int ret;
1163
1164 pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1165
1166 event_class = bt_ctf_event_class_create(name);
1167 if (!event_class)
1168 return -1;
1169
1170 ret = add_generic_types(cw, evsel, event_class);
1171 if (ret)
1172 goto err;
1173
1174 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1175 ret = add_tracepoint_types(cw, evsel, event_class);
1176 if (ret)
1177 goto err;
1178 }
1179
1180 if (evsel__is_bpf_output(evsel)) {
1181 ret = add_bpf_output_types(cw, event_class);
1182 if (ret)
1183 goto err;
1184 }
1185
1186 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1187 if (ret) {
1188 pr("Failed to add event class into stream.\n");
1189 goto err;
1190 }
1191
1192 priv = malloc(sizeof(*priv));
1193 if (!priv)
1194 goto err;
1195
1196 priv->event_class = event_class;
1197 evsel->priv = priv;
1198 return 0;
1199
1200 err:
1201 bt_ctf_event_class_put(event_class);
1202 pr_err("Failed to add event '%s'.\n", name);
1203 return -1;
1204 }
1205
1206 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1207 {
1208 struct evlist *evlist = session->evlist;
1209 struct evsel *evsel;
1210 int ret;
1211
1212 evlist__for_each_entry(evlist, evsel) {
1213 ret = add_event(cw, evsel);
1214 if (ret)
1215 return ret;
1216 }
1217 return 0;
1218 }
1219
1220 #define __NON_SAMPLE_ADD_FIELD(t, n) \
1221 do { \
1222 pr2(" field '%s'\n", #n); \
1223 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1224 pr_err("Failed to add field '%s';\n", #n);\
1225 return -1; \
1226 } \
1227 } while(0)
1228
1229 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \
1230 static int add_##_name##_event(struct ctf_writer *cw) \
1231 { \
1232 struct bt_ctf_event_class *event_class; \
1233 int ret; \
1234 \
1235 pr("Adding "#_name" event\n"); \
1236 event_class = bt_ctf_event_class_create("perf_" #_name);\
1237 if (!event_class) \
1238 return -1; \
1239 body \
1240 \
1241 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1242 if (ret) { \
1243 pr("Failed to add event class '"#_name"' into stream.\n");\
1244 return ret; \
1245 } \
1246 \
1247 cw->_name##_class = event_class; \
1248 bt_ctf_event_class_put(event_class); \
1249 return 0; \
1250 }
1251
1252 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1253 __NON_SAMPLE_ADD_FIELD(u32, pid);
1254 __NON_SAMPLE_ADD_FIELD(u32, tid);
1255 __NON_SAMPLE_ADD_FIELD(string, comm);
1256 )
1257
1258 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1259 __NON_SAMPLE_ADD_FIELD(u32, pid);
1260 __NON_SAMPLE_ADD_FIELD(u32, ppid);
1261 __NON_SAMPLE_ADD_FIELD(u32, tid);
1262 __NON_SAMPLE_ADD_FIELD(u32, ptid);
1263 __NON_SAMPLE_ADD_FIELD(u64, time);
1264 )
1265
1266 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1267 __NON_SAMPLE_ADD_FIELD(u32, pid);
1268 __NON_SAMPLE_ADD_FIELD(u32, ppid);
1269 __NON_SAMPLE_ADD_FIELD(u32, tid);
1270 __NON_SAMPLE_ADD_FIELD(u32, ptid);
1271 __NON_SAMPLE_ADD_FIELD(u64, time);
1272 )
1273
1274 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1275 __NON_SAMPLE_ADD_FIELD(u32, pid);
1276 __NON_SAMPLE_ADD_FIELD(u32, tid);
1277 __NON_SAMPLE_ADD_FIELD(u64_hex, start);
1278 __NON_SAMPLE_ADD_FIELD(string, filename);
1279 )
1280
1281 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1282 __NON_SAMPLE_ADD_FIELD(u32, pid);
1283 __NON_SAMPLE_ADD_FIELD(u32, tid);
1284 __NON_SAMPLE_ADD_FIELD(u64_hex, start);
1285 __NON_SAMPLE_ADD_FIELD(string, filename);
1286 )
1287 #undef __NON_SAMPLE_ADD_FIELD
1288 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1289
1290 static int setup_non_sample_events(struct ctf_writer *cw,
1291 struct perf_session *session __maybe_unused)
1292 {
1293 int ret;
1294
1295 ret = add_comm_event(cw);
1296 if (ret)
1297 return ret;
1298 ret = add_exit_event(cw);
1299 if (ret)
1300 return ret;
1301 ret = add_fork_event(cw);
1302 if (ret)
1303 return ret;
1304 ret = add_mmap_event(cw);
1305 if (ret)
1306 return ret;
1307 ret = add_mmap2_event(cw);
1308 if (ret)
1309 return ret;
1310 return 0;
1311 }
1312
1313 static void cleanup_events(struct perf_session *session)
1314 {
1315 struct evlist *evlist = session->evlist;
1316 struct evsel *evsel;
1317
1318 evlist__for_each_entry(evlist, evsel) {
1319 struct evsel_priv *priv;
1320
1321 priv = evsel->priv;
1322 bt_ctf_event_class_put(priv->event_class);
1323 zfree(&evsel->priv);
1324 }
1325
1326 evlist__delete(evlist);
1327 session->evlist = NULL;
1328 }
1329
1330 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1331 {
1332 struct ctf_stream **stream;
1333 struct perf_header *ph = &session->header;
1334 int ncpus;
1335
1336 /*
1337 * Try to get the number of CPUs used in the data file;
1338 * if not present, fall back to MAX_CPUS.
1339 */
1340 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1341
1342 stream = zalloc(sizeof(*stream) * ncpus);
1343 if (!stream) {
1344 pr_err("Failed to allocate streams.\n");
1345 return -ENOMEM;
1346 }
1347
1348 cw->stream = stream;
1349 cw->stream_cnt = ncpus;
1350 return 0;
1351 }
1352
1353 static void free_streams(struct ctf_writer *cw)
1354 {
1355 int cpu;
1356
1357 for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1358 ctf_stream__delete(cw->stream[cpu]);
1359
1360 zfree(&cw->stream);
1361 }
1362
1363 static int ctf_writer__setup_env(struct ctf_writer *cw,
1364 struct perf_session *session)
1365 {
1366 struct perf_header *header = &session->header;
1367 struct bt_ctf_writer *writer = cw->writer;
1368
1369 #define ADD(__n, __v) \
1370 do { \
1371 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
1372 return -1; \
1373 } while (0)
1374
1375 ADD("host", header->env.hostname);
1376 ADD("sysname", "Linux");
1377 ADD("release", header->env.os_release);
1378 ADD("version", header->env.version);
1379 ADD("machine", header->env.arch);
1380 ADD("domain", "kernel");
1381 ADD("tracer_name", "perf");
1382
1383 #undef ADD
1384 return 0;
1385 }
1386
1387 static int ctf_writer__setup_clock(struct ctf_writer *cw,
1388 struct perf_session *session,
1389 bool tod)
1390 {
1391 struct bt_ctf_clock *clock = cw->clock;
1392 const char *desc = "perf clock";
1393 int64_t offset = 0;
1394
1395 if (tod) {
1396 struct perf_env *env = &session->header.env;
1397
1398 if (!env->clock.enabled) {
1399 pr_err("Can't provide --tod time, missing clock data. "
1400 "Please record with -k/--clockid option.\n");
1401 return -1;
1402 }
1403
1404 desc = clockid_name(env->clock.clockid);
1405 offset = env->clock.tod_ns - env->clock.clockid_ns;
1406 }
1407
1408 #define SET(__n, __v) \
1409 do { \
1410 if (bt_ctf_clock_set_##__n(clock, __v)) \
1411 return -1; \
1412 } while (0)
1413
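/* perf timestamps are in nanoseconds, hence a 1 GHz clock. */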
1414 SET(frequency, 1000000000);
1415 SET(offset, offset);
1416 SET(description, desc);
1417 SET(precision, 10);
1418 SET(is_absolute, 0);
1419
1420 #undef SET
1421 return 0;
1422 }
1423
1424 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1425 {
1426 struct bt_ctf_field_type *type;
1427
1428 type = bt_ctf_field_type_integer_create(size);
1429 if (!type)
1430 return NULL;
1431
1432 if (sign &&
1433 bt_ctf_field_type_integer_set_signed(type, 1))
1434 goto err;
1435
1436 if (hex &&
1437 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1438 goto err;
1439
1440 #if __BYTE_ORDER == __BIG_ENDIAN
1441 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1442 #else
1443 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1444 #endif
1445
1446 pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1447 size, sign ? "un" : "", hex ? "hex" : "");
1448 return type;
1449
1450 err:
1451 bt_ctf_field_type_put(type);
1452 return NULL;
1453 }
1454
1455 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1456 {
1457 unsigned int i;
1458
1459 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1460 bt_ctf_field_type_put(cw->data.array[i]);
1461 }
1462
1463 static int ctf_writer__init_data(struct ctf_writer *cw)
1464 {
1465 #define CREATE_INT_TYPE(type, size, sign, hex) \
1466 do { \
1467 (type) = create_int_type(size, sign, hex); \
1468 if (!(type)) \
1469 goto err; \
1470 } while (0)
1471
1472 CREATE_INT_TYPE(cw->data.s64, 64, true, false);
1473 CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1474 CREATE_INT_TYPE(cw->data.s32, 32, true, false);
1475 CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1476 CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1477 CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1478
1479 cw->data.string = bt_ctf_field_type_string_create();
1480 if (cw->data.string)
1481 return 0;
1482
1483 err:
1484 ctf_writer__cleanup_data(cw);
1485 pr_err("Failed to create data types.\n");
1486 return -1;
1487 }
1488
1489 static void ctf_writer__cleanup(struct ctf_writer *cw)
1490 {
1491 ctf_writer__cleanup_data(cw);
1492
1493 bt_ctf_clock_put(cw->clock);
1494 free_streams(cw);
1495 bt_ctf_stream_class_put(cw->stream_class);
1496 bt_ctf_writer_put(cw->writer);
1497
1498 /* and NULL all the pointers */
1499 memset(cw, 0, sizeof(*cw));
1500 }
1501
1502 static int ctf_writer__init(struct ctf_writer *cw, const char *path,
1503 struct perf_session *session, bool tod)
1504 {
1505 struct bt_ctf_writer *writer;
1506 struct bt_ctf_stream_class *stream_class;
1507 struct bt_ctf_clock *clock;
1508 struct bt_ctf_field_type *pkt_ctx_type;
1509 int ret;
1510
1511 /* CTF writer */
1512 writer = bt_ctf_writer_create(path);
1513 if (!writer)
1514 goto err;
1515
1516 cw->writer = writer;
1517
1518 /* CTF clock */
1519 clock = bt_ctf_clock_create("perf_clock");
1520 if (!clock) {
1521 pr("Failed to create CTF clock.\n");
1522 goto err_cleanup;
1523 }
1524
1525 cw->clock = clock;
1526
1527 if (ctf_writer__setup_clock(cw, session, tod)) {
1528 pr("Failed to setup CTF clock.\n");
1529 goto err_cleanup;
1530 }
1531
1532 /* CTF stream class */
1533 stream_class = bt_ctf_stream_class_create("perf_stream");
1534 if (!stream_class) {
1535 pr("Failed to create CTF stream class.\n");
1536 goto err_cleanup;
1537 }
1538
1539 cw->stream_class = stream_class;
1540
1541 /* CTF clock stream setup */
1542 if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1543 pr("Failed to assign CTF clock to stream class.\n");
1544 goto err_cleanup;
1545 }
1546
1547 if (ctf_writer__init_data(cw))
1548 goto err_cleanup;
1549
1550 /* Add cpu_id for packet context */
1551 pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1552 if (!pkt_ctx_type)
1553 goto err_cleanup;
1554
1555 ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1556 bt_ctf_field_type_put(pkt_ctx_type);
1557 if (ret)
1558 goto err_cleanup;
1559
1560 /* CTF clock writer setup */
1561 if (bt_ctf_writer_add_clock(writer, clock)) {
1562 pr("Failed to assign CTF clock to writer.\n");
1563 goto err_cleanup;
1564 }
1565
1566 return 0;
1567
1568 err_cleanup:
1569 ctf_writer__cleanup(cw);
1570 err:
1571 pr_err("Failed to setup CTF writer.\n");
1572 return -1;
1573 }
1574
1575 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1576 {
1577 int cpu, ret = 0;
1578
1579 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1580 ret = ctf_stream__flush(cw->stream[cpu]);
1581
1582 return ret;
1583 }
1584
1585 static int convert__config(const char *var, const char *value, void *cb)
1586 {
1587 struct convert *c = cb;
1588
1589 if (!strcmp(var, "convert.queue-size"))
1590 return perf_config_u64(&c->queue_size, var, value);
1591
1592 return 0;
1593 }
1594
1595 int bt_convert__perf2ctf(const char *input, const char *path,
1596 struct perf_data_convert_opts *opts)
1597 {
1598 struct perf_session *session;
1599 struct perf_data data = {
1600 .path = input,
1601 .mode = PERF_DATA_MODE_READ,
1602 .force = opts->force,
1603 };
1604 struct convert c = {
1605 .tool = {
1606 .sample = process_sample_event,
1607 .mmap = perf_event__process_mmap,
1608 .mmap2 = perf_event__process_mmap2,
1609 .comm = perf_event__process_comm,
1610 .exit = perf_event__process_exit,
1611 .fork = perf_event__process_fork,
1612 .lost = perf_event__process_lost,
1613 .tracing_data = perf_event__process_tracing_data,
1614 .build_id = perf_event__process_build_id,
1615 .namespaces = perf_event__process_namespaces,
1616 .ordered_events = true,
1617 .ordering_requires_timestamps = true,
1618 },
1619 };
1620 struct ctf_writer *cw = &c.writer;
1621 int err;
1622
1623 if (opts->all) {
1624 c.tool.comm = process_comm_event;
1625 c.tool.exit = process_exit_event;
1626 c.tool.fork = process_fork_event;
1627 c.tool.mmap = process_mmap_event;
1628 c.tool.mmap2 = process_mmap2_event;
1629 }
1630
1631 err = perf_config(convert__config, &c);
1632 if (err)
1633 return err;
1634
1635 err = -1;
1636 /* perf.data session */
1637 session = perf_session__new(&data, &c.tool);
1638 if (IS_ERR(session))
1639 return PTR_ERR(session);
1640
1641 /* CTF writer */
1642 if (ctf_writer__init(cw, path, session, opts->tod))
1643 goto free_session;
1644
1645 if (c.queue_size) {
1646 ordered_events__set_alloc_size(&session->ordered_events,
1647 c.queue_size);
1648 }
1649
1650 /* CTF writer env/clock setup */
1651 if (ctf_writer__setup_env(cw, session))
1652 goto free_writer;
1653
1654 /* CTF events setup */
1655 if (setup_events(cw, session))
1656 goto free_writer;
1657
1658 if (opts->all && setup_non_sample_events(cw, session))
1659 goto free_writer;
1660
1661 if (setup_streams(cw, session))
1662 goto free_writer;
1663
1664 err = perf_session__process_events(session);
1665 if (!err)
1666 err = ctf_writer__flush_streams(cw);
1667 else
1668 pr_err("Error during conversion.\n");
1669
1670 fprintf(stderr,
1671 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1672 data.path, path);
1673
1674 fprintf(stderr,
1675 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1676 (double) c.events_size / 1024.0 / 1024.0,
1677 c.events_count);
1678
1679 if (!c.non_sample_count)
1680 fprintf(stderr, ") ]\n");
1681 else
1682 fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1683
1684 cleanup_events(session);
1685 perf_session__delete(session);
1686 ctf_writer__cleanup(cw);
1687
1688 return err;
1689
1690 free_writer:
1691 ctf_writer__cleanup(cw);
1692 free_session:
1693 perf_session__delete(session);
1694 pr_err("Error during conversion setup.\n");
1695 return err;
1696 }
1697