// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>

#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"

#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>

/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int evlist__regroup(struct evlist *evlist, struct evsel *leader,
			   struct evsel *last)
{
	struct evsel *evsel;
	bool grp;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!(evsel__leader(evsel) == leader ||
			      (evsel__leader(evsel) == evsel &&
			       evsel->core.nr_members <= 1)))
				return -EINVAL;
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!evsel__has_leader(evsel, leader)) {
				evsel__set_leader(evsel, leader);
				if (leader->core.nr_members < 1)
					leader->core.nr_members = 1;
				leader->core.nr_members += 1;
			}
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	return 0;
}

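/*
 * True if AUX data should not be decoded: either no itrace options were
 * given, or decoding was explicitly disabled.
 */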
static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->core.cpus->map[idx];
		if (evlist->core.threads)
			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

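/*
 * Allocate an array of queues, refusing any count whose total byte size
 * would overflow the unsigned arithmetic used below.
 */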
static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT	(32 * 1024 * 1024)

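/*
 * Split an over-sized buffer into BUFFER_LIMIT_FOR_32_BIT sized pieces,
 * each queued individually and marked consecutive after the first.
 */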
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

static bool filter_cpu(struct perf_session *session, int cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.tid = event->auxtrace.tid,
		.cpu = event->auxtrace.cpu,
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del_init(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

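/*
 * Sift a new entry up a min-heap ordered by 'ordinal': parents larger than
 * 'ordinal' are moved down until the correct slot for (queue_nr, ordinal)
 * is found.
 */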
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

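/*
 * Remove the root of the min-heap: walk the smaller child up into the hole
 * left behind, then re-insert the former last element at the final hole
 * position.
 */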
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
	if (!on_exit && itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts)
{
	if (itr) {
		itr->evlist = evlist;
		return itr->recording_options(itr, evlist, opts);
	}
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	/* PMU-agnostic options */
	switch (*str) {
	case 'e':
		opts->auxtrace_snapshot_on_exit = true;
		str++;
		break;
	default:
		break;
	}

	if (itr && itr->parse_snapshot_options)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
	struct evsel *evsel;

	if (!itr->evlist || !itr->pmu)
		return -EINVAL;

	evlist__for_each_entry(itr->evlist, evsel) {
		if (evsel->core.attr.type == itr->pmu->type) {
			if (evsel->disabled)
				return 0;
			return evlist__enable_event_idx(itr->evlist, evsel, idx);
		}
	}
	return -EINVAL;
}

/*
 * Event record size is 16-bit which results in a maximum size of about 64KiB.
 * Allow about 4KiB for the rest of the sample record, to give a maximum
 * AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE	(60 * 1024)

/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE	(4 * 1024)

static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
					     struct record_opts *opts)
{
	struct evsel *evsel;
	bool has_aux_leader = false;
	u32 sz;

	evlist__for_each_entry(evlist, evsel) {
		sz = evsel->core.attr.aux_sample_size;
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
			if (sz) {
				if (has_aux_leader)
					pr_err("Cannot add AUX area sampling to an AUX area event\n");
				else
					pr_err("Cannot add AUX area sampling to a group leader\n");
				return -EINVAL;
			}
		}
		if (sz > MAX_AUX_SAMPLE_SIZE) {
			pr_err("AUX area sample size %u too big, max. %d\n",
			       sz, MAX_AUX_SAMPLE_SIZE);
			return -EINVAL;
		}
		if (sz) {
			if (!has_aux_leader) {
				pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
				return -EINVAL;
			}
			evsel__set_sample_bit(evsel, AUX);
			opts->auxtrace_sample_mode = true;
		} else {
			evsel__reset_sample_bit(evsel, AUX);
		}
	}

	if (!opts->auxtrace_sample_mode) {
		pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
		return -EINVAL;
	}

	if (!perf_can_aux_sample()) {
		pr_err("AUX area sampling is not supported by kernel\n");
		return -EINVAL;
	}

	return 0;
}

int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str)
{
	struct evsel_config_term *term;
	struct evsel *aux_evsel;
	bool has_aux_sample_size = false;
	bool has_aux_leader = false;
	struct evsel *evsel;
	char *endptr;
	unsigned long sz;

	if (!str)
		goto no_opt;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	sz = strtoul(str, &endptr, 0);
	if (*endptr || sz > UINT_MAX) {
		pr_err("Bad AUX area sampling option: '%s'\n", str);
		return -EINVAL;
	}

	if (!sz)
		sz = itr->default_aux_sample_size;

	if (!sz)
		sz = DEFAULT_AUX_SAMPLE_SIZE;

	/* Set aux_sample_size based on --aux-sample option */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
		} else if (has_aux_leader) {
			evsel->core.attr.aux_sample_size = sz;
		}
	}
no_opt:
	aux_evsel = NULL;
	/* Override with aux_sample_size from config term */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
		if (term) {
			has_aux_sample_size = true;
			evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
			/* If possible, group with the AUX event */
			if (aux_evsel && evsel->core.attr.aux_sample_size)
				evlist__regroup(evlist, aux_evsel, evsel);
		}
	}

	if (!str && !has_aux_sample_size)
		return 0;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	return auxtrace_validate_aux_sample_size(evlist, opts);
}

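/*
 * Regroup any event that requests aux_output with the most recently seen
 * AUX area event, so that it can write into that event's AUX area.
 */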
void auxtrace_regroup_aux_output(struct evlist *evlist)
{
	struct evsel *evsel, *aux_evsel = NULL;
	struct evsel_config_term *term;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_OUTPUT);
		/* If possible, group with the AUX event */
		if (term && aux_evsel)
			evlist__regroup(evlist, aux_evsel, evsel);
	}
}

struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del_init(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
						     struct perf_sample *sample,
						     struct perf_session *session)
{
	struct perf_sample_id *sid;
	unsigned int idx;
	u64 id;

	id = sample->id;
	if (!id)
		return NULL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return NULL;

	idx = sid->idx;

	if (idx >= queues->nr_queues)
		return NULL;

	return &queues->queue_array[idx];
}

int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.data_offset = data_offset,
		.reference = reference,
		.size = sample->aux_sample.size,
	};
	struct perf_sample_id *sid;
	u64 id = sample->id;
	unsigned int idx;

	if (!id)
		return -EINVAL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return -ENOENT;

	idx = sid->idx;
	buffer.tid = sid->tid;
	buffer.cpu = sid->cpu;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}

struct queue_data {
	bool samples;
	bool events;
};

static int auxtrace_queue_data_cb(struct perf_session *session,
				  union perf_event *event, u64 offset,
				  void *data)
{
	struct queue_data *qd = data;
	struct perf_sample sample;
	int err;

	if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace))
			return -EINVAL;
		offset += event->header.size;
		return session->auxtrace->queue_data(session, NULL, event,
						     offset);
	}

	if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
		return 0;

	err = evlist__parse_sample(session->evlist, event, &sample);
	if (err)
		return err;

	if (!sample.aux_sample.size)
		return 0;

	offset += sample.aux_sample.data - (void *)event;

	return session->auxtrace->queue_data(session, &sample, NULL, offset);
}

int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
	struct queue_data qd = {
		.samples = samples,
		.events = events,
	};

	if (auxtrace__dont_decode(session))
		return 0;

	if (perf_data__is_pipe(session->data))
		return 0;

	if (!session->auxtrace || !session->auxtrace->queue_data)
		return -EINVAL;

	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 auxtrace_queue_data_cb, &qd);
}

void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
	int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

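/*
 * Fill in a PERF_RECORD_AUXTRACE_ERROR event, truncating the message to
 * MAX_AUXTRACE_ERROR_MSG and sizing the record to the message actually
 * copied.
 */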
void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->fmt = 1;
	auxtrace_error->ip = ip;
	auxtrace_error->time = timestamp;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
	struct evsel *new_leader = NULL;
	struct evsel *evsel;

	/* Find new leader for the group */
	evlist__for_each_entry(evlist, evsel) {
		if (!evsel__has_leader(evsel, leader) || evsel == leader)
			continue;
		if (!new_leader)
			new_leader = evsel;
		evsel__set_leader(evsel, new_leader);
	}

	/* Update group information */
	if (new_leader) {
		zfree(&new_leader->group_name);
		new_leader->group_name = leader->group_name;
		leader->group_name = NULL;

		new_leader->core.nr_members = leader->core.nr_members - 1;
		leader->core.nr_members = 1;
	}
}

static void unleader_auxtrace(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (auxtrace__evsel_is_auxtrace(session, evsel) &&
		    evsel__is_group_leader(evsel)) {
			unleader_evsel(session->evlist, evsel);
		}
	}
}

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;
	int err;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		err = intel_pt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_INTEL_BTS:
		err = intel_bts_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_ARM_SPE:
		err = arm_spe_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_CS_ETM:
		err = cs_etm__process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_S390_CPUMSF:
		err = s390_cpumsf_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	unleader_auxtrace(session);

	return 0;
}

s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->other_events = true;
	synth_opts->errors = true;
	synth_opts->flc = true;
	synth_opts->llc = true;
	synth_opts->tlb = true;
	synth_opts->mem = true;
	synth_opts->remote_access = true;

	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

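/* Map a lower-case flag character to its bit: 'a' -> bit 0 ... 'z' -> bit 25. */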
static int get_flag(const char **ptr, unsigned int *flags)
{
	while (1) {
		char c = **ptr;

		if (c >= 'a' && c <= 'z') {
			*flags |= 1 << (c - 'a');
			++*ptr;
			return 0;
		} else if (c == ' ') {
			++*ptr;
			continue;
		} else {
			return -1;
		}
	}
}

static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
{
	while (1) {
		switch (**ptr) {
		case '+':
			++*ptr;
			if (get_flag(ptr, plus_flags))
				return -1;
			break;
		case '-':
			++*ptr;
			if (get_flag(ptr, minus_flags))
				return -1;
			break;
		case ' ':
			++*ptr;
			break;
		default:
			return 0;
		}
	}
}

/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which are introduced after this cset,
 * when support in 'perf script' for these options is introduced.
 */
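/*
 * For example, "i100us" synthesizes an instruction sample every 100
 * microseconds ('u' and 'm' multiply the period before falling through to
 * the nanoseconds case below), while "g32" enables callchains 32 deep.
 */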
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset)
{
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			if (get_flags(&p, &synth_opts->error_plus_flags,
				      &synth_opts->error_minus_flags))
				goto out_err;
			break;
		case 'd':
			synth_opts->log = true;
			if (get_flags(&p, &synth_opts->log_plus_flags,
				      &synth_opts->log_minus_flags))
				goto out_err;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'G':
		case 'g':
			if (p[-1] == 'G')
				synth_opts->add_callchain = true;
			else
				synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'L':
		case 'l':
			if (p[-1] == 'L')
				synth_opts->add_last_branch = true;
			else
				synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case 'f':
			synth_opts->flc = true;
			break;
		case 'm':
			synth_opts->llc = true;
			break;
		case 't':
			synth_opts->tlb = true;
			break;
		case 'a':
			synth_opts->remote_access = true;
			break;
		case 'M':
			synth_opts->mem = true;
			break;
		case 'q':
			synth_opts->quick += 1;
			break;
		case 'Z':
			synth_opts->timeless_decoding = true;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
{
	return itrace_do_parse_synth_opts(opt->value, str, unset);
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
	unsigned long long nsecs = e->time;
	const char *msg = e->msg;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);

	if (e->fmt && nsecs) {
		unsigned long secs = nsecs / NSEC_PER_SEC;

		nsecs -= secs * NSEC_PER_SEC;
		ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
	} else {
		ret += fprintf(fp, " time 0");
	}

	if (!e->fmt)
		msg = (const char *)&e->time;

	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

/*
 * In compat mode, the kernel runs in 64-bit mode while the perf tool runs in
 * 32-bit mode. A 32-bit perf tool cannot access a 64-bit value atomically,
 * which can cause problems in the sequence below on multiple CPUs: when the
 * perf tool loads or stores a 64-bit value, on some architectures the
 * operation is split into two instructions, one accessing the low 32 bits
 * and one the high 32 bits; between those two user-space accesses the kernel
 * has the chance to access the 64-bit value, leading to unexpected loaded
 * values.
 *
 *   kernel (64-bit)                user (32-bit)
 *
 *   if (LOAD ->aux_tail) { --,     LOAD ->aux_head_lo
 *       STORE $aux_data      | ,--->
 *       FLUSH $aux_data      | |   LOAD ->aux_head_hi
 *       STORE ->aux_head   --|-------`        smp_rmb()
 *   }                        |                LOAD $data
 *                            |                smp_mb()
 *                            |                STORE ->aux_tail_lo
 *                            `----------->
 *                                             STORE ->aux_tail_hi
 *
 * For this reason, the perf tool cannot work correctly when the AUX head or
 * tail is bigger than 4GB (more than 32 bits long); and we cannot simply
 * limit the AUX ring buffer to less than 4GB either, because the pointers
 * increase monotonically, so whatever the buffer size, the head and tail
 * eventually exceed 4GB and carry into the high 32 bits.
 *
 * To mitigate this and improve the user experience, the perf tool is allowed
 * to work under certain conditions and bails out with an error if it detects
 * an overflow it cannot handle.
 *
 * To read the AUX head, the value is read three times and the high 4 bytes
 * of the first and last reads are compared; if the kernel did not change the
 * high 4 bytes during the user read sequence, it is safe to use the second
 * value.
 *
 * When compat_auxtrace_mmap__write_tail() detects a carry into the high
 * 32 bits, writing the tail would take two store operations in user space,
 * so the atomicity of the 64-bit write cannot be guaranteed; it returns '-1'
 * in that case to tell the caller an overflow error has happened.
 */
u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 first, second, last;
	u64 mask = (u64)(UINT32_MAX) << 32;

	do {
		first = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		second = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		last = READ_ONCE(pc->aux_head);
	} while ((first & mask) != (last & mask));

	return second;
}

int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 mask = (u64)(UINT32_MAX) << 32;

	if (tail & mask)
		return -1;

	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}

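/*
 * Copy new AUX data between the old tail ('prev') and the current head out
 * of the ring buffer. The region may wrap, so it is passed to fn() as up to
 * two chunks (data1/len1 and data2/len2), preceded by a PERF_RECORD_AUXTRACE
 * event describing it.
 */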
static int __auxtrace_mmap__read(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;
	int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL));

	head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);

	if (snapshot &&
	    auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old))
		return -1;

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		int err;

		err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
		if (err < 0)
			return err;

		if (itr->read_finish) {
			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

auxtrace_cache__drop(struct auxtrace_cache * c)1929 static void auxtrace_cache__drop(struct auxtrace_cache *c)
1930 {
1931 struct auxtrace_cache_entry *entry;
1932 struct hlist_node *tmp;
1933 size_t i;
1934
1935 if (!c)
1936 return;
1937
1938 for (i = 0; i < c->sz; i++) {
1939 hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
1940 hlist_del(&entry->hash);
1941 auxtrace_cache__free_entry(c, entry);
1942 }
1943 }
1944
1945 c->cnt = 0;
1946 }
1947
auxtrace_cache__free(struct auxtrace_cache * c)1948 void auxtrace_cache__free(struct auxtrace_cache *c)
1949 {
1950 if (!c)
1951 return;
1952
1953 auxtrace_cache__drop(c);
1954 zfree(&c->hashtable);
1955 free(c);
1956 }
1957
auxtrace_cache__alloc_entry(struct auxtrace_cache * c)1958 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
1959 {
1960 return malloc(c->entry_size);
1961 }
1962
auxtrace_cache__free_entry(struct auxtrace_cache * c __maybe_unused,void * entry)1963 void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
1964 void *entry)
1965 {
1966 free(entry);
1967 }
1968
auxtrace_cache__add(struct auxtrace_cache * c,u32 key,struct auxtrace_cache_entry * entry)1969 int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
1970 struct auxtrace_cache_entry *entry)
1971 {
1972 if (c->limit && ++c->cnt > c->limit)
1973 auxtrace_cache__drop(c);
1974
1975 entry->key = key;
1976 hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
1977
1978 return 0;
1979 }
1980
auxtrace_cache__rm(struct auxtrace_cache * c,u32 key)1981 static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
1982 u32 key)
1983 {
1984 struct auxtrace_cache_entry *entry;
1985 struct hlist_head *hlist;
1986 struct hlist_node *n;
1987
1988 if (!c)
1989 return NULL;
1990
1991 hlist = &c->hashtable[hash_32(key, c->bits)];
1992 hlist_for_each_entry_safe(entry, n, hlist, hash) {
1993 if (entry->key == key) {
1994 hlist_del(&entry->hash);
1995 return entry;
1996 }
1997 }
1998
1999 return NULL;
2000 }
2001
auxtrace_cache__remove(struct auxtrace_cache * c,u32 key)2002 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
2003 {
2004 struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
2005
2006 auxtrace_cache__free_entry(c, entry);
2007 }
2008
auxtrace_cache__lookup(struct auxtrace_cache * c,u32 key)2009 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
2010 {
2011 struct auxtrace_cache_entry *entry;
2012 struct hlist_head *hlist;
2013
2014 if (!c)
2015 return NULL;
2016
2017 hlist = &c->hashtable[hash_32(key, c->bits)];
2018 hlist_for_each_entry(entry, hlist, hash) {
2019 if (entry->key == key)
2020 return entry;
2021 }
2022
2023 return NULL;
2024 }
2025
addr_filter__free_str(struct addr_filter * filt)2026 static void addr_filter__free_str(struct addr_filter *filt)
2027 {
2028 zfree(&filt->str);
2029 filt->action = NULL;
2030 filt->sym_from = NULL;
2031 filt->sym_to = NULL;
2032 filt->filename = NULL;
2033 }
2034
addr_filter__new(void)2035 static struct addr_filter *addr_filter__new(void)
2036 {
2037 struct addr_filter *filt = zalloc(sizeof(*filt));
2038
2039 if (filt)
2040 INIT_LIST_HEAD(&filt->list);
2041
2042 return filt;
2043 }
2044
addr_filter__free(struct addr_filter * filt)2045 static void addr_filter__free(struct addr_filter *filt)
2046 {
2047 if (filt)
2048 addr_filter__free_str(filt);
2049 free(filt);
2050 }
2051
addr_filters__add(struct addr_filters * filts,struct addr_filter * filt)2052 static void addr_filters__add(struct addr_filters *filts,
2053 struct addr_filter *filt)
2054 {
2055 list_add_tail(&filt->list, &filts->head);
2056 filts->cnt += 1;
2057 }
2058
addr_filters__del(struct addr_filters * filts,struct addr_filter * filt)2059 static void addr_filters__del(struct addr_filters *filts,
2060 struct addr_filter *filt)
2061 {
2062 list_del_init(&filt->list);
2063 filts->cnt -= 1;
2064 }
2065
addr_filters__init(struct addr_filters * filts)2066 void addr_filters__init(struct addr_filters *filts)
2067 {
2068 INIT_LIST_HEAD(&filts->head);
2069 filts->cnt = 0;
2070 }
2071
addr_filters__exit(struct addr_filters * filts)2072 void addr_filters__exit(struct addr_filters *filts)
2073 {
2074 struct addr_filter *filt, *n;
2075
2076 list_for_each_entry_safe(filt, n, &filts->head, list) {
2077 addr_filters__del(filts, filt);
2078 addr_filter__free(filt);
2079 }
2080 }
2081
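/*
 * parse_num_or_str() below parses either a number or a delimited string,
 * e.g. (illustrative inputs): given "0x1000 foo" the number branch consumes
 * "0x1000" and leaves *inp at " foo"; given "main foo" with str_delim = " ",
 * the string branch NUL-terminates "main" in place, points *str at it, and
 * leaves *inp at "foo".
 */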
static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}

static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}

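/*
 * parse_sym_idx() below parses an optional '#' suffix used to disambiguate
 * a symbol name, e.g. (illustrative): "#2" selects the 2nd occurrence of
 * the symbol, "#g" or "#G" (idx 0) selects the global definition, and no
 * suffix leaves *idx at -1, meaning any occurrence.
 */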
static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

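/*
 * parse_one_filter() below consumes one filter from *filter_inp, e.g. (an
 * illustrative input):
 *
 *	"filter main / 0x100 @ /bin/ls, stop 0xffffffff81000000"
 *
 * The first call sets action = "filter", sym_from = "main", size = 0x100
 * and filename = "/bin/ls", then advances *filter_inp past the comma so
 * the next call sees the "stop" filter.
 */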
static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}

struct sym_args {
	const char *name;
	u64 start;
	u64 size;
	int idx;
	int cnt;
	bool started;
	bool global;
	bool selected;
	bool duplicate;
	bool near;
};

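/*
 * Match a kallsyms name, allowing for the "\t[module]" suffix that
 * /proc/kallsyms appends to module symbols, so that e.g. "foo" matches
 * both "foo" and "foo\t[bar]".
 */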
static bool kern_sym_name_match(const char *kname, const char *name)
{
	size_t n = strlen(name);

	return !strcmp(kname, name) ||
	       (!strncmp(kname, name, n) && kname[n] == '\t');
}

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       kern_sym_name_match(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}

static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;
	u64 size;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
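	/*
	 * E.g. (hypothetical addresses) with 4KiB pages, a last symbol at
	 * 0xffffffff81234567 rounds up to 0xffffffff81235000, giving an
	 * estimated end of 0xffffffff81236000.
	 */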
	size = round_up(start, page_size) + page_size - args->start;
	if (size > args->size)
		args->size = size;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err < 0 ? err : -EINVAL;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
2424 pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64")\n",
2425 filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}

static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}

static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 0;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	if (dso__data_file_size(dso, NULL)) {
		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = 0;
	filt->size = dso->data.file_size;

	return 0;
}

static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

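/*
 * addr_filter__to_str() below formats a resolved filter back into the
 * string form passed to the kernel, e.g. (hypothetical values) a range
 * filter on /bin/ls becomes "filter 0x4000/0x100@/bin/ls" and a kernel
 * stop address becomes "stop 0xffffffff81000000".
 */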
static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static int evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}

int auxtrace_parse_filters(struct evlist *evlist)
{
	struct evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->process_event(session, event, sample, tool);
}

void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample)
{
	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
	    auxtrace__dont_decode(session))
		return;

	session->auxtrace->dump_auxtrace_sample(session, sample);
}

int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->flush_events(session, tool);
}

void auxtrace__free_events(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	return session->auxtrace->free_events(session);
}

void auxtrace__free(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	return session->auxtrace->free(session);
}

bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel)
{
	if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
		return false;

	return session->auxtrace->evsel_is_auxtrace(session, evsel);
}