1 /*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9 #include "util.h"
10 #include <api/fs/fs.h>
11 #include <poll.h>
12 #include "cpumap.h"
13 #include "thread_map.h"
14 #include "target.h"
15 #include "evlist.h"
16 #include "evsel.h"
17 #include "debug.h"
18 #include "asm/bug.h"
19 #include <unistd.h>
20
21 #include "parse-events.h"
22 #include <subcmd/parse-options.h>
23
24 #include <sys/mman.h>
25
26 #include <linux/bitops.h>
27 #include <linux/hash.h>
28 #include <linux/log2.h>
29 #include <linux/err.h>
30
31 static void perf_mmap__munmap(struct perf_mmap *map);
32 static void perf_mmap__put(struct perf_mmap *map);
33
34 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
35 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
36
37 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
38 struct thread_map *threads)
39 {
40 int i;
41
42 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
43 INIT_HLIST_HEAD(&evlist->heads[i]);
44 INIT_LIST_HEAD(&evlist->entries);
45 perf_evlist__set_maps(evlist, cpus, threads);
46 fdarray__init(&evlist->pollfd, 64);
47 evlist->workload.pid = -1;
48 evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
49 }
50
51 struct perf_evlist *perf_evlist__new(void)
52 {
53 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
54
55 if (evlist != NULL)
56 perf_evlist__init(evlist, NULL, NULL);
57
58 return evlist;
59 }
60
61 struct perf_evlist *perf_evlist__new_default(void)
62 {
63 struct perf_evlist *evlist = perf_evlist__new();
64
65 if (evlist && perf_evlist__add_default(evlist)) {
66 perf_evlist__delete(evlist);
67 evlist = NULL;
68 }
69
70 return evlist;
71 }
72
73 struct perf_evlist *perf_evlist__new_dummy(void)
74 {
75 struct perf_evlist *evlist = perf_evlist__new();
76
77 if (evlist && perf_evlist__add_dummy(evlist)) {
78 perf_evlist__delete(evlist);
79 evlist = NULL;
80 }
81
82 return evlist;
83 }
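
/*
 * Minimal sketch of a typical evlist lifecycle, using only functions from
 * this file; it assumes a 'target' describing what to monitor has already
 * been filled in by the caller and abbreviates error handling:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__create_maps(evlist, &target) < 0 ||
 *	    perf_evlist__open(evlist) < 0 ||
 *	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		goto out_delete;
 *	perf_evlist__enable(evlist);
 *	...
 * out_delete:
 *	perf_evlist__delete(evlist);
 */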
84
85 /**
86 * perf_evlist__set_id_pos - set the positions of event ids.
87 * @evlist: selected event list
88 *
89 * Events with compatible sample types all have the same id_pos
90 * and is_pos. For convenience, put a copy on evlist.
91 */
92 void perf_evlist__set_id_pos(struct perf_evlist *evlist)
93 {
94 struct perf_evsel *first = perf_evlist__first(evlist);
95
96 evlist->id_pos = first->id_pos;
97 evlist->is_pos = first->is_pos;
98 }
99
100 static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
101 {
102 struct perf_evsel *evsel;
103
104 evlist__for_each_entry(evlist, evsel)
105 perf_evsel__calc_id_pos(evsel);
106
107 perf_evlist__set_id_pos(evlist);
108 }
109
110 static void perf_evlist__purge(struct perf_evlist *evlist)
111 {
112 struct perf_evsel *pos, *n;
113
114 evlist__for_each_entry_safe(evlist, n, pos) {
115 list_del_init(&pos->node);
116 pos->evlist = NULL;
117 perf_evsel__delete(pos);
118 }
119
120 evlist->nr_entries = 0;
121 }
122
123 void perf_evlist__exit(struct perf_evlist *evlist)
124 {
125 zfree(&evlist->mmap);
126 zfree(&evlist->backward_mmap);
127 fdarray__exit(&evlist->pollfd);
128 }
129
130 void perf_evlist__delete(struct perf_evlist *evlist)
131 {
132 if (evlist == NULL)
133 return;
134
135 perf_evlist__munmap(evlist);
136 perf_evlist__close(evlist);
137 cpu_map__put(evlist->cpus);
138 thread_map__put(evlist->threads);
139 evlist->cpus = NULL;
140 evlist->threads = NULL;
141 perf_evlist__purge(evlist);
142 perf_evlist__exit(evlist);
143 free(evlist);
144 }
145
146 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
147 struct perf_evsel *evsel)
148 {
149 /*
150 * We already have cpus for evsel (via PMU sysfs) so
151 * keep it, if there's no target cpu list defined.
152 */
153 if (!evsel->own_cpus || evlist->has_user_cpus) {
154 cpu_map__put(evsel->cpus);
155 evsel->cpus = cpu_map__get(evlist->cpus);
156 } else if (evsel->cpus != evsel->own_cpus) {
157 cpu_map__put(evsel->cpus);
158 evsel->cpus = cpu_map__get(evsel->own_cpus);
159 }
160
161 thread_map__put(evsel->threads);
162 evsel->threads = thread_map__get(evlist->threads);
163 }
164
165 static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
166 {
167 struct perf_evsel *evsel;
168
169 evlist__for_each_entry(evlist, evsel)
170 __perf_evlist__propagate_maps(evlist, evsel);
171 }
172
173 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
174 {
175 entry->evlist = evlist;
176 list_add_tail(&entry->node, &evlist->entries);
177 entry->idx = evlist->nr_entries;
178 entry->tracking = !entry->idx;
179
180 if (!evlist->nr_entries++)
181 perf_evlist__set_id_pos(evlist);
182
183 __perf_evlist__propagate_maps(evlist, entry);
184 }
185
186 void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
187 {
188 evsel->evlist = NULL;
189 list_del_init(&evsel->node);
190 evlist->nr_entries -= 1;
191 }
192
193 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
194 struct list_head *list)
195 {
196 struct perf_evsel *evsel, *temp;
197
198 __evlist__for_each_entry_safe(list, temp, evsel) {
199 list_del_init(&evsel->node);
200 perf_evlist__add(evlist, evsel);
201 }
202 }
203
204 void __perf_evlist__set_leader(struct list_head *list)
205 {
206 struct perf_evsel *evsel, *leader;
207
208 leader = list_entry(list->next, struct perf_evsel, node);
209 evsel = list_entry(list->prev, struct perf_evsel, node);
210
211 leader->nr_members = evsel->idx - leader->idx + 1;
212
213 __evlist__for_each_entry(list, evsel) {
214 evsel->leader = leader;
215 }
216 }
217
218 void perf_evlist__set_leader(struct perf_evlist *evlist)
219 {
220 if (evlist->nr_entries) {
221 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
222 __perf_evlist__set_leader(&evlist->entries);
223 }
224 }
225
226 void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
227 {
228 attr->precise_ip = 3;
229
230 while (attr->precise_ip != 0) {
231 int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
232 if (fd != -1) {
233 close(fd);
234 break;
235 }
236 --attr->precise_ip;
237 }
238 }
239
240 int perf_evlist__add_default(struct perf_evlist *evlist)
241 {
242 struct perf_evsel *evsel = perf_evsel__new_cycles();
243
244 if (evsel == NULL)
245 return -ENOMEM;
246
247 perf_evlist__add(evlist, evsel);
248 return 0;
249 }
250
251 int perf_evlist__add_dummy(struct perf_evlist *evlist)
252 {
253 struct perf_event_attr attr = {
254 .type = PERF_TYPE_SOFTWARE,
255 .config = PERF_COUNT_SW_DUMMY,
256 .size = sizeof(attr), /* to capture ABI version */
257 };
258 struct perf_evsel *evsel = perf_evsel__new(&attr);
259
260 if (evsel == NULL)
261 return -ENOMEM;
262
263 perf_evlist__add(evlist, evsel);
264 return 0;
265 }
266
267 static int perf_evlist__add_attrs(struct perf_evlist *evlist,
268 struct perf_event_attr *attrs, size_t nr_attrs)
269 {
270 struct perf_evsel *evsel, *n;
271 LIST_HEAD(head);
272 size_t i;
273
274 for (i = 0; i < nr_attrs; i++) {
275 evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
276 if (evsel == NULL)
277 goto out_delete_partial_list;
278 list_add_tail(&evsel->node, &head);
279 }
280
281 perf_evlist__splice_list_tail(evlist, &head);
282
283 return 0;
284
285 out_delete_partial_list:
286 __evlist__for_each_entry_safe(&head, n, evsel)
287 perf_evsel__delete(evsel);
288 return -1;
289 }
290
291 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
292 struct perf_event_attr *attrs, size_t nr_attrs)
293 {
294 size_t i;
295
296 for (i = 0; i < nr_attrs; i++)
297 event_attr_init(attrs + i);
298
299 return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
300 }
301
302 struct perf_evsel *
303 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
304 {
305 struct perf_evsel *evsel;
306
307 evlist__for_each_entry(evlist, evsel) {
308 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
309 (int)evsel->attr.config == id)
310 return evsel;
311 }
312
313 return NULL;
314 }
315
316 struct perf_evsel *
317 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
318 const char *name)
319 {
320 struct perf_evsel *evsel;
321
322 evlist__for_each_entry(evlist, evsel) {
323 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
324 (strcmp(evsel->name, name) == 0))
325 return evsel;
326 }
327
328 return NULL;
329 }
330
331 int perf_evlist__add_newtp(struct perf_evlist *evlist,
332 const char *sys, const char *name, void *handler)
333 {
334 struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
335
336 if (IS_ERR(evsel))
337 return -1;
338
339 evsel->handler = handler;
340 perf_evlist__add(evlist, evsel);
341 return 0;
342 }
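
/*
 * Example use of perf_evlist__add_newtp(), a minimal sketch: "sched" and
 * "sched_switch" name an existing tracepoint under tracefs, and
 * process_sched_switch stands in for a caller supplied callback that will
 * later be fetched back through evsel->handler:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */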
343
344 static int perf_evlist__nr_threads(struct perf_evlist *evlist,
345 struct perf_evsel *evsel)
346 {
347 if (evsel->system_wide)
348 return 1;
349 else
350 return thread_map__nr(evlist->threads);
351 }
352
353 void perf_evlist__disable(struct perf_evlist *evlist)
354 {
355 struct perf_evsel *pos;
356
357 evlist__for_each_entry(evlist, pos) {
358 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
359 continue;
360 perf_evsel__disable(pos);
361 }
362
363 evlist->enabled = false;
364 }
365
366 void perf_evlist__enable(struct perf_evlist *evlist)
367 {
368 struct perf_evsel *pos;
369
370 evlist__for_each_entry(evlist, pos) {
371 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
372 continue;
373 perf_evsel__enable(pos);
374 }
375
376 evlist->enabled = true;
377 }
378
379 void perf_evlist__toggle_enable(struct perf_evlist *evlist)
380 {
381 (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
382 }
383
384 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
385 struct perf_evsel *evsel, int cpu)
386 {
387 int thread;
388 int nr_threads = perf_evlist__nr_threads(evlist, evsel);
389
390 if (!evsel->fd)
391 return -EINVAL;
392
393 for (thread = 0; thread < nr_threads; thread++) {
394 int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
395 if (err)
396 return err;
397 }
398 return 0;
399 }
400
401 static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
402 struct perf_evsel *evsel,
403 int thread)
404 {
405 int cpu;
406 int nr_cpus = cpu_map__nr(evlist->cpus);
407
408 if (!evsel->fd)
409 return -EINVAL;
410
411 for (cpu = 0; cpu < nr_cpus; cpu++) {
412 int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
413 if (err)
414 return err;
415 }
416 return 0;
417 }
418
419 int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
420 struct perf_evsel *evsel, int idx)
421 {
422 bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
423
424 if (per_cpu_mmaps)
425 return perf_evlist__enable_event_cpu(evlist, evsel, idx);
426 else
427 return perf_evlist__enable_event_thread(evlist, evsel, idx);
428 }
429
430 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
431 {
432 int nr_cpus = cpu_map__nr(evlist->cpus);
433 int nr_threads = thread_map__nr(evlist->threads);
434 int nfds = 0;
435 struct perf_evsel *evsel;
436
437 evlist__for_each_entry(evlist, evsel) {
438 if (evsel->system_wide)
439 nfds += nr_cpus;
440 else
441 nfds += nr_cpus * nr_threads;
442 }
443
444 if (fdarray__available_entries(&evlist->pollfd) < nfds &&
445 fdarray__grow(&evlist->pollfd, nfds) < 0)
446 return -ENOMEM;
447
448 return 0;
449 }
450
451 static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
452 struct perf_mmap *map, short revent)
453 {
454 int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
455 /*
456 * Save the map pointer so that when fds that got POLLHUP'ed are
457 * filtered out we can put the associated evlist->mmap[] entry.
458 */
459 if (pos >= 0) {
460 evlist->pollfd.priv[pos].ptr = map;
461
462 fcntl(fd, F_SETFL, O_NONBLOCK);
463 }
464
465 return pos;
466 }
467
468 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
469 {
470 return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
471 }
472
473 static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
474 void *arg __maybe_unused)
475 {
476 struct perf_mmap *map = fda->priv[fd].ptr;
477
478 if (map)
479 perf_mmap__put(map);
480 }
481
482 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
483 {
484 return fdarray__filter(&evlist->pollfd, revents_and_mask,
485 perf_evlist__munmap_filtered, NULL);
486 }
487
488 int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
489 {
490 return fdarray__poll(&evlist->pollfd, timeout);
491 }
492
493 static void perf_evlist__id_hash(struct perf_evlist *evlist,
494 struct perf_evsel *evsel,
495 int cpu, int thread, u64 id)
496 {
497 int hash;
498 struct perf_sample_id *sid = SID(evsel, cpu, thread);
499
500 sid->id = id;
501 sid->evsel = evsel;
502 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
503 hlist_add_head(&sid->node, &evlist->heads[hash]);
504 }
505
506 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
507 int cpu, int thread, u64 id)
508 {
509 perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
510 evsel->id[evsel->ids++] = id;
511 }
512
513 int perf_evlist__id_add_fd(struct perf_evlist *evlist,
514 struct perf_evsel *evsel,
515 int cpu, int thread, int fd)
516 {
517 u64 read_data[4] = { 0, };
518 int id_idx = 1; /* The first entry is the counter value */
519 u64 id;
520 int ret;
521
522 ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
523 if (!ret)
524 goto add;
525
526 if (errno != ENOTTY)
527 return -1;
528
529 /* Legacy way to get the event id... All hail to old kernels! */
530
531 /*
532 * This way does not work with group format read, so bail
533 * out in that case.
534 */
535 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
536 return -1;
537
538 if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
539 read(fd, &read_data, sizeof(read_data)) == -1)
540 return -1;
541
542 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
543 ++id_idx;
544 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
545 ++id_idx;
546
547 id = read_data[id_idx];
548
549 add:
550 perf_evlist__id_add(evlist, evsel, cpu, thread, id);
551 return 0;
552 }
553
554 static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
555 struct perf_evsel *evsel, int idx, int cpu,
556 int thread)
557 {
558 struct perf_sample_id *sid = SID(evsel, cpu, thread);
559 sid->idx = idx;
560 if (evlist->cpus && cpu >= 0)
561 sid->cpu = evlist->cpus->map[cpu];
562 else
563 sid->cpu = -1;
564 if (!evsel->system_wide && evlist->threads && thread >= 0)
565 sid->tid = thread_map__pid(evlist->threads, thread);
566 else
567 sid->tid = -1;
568 }
569
570 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
571 {
572 struct hlist_head *head;
573 struct perf_sample_id *sid;
574 int hash;
575
576 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
577 head = &evlist->heads[hash];
578
579 hlist_for_each_entry(sid, head, node)
580 if (sid->id == id)
581 return sid;
582
583 return NULL;
584 }
585
586 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
587 {
588 struct perf_sample_id *sid;
589
590 if (evlist->nr_entries == 1 || !id)
591 return perf_evlist__first(evlist);
592
593 sid = perf_evlist__id2sid(evlist, id);
594 if (sid)
595 return sid->evsel;
596
597 if (!perf_evlist__sample_id_all(evlist))
598 return perf_evlist__first(evlist);
599
600 return NULL;
601 }
602
603 struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
604 u64 id)
605 {
606 struct perf_sample_id *sid;
607
608 if (!id)
609 return NULL;
610
611 sid = perf_evlist__id2sid(evlist, id);
612 if (sid)
613 return sid->evsel;
614
615 return NULL;
616 }
617
618 static int perf_evlist__event2id(struct perf_evlist *evlist,
619 union perf_event *event, u64 *id)
620 {
621 const u64 *array = event->sample.array;
622 ssize_t n;
623
624 n = (event->header.size - sizeof(event->header)) >> 3;
625
626 if (event->header.type == PERF_RECORD_SAMPLE) {
627 if (evlist->id_pos >= n)
628 return -1;
629 *id = array[evlist->id_pos];
630 } else {
631 if (evlist->is_pos > n)
632 return -1;
633 n -= evlist->is_pos;
634 *id = array[n];
635 }
636 return 0;
637 }
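
/*
 * A concrete instance of the id_pos/is_pos indexing above: when every evsel
 * has PERF_SAMPLE_IDENTIFIER set, the event id is the first u64 after the
 * header of a PERF_RECORD_SAMPLE (id_pos == 0) and the last u64 of the
 * sample_id trailer appended to every other record type (is_pos == 1), so
 * the lookup reads array[0] or array[n - 1] respectively.
 */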
638
639 struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
640 union perf_event *event)
641 {
642 struct perf_evsel *first = perf_evlist__first(evlist);
643 struct hlist_head *head;
644 struct perf_sample_id *sid;
645 int hash;
646 u64 id;
647
648 if (evlist->nr_entries == 1)
649 return first;
650
651 if (!first->attr.sample_id_all &&
652 event->header.type != PERF_RECORD_SAMPLE)
653 return first;
654
655 if (perf_evlist__event2id(evlist, event, &id))
656 return NULL;
657
658 /* Synthesized events have an id of zero */
659 if (!id)
660 return first;
661
662 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
663 head = &evlist->heads[hash];
664
665 hlist_for_each_entry(sid, head, node) {
666 if (sid->id == id)
667 return sid->evsel;
668 }
669 return NULL;
670 }
671
672 static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
673 {
674 int i;
675
676 if (!evlist->backward_mmap)
677 return 0;
678
679 for (i = 0; i < evlist->nr_mmaps; i++) {
680 int fd = evlist->backward_mmap[i].fd;
681 int err;
682
683 if (fd < 0)
684 continue;
685 err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
686 if (err)
687 return err;
688 }
689 return 0;
690 }
691
692 static int perf_evlist__pause(struct perf_evlist *evlist)
693 {
694 return perf_evlist__set_paused(evlist, true);
695 }
696
697 static int perf_evlist__resume(struct perf_evlist *evlist)
698 {
699 return perf_evlist__set_paused(evlist, false);
700 }
701
702 /* When check_messup is true, 'end' must point to a good entry */
703 static union perf_event *
704 perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
705 u64 end, u64 *prev)
706 {
707 unsigned char *data = md->base + page_size;
708 union perf_event *event = NULL;
709 int diff = end - start;
710
711 if (check_messup) {
712 /*
713 * If we're further behind than half the buffer, there's a chance
714 * the writer will bite our tail and mess up the samples under us.
715 *
716 * If we somehow ended up ahead of the 'end', we got messed up.
717 *
718 * In either case, truncate and restart at 'end'.
719 */
720 if (diff > md->mask / 2 || diff < 0) {
721 fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
722
723 /*
724 * 'end' points to a known good entry, start there.
725 */
726 start = end;
727 diff = 0;
728 }
729 }
730
731 if (diff >= (int)sizeof(event->header)) {
732 size_t size;
733
734 event = (union perf_event *)&data[start & md->mask];
735 size = event->header.size;
736
737 if (size < sizeof(event->header) || diff < (int)size) {
738 event = NULL;
739 goto broken_event;
740 }
741
742 /*
743 * Event straddles the mmap boundary -- header should always
744 * be inside due to u64 alignment of output.
745 */
746 if ((start & md->mask) + size != ((start + size) & md->mask)) {
747 unsigned int offset = start;
748 unsigned int len = min(sizeof(*event), size), cpy;
749 void *dst = md->event_copy;
750
751 do {
752 cpy = min(md->mask + 1 - (offset & md->mask), len);
753 memcpy(dst, &data[offset & md->mask], cpy);
754 offset += cpy;
755 dst += cpy;
756 len -= cpy;
757 } while (len);
758
759 event = (union perf_event *) md->event_copy;
760 }
761
762 start += size;
763 }
764
765 broken_event:
766 if (prev)
767 *prev = start;
768
769 return event;
770 }
771
772 union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
773 {
774 u64 head;
775 u64 old = md->prev;
776
777 /*
778 * Check if event was unmapped due to a POLLHUP/POLLERR.
779 */
780 if (!atomic_read(&md->refcnt))
781 return NULL;
782
783 head = perf_mmap__read_head(md);
784
785 return perf_mmap__read(md, check_messup, old, head, &md->prev);
786 }
787
788 union perf_event *
789 perf_mmap__read_backward(struct perf_mmap *md)
790 {
791 u64 head, end;
792 u64 start = md->prev;
793
794 /*
795 * Check if event was unmapped due to a POLLHUP/POLLERR.
796 */
797 if (!atomic_read(&md->refcnt))
798 return NULL;
799
800 head = perf_mmap__read_head(md);
801 if (!head)
802 return NULL;
803
804 /*
805 * The 'head' pointer starts at 0 and the kernel subtracts
806 * sizeof(record) from it on every write, so in practice 'head' is
807 * negative. The 'end' pointer is built manually by adding the ring
808 * buffer size to 'head', meaning a whole ring buffer worth of data
809 * is valid to read. If 'end' is positive, the ring buffer has not
810 * been completely filled yet, so 'end' must be adjusted to 0.
811 *
812 * However, since both 'head' and 'end' are unsigned, we can't
813 * simply compare 'end' against 0. Instead we compare '-head' with
814 * the ring buffer size, where -head is the number of bytes the
815 * kernel has written into it.
816 */
817 if (-head < (u64)(md->mask + 1))
818 end = 0;
819 else
820 end = head + md->mask + 1;
821
822 return perf_mmap__read(md, false, start, end, &md->prev);
823 }
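
/*
 * Worked example for the arithmetic above, assuming a 64KiB ring
 * (md->mask + 1 == 0x10000): if the kernel has written 0x3000 bytes,
 * head == (u64)-0x3000, so -head (0x3000) is smaller than the ring size,
 * end becomes 0 and, once md->prev has been caught up to head, there are
 * end - start == 0x3000 bytes to read. If the kernel has written 0x20000
 * bytes the ring has wrapped, end == head + 0x10000 and exactly one ring
 * buffer worth of data is read.
 */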
824
825 union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
826 {
827 struct perf_mmap *md = &evlist->mmap[idx];
828
829 /*
830 * Checking for messed up data is required for a forward overwritable
831 * ring buffer: the memory pointed to by md->prev can be overwritten
832 * in that case. It is not needed for a read-write ring buffer: the
833 * kernel stops outputting when it hits md->prev (perf_mmap__consume()).
834 */
835 return perf_mmap__read_forward(md, evlist->overwrite);
836 }
837
838 union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
839 {
840 struct perf_mmap *md = &evlist->mmap[idx];
841
842 /*
843 * No need to check for messed up data in a backward ring buffer:
844 * we can always read arbitrarily long data from it, provided we
845 * remember to pause it before reading.
846 */
847 return perf_mmap__read_backward(md);
848 }
849
850 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
851 {
852 return perf_evlist__mmap_read_forward(evlist, idx);
853 }
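
/*
 * Minimal sketch of the usual read side for the forward ring buffer,
 * assuming the evlist has already been mmapped and that process_event()
 * stands in for a caller supplied handler:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		process_event(event);
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */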
854
855 void perf_mmap__read_catchup(struct perf_mmap *md)
856 {
857 u64 head;
858
859 if (!atomic_read(&md->refcnt))
860 return;
861
862 head = perf_mmap__read_head(md);
863 md->prev = head;
864 }
865
866 void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
867 {
868 perf_mmap__read_catchup(&evlist->mmap[idx]);
869 }
870
871 static bool perf_mmap__empty(struct perf_mmap *md)
872 {
873 return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
874 }
875
876 static void perf_mmap__get(struct perf_mmap *map)
877 {
878 atomic_inc(&map->refcnt);
879 }
880
881 static void perf_mmap__put(struct perf_mmap *md)
882 {
883 BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
884
885 if (atomic_dec_and_test(&md->refcnt))
886 perf_mmap__munmap(md);
887 }
888
889 void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
890 {
891 if (!overwrite) {
892 u64 old = md->prev;
893
894 perf_mmap__write_tail(md, old);
895 }
896
897 if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
898 perf_mmap__put(md);
899 }
900
901 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
902 {
903 perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
904 }
905
906 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
907 struct auxtrace_mmap_params *mp __maybe_unused,
908 void *userpg __maybe_unused,
909 int fd __maybe_unused)
910 {
911 return 0;
912 }
913
914 void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
915 {
916 }
917
918 void __weak auxtrace_mmap_params__init(
919 struct auxtrace_mmap_params *mp __maybe_unused,
920 off_t auxtrace_offset __maybe_unused,
921 unsigned int auxtrace_pages __maybe_unused,
922 bool auxtrace_overwrite __maybe_unused)
923 {
924 }
925
926 void __weak auxtrace_mmap_params__set_idx(
927 struct auxtrace_mmap_params *mp __maybe_unused,
928 struct perf_evlist *evlist __maybe_unused,
929 int idx __maybe_unused,
930 bool per_cpu __maybe_unused)
931 {
932 }
933
934 static void perf_mmap__munmap(struct perf_mmap *map)
935 {
936 if (map->base != NULL) {
937 munmap(map->base, perf_mmap__mmap_len(map));
938 map->base = NULL;
939 map->fd = -1;
940 atomic_set(&map->refcnt, 0);
941 }
942 auxtrace_mmap__munmap(&map->auxtrace_mmap);
943 }
944
945 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
946 {
947 int i;
948
949 if (evlist->mmap)
950 for (i = 0; i < evlist->nr_mmaps; i++)
951 perf_mmap__munmap(&evlist->mmap[i]);
952
953 if (evlist->backward_mmap)
954 for (i = 0; i < evlist->nr_mmaps; i++)
955 perf_mmap__munmap(&evlist->backward_mmap[i]);
956 }
957
958 void perf_evlist__munmap(struct perf_evlist *evlist)
959 {
960 perf_evlist__munmap_nofree(evlist);
961 zfree(&evlist->mmap);
962 zfree(&evlist->backward_mmap);
963 }
964
965 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
966 {
967 int i;
968 struct perf_mmap *map;
969
970 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
971 if (cpu_map__empty(evlist->cpus))
972 evlist->nr_mmaps = thread_map__nr(evlist->threads);
973 map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
974 if (!map)
975 return NULL;
976
977 for (i = 0; i < evlist->nr_mmaps; i++)
978 map[i].fd = -1;
979 return map;
980 }
981
982 struct mmap_params {
983 int prot;
984 int mask;
985 struct auxtrace_mmap_params auxtrace_mp;
986 };
987
988 static int perf_mmap__mmap(struct perf_mmap *map,
989 struct mmap_params *mp, int fd)
990 {
991 /*
992 * The last one will be done at perf_evlist__mmap_consume(), so that we
993 * make sure we don't prevent tools from consuming every last event in
994 * the ring buffer.
995 *
996 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
997 * anymore, but the last events for it are still in the ring buffer,
998 * waiting to be consumed.
999 *
1000 * Tools can choose to ignore this at their own discretion, but the
1001 * evlist layer can't just drop it when filtering events in
1002 * perf_evlist__filter_pollfd().
1003 */
1004 atomic_set(&map->refcnt, 2);
1005 map->prev = 0;
1006 map->mask = mp->mask;
1007 map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
1008 MAP_SHARED, fd, 0);
1009 if (map->base == MAP_FAILED) {
1010 pr_debug2("failed to mmap perf event ring buffer, error %d\n",
1011 errno);
1012 map->base = NULL;
1013 return -1;
1014 }
1015 map->fd = fd;
1016
1017 if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
1018 &mp->auxtrace_mp, map->base, fd))
1019 return -1;
1020
1021 return 0;
1022 }
1023
1024 static bool
1025 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
1026 struct perf_evsel *evsel)
1027 {
1028 if (evsel->attr.write_backward)
1029 return false;
1030 return true;
1031 }
1032
1033 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
1034 struct mmap_params *mp, int cpu_idx,
1035 int thread, int *_output, int *_output_backward)
1036 {
1037 struct perf_evsel *evsel;
1038 int revent;
1039 int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);
1040
1041 evlist__for_each_entry(evlist, evsel) {
1042 struct perf_mmap *maps = evlist->mmap;
1043 int *output = _output;
1044 int fd;
1045 int cpu;
1046
1047 if (evsel->attr.write_backward) {
1048 output = _output_backward;
1049 maps = evlist->backward_mmap;
1050
1051 if (!maps) {
1052 maps = perf_evlist__alloc_mmap(evlist);
1053 if (!maps)
1054 return -1;
1055 evlist->backward_mmap = maps;
1056 if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
1057 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
1058 }
1059 }
1060
1061 if (evsel->system_wide && thread)
1062 continue;
1063
1064 cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
1065 if (cpu == -1)
1066 continue;
1067
1068 fd = FD(evsel, cpu, thread);
1069
1070 if (*output == -1) {
1071 *output = fd;
1072
1073 if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
1074 return -1;
1075 } else {
1076 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
1077 return -1;
1078
1079 perf_mmap__get(&maps[idx]);
1080 }
1081
1082 revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
1083
1084 /*
1085 * The system_wide flag causes a selected event to be opened
1086 * always without a pid. Consequently it will never get a
1087 * POLLHUP, but it is used for tracking in combination with
1088 * other events, so it should not need to be polled anyway.
1089 * Therefore don't add it for polling.
1090 */
1091 if (!evsel->system_wide &&
1092 __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
1093 perf_mmap__put(&maps[idx]);
1094 return -1;
1095 }
1096
1097 if (evsel->attr.read_format & PERF_FORMAT_ID) {
1098 if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
1099 fd) < 0)
1100 return -1;
1101 perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
1102 thread);
1103 }
1104 }
1105
1106 return 0;
1107 }
1108
1109 static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
1110 struct mmap_params *mp)
1111 {
1112 int cpu, thread;
1113 int nr_cpus = cpu_map__nr(evlist->cpus);
1114 int nr_threads = thread_map__nr(evlist->threads);
1115
1116 pr_debug2("perf event ring buffer mmapped per cpu\n");
1117 for (cpu = 0; cpu < nr_cpus; cpu++) {
1118 int output = -1;
1119 int output_backward = -1;
1120
1121 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
1122 true);
1123
1124 for (thread = 0; thread < nr_threads; thread++) {
1125 if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
1126 thread, &output, &output_backward))
1127 goto out_unmap;
1128 }
1129 }
1130
1131 return 0;
1132
1133 out_unmap:
1134 perf_evlist__munmap_nofree(evlist);
1135 return -1;
1136 }
1137
1138 static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
1139 struct mmap_params *mp)
1140 {
1141 int thread;
1142 int nr_threads = thread_map__nr(evlist->threads);
1143
1144 pr_debug2("perf event ring buffer mmapped per thread\n");
1145 for (thread = 0; thread < nr_threads; thread++) {
1146 int output = -1;
1147 int output_backward = -1;
1148
1149 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
1150 false);
1151
1152 if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
1153 &output, &output_backward))
1154 goto out_unmap;
1155 }
1156
1157 return 0;
1158
1159 out_unmap:
1160 perf_evlist__munmap_nofree(evlist);
1161 return -1;
1162 }
1163
1164 unsigned long perf_event_mlock_kb_in_pages(void)
1165 {
1166 unsigned long pages;
1167 int max;
1168
1169 if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
1170 /*
1171 * Pick a once upon a time good value, i.e. things look
1172 * strange since we can't read a sysctl value, but let's not
1173 * die yet...
1174 */
1175 max = 512;
1176 } else {
1177 max -= (page_size / 1024);
1178 }
1179
1180 pages = (max * 1024) / page_size;
1181 if (!is_power_of_2(pages))
1182 pages = rounddown_pow_of_two(pages);
1183
1184 return pages;
1185 }
1186
1187 static size_t perf_evlist__mmap_size(unsigned long pages)
1188 {
1189 if (pages == UINT_MAX)
1190 pages = perf_event_mlock_kb_in_pages();
1191 else if (!is_power_of_2(pages))
1192 return 0;
1193
1194 return (pages + 1) * page_size;
1195 }
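
/*
 * Example, assuming a 4KiB page size: with 128 data pages (the value
 * perf_event_mlock_kb_in_pages() typically yields), perf_evlist__mmap_size()
 * returns (128 + 1) * 4096 bytes, i.e. one extra page for the metadata page
 * the kernel places at the start of the mapping, and the data mask computed
 * in perf_evlist__mmap_ex() becomes 128 * 4096 - 1.
 */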
1196
1197 static long parse_pages_arg(const char *str, unsigned long min,
1198 unsigned long max)
1199 {
1200 unsigned long pages, val;
1201 static struct parse_tag tags[] = {
1202 { .tag = 'B', .mult = 1 },
1203 { .tag = 'K', .mult = 1 << 10 },
1204 { .tag = 'M', .mult = 1 << 20 },
1205 { .tag = 'G', .mult = 1 << 30 },
1206 { .tag = 0 },
1207 };
1208
1209 if (str == NULL)
1210 return -EINVAL;
1211
1212 val = parse_tag_value(str, tags);
1213 if (val != (unsigned long) -1) {
1214 /* we got file size value */
1215 pages = PERF_ALIGN(val, page_size) / page_size;
1216 } else {
1217 /* we got pages count value */
1218 char *eptr;
1219 pages = strtoul(str, &eptr, 10);
1220 if (*eptr != '\0')
1221 return -EINVAL;
1222 }
1223
1224 if (pages == 0 && min == 0) {
1225 /* leave number of pages at 0 */
1226 } else if (!is_power_of_2(pages)) {
1227 /* round pages up to next power of 2 */
1228 pages = roundup_pow_of_two(pages);
1229 if (!pages)
1230 return -EINVAL;
1231 pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
1232 pages * page_size, pages);
1233 }
1234
1235 if (pages > max)
1236 return -EINVAL;
1237
1238 return pages;
1239 }
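
/*
 * Examples of what parse_pages_arg() accepts, assuming 4KiB pages:
 * "512K" is taken as a size and becomes 128 pages, "100" is taken as a
 * page count and is rounded up to the next power of two, 128, with a
 * message printed, and "0" is only accepted when the caller passes
 * min == 0.
 */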
1240
1241 int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
1242 {
1243 unsigned long max = UINT_MAX;
1244 long pages;
1245
1246 if (max > SIZE_MAX / page_size)
1247 max = SIZE_MAX / page_size;
1248
1249 pages = parse_pages_arg(str, 1, max);
1250 if (pages < 0) {
1251 pr_err("Invalid argument for --mmap_pages/-m\n");
1252 return -1;
1253 }
1254
1255 *mmap_pages = pages;
1256 return 0;
1257 }
1258
1259 int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1260 int unset __maybe_unused)
1261 {
1262 return __perf_evlist__parse_mmap_pages(opt->value, str);
1263 }
1264
1265 /**
1266 * perf_evlist__mmap_ex - Create mmaps to receive events.
1267 * @evlist: list of events
1268 * @pages: map length in pages
1269 * @overwrite: overwrite older events?
1270 * @auxtrace_pages: auxtrace map length in pages
1271 * @auxtrace_overwrite: overwrite older auxtrace data?
1272 *
1273 * If @overwrite is %false the user needs to signal event consumption using
1274 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
1275 * automatically.
1276 *
1277 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1278 * consumption using auxtrace_mmap__write_tail().
1279 *
1280 * Return: %0 on success, negative error code otherwise.
1281 */
1282 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1283 bool overwrite, unsigned int auxtrace_pages,
1284 bool auxtrace_overwrite)
1285 {
1286 struct perf_evsel *evsel;
1287 const struct cpu_map *cpus = evlist->cpus;
1288 const struct thread_map *threads = evlist->threads;
1289 struct mmap_params mp = {
1290 .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1291 };
1292
1293 if (!evlist->mmap)
1294 evlist->mmap = perf_evlist__alloc_mmap(evlist);
1295 if (!evlist->mmap)
1296 return -ENOMEM;
1297
1298 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
1299 return -ENOMEM;
1300
1301 evlist->overwrite = overwrite;
1302 evlist->mmap_len = perf_evlist__mmap_size(pages);
1303 pr_debug("mmap size %zuB\n", evlist->mmap_len);
1304 mp.mask = evlist->mmap_len - page_size - 1;
1305
1306 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1307 auxtrace_pages, auxtrace_overwrite);
1308
1309 evlist__for_each_entry(evlist, evsel) {
1310 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
1311 evsel->sample_id == NULL &&
1312 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
1313 return -ENOMEM;
1314 }
1315
1316 if (cpu_map__empty(cpus))
1317 return perf_evlist__mmap_per_thread(evlist, &mp);
1318
1319 return perf_evlist__mmap_per_cpu(evlist, &mp);
1320 }
1321
1322 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1323 bool overwrite)
1324 {
1325 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1326 }
1327
1328 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
1329 {
1330 struct cpu_map *cpus;
1331 struct thread_map *threads;
1332
1333 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1334
1335 if (!threads)
1336 return -1;
1337
1338 if (target__uses_dummy_map(target))
1339 cpus = cpu_map__dummy_new();
1340 else
1341 cpus = cpu_map__new(target->cpu_list);
1342
1343 if (!cpus)
1344 goto out_delete_threads;
1345
1346 evlist->has_user_cpus = !!target->cpu_list;
1347
1348 perf_evlist__set_maps(evlist, cpus, threads);
1349
1350 return 0;
1351
1352 out_delete_threads:
1353 thread_map__put(threads);
1354 return -1;
1355 }
1356
1357 void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1358 struct thread_map *threads)
1359 {
1360 /*
1361 * Allow for the possibility that one or another of the maps isn't being
1362 * changed i.e. don't put it. Note we are assuming the maps that are
1363 * being applied are brand new and evlist is taking ownership of the
1364 * original reference count of 1. If that is not the case it is up to
1365 * the caller to increase the reference count.
1366 */
1367 if (cpus != evlist->cpus) {
1368 cpu_map__put(evlist->cpus);
1369 evlist->cpus = cpu_map__get(cpus);
1370 }
1371
1372 if (threads != evlist->threads) {
1373 thread_map__put(evlist->threads);
1374 evlist->threads = thread_map__get(threads);
1375 }
1376
1377 perf_evlist__propagate_maps(evlist);
1378 }
1379
1380 void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1381 enum perf_event_sample_format bit)
1382 {
1383 struct perf_evsel *evsel;
1384
1385 evlist__for_each_entry(evlist, evsel)
1386 __perf_evsel__set_sample_bit(evsel, bit);
1387 }
1388
1389 void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1390 enum perf_event_sample_format bit)
1391 {
1392 struct perf_evsel *evsel;
1393
1394 evlist__for_each_entry(evlist, evsel)
1395 __perf_evsel__reset_sample_bit(evsel, bit);
1396 }
1397
1398 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
1399 {
1400 struct perf_evsel *evsel;
1401 int err = 0;
1402 const int ncpus = cpu_map__nr(evlist->cpus),
1403 nthreads = thread_map__nr(evlist->threads);
1404
1405 evlist__for_each_entry(evlist, evsel) {
1406 if (evsel->filter == NULL)
1407 continue;
1408
1409 /*
1410 * Filters only work for tracepoint events, which have no cpu limit,
1411 * so the evlist and evsel cpu/thread maps should always be the same.
1412 */
1413 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
1414 if (err) {
1415 *err_evsel = evsel;
1416 break;
1417 }
1418 }
1419
1420 return err;
1421 }
1422
1423 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1424 {
1425 struct perf_evsel *evsel;
1426 int err = 0;
1427
1428 evlist__for_each_entry(evlist, evsel) {
1429 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1430 continue;
1431
1432 err = perf_evsel__set_filter(evsel, filter);
1433 if (err)
1434 break;
1435 }
1436
1437 return err;
1438 }
1439
1440 int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
1441 {
1442 char *filter;
1443 int ret = -1;
1444 size_t i;
1445
1446 for (i = 0; i < npids; ++i) {
1447 if (i == 0) {
1448 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1449 return -1;
1450 } else {
1451 char *tmp;
1452
1453 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1454 goto out_free;
1455
1456 free(filter);
1457 filter = tmp;
1458 }
1459 }
1460
1461 ret = perf_evlist__set_filter(evlist, filter);
1462 out_free:
1463 free(filter);
1464 return ret;
1465 }
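
/*
 * For instance, perf_evlist__set_filter_pids(evlist, 2, (pid_t []){ 1, 2 })
 * builds the tracepoint filter string "common_pid != 1 && common_pid != 2"
 * and applies it to every tracepoint evsel in the evlist.
 */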
1466
1467 int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1468 {
1469 return perf_evlist__set_filter_pids(evlist, 1, &pid);
1470 }
1471
1472 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
1473 {
1474 struct perf_evsel *pos;
1475
1476 if (evlist->nr_entries == 1)
1477 return true;
1478
1479 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1480 return false;
1481
1482 evlist__for_each_entry(evlist, pos) {
1483 if (pos->id_pos != evlist->id_pos ||
1484 pos->is_pos != evlist->is_pos)
1485 return false;
1486 }
1487
1488 return true;
1489 }
1490
1491 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1492 {
1493 struct perf_evsel *evsel;
1494
1495 if (evlist->combined_sample_type)
1496 return evlist->combined_sample_type;
1497
1498 evlist__for_each_entry(evlist, evsel)
1499 evlist->combined_sample_type |= evsel->attr.sample_type;
1500
1501 return evlist->combined_sample_type;
1502 }
1503
1504 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1505 {
1506 evlist->combined_sample_type = 0;
1507 return __perf_evlist__combined_sample_type(evlist);
1508 }
1509
1510 u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1511 {
1512 struct perf_evsel *evsel;
1513 u64 branch_type = 0;
1514
1515 evlist__for_each_entry(evlist, evsel)
1516 branch_type |= evsel->attr.branch_sample_type;
1517 return branch_type;
1518 }
1519
1520 bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1521 {
1522 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1523 u64 read_format = first->attr.read_format;
1524 u64 sample_type = first->attr.sample_type;
1525
1526 evlist__for_each_entry(evlist, pos) {
1527 if (read_format != pos->attr.read_format)
1528 return false;
1529 }
1530
1531 /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1532 if ((sample_type & PERF_SAMPLE_READ) &&
1533 !(read_format & PERF_FORMAT_ID)) {
1534 return false;
1535 }
1536
1537 return true;
1538 }
1539
1540 u64 perf_evlist__read_format(struct perf_evlist *evlist)
1541 {
1542 struct perf_evsel *first = perf_evlist__first(evlist);
1543 return first->attr.read_format;
1544 }
1545
1546 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
1547 {
1548 struct perf_evsel *first = perf_evlist__first(evlist);
1549 struct perf_sample *data;
1550 u64 sample_type;
1551 u16 size = 0;
1552
1553 if (!first->attr.sample_id_all)
1554 goto out;
1555
1556 sample_type = first->attr.sample_type;
1557
1558 if (sample_type & PERF_SAMPLE_TID)
1559 size += sizeof(data->tid) * 2;
1560
1561 if (sample_type & PERF_SAMPLE_TIME)
1562 size += sizeof(data->time);
1563
1564 if (sample_type & PERF_SAMPLE_ID)
1565 size += sizeof(data->id);
1566
1567 if (sample_type & PERF_SAMPLE_STREAM_ID)
1568 size += sizeof(data->stream_id);
1569
1570 if (sample_type & PERF_SAMPLE_CPU)
1571 size += sizeof(data->cpu) * 2;
1572
1573 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1574 size += sizeof(data->id);
1575 out:
1576 return size;
1577 }
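
/*
 * Worked example: with sample_id_all set and a sample_type of
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID | PERF_SAMPLE_CPU,
 * the trailer holds pid/tid (2 * 4 bytes), time (8), id (8) and cpu/res
 * (2 * 4 bytes), so perf_evlist__id_hdr_size() returns 32.
 */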
1578
1579 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
1580 {
1581 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1582
1583 evlist__for_each_entry_continue(evlist, pos) {
1584 if (first->attr.sample_id_all != pos->attr.sample_id_all)
1585 return false;
1586 }
1587
1588 return true;
1589 }
1590
1591 bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
1592 {
1593 struct perf_evsel *first = perf_evlist__first(evlist);
1594 return first->attr.sample_id_all;
1595 }
1596
1597 void perf_evlist__set_selected(struct perf_evlist *evlist,
1598 struct perf_evsel *evsel)
1599 {
1600 evlist->selected = evsel;
1601 }
1602
1603 void perf_evlist__close(struct perf_evlist *evlist)
1604 {
1605 struct perf_evsel *evsel;
1606 int ncpus = cpu_map__nr(evlist->cpus);
1607 int nthreads = thread_map__nr(evlist->threads);
1608
1609 evlist__for_each_entry_reverse(evlist, evsel) {
1610 int n = evsel->cpus ? evsel->cpus->nr : ncpus;
1611 perf_evsel__close(evsel, n, nthreads);
1612 }
1613 }
1614
1615 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1616 {
1617 struct cpu_map *cpus;
1618 struct thread_map *threads;
1619 int err = -ENOMEM;
1620
1621 /*
1622 * Try reading /sys/devices/system/cpu/online to get
1623 * an all cpus map.
1624 *
1625 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1626 * code needs an overhaul to properly forward the
1627 * error, and we may not want to do that fallback to a
1628 * default cpu identity map :-\
1629 */
1630 cpus = cpu_map__new(NULL);
1631 if (!cpus)
1632 goto out;
1633
1634 threads = thread_map__new_dummy();
1635 if (!threads)
1636 goto out_put;
1637
1638 perf_evlist__set_maps(evlist, cpus, threads);
1639 out:
1640 return err;
1641 out_put:
1642 cpu_map__put(cpus);
1643 goto out;
1644 }
1645
1646 int perf_evlist__open(struct perf_evlist *evlist)
1647 {
1648 struct perf_evsel *evsel;
1649 int err;
1650
1651 /*
1652 * Default: one fd per CPU, all threads, aka systemwide
1653 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1654 */
1655 if (evlist->threads == NULL && evlist->cpus == NULL) {
1656 err = perf_evlist__create_syswide_maps(evlist);
1657 if (err < 0)
1658 goto out_err;
1659 }
1660
1661 perf_evlist__update_id_pos(evlist);
1662
1663 evlist__for_each_entry(evlist, evsel) {
1664 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
1665 if (err < 0)
1666 goto out_err;
1667 }
1668
1669 return 0;
1670 out_err:
1671 perf_evlist__close(evlist);
1672 errno = -err;
1673 return err;
1674 }
1675
1676 int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
1677 const char *argv[], bool pipe_output,
1678 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
1679 {
1680 int child_ready_pipe[2], go_pipe[2];
1681 char bf;
1682
1683 if (pipe(child_ready_pipe) < 0) {
1684 perror("failed to create 'ready' pipe");
1685 return -1;
1686 }
1687
1688 if (pipe(go_pipe) < 0) {
1689 perror("failed to create 'go' pipe");
1690 goto out_close_ready_pipe;
1691 }
1692
1693 evlist->workload.pid = fork();
1694 if (evlist->workload.pid < 0) {
1695 perror("failed to fork");
1696 goto out_close_pipes;
1697 }
1698
1699 if (!evlist->workload.pid) {
1700 int ret;
1701
1702 if (pipe_output)
1703 dup2(2, 1);
1704
1705 signal(SIGTERM, SIG_DFL);
1706
1707 close(child_ready_pipe[0]);
1708 close(go_pipe[1]);
1709 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1710
1711 /*
1712 * Tell the parent we're ready to go
1713 */
1714 close(child_ready_pipe[1]);
1715
1716 /*
1717 * Wait until the parent tells us to go.
1718 */
1719 ret = read(go_pipe[0], &bf, 1);
1720 /*
1721 * The parent will ask for the execvp() to be performed by
1722 * writing exactly one byte, in workload.cork_fd, usually via
1723 * perf_evlist__start_workload().
1724 *
1725 * For cancelling the workload without actually running it,
1726 * the parent will just close workload.cork_fd, without writing
1727 * anything, i.e. read will return zero and we just exit()
1728 * here.
1729 */
1730 if (ret != 1) {
1731 if (ret == -1)
1732 perror("unable to read pipe");
1733 exit(ret);
1734 }
1735
1736 execvp(argv[0], (char **)argv);
1737
1738 if (exec_error) {
1739 union sigval val;
1740
1741 val.sival_int = errno;
1742 if (sigqueue(getppid(), SIGUSR1, val))
1743 perror(argv[0]);
1744 } else
1745 perror(argv[0]);
1746 exit(-1);
1747 }
1748
1749 if (exec_error) {
1750 struct sigaction act = {
1751 .sa_flags = SA_SIGINFO,
1752 .sa_sigaction = exec_error,
1753 };
1754 sigaction(SIGUSR1, &act, NULL);
1755 }
1756
1757 if (target__none(target)) {
1758 if (evlist->threads == NULL) {
1759 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1760 __func__, __LINE__);
1761 goto out_close_pipes;
1762 }
1763 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
1764 }
1765
1766 close(child_ready_pipe[1]);
1767 close(go_pipe[0]);
1768 /*
1769 * wait for child to settle
1770 */
1771 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1772 perror("unable to read pipe");
1773 goto out_close_pipes;
1774 }
1775
1776 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1777 evlist->workload.cork_fd = go_pipe[1];
1778 close(child_ready_pipe[0]);
1779 return 0;
1780
1781 out_close_pipes:
1782 close(go_pipe[0]);
1783 close(go_pipe[1]);
1784 out_close_ready_pipe:
1785 close(child_ready_pipe[0]);
1786 close(child_ready_pipe[1]);
1787 return -1;
1788 }
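
/*
 * Minimal sketch of how the two workload helpers pair up, assuming argv
 * holds the command to run and the events are opened, mmapped and enabled
 * in between; exec_error may be NULL if no SIGUSR1 notification is wanted:
 *
 *	if (perf_evlist__prepare_workload(evlist, &target, argv, false, NULL) < 0)
 *		return -1;
 *	... open, mmap and enable events ...
 *	perf_evlist__start_workload(evlist);
 */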
1789
1790 int perf_evlist__start_workload(struct perf_evlist *evlist)
1791 {
1792 if (evlist->workload.cork_fd > 0) {
1793 char bf = 0;
1794 int ret;
1795 /*
1796 * Remove the cork, let it rip!
1797 */
1798 ret = write(evlist->workload.cork_fd, &bf, 1);
1799 if (ret < 0)
1800 perror("enable to write to pipe");
1801
1802 close(evlist->workload.cork_fd);
1803 return ret;
1804 }
1805
1806 return 0;
1807 }
1808
1809 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1810 struct perf_sample *sample)
1811 {
1812 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1813
1814 if (!evsel)
1815 return -EFAULT;
1816 return perf_evsel__parse_sample(evsel, event, sample);
1817 }
1818
1819 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1820 {
1821 struct perf_evsel *evsel;
1822 size_t printed = 0;
1823
1824 evlist__for_each_entry(evlist, evsel) {
1825 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1826 perf_evsel__name(evsel));
1827 }
1828
1829 return printed + fprintf(fp, "\n");
1830 }
1831
1832 int perf_evlist__strerror_open(struct perf_evlist *evlist,
1833 int err, char *buf, size_t size)
1834 {
1835 int printed, value;
1836 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1837
1838 switch (err) {
1839 case EACCES:
1840 case EPERM:
1841 printed = scnprintf(buf, size,
1842 "Error:\t%s.\n"
1843 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1844
1845 value = perf_event_paranoid();
1846
1847 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1848
1849 if (value >= 2) {
1850 printed += scnprintf(buf + printed, size - printed,
1851 "For your workloads it needs to be <= 1\nHint:\t");
1852 }
1853 printed += scnprintf(buf + printed, size - printed,
1854 "For system wide tracing it needs to be set to -1.\n");
1855
1856 printed += scnprintf(buf + printed, size - printed,
1857 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1858 "Hint:\tThe current value is %d.", value);
1859 break;
1860 case EINVAL: {
1861 struct perf_evsel *first = perf_evlist__first(evlist);
1862 int max_freq;
1863
1864 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1865 goto out_default;
1866
1867 if (first->attr.sample_freq < (u64)max_freq)
1868 goto out_default;
1869
1870 printed = scnprintf(buf, size,
1871 "Error:\t%s.\n"
1872 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1873 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1874 emsg, max_freq, first->attr.sample_freq);
1875 break;
1876 }
1877 default:
1878 out_default:
1879 scnprintf(buf, size, "%s", emsg);
1880 break;
1881 }
1882
1883 return 0;
1884 }
1885
1886 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1887 {
1888 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1889 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
1890
1891 switch (err) {
1892 case EPERM:
1893 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
1894 printed += scnprintf(buf + printed, size - printed,
1895 "Error:\t%s.\n"
1896 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
1897 "Hint:\tTried using %zd kB.\n",
1898 emsg, pages_max_per_user, pages_attempted);
1899
1900 if (pages_attempted >= pages_max_per_user) {
1901 printed += scnprintf(buf + printed, size - printed,
1902 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1903 pages_max_per_user + pages_attempted);
1904 }
1905
1906 printed += scnprintf(buf + printed, size - printed,
1907 "Hint:\tTry using a smaller -m/--mmap-pages value.");
1908 break;
1909 default:
1910 scnprintf(buf, size, "%s", emsg);
1911 break;
1912 }
1913
1914 return 0;
1915 }
1916
1917 void perf_evlist__to_front(struct perf_evlist *evlist,
1918 struct perf_evsel *move_evsel)
1919 {
1920 struct perf_evsel *evsel, *n;
1921 LIST_HEAD(move);
1922
1923 if (move_evsel == perf_evlist__first(evlist))
1924 return;
1925
1926 evlist__for_each_entry_safe(evlist, n, evsel) {
1927 if (evsel->leader == move_evsel->leader)
1928 list_move_tail(&evsel->node, &move);
1929 }
1930
1931 list_splice(&move, &evlist->entries);
1932 }
1933
1934 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1935 struct perf_evsel *tracking_evsel)
1936 {
1937 struct perf_evsel *evsel;
1938
1939 if (tracking_evsel->tracking)
1940 return;
1941
1942 evlist__for_each_entry(evlist, evsel) {
1943 if (evsel != tracking_evsel)
1944 evsel->tracking = false;
1945 }
1946
1947 tracking_evsel->tracking = true;
1948 }
1949
1950 struct perf_evsel *
1951 perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1952 const char *str)
1953 {
1954 struct perf_evsel *evsel;
1955
1956 evlist__for_each_entry(evlist, evsel) {
1957 if (!evsel->name)
1958 continue;
1959 if (strcmp(str, evsel->name) == 0)
1960 return evsel;
1961 }
1962
1963 return NULL;
1964 }
1965
1966 void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1967 enum bkw_mmap_state state)
1968 {
1969 enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1970 enum action {
1971 NONE,
1972 PAUSE,
1973 RESUME,
1974 } action = NONE;
1975
1976 if (!evlist->backward_mmap)
1977 return;
1978
1979 switch (old_state) {
1980 case BKW_MMAP_NOTREADY: {
1981 if (state != BKW_MMAP_RUNNING)
1982 goto state_err;
1983 break;
1984 }
1985 case BKW_MMAP_RUNNING: {
1986 if (state != BKW_MMAP_DATA_PENDING)
1987 goto state_err;
1988 action = PAUSE;
1989 break;
1990 }
1991 case BKW_MMAP_DATA_PENDING: {
1992 if (state != BKW_MMAP_EMPTY)
1993 goto state_err;
1994 break;
1995 }
1996 case BKW_MMAP_EMPTY: {
1997 if (state != BKW_MMAP_RUNNING)
1998 goto state_err;
1999 action = RESUME;
2000 break;
2001 }
2002 default:
2003 WARN_ONCE(1, "Shouldn't get there\n");
2004 }
2005
2006 evlist->bkw_mmap_state = state;
2007
2008 switch (action) {
2009 case PAUSE:
2010 perf_evlist__pause(evlist);
2011 break;
2012 case RESUME:
2013 perf_evlist__resume(evlist);
2014 break;
2015 case NONE:
2016 default:
2017 break;
2018 }
2019
2020 state_err:
2021 return;
2022 }
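
/*
 * Summary of the legal bkw_mmap_state transitions handled above:
 *
 *	NOTREADY     -> RUNNING       (no ioctl)
 *	RUNNING      -> DATA_PENDING  (pause output so it can be read)
 *	DATA_PENDING -> EMPTY         (no ioctl)
 *	EMPTY        -> RUNNING       (resume output)
 *
 * Any other requested transition falls through to state_err and is ignored.
 */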
2023