// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>

void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}

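/*
 * Decide which cpu and thread maps an evsel should use: the evlist's
 * user-supplied maps take precedence over the per-PMU cpus the evsel may
 * already own, except when the evsel has its own cpus and no target cpu
 * list was given.
 */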
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs), so
	 * keep them if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	perf_thread_map__put(evsel->threads);
	evsel->threads = perf_thread_map__get(evlist->threads);
	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;
	__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

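/*
 * A minimal sketch of how these entry points are meant to be combined by a
 * libperf user (error handling elided; the perf_event_attr setup is assumed
 * to have been done already):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_evlist__add(evlist, evsel);
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */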
struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* An empty list is noticed here, so there's no need to check on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

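/*
 * Note that setting the maps here also (re)propagates them to every evsel
 * already on the list. A typical call hands over freshly created maps, e.g.
 * (a sketch; the maps would come from perf_cpu_map__new() and friends):
 *
 *	perf_evlist__set_maps(evlist, perf_cpu_map__new(NULL), threads);
 */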
void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't
	 * being changed, i.e. don't put it. Note we are assuming the maps
	 * that are being applied are brand new and evlist is taking
	 * ownership of the original reference count of 1. If that is not
	 * the case it is up to the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		perf_cpu_map__put(evlist->cpus);
		evlist->cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	if (!evlist->all_cpus && cpus)
		evlist->all_cpus = perf_cpu_map__get(cpus);

	perf_evlist__propagate_maps(evlist);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

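/*
 * Each event ID is hashed into evlist->heads so that a sample's ID field
 * can be mapped back to the evsel that produced it. SID() indexes the
 * per-evsel (cpu, thread) sample_id storage backing that hash.
 */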
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

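/*
 * The fallback read() below relies on the non-group read_format layout
 * from the perf_event_open(2) ABI:
 *
 *	struct read_format {
 *		u64 value;
 *		{ u64 time_enabled; }	if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		{ u64 time_running; }	if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		{ u64 id; }		if PERF_FORMAT_ID
 *	};
 *
 * hence id_idx starts just past the counter value and is bumped once for
 * each enabled timing field.
 */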
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

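/*
 * Size the pollfd array up front: system-wide evsels contribute one fd per
 * cpu, per-thread evsels one fd per (cpu, thread) pair.
 */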
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist,
						 bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap *
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, int cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

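/*
 * Map (or redirect) every evsel's fd for one mmap slot. The first fd seen
 * for a slot mmaps a new ring buffer and becomes *output; later fds are
 * redirected into it with PERF_EVENT_IOC_SET_OUTPUT, so all events for the
 * same cpu/thread share one buffer.
 */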
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite)
{
	int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		if (!evsel->system_wide &&
		    perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int thread;
	int nr_threads = perf_thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, thread, false);

		if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
				   &output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int cpu, thread;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, cpu, true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

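/*
 * One mmap per cpu in cpu mode; in per-thread mode (the cpu map holds only
 * the "any cpu" dummy entry) it is one mmap per thread instead.
 */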
static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	nr_mmaps = perf_cpu_map__nr(evlist->cpus);
	if (perf_cpu_map__empty(evlist->cpus))
		nr_mmaps = perf_thread_map__nr(evlist->threads);

	return nr_mmaps;
}

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	struct perf_evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->cpus;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

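/*
 * With the default ops below, mapping and draining the ring buffers looks
 * roughly like this (a sketch; error handling elided):
 *
 *	struct perf_mmap *map;
 *	union perf_event *event;
 *
 *	perf_evlist__mmap(evlist, 4);	// 4 data pages per ring buffer
 *
 *	perf_evlist__for_each_mmap(evlist, map, false) {
 *		if (perf_mmap__read_init(map) < 0)
 *			continue;
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			// ... process event ...
 *			perf_mmap__consume(map);
 *		}
 *		perf_mmap__read_done(map);
 *	}
 */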
int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap *
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

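/*
 * The leader is the first evsel on the list; nr_members counts every evsel
 * from the leader through the last one, relying on idx being assigned in
 * list order by perf_evlist__add().
 */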
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__perf_evlist__for_each_entry(list, evsel)
		evsel->leader = leader;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}