// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->block_info)
		return;
	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.ms.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.ms.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.ms.sym) {
			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.ms.sym) {
			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.ms.map) {
			symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

	if (h->cgroup) {
		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
						   h->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
	}
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

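/*
 * Round a sample timestamp down to the start of its time quantum, so
 * that samples which differ only within one quantum aggregate into the
 * same entry under the 'time' sort key.  A zero symbol_conf.time_quantum
 * leaves the timestamp untouched.
 */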
static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;
	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

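/*
 * Exponentially decay the period and event counts by a factor of 7/8,
 * so that long-running sessions (e.g. 'perf top') gradually age out
 * entries that stopped receiving new samples.
 */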
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

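/*
 * Decay one entry and, in hierarchy mode, its children.  Returns true
 * when the entry's period decayed all the way to zero, in which case
 * the caller is expected to delete it.
 */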
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);
		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

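/*
 * Initialize a new entry from a template: flat fields are copied as-is,
 * the resources the entry now owns (branch_info, raw_data, srcline) are
 * deep-copied, and reference counts are taken on the maps and thread so
 * they outlive the originating sample.
 */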
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries. So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.ms.map);
		map__get(he->branch_info->to.ms.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.ms.map);
		map__get(he->mem_info->daddr.ms.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(symbol_conf.res_sample,
					 sizeof(struct res_sample));
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.ms.map);
		map__put(he->branch_info->to.ms.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.ms.map);
		map__put(he->mem_info->daddr.ms.map);
	}
err:
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

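/*
 * Look up 'entry' in the input rbtree, keyed by hist_entry__cmp().  On a
 * match the period is accumulated into the existing node; otherwise a
 * new entry is cloned from 'entry' and inserted, tracking whether it
 * became the leftmost node so the cached rbtree stays valid.
 */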
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

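/*
 * Return a uniformly distributed value in [0, high), using rejection
 * sampling to avoid the modulo bias that a plain 'random() % high'
 * would introduce when high doesn't evenly divide the generator range.
 */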
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;
	for (;;) {
		unsigned r = random();
		if (r >= thresh)
			return r % high;
	}
}

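/*
 * Record this sample's time/cpu/tid in the entry's fixed-size array of
 * representative samples: fill the array first, then overwrite a random
 * slot so that later samples still have a chance of being kept.
 */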
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.cgroup = sample->cgroup,
		.ms = {
			.maps	= al->maps,
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->maps = bi[i].to.ms.maps;
	al->map = bi[i].to.ms.map;
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

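/*
 * Quick test that two entries definitely differ, comparing symbols (or
 * raw ips when neither side resolved to a symbol); a 'true' result lets
 * the caller skip the full hist_entry__cmp().
 */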
static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
			   struct hist_entry *right)
{
	struct symbol *sym_l = left->ms.sym;
	struct symbol *sym_r = right->ms.sym;

	if (!sym_l && !sym_r)
		return left->ip != right->ip;

	return !!_sort__sym_cmp(sym_l, sym_r);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;
	bool fast = hists__has(he_tmp.hists, sym);

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		/*
		 * For most cases, there are no duplicate entries in callchain.
		 * The symbols are usually different. Do a quick check for
		 * symbols first.
		 */
		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
			continue;

		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

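/*
 * Drive one sample through the hist_iter_ops state machine: resolve the
 * callchain, then prepare_entry(), add_single_entry(), loop over
 * next_entry()/add_next_entry() until exhausted, invoking the optional
 * add_entry_cb for every entry created, and finish_entry() at the end
 * even on error.
 */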
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.ms.map);
		map__zput(he->branch_info->to.ms.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.ms.map);
		map__zput(he->mem_info->daddr.ms.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

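/*
 * Merge 'he' into the collapsed tree.  Returns 1 if it was inserted as
 * a new node, 0 if it was merged into an existing one (and freed), and
 * -1 if merging the callchains failed.
 */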
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

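/*
 * hists->entries_in is double buffered: hand the current input tree to
 * the caller (the collapse pass) and flip new samples over to the other
 * tree, under hists->lock so concurrent insertion stays safe.
 */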
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}

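/*
 * Second (output) sort pass: move every collapsed entry into
 * hists->entries ordered by the output sort keys, recomputing stats and
 * column widths, and sorting each entry's callchain against the
 * min_callchain_hits threshold.
 */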
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

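/*
 * Hierarchy traversal helpers for the hists browsers.  Movement into a
 * child subtree is allowed only for unfolded non-leaf entries, unless
 * the caller forces a direction with HMD_FORCE_CHILD/HMD_FORCE_SIBLING.
 */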
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

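/*
 * Return true if @he has at least one unfiltered child whose percentage
 * is at or above @limit, i.e. a child that the browser would actually
 * display.
 */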
bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

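/*
 * Clear @filter for an entry that survived filtering and fold it back
 * into the visible stats.  In hierarchy mode the entry's period is also
 * re-added to every ancestor, and ancestors that become fully
 * unfiltered are folded to keep the browser state consistent.
 */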
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

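/*
 * Re-insert @he (and, recursively, its children) into @root in output
 * sort order.  Used after filtering in hierarchy mode, where changed
 * periods can invalidate the previous ordering at every level.
 */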
static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

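/*
 * Apply a filter to a hierarchical report.  hist_entry__filter() is
 * asked about each entry: a negative return means the entry's sort key
 * is of a different type than the filter (descend into the children),
 * 1 means the entry is filtered out, and 0 means it stays visible.
 */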
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * Resort the output after applying a new filter, since a filter at a
	 * lower hierarchy level can change periods at an upper level.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

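/*
 * nr_events[] is indexed by PERF_RECORD_* type; the PERF_RECORD_*
 * values start at 1, so slot 0 is free and is used here to hold the
 * total count across all event types.
 */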
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

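/*
 * Find @pair in @hists, or add a zeroed placeholder for it if it is not
 * there yet.  Dummy entries give hists__link() a slot in the leader
 * hists for entries that only exist in the other hists.
 */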
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not take the parent pointer from pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}

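/*
 * Attribute the per-branch cycle counts from a sampled branch stack to
 * the corresponding code ranges for annotation, optionally accumulating
 * a grand total in *total_cycles.
 */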
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles)
{
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors; we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

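/*
 * Format the browser title line into @bf: sample/event counts (group
 * members included for group events) plus any active UID, thread, DSO
 * and processor-socket filters.
 */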
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

int hists__init(void)
{
	int err = evsel__object_config(sizeof(struct hists_evsel),
				       hists_evsel__init, hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}