#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

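/*
 * Widen the column if @len exceeds the current maximum; returns true
 * when the stored width was actually grown.
 */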
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

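/*
 * Grow the per-column maximum widths so that every field of this
 * entry (symbol, comm, dso, branch, memory, srcline, ...) fits.
 */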
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

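/* Exponential decay: scale the period and event count down to 7/8. */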
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

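/*
 * Decay all entries and prune the ones that decayed to zero, or that
 * match the zap_user/zap_kernel request, as long as they are not in
 * use, e.g. by an open annotation browser.
 */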
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hists->entries);

		if (sort__need_collapse)
			rb_erase(&n->rb_node_in, &hists->entries_collapsed);

		--hists->nr_entries;
		if (!n->filtered)
			--hists->nr_non_filtered_entries;

		hist_entry__free(n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of what was) allocated
			 * by sample__resolve_bstack() and will be freed after
			 * adding new entries, so we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

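/*
 * Insert @entry into the current entries_in rbtree, or, when an entry
 * comparing equal under the sort keys already exists, merge the new
 * period into it instead of allocating a duplicate.
 */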
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

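/*
 * Build a template hist_entry from the resolved sample location and
 * hand it to add_hist_entry() for insertion or merging.
 */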
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and into he_stat__add_period().
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling the callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * accumulated only once, preventing entries from getting more
	 * than 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &callchain_cursor, sample->period);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling the callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

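/*
 * The ops tables below plug into hist_entry_iter__add():
 * prepare_entry/finish_entry bracket one sample, add_single_entry
 * adds the sample itself, and next_entry/add_next_entry walk any
 * additional entries it expands to (branch stack slots or callchain
 * parents).
 */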
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

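/* Free an entry along with its copied branch/mem info and srcline. */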
void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

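/*
 * hists->entries_in double-buffers between the two trees in
 * entries_in_array: hand the current tree to the caller for resorting
 * and flip new insertions over to the other tree.
 */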
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

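/*
 * Move everything from the current entries_in tree into
 * entries_collapsed, merging entries that compare equal under the
 * collapse variants of the sort keys.
 */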
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

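/*
 * Insert @he into the output rbtree in display order; the callchain
 * is sorted first, with min_callchain_hits as the pruning threshold.
 */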
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}

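/*
 * Clear one filter bit; if the entry thereby becomes completely
 * unfiltered, fold it and account it back into the non-filtered
 * stats and column widths.
 */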
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}