1 // SPDX-License-Identifier: GPL-2.0
2 #include "util.h"
3 #include "build-id.h"
4 #include "hist.h"
5 #include "map.h"
6 #include "session.h"
7 #include "namespaces.h"
8 #include "sort.h"
9 #include "evlist.h"
10 #include "evsel.h"
11 #include "annotate.h"
12 #include "srcline.h"
13 #include "thread.h"
14 #include "ui/progress.h"
15 #include <errno.h>
16 #include <math.h>
17 #include <sys/param.h>
18 
19 static bool hists__filter_entry_by_dso(struct hists *hists,
20 				       struct hist_entry *he);
21 static bool hists__filter_entry_by_thread(struct hists *hists,
22 					  struct hist_entry *he);
23 static bool hists__filter_entry_by_symbol(struct hists *hists,
24 					  struct hist_entry *he);
25 static bool hists__filter_entry_by_socket(struct hists *hists,
26 					  struct hist_entry *he);
27 
28 u16 hists__col_len(struct hists *hists, enum hist_column col)
29 {
30 	return hists->col_len[col];
31 }
32 
33 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
34 {
35 	hists->col_len[col] = len;
36 }
37 
38 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
39 {
40 	if (len > hists__col_len(hists, col)) {
41 		hists__set_col_len(hists, col, len);
42 		return true;
43 	}
44 	return false;
45 }
46 
47 void hists__reset_col_len(struct hists *hists)
48 {
49 	enum hist_column col;
50 
51 	for (col = 0; col < HISTC_NR_COLS; ++col)
52 		hists__set_col_len(hists, col, 0);
53 }
54 
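/*
 * Unresolved addresses are printed as raw hex, so the minimum column
 * width below is BITS_PER_LONG / 4: the number of hex digits in a
 * pointer-sized value (16 on 64-bit, 8 on 32-bit).
 */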
55 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
56 {
57 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
58 
59 	if (hists__col_len(hists, dso) < unresolved_col_width &&
60 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
61 	    !symbol_conf.dso_list)
62 		hists__set_col_len(hists, dso, unresolved_col_width);
63 }
64 
65 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
66 {
67 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
68 	int symlen;
69 	u16 len;
70 
71 	/*
72 	 * +4 accounts for '[x] ' priv level info
73 	 * +2 accounts for 0x prefix on raw addresses
74 	 * +3 accounts for ' y ' symtab origin info
75 	 */
76 	if (h->ms.sym) {
77 		symlen = h->ms.sym->namelen + 4;
78 		if (verbose > 0)
79 			symlen += BITS_PER_LONG / 4 + 2 + 3;
80 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
81 	} else {
82 		symlen = unresolved_col_width + 4 + 2;
83 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
84 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
85 	}
86 
87 	len = thread__comm_len(h->thread);
88 	if (hists__new_col_len(hists, HISTC_COMM, len))
89 		hists__set_col_len(hists, HISTC_THREAD, len + 8);
90 
91 	if (h->ms.map) {
92 		len = dso__name_len(h->ms.map->dso);
93 		hists__new_col_len(hists, HISTC_DSO, len);
94 	}
95 
96 	if (h->parent)
97 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
98 
99 	if (h->branch_info) {
100 		if (h->branch_info->from.sym) {
101 			symlen = (int)h->branch_info->from.sym->namelen + 4;
102 			if (verbose > 0)
103 				symlen += BITS_PER_LONG / 4 + 2 + 3;
104 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
105 
106 			symlen = dso__name_len(h->branch_info->from.map->dso);
107 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
108 		} else {
109 			symlen = unresolved_col_width + 4 + 2;
110 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
111 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
112 		}
113 
114 		if (h->branch_info->to.sym) {
115 			symlen = (int)h->branch_info->to.sym->namelen + 4;
116 			if (verbose > 0)
117 				symlen += BITS_PER_LONG / 4 + 2 + 3;
118 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
119 
120 			symlen = dso__name_len(h->branch_info->to.map->dso);
121 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
122 		} else {
123 			symlen = unresolved_col_width + 4 + 2;
124 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
125 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
126 		}
127 
128 		if (h->branch_info->srcline_from)
129 			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
130 					strlen(h->branch_info->srcline_from));
131 		if (h->branch_info->srcline_to)
132 			hists__new_col_len(hists, HISTC_SRCLINE_TO,
133 					strlen(h->branch_info->srcline_to));
134 	}
135 
136 	if (h->mem_info) {
137 		if (h->mem_info->daddr.sym) {
138 			symlen = (int)h->mem_info->daddr.sym->namelen + 4
139 			       + unresolved_col_width + 2;
140 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
141 					   symlen);
142 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
143 					   symlen + 1);
144 		} else {
145 			symlen = unresolved_col_width + 4 + 2;
146 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
147 					   symlen);
148 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
149 					   symlen);
150 		}
151 
152 		if (h->mem_info->iaddr.sym) {
153 			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
154 			       + unresolved_col_width + 2;
155 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
156 					   symlen);
157 		} else {
158 			symlen = unresolved_col_width + 4 + 2;
159 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
160 					   symlen);
161 		}
162 
163 		if (h->mem_info->daddr.map) {
164 			symlen = dso__name_len(h->mem_info->daddr.map->dso);
165 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
166 					   symlen);
167 		} else {
168 			symlen = unresolved_col_width + 4 + 2;
169 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
170 		}
171 
172 		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
173 				   unresolved_col_width + 4 + 2);
174 
175 	} else {
176 		symlen = unresolved_col_width + 4 + 2;
177 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
178 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
179 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
180 	}
181 
182 	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
183 	hists__new_col_len(hists, HISTC_CPU, 3);
184 	hists__new_col_len(hists, HISTC_SOCKET, 6);
185 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
186 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
187 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
188 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
189 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
190 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
191 
192 	if (h->srcline) {
193 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
194 		hists__new_col_len(hists, HISTC_SRCLINE, len);
195 	}
196 
197 	if (h->srcfile)
198 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
199 
200 	if (h->transaction)
201 		hists__new_col_len(hists, HISTC_TRANSACTION,
202 				   hist_entry__transaction_len());
203 
204 	if (h->trace_output)
205 		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
206 }
207 
208 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
209 {
210 	struct rb_node *next = rb_first(&hists->entries);
211 	struct hist_entry *n;
212 	int row = 0;
213 
214 	hists__reset_col_len(hists);
215 
216 	while (next && row++ < max_rows) {
217 		n = rb_entry(next, struct hist_entry, rb_node);
218 		if (!n->filtered)
219 			hists__calc_col_len(hists, n);
220 		next = rb_next(&n->rb_node);
221 	}
222 }
223 
224 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
225 					unsigned int cpumode, u64 period)
226 {
227 	switch (cpumode) {
228 	case PERF_RECORD_MISC_KERNEL:
229 		he_stat->period_sys += period;
230 		break;
231 	case PERF_RECORD_MISC_USER:
232 		he_stat->period_us += period;
233 		break;
234 	case PERF_RECORD_MISC_GUEST_KERNEL:
235 		he_stat->period_guest_sys += period;
236 		break;
237 	case PERF_RECORD_MISC_GUEST_USER:
238 		he_stat->period_guest_us += period;
239 		break;
240 	default:
241 		break;
242 	}
243 }
244 
245 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
246 				u64 weight)
247 {
248 
249 	he_stat->period		+= period;
250 	he_stat->weight		+= weight;
251 	he_stat->nr_events	+= 1;
252 }
253 
254 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
255 {
256 	dest->period		+= src->period;
257 	dest->period_sys	+= src->period_sys;
258 	dest->period_us		+= src->period_us;
259 	dest->period_guest_sys	+= src->period_guest_sys;
260 	dest->period_guest_us	+= src->period_guest_us;
261 	dest->nr_events		+= src->nr_events;
262 	dest->weight		+= src->weight;
263 }
264 
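/*
 * Age a stat by a factor of 7/8 per call; repeated passes decay old
 * samples exponentially (e.g. a period of 800 becomes 700, then 612),
 * which is how live modes such as 'perf top' fade out stale entries.
 */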
265 static void he_stat__decay(struct he_stat *he_stat)
266 {
267 	he_stat->period = (he_stat->period * 7) / 8;
268 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
269 	/* XXX need decay for weight too? */
270 }
271 
272 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
273 
274 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
275 {
276 	u64 prev_period = he->stat.period;
277 	u64 diff;
278 
279 	if (prev_period == 0)
280 		return true;
281 
282 	he_stat__decay(&he->stat);
283 	if (symbol_conf.cumulate_callchain)
284 		he_stat__decay(he->stat_acc);
285 	decay_callchain(he->callchain);
286 
287 	diff = prev_period - he->stat.period;
288 
289 	if (!he->depth) {
290 		hists->stats.total_period -= diff;
291 		if (!he->filtered)
292 			hists->stats.total_non_filtered_period -= diff;
293 	}
294 
295 	if (!he->leaf) {
296 		struct hist_entry *child;
297 		struct rb_node *node = rb_first(&he->hroot_out);
298 		while (node) {
299 			child = rb_entry(node, struct hist_entry, rb_node);
300 			node = rb_next(node);
301 
302 			if (hists__decay_entry(hists, child))
303 				hists__delete_entry(hists, child);
304 		}
305 	}
306 
307 	return he->stat.period == 0;
308 }
309 
310 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
311 {
312 	struct rb_root *root_in;
313 	struct rb_root *root_out;
314 
315 	if (he->parent_he) {
316 		root_in  = &he->parent_he->hroot_in;
317 		root_out = &he->parent_he->hroot_out;
318 	} else {
319 		if (hists__has(hists, need_collapse))
320 			root_in = &hists->entries_collapsed;
321 		else
322 			root_in = hists->entries_in;
323 		root_out = &hists->entries;
324 	}
325 
326 	rb_erase(&he->rb_node_in, root_in);
327 	rb_erase(&he->rb_node, root_out);
328 
329 	--hists->nr_entries;
330 	if (!he->filtered)
331 		--hists->nr_non_filtered_entries;
332 
333 	hist_entry__delete(he);
334 }
335 
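/*
 * Decay all top-level entries, deleting the ones whose period reaches
 * zero as well as those matching the zap flags: h->level is '.' for
 * userspace, so zap_user removes user entries and zap_kernel the rest.
 */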
336 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
337 {
338 	struct rb_node *next = rb_first(&hists->entries);
339 	struct hist_entry *n;
340 
341 	while (next) {
342 		n = rb_entry(next, struct hist_entry, rb_node);
343 		next = rb_next(&n->rb_node);
344 		if (((zap_user && n->level == '.') ||
345 		     (zap_kernel && n->level != '.') ||
346 		     hists__decay_entry(hists, n))) {
347 			hists__delete_entry(hists, n);
348 		}
349 	}
350 }
351 
352 void hists__delete_entries(struct hists *hists)
353 {
354 	struct rb_node *next = rb_first(&hists->entries);
355 	struct hist_entry *n;
356 
357 	while (next) {
358 		n = rb_entry(next, struct hist_entry, rb_node);
359 		next = rb_next(&n->rb_node);
360 
361 		hists__delete_entry(hists, n);
362 	}
363 }
364 
365 /*
366  * histogram, sorted on item, collects periods
367  */
368 
369 static int hist_entry__init(struct hist_entry *he,
370 			    struct hist_entry *template,
371 			    bool sample_self)
372 {
373 	*he = *template;
374 
375 	if (symbol_conf.cumulate_callchain) {
376 		he->stat_acc = malloc(sizeof(he->stat));
377 		if (he->stat_acc == NULL)
378 			return -ENOMEM;
379 		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
380 		if (!sample_self)
381 			memset(&he->stat, 0, sizeof(he->stat));
382 	}
383 
384 	map__get(he->ms.map);
385 
386 	if (he->branch_info) {
387 		/*
388 		 * This branch info is (part of) an allocation made by
389 		 * sample__resolve_bstack() and will be freed after
390 		 * adding new entries, so we need to save a copy.
391 		 */
392 		he->branch_info = malloc(sizeof(*he->branch_info));
393 		if (he->branch_info == NULL) {
394 			map__zput(he->ms.map);
395 			free(he->stat_acc);
396 			return -ENOMEM;
397 		}
398 
399 		memcpy(he->branch_info, template->branch_info,
400 		       sizeof(*he->branch_info));
401 
402 		map__get(he->branch_info->from.map);
403 		map__get(he->branch_info->to.map);
404 	}
405 
406 	if (he->mem_info) {
407 		map__get(he->mem_info->iaddr.map);
408 		map__get(he->mem_info->daddr.map);
409 	}
410 
411 	if (symbol_conf.use_callchain)
412 		callchain_init(he->callchain);
413 
414 	if (he->raw_data) {
415 		he->raw_data = memdup(he->raw_data, he->raw_size);
416 
417 		if (he->raw_data == NULL) {
418 			map__put(he->ms.map);
419 			if (he->branch_info) {
420 				map__put(he->branch_info->from.map);
421 				map__put(he->branch_info->to.map);
422 				free(he->branch_info);
423 			}
424 			if (he->mem_info) {
425 				map__put(he->mem_info->iaddr.map);
426 				map__put(he->mem_info->daddr.map);
427 			}
428 			free(he->stat_acc);
429 			return -ENOMEM;
430 		}
431 	}
432 	INIT_LIST_HEAD(&he->pairs.node);
433 	thread__get(he->thread);
434 	he->hroot_in  = RB_ROOT;
435 	he->hroot_out = RB_ROOT;
436 
437 	if (!symbol_conf.report_hierarchy)
438 		he->leaf = true;
439 
440 	return 0;
441 }
442 
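/*
 * Default allocator: the callchain root, when enabled, lives in a
 * flexible array at the end of struct hist_entry, so 'size' here is
 * just the extra callchain bytes requested by hist_entry__new() and
 * a single zalloc() covers both.
 */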
443 static void *hist_entry__zalloc(size_t size)
444 {
445 	return zalloc(size + sizeof(struct hist_entry));
446 }
447 
448 static void hist_entry__free(void *ptr)
449 {
450 	free(ptr);
451 }
452 
453 static struct hist_entry_ops default_ops = {
454 	.new	= hist_entry__zalloc,
455 	.free	= hist_entry__free,
456 };
457 
458 static struct hist_entry *hist_entry__new(struct hist_entry *template,
459 					  bool sample_self)
460 {
461 	struct hist_entry_ops *ops = template->ops;
462 	size_t callchain_size = 0;
463 	struct hist_entry *he;
464 	int err = 0;
465 
466 	if (!ops)
467 		ops = template->ops = &default_ops;
468 
469 	if (symbol_conf.use_callchain)
470 		callchain_size = sizeof(struct callchain_root);
471 
472 	he = ops->new(callchain_size);
473 	if (he) {
474 		err = hist_entry__init(he, template, sample_self);
475 		if (err) {
476 			ops->free(he);
477 			he = NULL;
478 		}
479 	}
480 
481 	return he;
482 }
483 
484 static u8 symbol__parent_filter(const struct symbol *parent)
485 {
486 	if (symbol_conf.exclude_other && parent == NULL)
487 		return 1 << HIST_FILTER__PARENT;
488 	return 0;
489 }
490 
491 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
492 {
493 	if (!symbol_conf.use_callchain)
494 		return;
495 
496 	he->hists->callchain_period += period;
497 	if (!he->filtered)
498 		he->hists->callchain_non_filtered_period += period;
499 }
500 
501 static struct hist_entry *hists__findnew_entry(struct hists *hists,
502 					       struct hist_entry *entry,
503 					       struct addr_location *al,
504 					       bool sample_self)
505 {
506 	struct rb_node **p;
507 	struct rb_node *parent = NULL;
508 	struct hist_entry *he;
509 	int64_t cmp;
510 	u64 period = entry->stat.period;
511 	u64 weight = entry->stat.weight;
512 
513 	p = &hists->entries_in->rb_node;
514 
515 	while (*p != NULL) {
516 		parent = *p;
517 		he = rb_entry(parent, struct hist_entry, rb_node_in);
518 
519 		/*
520 		 * Make sure that it receives arguments in the same order as
521 		 * hist_entry__collapse() so that we can use an appropriate
522 		 * function when searching an entry regardless of which sort
523 		 * keys were used.
524 		 */
525 		cmp = hist_entry__cmp(he, entry);
526 
527 		if (!cmp) {
528 			if (sample_self) {
529 				he_stat__add_period(&he->stat, period, weight);
530 				hist_entry__add_callchain_period(he, period);
531 			}
532 			if (symbol_conf.cumulate_callchain)
533 				he_stat__add_period(he->stat_acc, period, weight);
534 
535 			/*
536 			 * This mem info was allocated from sample__resolve_mem
537 			 * and will not be used anymore.
538 			 */
539 			zfree(&entry->mem_info);
540 
541 			/* If the map of an existing hist_entry has
542 			 * become out-of-date due to an exec() or
543 			 * similar, update it.  Otherwise we will
544 			 * mis-adjust symbol addresses when computing
545 			 * the history counter to increment.
546 			 */
547 			if (he->ms.map != entry->ms.map) {
548 				map__put(he->ms.map);
549 				he->ms.map = map__get(entry->ms.map);
550 			}
551 			goto out;
552 		}
553 
554 		if (cmp < 0)
555 			p = &(*p)->rb_left;
556 		else
557 			p = &(*p)->rb_right;
558 	}
559 
560 	he = hist_entry__new(entry, sample_self);
561 	if (!he)
562 		return NULL;
563 
564 	if (sample_self)
565 		hist_entry__add_callchain_period(he, period);
566 	hists->nr_entries++;
567 
568 	rb_link_node(&he->rb_node_in, parent, p);
569 	rb_insert_color(&he->rb_node_in, hists->entries_in);
570 out:
571 	if (sample_self)
572 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
573 	if (symbol_conf.cumulate_callchain)
574 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
575 	return he;
576 }
577 
578 static struct hist_entry*
579 __hists__add_entry(struct hists *hists,
580 		   struct addr_location *al,
581 		   struct symbol *sym_parent,
582 		   struct branch_info *bi,
583 		   struct mem_info *mi,
584 		   struct perf_sample *sample,
585 		   bool sample_self,
586 		   struct hist_entry_ops *ops)
587 {
588 	struct namespaces *ns = thread__namespaces(al->thread);
589 	struct hist_entry entry = {
590 		.thread	= al->thread,
591 		.comm = thread__comm(al->thread),
592 		.cgroup_id = {
593 			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
594 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
595 		},
596 		.ms = {
597 			.map	= al->map,
598 			.sym	= al->sym,
599 		},
600 		.socket	 = al->socket,
601 		.cpu	 = al->cpu,
602 		.cpumode = al->cpumode,
603 		.ip	 = al->addr,
604 		.level	 = al->level,
605 		.stat = {
606 			.nr_events = 1,
607 			.period	= sample->period,
608 			.weight = sample->weight,
609 		},
610 		.parent = sym_parent,
611 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
612 		.hists	= hists,
613 		.branch_info = bi,
614 		.mem_info = mi,
615 		.transaction = sample->transaction,
616 		.raw_data = sample->raw_data,
617 		.raw_size = sample->raw_size,
618 		.ops = ops,
619 	};
620 
621 	return hists__findnew_entry(hists, &entry, al, sample_self);
622 }
623 
624 struct hist_entry *hists__add_entry(struct hists *hists,
625 				    struct addr_location *al,
626 				    struct symbol *sym_parent,
627 				    struct branch_info *bi,
628 				    struct mem_info *mi,
629 				    struct perf_sample *sample,
630 				    bool sample_self)
631 {
632 	return __hists__add_entry(hists, al, sym_parent, bi, mi,
633 				  sample, sample_self, NULL);
634 }
635 
636 struct hist_entry *hists__add_entry_ops(struct hists *hists,
637 					struct hist_entry_ops *ops,
638 					struct addr_location *al,
639 					struct symbol *sym_parent,
640 					struct branch_info *bi,
641 					struct mem_info *mi,
642 					struct perf_sample *sample,
643 					bool sample_self)
644 {
645 	return __hists__add_entry(hists, al, sym_parent, bi, mi,
646 				  sample, sample_self, ops);
647 }
648 
649 static int
650 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
651 		    struct addr_location *al __maybe_unused)
652 {
653 	return 0;
654 }
655 
656 static int
657 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
658 			struct addr_location *al __maybe_unused)
659 {
660 	return 0;
661 }
662 
663 static int
664 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
665 {
666 	struct perf_sample *sample = iter->sample;
667 	struct mem_info *mi;
668 
669 	mi = sample__resolve_mem(sample, al);
670 	if (mi == NULL)
671 		return -ENOMEM;
672 
673 	iter->priv = mi;
674 	return 0;
675 }
676 
677 static int
678 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
679 {
680 	u64 cost;
681 	struct mem_info *mi = iter->priv;
682 	struct hists *hists = evsel__hists(iter->evsel);
683 	struct perf_sample *sample = iter->sample;
684 	struct hist_entry *he;
685 
686 	if (mi == NULL)
687 		return -EINVAL;
688 
689 	cost = sample->weight;
690 	if (!cost)
691 		cost = 1;
692 
693 	/*
694 	 * must pass period=weight in order to get the correct
695 	 * sorting from hists__collapse_resort() which is solely
696 	 * based on periods. We want sorting to be done on nr_events * weight,
697 	 * which is achieved indirectly by passing period=weight here and
698 	 * accumulating it in the he_stat__add_period() function.
699 	 */
700 	sample->period = cost;
701 
702 	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
703 			      sample, true);
704 	if (!he)
705 		return -ENOMEM;
706 
707 	iter->he = he;
708 	return 0;
709 }
710 
711 static int
712 iter_finish_mem_entry(struct hist_entry_iter *iter,
713 		      struct addr_location *al __maybe_unused)
714 {
715 	struct perf_evsel *evsel = iter->evsel;
716 	struct hists *hists = evsel__hists(evsel);
717 	struct hist_entry *he = iter->he;
718 	int err = -EINVAL;
719 
720 	if (he == NULL)
721 		goto out;
722 
723 	hists__inc_nr_samples(hists, he->filtered);
724 
725 	err = hist_entry__append_callchain(he, iter->sample);
726 
727 out:
728 	/*
729 	 * We don't need to free iter->priv (mem_info) here since the mem info
730 	 * was either already freed in hists__findnew_entry() or passed to a
731 	 * new hist entry by hist_entry__new().
732 	 */
733 	iter->priv = NULL;
734 
735 	iter->he = NULL;
736 	return err;
737 }
738 
739 static int
740 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
741 {
742 	struct branch_info *bi;
743 	struct perf_sample *sample = iter->sample;
744 
745 	bi = sample__resolve_bstack(sample, al);
746 	if (!bi)
747 		return -ENOMEM;
748 
749 	iter->curr = 0;
750 	iter->total = sample->branch_stack->nr;
751 
752 	iter->priv = bi;
753 	return 0;
754 }
755 
756 static int
757 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
758 			     struct addr_location *al __maybe_unused)
759 {
760 	return 0;
761 }
762 
763 static int
764 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
765 {
766 	struct branch_info *bi = iter->priv;
767 	int i = iter->curr;
768 
769 	if (bi == NULL)
770 		return 0;
771 
772 	if (iter->curr >= iter->total)
773 		return 0;
774 
775 	al->map = bi[i].to.map;
776 	al->sym = bi[i].to.sym;
777 	al->addr = bi[i].to.addr;
778 	return 1;
779 }
780 
781 static int
782 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
783 {
784 	struct branch_info *bi;
785 	struct perf_evsel *evsel = iter->evsel;
786 	struct hists *hists = evsel__hists(evsel);
787 	struct perf_sample *sample = iter->sample;
788 	struct hist_entry *he = NULL;
789 	int i = iter->curr;
790 	int err = 0;
791 
792 	bi = iter->priv;
793 
794 	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
795 		goto out;
796 
797 	/*
798 	 * The report shows the percentage of total branches captured
799 	 * and not events sampled. Thus we use a pseudo period of 1.
800 	 */
801 	sample->period = 1;
802 	sample->weight = bi[i].flags.cycles ? bi[i].flags.cycles : 1;
803 
804 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
805 			      sample, true);
806 	if (he == NULL)
807 		return -ENOMEM;
808 
809 	hists__inc_nr_samples(hists, he->filtered);
810 
811 out:
812 	iter->he = he;
813 	iter->curr++;
814 	return err;
815 }
816 
817 static int
818 iter_finish_branch_entry(struct hist_entry_iter *iter,
819 			 struct addr_location *al __maybe_unused)
820 {
821 	zfree(&iter->priv);
822 	iter->he = NULL;
823 
824 	return iter->curr >= iter->total ? 0 : -1;
825 }
826 
827 static int
828 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
829 			  struct addr_location *al __maybe_unused)
830 {
831 	return 0;
832 }
833 
834 static int
835 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
836 {
837 	struct perf_evsel *evsel = iter->evsel;
838 	struct perf_sample *sample = iter->sample;
839 	struct hist_entry *he;
840 
841 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
842 			      sample, true);
843 	if (he == NULL)
844 		return -ENOMEM;
845 
846 	iter->he = he;
847 	return 0;
848 }
849 
850 static int
851 iter_finish_normal_entry(struct hist_entry_iter *iter,
852 			 struct addr_location *al __maybe_unused)
853 {
854 	struct hist_entry *he = iter->he;
855 	struct perf_evsel *evsel = iter->evsel;
856 	struct perf_sample *sample = iter->sample;
857 
858 	if (he == NULL)
859 		return 0;
860 
861 	iter->he = NULL;
862 
863 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
864 
865 	return hist_entry__append_callchain(he, sample);
866 }
867 
868 static int
869 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
870 			      struct addr_location *al __maybe_unused)
871 {
872 	struct hist_entry **he_cache;
873 
874 	callchain_cursor_commit(&callchain_cursor);
875 
876 	/*
877 	 * This is for detecting cycles or recursion so that they're
878 	 * accumulated only once, preventing entries from exceeding
879 	 * 100% overhead.
880 	 */
881 	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
882 	if (he_cache == NULL)
883 		return -ENOMEM;
884 
885 	iter->priv = he_cache;
886 	iter->curr = 0;
887 
888 	return 0;
889 }
890 
891 static int
892 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
893 				 struct addr_location *al)
894 {
895 	struct perf_evsel *evsel = iter->evsel;
896 	struct hists *hists = evsel__hists(evsel);
897 	struct perf_sample *sample = iter->sample;
898 	struct hist_entry **he_cache = iter->priv;
899 	struct hist_entry *he;
900 	int err = 0;
901 
902 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
903 			      sample, true);
904 	if (he == NULL)
905 		return -ENOMEM;
906 
907 	iter->he = he;
908 	he_cache[iter->curr++] = he;
909 
910 	hist_entry__append_callchain(he, sample);
911 
912 	/*
913 	 * We need to re-initialize the cursor since callchain_append()
914 	 * advanced the cursor to the end.
915 	 */
916 	callchain_cursor_commit(&callchain_cursor);
917 
918 	hists__inc_nr_samples(hists, he->filtered);
919 
920 	return err;
921 }
922 
923 static int
924 iter_next_cumulative_entry(struct hist_entry_iter *iter,
925 			   struct addr_location *al)
926 {
927 	struct callchain_cursor_node *node;
928 
929 	node = callchain_cursor_current(&callchain_cursor);
930 	if (node == NULL)
931 		return 0;
932 
933 	return fill_callchain_info(al, node, iter->hide_unresolved);
934 }
935 
936 static int
937 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
938 			       struct addr_location *al)
939 {
940 	struct perf_evsel *evsel = iter->evsel;
941 	struct perf_sample *sample = iter->sample;
942 	struct hist_entry **he_cache = iter->priv;
943 	struct hist_entry *he;
944 	struct hist_entry he_tmp = {
945 		.hists = evsel__hists(evsel),
946 		.cpu = al->cpu,
947 		.thread = al->thread,
948 		.comm = thread__comm(al->thread),
949 		.ip = al->addr,
950 		.ms = {
951 			.map = al->map,
952 			.sym = al->sym,
953 		},
954 		.parent = iter->parent,
955 		.raw_data = sample->raw_data,
956 		.raw_size = sample->raw_size,
957 	};
958 	int i;
959 	struct callchain_cursor cursor;
960 
961 	callchain_cursor_snapshot(&cursor, &callchain_cursor);
962 
963 	callchain_cursor_advance(&callchain_cursor);
964 
965 	/*
966 	 * Check if there are duplicate entries in the callchain.
967 	 * It's possible that it has cycles or recursive calls.
968 	 */
969 	for (i = 0; i < iter->curr; i++) {
970 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
971 			/* to avoid calling callback function */
972 			iter->he = NULL;
973 			return 0;
974 		}
975 	}
976 
977 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
978 			      sample, false);
979 	if (he == NULL)
980 		return -ENOMEM;
981 
982 	iter->he = he;
983 	he_cache[iter->curr++] = he;
984 
985 	if (symbol_conf.use_callchain)
986 		callchain_append(he->callchain, &cursor, sample->period);
987 	return 0;
988 }
989 
990 static int
991 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
992 			     struct addr_location *al __maybe_unused)
993 {
994 	zfree(&iter->priv);
995 	iter->he = NULL;
996 
997 	return 0;
998 }
999 
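/*
 * The ops tables below plug sample-type specific behaviour into
 * hist_entry_iter__add(), which drives each sample through
 * prepare_entry -> add_single_entry -> { next_entry ->
 * add_next_entry }* -> finish_entry.
 */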
1000 const struct hist_iter_ops hist_iter_mem = {
1001 	.prepare_entry 		= iter_prepare_mem_entry,
1002 	.add_single_entry 	= iter_add_single_mem_entry,
1003 	.next_entry 		= iter_next_nop_entry,
1004 	.add_next_entry 	= iter_add_next_nop_entry,
1005 	.finish_entry 		= iter_finish_mem_entry,
1006 };
1007 
1008 const struct hist_iter_ops hist_iter_branch = {
1009 	.prepare_entry 		= iter_prepare_branch_entry,
1010 	.add_single_entry 	= iter_add_single_branch_entry,
1011 	.next_entry 		= iter_next_branch_entry,
1012 	.add_next_entry 	= iter_add_next_branch_entry,
1013 	.finish_entry 		= iter_finish_branch_entry,
1014 };
1015 
1016 const struct hist_iter_ops hist_iter_normal = {
1017 	.prepare_entry 		= iter_prepare_normal_entry,
1018 	.add_single_entry 	= iter_add_single_normal_entry,
1019 	.next_entry 		= iter_next_nop_entry,
1020 	.add_next_entry 	= iter_add_next_nop_entry,
1021 	.finish_entry 		= iter_finish_normal_entry,
1022 };
1023 
1024 const struct hist_iter_ops hist_iter_cumulative = {
1025 	.prepare_entry 		= iter_prepare_cumulative_entry,
1026 	.add_single_entry 	= iter_add_single_cumulative_entry,
1027 	.next_entry 		= iter_next_cumulative_entry,
1028 	.add_next_entry 	= iter_add_next_cumulative_entry,
1029 	.finish_entry 		= iter_finish_cumulative_entry,
1030 };
1031 
1032 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1033 			 int max_stack_depth, void *arg)
1034 {
1035 	int err, err2;
1036 	struct map *alm = NULL;
1037 
1038 	if (al && al->map)
1039 		alm = map__get(al->map);
1040 
1041 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1042 					iter->evsel, al, max_stack_depth);
1043 	if (err) {
1044 		map__put(alm);
1045 		return err;
1046 	}
1047 
1048 	err = iter->ops->prepare_entry(iter, al);
1049 	if (err)
1050 		goto out;
1051 
1052 	err = iter->ops->add_single_entry(iter, al);
1053 	if (err)
1054 		goto out;
1055 
1056 	if (iter->he && iter->add_entry_cb) {
1057 		err = iter->add_entry_cb(iter, al, true, arg);
1058 		if (err)
1059 			goto out;
1060 	}
1061 
1062 	while (iter->ops->next_entry(iter, al)) {
1063 		err = iter->ops->add_next_entry(iter, al);
1064 		if (err)
1065 			break;
1066 
1067 		if (iter->he && iter->add_entry_cb) {
1068 			err = iter->add_entry_cb(iter, al, false, arg);
1069 			if (err)
1070 				goto out;
1071 		}
1072 	}
1073 
1074 out:
1075 	err2 = iter->ops->finish_entry(iter, al);
1076 	if (!err)
1077 		err = err2;
1078 
1079 	map__put(alm);
1080 
1081 	return err;
1082 }
1083 
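/*
 * An entry is compared at three stages, each driven by the configured
 * sort keys: fmt->cmp() on initial insertion (hist_entry__cmp below),
 * fmt->collapse() when merging duplicates (hist_entry__collapse), and
 * fmt->sort() for the final output ordering.
 */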
1084 int64_t
1085 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1086 {
1087 	struct hists *hists = left->hists;
1088 	struct perf_hpp_fmt *fmt;
1089 	int64_t cmp = 0;
1090 
1091 	hists__for_each_sort_list(hists, fmt) {
1092 		if (perf_hpp__is_dynamic_entry(fmt) &&
1093 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1094 			continue;
1095 
1096 		cmp = fmt->cmp(fmt, left, right);
1097 		if (cmp)
1098 			break;
1099 	}
1100 
1101 	return cmp;
1102 }
1103 
1104 int64_t
1105 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1106 {
1107 	struct hists *hists = left->hists;
1108 	struct perf_hpp_fmt *fmt;
1109 	int64_t cmp = 0;
1110 
1111 	hists__for_each_sort_list(hists, fmt) {
1112 		if (perf_hpp__is_dynamic_entry(fmt) &&
1113 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1114 			continue;
1115 
1116 		cmp = fmt->collapse(fmt, left, right);
1117 		if (cmp)
1118 			break;
1119 	}
1120 
1121 	return cmp;
1122 }
1123 
1124 void hist_entry__delete(struct hist_entry *he)
1125 {
1126 	struct hist_entry_ops *ops = he->ops;
1127 
1128 	thread__zput(he->thread);
1129 	map__zput(he->ms.map);
1130 
1131 	if (he->branch_info) {
1132 		map__zput(he->branch_info->from.map);
1133 		map__zput(he->branch_info->to.map);
1134 		free_srcline(he->branch_info->srcline_from);
1135 		free_srcline(he->branch_info->srcline_to);
1136 		zfree(&he->branch_info);
1137 	}
1138 
1139 	if (he->mem_info) {
1140 		map__zput(he->mem_info->iaddr.map);
1141 		map__zput(he->mem_info->daddr.map);
1142 		zfree(&he->mem_info);
1143 	}
1144 
1145 	if (he->inline_node) {
1146 		inline_node__delete(he->inline_node);
1147 		he->inline_node = NULL;
1148 	}
1149 
1150 	zfree(&he->stat_acc);
1151 	free_srcline(he->srcline);
1152 	if (he->srcfile && he->srcfile[0])
1153 		free(he->srcfile);
1154 	free_callchain(he->callchain);
1155 	free(he->trace_output);
1156 	free(he->raw_data);
1157 	ops->free(he);
1158 }
1159 
1160 /*
1161  * If this is not the last column, then we need to pad it according to the
1162  * pre-calculated max length for this column, otherwise don't bother adding
1163  * spaces because that would break viewing this with, for instance, 'less',
1164  * which would show tons of trailing spaces when a long C++ demangled method
1165  * name is sampled.
1166  */
1167 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1168 				   struct perf_hpp_fmt *fmt, int printed)
1169 {
1170 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1171 		const int width = fmt->width(fmt, hpp, he->hists);
1172 		if (printed < width) {
1173 			advance_hpp(hpp, printed);
1174 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1175 		}
1176 	}
1177 
1178 	return printed;
1179 }
1180 
1181 /*
1182  * collapse the histogram
1183  */
1184 
1185 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1186 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1187 				       enum hist_filter type);
1188 
1189 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1190 
1191 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1192 {
1193 	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1194 }
1195 
1196 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1197 						enum hist_filter type,
1198 						fmt_chk_fn check)
1199 {
1200 	struct perf_hpp_fmt *fmt;
1201 	bool type_match = false;
1202 	struct hist_entry *parent = he->parent_he;
1203 
1204 	switch (type) {
1205 	case HIST_FILTER__THREAD:
1206 		if (symbol_conf.comm_list == NULL &&
1207 		    symbol_conf.pid_list == NULL &&
1208 		    symbol_conf.tid_list == NULL)
1209 			return;
1210 		break;
1211 	case HIST_FILTER__DSO:
1212 		if (symbol_conf.dso_list == NULL)
1213 			return;
1214 		break;
1215 	case HIST_FILTER__SYMBOL:
1216 		if (symbol_conf.sym_list == NULL)
1217 			return;
1218 		break;
1219 	case HIST_FILTER__PARENT:
1220 	case HIST_FILTER__GUEST:
1221 	case HIST_FILTER__HOST:
1222 	case HIST_FILTER__SOCKET:
1223 	case HIST_FILTER__C2C:
1224 	default:
1225 		return;
1226 	}
1227 
1228 	/* if it's filtered by own fmt, it has to have filter bits */
1229 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1230 		if (check(fmt)) {
1231 			type_match = true;
1232 			break;
1233 		}
1234 	}
1235 
1236 	if (type_match) {
1237 		/*
1238 		 * If the filter is for current level entry, propagate
1239 		 * filter marker to parents.  The marker bit was
1240 		 * already set by default so it only needs to clear
1241 		 * non-filtered entries.
1242 		 */
1243 		if (!(he->filtered & (1 << type))) {
1244 			while (parent) {
1245 				parent->filtered &= ~(1 << type);
1246 				parent = parent->parent_he;
1247 			}
1248 		}
1249 	} else {
1250 		/*
1251 		 * If current entry doesn't have matching formats, set
1252 		 * filter marker for upper level entries.  It will be
1253 		 * cleared if its lower level entries are not filtered.
1254 		 *
1255 		 * For lower-level entries, it inherits parent's
1256 		 * filter bit so that lower level entries of a
1257 		 * non-filtered entry won't set the filter marker.
1258 		 */
1259 		if (parent == NULL)
1260 			he->filtered |= (1 << type);
1261 		else
1262 			he->filtered |= (parent->filtered & (1 << type));
1263 	}
1264 }
1265 
1266 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1267 {
1268 	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1269 					    check_thread_entry);
1270 
1271 	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1272 					    perf_hpp__is_dso_entry);
1273 
1274 	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1275 					    perf_hpp__is_sym_entry);
1276 
1277 	hists__apply_filters(he->hists, he);
1278 }
1279 
1280 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1281 						 struct rb_root *root,
1282 						 struct hist_entry *he,
1283 						 struct hist_entry *parent_he,
1284 						 struct perf_hpp_list *hpp_list)
1285 {
1286 	struct rb_node **p = &root->rb_node;
1287 	struct rb_node *parent = NULL;
1288 	struct hist_entry *iter, *new;
1289 	struct perf_hpp_fmt *fmt;
1290 	int64_t cmp;
1291 
1292 	while (*p != NULL) {
1293 		parent = *p;
1294 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1295 
1296 		cmp = 0;
1297 		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1298 			cmp = fmt->collapse(fmt, iter, he);
1299 			if (cmp)
1300 				break;
1301 		}
1302 
1303 		if (!cmp) {
1304 			he_stat__add_stat(&iter->stat, &he->stat);
1305 			return iter;
1306 		}
1307 
1308 		if (cmp < 0)
1309 			p = &parent->rb_left;
1310 		else
1311 			p = &parent->rb_right;
1312 	}
1313 
1314 	new = hist_entry__new(he, true);
1315 	if (new == NULL)
1316 		return NULL;
1317 
1318 	hists->nr_entries++;
1319 
1320 	/* save related format list for output */
1321 	new->hpp_list = hpp_list;
1322 	new->parent_he = parent_he;
1323 
1324 	hist_entry__apply_hierarchy_filters(new);
1325 
1326 	/* some fields are now passed to 'new' */
1327 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1328 		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1329 			he->trace_output = NULL;
1330 		else
1331 			new->trace_output = NULL;
1332 
1333 		if (perf_hpp__is_srcline_entry(fmt))
1334 			he->srcline = NULL;
1335 		else
1336 			new->srcline = NULL;
1337 
1338 		if (perf_hpp__is_srcfile_entry(fmt))
1339 			he->srcfile = NULL;
1340 		else
1341 			new->srcfile = NULL;
1342 	}
1343 
1344 	rb_link_node(&new->rb_node_in, parent, p);
1345 	rb_insert_color(&new->rb_node_in, root);
1346 	return new;
1347 }
1348 
1349 static int hists__hierarchy_insert_entry(struct hists *hists,
1350 					 struct rb_root *root,
1351 					 struct hist_entry *he)
1352 {
1353 	struct perf_hpp_list_node *node;
1354 	struct hist_entry *new_he = NULL;
1355 	struct hist_entry *parent = NULL;
1356 	int depth = 0;
1357 	int ret = 0;
1358 
1359 	list_for_each_entry(node, &hists->hpp_formats, list) {
1360 		/* skip period (overhead) and elided columns */
1361 		if (node->level == 0 || node->skip)
1362 			continue;
1363 
1364 		/* insert copy of 'he' for each fmt into the hierarchy */
1365 		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1366 		if (new_he == NULL) {
1367 			ret = -1;
1368 			break;
1369 		}
1370 
1371 		root = &new_he->hroot_in;
1372 		new_he->depth = depth++;
1373 		parent = new_he;
1374 	}
1375 
1376 	if (new_he) {
1377 		new_he->leaf = true;
1378 
1379 		if (symbol_conf.use_callchain) {
1380 			callchain_cursor_reset(&callchain_cursor);
1381 			if (callchain_merge(&callchain_cursor,
1382 					    new_he->callchain,
1383 					    he->callchain) < 0)
1384 				ret = -1;
1385 		}
1386 	}
1387 
1388 	/* 'he' is no longer used */
1389 	hist_entry__delete(he);
1390 
1391 	/* return 0 (or -1) since it already applied filters */
1392 	return ret;
1393 }
1394 
1395 static int hists__collapse_insert_entry(struct hists *hists,
1396 					struct rb_root *root,
1397 					struct hist_entry *he)
1398 {
1399 	struct rb_node **p = &root->rb_node;
1400 	struct rb_node *parent = NULL;
1401 	struct hist_entry *iter;
1402 	int64_t cmp;
1403 
1404 	if (symbol_conf.report_hierarchy)
1405 		return hists__hierarchy_insert_entry(hists, root, he);
1406 
1407 	while (*p != NULL) {
1408 		parent = *p;
1409 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1410 
1411 		cmp = hist_entry__collapse(iter, he);
1412 
1413 		if (!cmp) {
1414 			int ret = 0;
1415 
1416 			he_stat__add_stat(&iter->stat, &he->stat);
1417 			if (symbol_conf.cumulate_callchain)
1418 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1419 
1420 			if (symbol_conf.use_callchain) {
1421 				callchain_cursor_reset(&callchain_cursor);
1422 				if (callchain_merge(&callchain_cursor,
1423 						    iter->callchain,
1424 						    he->callchain) < 0)
1425 					ret = -1;
1426 			}
1427 			hist_entry__delete(he);
1428 			return ret;
1429 		}
1430 
1431 		if (cmp < 0)
1432 			p = &(*p)->rb_left;
1433 		else
1434 			p = &(*p)->rb_right;
1435 	}
1436 	hists->nr_entries++;
1437 
1438 	rb_link_node(&he->rb_node_in, parent, p);
1439 	rb_insert_color(&he->rb_node_in, root);
1440 	return 1;
1441 }
1442 
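/*
 * hists->entries_in points into the two-element entries_in_array so
 * that new samples can keep landing in one tree while the other is
 * being collapsed; rotating under hists->lock hands the filled tree
 * to the caller and swaps in the other one.
 */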
1443 struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
1444 {
1445 	struct rb_root *root;
1446 
1447 	pthread_mutex_lock(&hists->lock);
1448 
1449 	root = hists->entries_in;
1450 	if (++hists->entries_in > &hists->entries_in_array[1])
1451 		hists->entries_in = &hists->entries_in_array[0];
1452 
1453 	pthread_mutex_unlock(&hists->lock);
1454 
1455 	return root;
1456 }
1457 
1458 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1459 {
1460 	hists__filter_entry_by_dso(hists, he);
1461 	hists__filter_entry_by_thread(hists, he);
1462 	hists__filter_entry_by_symbol(hists, he);
1463 	hists__filter_entry_by_socket(hists, he);
1464 }
1465 
1466 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1467 {
1468 	struct rb_root *root;
1469 	struct rb_node *next;
1470 	struct hist_entry *n;
1471 	int ret;
1472 
1473 	if (!hists__has(hists, need_collapse))
1474 		return 0;
1475 
1476 	hists->nr_entries = 0;
1477 
1478 	root = hists__get_rotate_entries_in(hists);
1479 
1480 	next = rb_first(root);
1481 
1482 	while (next) {
1483 		if (session_done())
1484 			break;
1485 		n = rb_entry(next, struct hist_entry, rb_node_in);
1486 		next = rb_next(&n->rb_node_in);
1487 
1488 		rb_erase(&n->rb_node_in, root);
1489 		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1490 		if (ret < 0)
1491 			return -1;
1492 
1493 		if (ret) {
1494 			/*
1495 			 * If it wasn't combined with one of the entries already
1496 			 * collapsed, we need to apply the filters that may have
1497 			 * been set by, say, the hist_browser.
1498 			 */
1499 			hists__apply_filters(hists, n);
1500 		}
1501 		if (prog)
1502 			ui_progress__update(prog, 1);
1503 	}
1504 	return 0;
1505 }
1506 
1507 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1508 {
1509 	struct hists *hists = a->hists;
1510 	struct perf_hpp_fmt *fmt;
1511 	int64_t cmp = 0;
1512 
1513 	hists__for_each_sort_list(hists, fmt) {
1514 		if (perf_hpp__should_skip(fmt, a->hists))
1515 			continue;
1516 
1517 		cmp = fmt->sort(fmt, a, b);
1518 		if (cmp)
1519 			break;
1520 	}
1521 
1522 	return cmp;
1523 }
1524 
1525 static void hists__reset_filter_stats(struct hists *hists)
1526 {
1527 	hists->nr_non_filtered_entries = 0;
1528 	hists->stats.total_non_filtered_period = 0;
1529 }
1530 
1531 void hists__reset_stats(struct hists *hists)
1532 {
1533 	hists->nr_entries = 0;
1534 	hists->stats.total_period = 0;
1535 
1536 	hists__reset_filter_stats(hists);
1537 }
1538 
1539 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1540 {
1541 	hists->nr_non_filtered_entries++;
1542 	hists->stats.total_non_filtered_period += h->stat.period;
1543 }
1544 
1545 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1546 {
1547 	if (!h->filtered)
1548 		hists__inc_filter_stats(hists, h);
1549 
1550 	hists->nr_entries++;
1551 	hists->stats.total_period += h->stat.period;
1552 }
1553 
1554 static void hierarchy_recalc_total_periods(struct hists *hists)
1555 {
1556 	struct rb_node *node;
1557 	struct hist_entry *he;
1558 
1559 	node = rb_first(&hists->entries);
1560 
1561 	hists->stats.total_period = 0;
1562 	hists->stats.total_non_filtered_period = 0;
1563 
1564 	/*
1565 	 * recalculate total period using top-level entries only
1566 	 * since lower level entries only see non-filtered entries
1567 	 * but upper level entries have the sum of both.
1568 	 */
1569 	while (node) {
1570 		he = rb_entry(node, struct hist_entry, rb_node);
1571 		node = rb_next(node);
1572 
1573 		hists->stats.total_period += he->stat.period;
1574 		if (!he->filtered)
1575 			hists->stats.total_non_filtered_period += he->stat.period;
1576 	}
1577 }
1578 
1579 static void hierarchy_insert_output_entry(struct rb_root *root,
1580 					  struct hist_entry *he)
1581 {
1582 	struct rb_node **p = &root->rb_node;
1583 	struct rb_node *parent = NULL;
1584 	struct hist_entry *iter;
1585 	struct perf_hpp_fmt *fmt;
1586 
1587 	while (*p != NULL) {
1588 		parent = *p;
1589 		iter = rb_entry(parent, struct hist_entry, rb_node);
1590 
1591 		if (hist_entry__sort(he, iter) > 0)
1592 			p = &parent->rb_left;
1593 		else
1594 			p = &parent->rb_right;
1595 	}
1596 
1597 	rb_link_node(&he->rb_node, parent, p);
1598 	rb_insert_color(&he->rb_node, root);
1599 
1600 	/* update column width of dynamic entry */
1601 	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1602 		if (perf_hpp__is_dynamic_entry(fmt))
1603 			fmt->sort(fmt, he, NULL);
1604 	}
1605 }
1606 
1607 static void hists__hierarchy_output_resort(struct hists *hists,
1608 					   struct ui_progress *prog,
1609 					   struct rb_root *root_in,
1610 					   struct rb_root *root_out,
1611 					   u64 min_callchain_hits,
1612 					   bool use_callchain)
1613 {
1614 	struct rb_node *node;
1615 	struct hist_entry *he;
1616 
1617 	*root_out = RB_ROOT;
1618 	node = rb_first(root_in);
1619 
1620 	while (node) {
1621 		he = rb_entry(node, struct hist_entry, rb_node_in);
1622 		node = rb_next(node);
1623 
1624 		hierarchy_insert_output_entry(root_out, he);
1625 
1626 		if (prog)
1627 			ui_progress__update(prog, 1);
1628 
1629 		hists->nr_entries++;
1630 		if (!he->filtered) {
1631 			hists->nr_non_filtered_entries++;
1632 			hists__calc_col_len(hists, he);
1633 		}
1634 
1635 		if (!he->leaf) {
1636 			hists__hierarchy_output_resort(hists, prog,
1637 						       &he->hroot_in,
1638 						       &he->hroot_out,
1639 						       min_callchain_hits,
1640 						       use_callchain);
1641 			continue;
1642 		}
1643 
1644 		if (!use_callchain)
1645 			continue;
1646 
1647 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1648 			u64 total = he->stat.period;
1649 
1650 			if (symbol_conf.cumulate_callchain)
1651 				total = he->stat_acc->period;
1652 
1653 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1654 		}
1655 
1656 		callchain_param.sort(&he->sorted_chain, he->callchain,
1657 				     min_callchain_hits, &callchain_param);
1658 	}
1659 }
1660 
1661 static void __hists__insert_output_entry(struct rb_root *entries,
1662 					 struct hist_entry *he,
1663 					 u64 min_callchain_hits,
1664 					 bool use_callchain)
1665 {
1666 	struct rb_node **p = &entries->rb_node;
1667 	struct rb_node *parent = NULL;
1668 	struct hist_entry *iter;
1669 	struct perf_hpp_fmt *fmt;
1670 
1671 	if (use_callchain) {
1672 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1673 			u64 total = he->stat.period;
1674 
1675 			if (symbol_conf.cumulate_callchain)
1676 				total = he->stat_acc->period;
1677 
1678 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1679 		}
1680 		callchain_param.sort(&he->sorted_chain, he->callchain,
1681 				      min_callchain_hits, &callchain_param);
1682 	}
1683 
1684 	while (*p != NULL) {
1685 		parent = *p;
1686 		iter = rb_entry(parent, struct hist_entry, rb_node);
1687 
1688 		if (hist_entry__sort(he, iter) > 0)
1689 			p = &(*p)->rb_left;
1690 		else
1691 			p = &(*p)->rb_right;
1692 	}
1693 
1694 	rb_link_node(&he->rb_node, parent, p);
1695 	rb_insert_color(&he->rb_node, entries);
1696 
1697 	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1698 		if (perf_hpp__is_dynamic_entry(fmt) &&
1699 		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
1700 			fmt->sort(fmt, he, NULL);  /* update column width */
1701 	}
1702 }
1703 
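/*
 * Final presentation pass: compute the callchain threshold, reset the
 * stats and column widths, then move every collapsed entry into the
 * sorted hists->entries output tree (or recurse through the hierarchy
 * variant when --hierarchy is in use).
 */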
1704 static void output_resort(struct hists *hists, struct ui_progress *prog,
1705 			  bool use_callchain, hists__resort_cb_t cb)
1706 {
1707 	struct rb_root *root;
1708 	struct rb_node *next;
1709 	struct hist_entry *n;
1710 	u64 callchain_total;
1711 	u64 min_callchain_hits;
1712 
1713 	callchain_total = hists->callchain_period;
1714 	if (symbol_conf.filter_relative)
1715 		callchain_total = hists->callchain_non_filtered_period;
1716 
1717 	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1718 
1719 	hists__reset_stats(hists);
1720 	hists__reset_col_len(hists);
1721 
1722 	if (symbol_conf.report_hierarchy) {
1723 		hists__hierarchy_output_resort(hists, prog,
1724 					       &hists->entries_collapsed,
1725 					       &hists->entries,
1726 					       min_callchain_hits,
1727 					       use_callchain);
1728 		hierarchy_recalc_total_periods(hists);
1729 		return;
1730 	}
1731 
1732 	if (hists__has(hists, need_collapse))
1733 		root = &hists->entries_collapsed;
1734 	else
1735 		root = hists->entries_in;
1736 
1737 	next = rb_first(root);
1738 	hists->entries = RB_ROOT;
1739 
1740 	while (next) {
1741 		n = rb_entry(next, struct hist_entry, rb_node_in);
1742 		next = rb_next(&n->rb_node_in);
1743 
1744 		if (cb && cb(n))
1745 			continue;
1746 
1747 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1748 		hists__inc_stats(hists, n);
1749 
1750 		if (!n->filtered)
1751 			hists__calc_col_len(hists, n);
1752 
1753 		if (prog)
1754 			ui_progress__update(prog, 1);
1755 	}
1756 }
1757 
1758 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1759 {
1760 	bool use_callchain;
1761 
1762 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1763 		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1764 	else
1765 		use_callchain = symbol_conf.use_callchain;
1766 
1767 	use_callchain |= symbol_conf.show_branchflag_count;
1768 
1769 	output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
1770 }
1771 
1772 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1773 {
1774 	output_resort(hists, prog, symbol_conf.use_callchain, NULL);
1775 }
1776 
1777 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1778 			     hists__resort_cb_t cb)
1779 {
1780 	output_resort(hists, prog, symbol_conf.use_callchain, cb);
1781 }
1782 
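/*
 * Helpers for walking the hierarchy as a flattened list, e.g. by the
 * TUI hist browser: an entry's children are only visited when it is
 * unfolded, unless the caller forces a direction with HMD_FORCE_CHILD
 * or HMD_FORCE_SIBLING.
 */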
1783 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1784 {
1785 	if (he->leaf || hmd == HMD_FORCE_SIBLING)
1786 		return false;
1787 
1788 	if (he->unfolded || hmd == HMD_FORCE_CHILD)
1789 		return true;
1790 
1791 	return false;
1792 }
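
/*
 * Summary of the rules above: a leaf never has children to enter,
 * HMD_FORCE_SIBLING always refuses descent, HMD_FORCE_CHILD always
 * descends, and HMD_NORMAL descends only into entries the user has
 * unfolded in the browser.
 */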

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}
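
/*
 * Together these helpers implement a pre-order walk of the hroot_out
 * trees in display order: descend into the first child when allowed,
 * otherwise step to the next sibling, and once a subtree is exhausted
 * climb back up until some parent has a following sibling.
 */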

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
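
/*
 * Illustrative example of the bitmask handling above: an entry hidden
 * by both a DSO and a thread filter has
 * h->filtered == (1 << HIST_FILTER__DSO) | (1 << HIST_FILTER__THREAD),
 * so clearing only the DSO bit leaves h->filtered non-zero and the
 * entry stays out of the stats until every filter bit is cleared.
 */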

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
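
/*
 * A filter callback returns true when the entry must stay filtered
 * out, after setting the matching HIST_FILTER__* bit. A hypothetical
 * predicate following this convention (not part of this file; the
 * 'cpu_filter' field and HIST_FILTER__CPU bit are made up for
 * illustration) could look like:
 *
 *	static bool hists__filter_entry_by_cpu(struct hists *hists,
 *					       struct hist_entry *he)
 *	{
 *		if (hists->cpu_filter > -1 && he->cpu != hists->cpu_filter) {
 *			he->filtered |= (1 << HIST_FILTER__CPU);
 *			return true;
 *		}
 *		return false;
 *	}
 */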

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root new_root = RB_ROOT;
	struct rb_node *nd;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
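
/*
 * Note on the recursion above: every hierarchy level keeps its own
 * hroot_out rbtree, so after an entry is re-inserted into the new
 * root its unfiltered children are erased and re-sorted into a fresh
 * tree too, rebuilding the whole hierarchy in the current sort order.
 */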

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root new_root = RB_ROOT;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set the filter marker and move to the child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set the filter marker and move to the next sibling
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add the period to hists and parents, erase the filter marker
		 * and move to the next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort the output after applying a new filter since a filter in a
	 * lower hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
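
/*
 * nr_events[0] doubles as the grand total: the PERF_RECORD_* types
 * start at 1, so slot 0 is never used for a real record type and each
 * call above bumps both the total and the per-type counter.
 */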

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat)); /* stat_acc is a he_stat too */
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;

	p = &root->rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root *leader_root,
				   struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
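
/*
 * Illustrative use (call sites live outside this file): perf diff
 * resorts the hists of each session and then calls hists__match() to
 * attach every baseline entry to its counterpart before computing
 * deltas.
 */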

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root *leader_root,
				 struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* link to the parent in the leader hierarchy, not the one in 'pos' */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, add a dummy entry on the leader hists with period=0
 * and nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
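
/*
 * A minimal sketch of pairing two histograms the way a diff-style
 * consumer might, assuming both hists have already been collapsed:
 *
 *	hists__match(leader, other);
 *	if (hists__link(leader, other) < 0)
 *		pr_err("failed to link dummy entries\n");
 */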

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}
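
/*
 * Worked example of the loop above: with three branch entries, bi[2]
 * is the oldest because perf stores branches newest-first. The loop
 * therefore accounts bi[2].from first with prev == NULL, then
 * bi[1].from with prev == &bi[2].to, then bi[0].from with
 * prev == &bi[1].to, which lets the annotation code derive per-block
 * IPC from each pair of consecutive branches.
 */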

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}


u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}
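
/*
 * This helper backs the --percentage option of tools such as
 * perf report, e.g. "perf report --percentage relative" makes a
 * filtered view show percentages relative to the filtered total
 * rather than to all samples.
 */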

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
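
/*
 * Example ~/.perfconfig stanza handled above:
 *
 *	[hist]
 *		percentage = absolute
 */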

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}
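
/*
 * Sketch of the intended startup sequence (call sites live outside
 * this file): a tool calls hists__init() once before creating any
 * evsel, so every perf_evsel is allocated with room for a struct
 * hists_evsel and has hists_evsel__init()/hists_evsel__exit() run as
 * its constructor and destructor.
 */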

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}