1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
4  *
5  * Handle the callchains from the stream in an ad-hoc radix tree and then
6  * sort them in an rbtree.
7  *
8  * Using a radix tree for the code paths provides fast retrieval and factorizes
9  * memory use. It also lets us use the paths in a hierarchical graph view.
10  *
11  */
12 
13 #include <inttypes.h>
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <stdbool.h>
17 #include <errno.h>
18 #include <math.h>
19 #include <linux/string.h>
20 #include <linux/zalloc.h>
21 
22 #include "asm/bug.h"
23 
24 #include "debug.h"
25 #include "dso.h"
26 #include "event.h"
27 #include "hist.h"
28 #include "sort.h"
29 #include "machine.h"
30 #include "map.h"
31 #include "callchain.h"
32 #include "branch.h"
33 #include "symbol.h"
34 #include "../perf.h"
35 
36 #define CALLCHAIN_PARAM_DEFAULT			\
37 	.mode		= CHAIN_GRAPH_ABS,	\
38 	.min_percent	= 0.5,			\
39 	.order		= ORDER_CALLEE,		\
40 	.key		= CCKEY_FUNCTION,	\
41 	.value		= CCVAL_PERCENT,	\
42 
43 struct callchain_param callchain_param = {
44 	CALLCHAIN_PARAM_DEFAULT
45 };
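
/*
 * Illustrative note: these defaults correspond to the report option string
 * "graph,0.5,callee,function,percent" as understood by the parsing helpers
 * below.
 */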
46 
47 /*
48  * Are there any events using DWARF callchains?
49  *
50  * I.e.
51  *
52  * -e cycles/call-graph=dwarf/
53  */
54 bool dwarf_callchain_users;
55 
56 struct callchain_param callchain_param_default = {
57 	CALLCHAIN_PARAM_DEFAULT
58 };
59 
60 __thread struct callchain_cursor callchain_cursor;
61 
62 int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
63 {
64 	return parse_callchain_record(arg, param);
65 }
66 
67 static int parse_callchain_mode(const char *value)
68 {
69 	if (!strncmp(value, "graph", strlen(value))) {
70 		callchain_param.mode = CHAIN_GRAPH_ABS;
71 		return 0;
72 	}
73 	if (!strncmp(value, "flat", strlen(value))) {
74 		callchain_param.mode = CHAIN_FLAT;
75 		return 0;
76 	}
77 	if (!strncmp(value, "fractal", strlen(value))) {
78 		callchain_param.mode = CHAIN_GRAPH_REL;
79 		return 0;
80 	}
81 	if (!strncmp(value, "folded", strlen(value))) {
82 		callchain_param.mode = CHAIN_FOLDED;
83 		return 0;
84 	}
85 	return -1;
86 }
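
/*
 * Note that strncmp(value, "graph", strlen(value)) only compares
 * strlen(value) bytes, so this (and the other parse_callchain_*() helpers
 * below) also accepts unambiguous abbreviations, e.g. "fract" selects the
 * fractal mode.
 */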
87 
88 static int parse_callchain_order(const char *value)
89 {
90 	if (!strncmp(value, "caller", strlen(value))) {
91 		callchain_param.order = ORDER_CALLER;
92 		callchain_param.order_set = true;
93 		return 0;
94 	}
95 	if (!strncmp(value, "callee", strlen(value))) {
96 		callchain_param.order = ORDER_CALLEE;
97 		callchain_param.order_set = true;
98 		return 0;
99 	}
100 	return -1;
101 }
102 
103 static int parse_callchain_sort_key(const char *value)
104 {
105 	if (!strncmp(value, "function", strlen(value))) {
106 		callchain_param.key = CCKEY_FUNCTION;
107 		return 0;
108 	}
109 	if (!strncmp(value, "address", strlen(value))) {
110 		callchain_param.key = CCKEY_ADDRESS;
111 		return 0;
112 	}
113 	if (!strncmp(value, "srcline", strlen(value))) {
114 		callchain_param.key = CCKEY_SRCLINE;
115 		return 0;
116 	}
117 	if (!strncmp(value, "branch", strlen(value))) {
118 		callchain_param.branch_callstack = 1;
119 		return 0;
120 	}
121 	return -1;
122 }
123 
124 static int parse_callchain_value(const char *value)
125 {
126 	if (!strncmp(value, "percent", strlen(value))) {
127 		callchain_param.value = CCVAL_PERCENT;
128 		return 0;
129 	}
130 	if (!strncmp(value, "period", strlen(value))) {
131 		callchain_param.value = CCVAL_PERIOD;
132 		return 0;
133 	}
134 	if (!strncmp(value, "count", strlen(value))) {
135 		callchain_param.value = CCVAL_COUNT;
136 		return 0;
137 	}
138 	return -1;
139 }
140 
141 static int get_stack_size(const char *str, unsigned long *_size)
142 {
143 	char *endptr;
144 	unsigned long size;
145 	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
146 
147 	size = strtoul(str, &endptr, 0);
148 
149 	do {
150 		if (*endptr)
151 			break;
152 
153 		size = round_up(size, sizeof(u64));
154 		if (!size || size > max_size)
155 			break;
156 
157 		*_size = size;
158 		return 0;
159 
160 	} while (0);
161 
162 	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
163 	       max_size, str);
164 	return -1;
165 }
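
/*
 * Illustrative examples: "1234" is rounded up to 1240 (a multiple of
 * sizeof(u64)), while "0", values above round_down(USHRT_MAX, 8) = 65528,
 * or strings with trailing garbage are rejected.
 */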
166 
167 static int
168 __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
169 {
170 	char *tok;
171 	char *endptr, *saveptr = NULL;
172 	bool minpcnt_set = false;
173 	bool record_opt_set = false;
174 	bool try_stack_size = false;
175 
176 	callchain_param.enabled = true;
177 	symbol_conf.use_callchain = true;
178 
179 	if (!arg)
180 		return 0;
181 
182 	while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
183 		if (!strncmp(tok, "none", strlen(tok))) {
184 			callchain_param.mode = CHAIN_NONE;
185 			callchain_param.enabled = false;
186 			symbol_conf.use_callchain = false;
187 			return 0;
188 		}
189 
190 		if (!parse_callchain_mode(tok) ||
191 		    !parse_callchain_order(tok) ||
192 		    !parse_callchain_sort_key(tok) ||
193 		    !parse_callchain_value(tok)) {
194 			/* parsing ok - move on to the next */
195 			try_stack_size = false;
196 			goto next;
197 		} else if (allow_record_opt && !record_opt_set) {
198 			if (parse_callchain_record(tok, &callchain_param))
199 				goto try_numbers;
200 
201 			/* assume that a number following 'dwarf' is the stack size */
202 			if (callchain_param.record_mode == CALLCHAIN_DWARF)
203 				try_stack_size = true;
204 
205 			record_opt_set = true;
206 			goto next;
207 		}
208 
209 try_numbers:
210 		if (try_stack_size) {
211 			unsigned long size = 0;
212 
213 			if (get_stack_size(tok, &size) < 0)
214 				return -1;
215 			callchain_param.dump_size = size;
216 			try_stack_size = false;
217 		} else if (!minpcnt_set) {
218 			/* try to get the min percent */
219 			callchain_param.min_percent = strtod(tok, &endptr);
220 			if (tok == endptr)
221 				return -1;
222 			minpcnt_set = true;
223 		} else {
224 			/* finally, try the print limit */
225 			callchain_param.print_limit = strtoul(tok, &endptr, 0);
226 			if (tok == endptr)
227 				return -1;
228 		}
229 next:
230 		arg = NULL;
231 	}
232 
233 	if (callchain_register_param(&callchain_param) < 0) {
234 		pr_err("Can't register callchain params\n");
235 		return -1;
236 	}
237 	return 0;
238 }
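
/*
 * Illustrative option strings accepted here (e.g. for perf report -g or,
 * with allow_record_opt, perf top --call-graph):
 *
 *   "graph,0.5,caller"     - absolute graph, 0.5% threshold, caller order
 *   "fractal,2,callee,100" - relative graph, 2% threshold, print limit 100
 *   "dwarf,4096,1"         - (allow_record_opt only) dwarf records with
 *                            4096 byte stack dumps, 1% threshold
 */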
239 
240 int parse_callchain_report_opt(const char *arg)
241 {
242 	return __parse_callchain_report_opt(arg, false);
243 }
244 
245 int parse_callchain_top_opt(const char *arg)
246 {
247 	return __parse_callchain_report_opt(arg, true);
248 }
249 
250 int parse_callchain_record(const char *arg, struct callchain_param *param)
251 {
252 	char *tok, *name, *saveptr = NULL;
253 	char *buf;
254 	int ret = -1;
255 
256 	/* We need a buffer that we know we can write to. */
257 	buf = malloc(strlen(arg) + 1);
258 	if (!buf)
259 		return -ENOMEM;
260 
261 	strcpy(buf, arg);
262 
263 	tok = strtok_r((char *)buf, ",", &saveptr);
264 	name = tok ? : (char *)buf;
265 
266 	do {
267 		/* Framepointer style */
268 		if (!strncmp(name, "fp", sizeof("fp"))) {
269 			if (!strtok_r(NULL, ",", &saveptr)) {
270 				param->record_mode = CALLCHAIN_FP;
271 				ret = 0;
272 			} else
273 				pr_err("callchain: No more arguments "
274 				       "needed for --call-graph fp\n");
275 			break;
276 
277 		/* Dwarf style */
278 		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
279 			const unsigned long default_stack_dump_size = 8192;
280 
281 			ret = 0;
282 			param->record_mode = CALLCHAIN_DWARF;
283 			param->dump_size = default_stack_dump_size;
284 			dwarf_callchain_users = true;
285 
286 			tok = strtok_r(NULL, ",", &saveptr);
287 			if (tok) {
288 				unsigned long size = 0;
289 
290 				ret = get_stack_size(tok, &size);
291 				param->dump_size = size;
292 			}
293 		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
294 			if (!strtok_r(NULL, ",", &saveptr)) {
295 				param->record_mode = CALLCHAIN_LBR;
296 				ret = 0;
297 			} else
298 				pr_err("callchain: No more arguments "
299 					"needed for --call-graph lbr\n");
300 			break;
301 		} else {
302 			pr_err("callchain: Unknown --call-graph option "
303 			       "value: %s\n", arg);
304 			break;
305 		}
306 
307 	} while (0);
308 
309 	free(buf);
310 	return ret;
311 }
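
/*
 * Illustrative record option strings accepted here:
 *
 *   "fp"          - frame pointer based unwinding
 *   "dwarf"       - dwarf unwinding, default 8192 byte stack dumps
 *   "dwarf,4096"  - dwarf unwinding, 4096 byte stack dumps
 *   "lbr"         - last branch record based call stacks
 */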
312 
313 int perf_callchain_config(const char *var, const char *value)
314 {
315 	char *endptr;
316 
317 	if (!strstarts(var, "call-graph."))
318 		return 0;
319 	var += sizeof("call-graph.") - 1;
320 
321 	if (!strcmp(var, "record-mode"))
322 		return parse_callchain_record_opt(value, &callchain_param);
323 	if (!strcmp(var, "dump-size")) {
324 		unsigned long size = 0;
325 		int ret;
326 
327 		ret = get_stack_size(value, &size);
328 		callchain_param.dump_size = size;
329 
330 		return ret;
331 	}
332 	if (!strcmp(var, "print-type")){
333 		int ret;
334 		ret = parse_callchain_mode(value);
335 		if (ret == -1)
336 			pr_err("Invalid callchain mode: %s\n", value);
337 		return ret;
338 	}
339 	if (!strcmp(var, "order")){
340 		int ret;
341 		ret = parse_callchain_order(value);
342 		if (ret == -1)
343 			pr_err("Invalid callchain order: %s\n", value);
344 		return ret;
345 	}
346 	if (!strcmp(var, "sort-key")){
347 		int ret;
348 		ret = parse_callchain_sort_key(value);
349 		if (ret == -1)
350 			pr_err("Invalid callchain sort key: %s\n", value);
351 		return ret;
352 	}
353 	if (!strcmp(var, "threshold")) {
354 		callchain_param.min_percent = strtod(value, &endptr);
355 		if (value == endptr) {
356 			pr_err("Invalid callchain threshold: %s\n", value);
357 			return -1;
358 		}
359 	}
360 	if (!strcmp(var, "print-limit")) {
361 		callchain_param.print_limit = strtod(value, &endptr);
362 		if (value == endptr) {
363 			pr_err("Invalid callchain print limit: %s\n", value);
364 			return -1;
365 		}
366 	}
367 
368 	return 0;
369 }
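
/*
 * Illustrative ~/.perfconfig snippet handled by this function:
 *
 *   [call-graph]
 *           record-mode = dwarf
 *           dump-size = 8192
 *           print-type = graph
 *           order = caller
 *           sort-key = function
 *           threshold = 0.5
 *           print-limit = 128
 */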
370 
371 static void
372 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
373 		    enum chain_mode mode)
374 {
375 	struct rb_node **p = &root->rb_node;
376 	struct rb_node *parent = NULL;
377 	struct callchain_node *rnode;
378 	u64 chain_cumul = callchain_cumul_hits(chain);
379 
380 	while (*p) {
381 		u64 rnode_cumul;
382 
383 		parent = *p;
384 		rnode = rb_entry(parent, struct callchain_node, rb_node);
385 		rnode_cumul = callchain_cumul_hits(rnode);
386 
387 		switch (mode) {
388 		case CHAIN_FLAT:
389 		case CHAIN_FOLDED:
390 			if (rnode->hit < chain->hit)
391 				p = &(*p)->rb_left;
392 			else
393 				p = &(*p)->rb_right;
394 			break;
395 		case CHAIN_GRAPH_ABS: /* Fall through */
396 		case CHAIN_GRAPH_REL:
397 			if (rnode_cumul < chain_cumul)
398 				p = &(*p)->rb_left;
399 			else
400 				p = &(*p)->rb_right;
401 			break;
402 		case CHAIN_NONE:
403 		default:
404 			break;
405 		}
406 	}
407 
408 	rb_link_node(&chain->rb_node, parent, p);
409 	rb_insert_color(&chain->rb_node, root);
410 }
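
/*
 * Note: bigger hits (cumulated hits for the graph modes) sort to the left,
 * so walking the resulting rbtree with rb_first()/rb_next() visits the
 * callchains in descending hit order.
 */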
411 
412 static void
413 __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
414 		  u64 min_hit)
415 {
416 	struct rb_node *n;
417 	struct callchain_node *child;
418 
419 	n = rb_first(&node->rb_root_in);
420 	while (n) {
421 		child = rb_entry(n, struct callchain_node, rb_node_in);
422 		n = rb_next(n);
423 
424 		__sort_chain_flat(rb_root, child, min_hit);
425 	}
426 
427 	if (node->hit && node->hit >= min_hit)
428 		rb_insert_callchain(rb_root, node, CHAIN_FLAT);
429 }
430 
431 /*
432  * Once we have every callchain from the stream, we can
433  * sort them by hit.
434  */
435 static void
436 sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
437 		u64 min_hit, struct callchain_param *param __maybe_unused)
438 {
439 	*rb_root = RB_ROOT;
440 	__sort_chain_flat(rb_root, &root->node, min_hit);
441 }
442 
443 static void __sort_chain_graph_abs(struct callchain_node *node,
444 				   u64 min_hit)
445 {
446 	struct rb_node *n;
447 	struct callchain_node *child;
448 
449 	node->rb_root = RB_ROOT;
450 	n = rb_first(&node->rb_root_in);
451 
452 	while (n) {
453 		child = rb_entry(n, struct callchain_node, rb_node_in);
454 		n = rb_next(n);
455 
456 		__sort_chain_graph_abs(child, min_hit);
457 		if (callchain_cumul_hits(child) >= min_hit)
458 			rb_insert_callchain(&node->rb_root, child,
459 					    CHAIN_GRAPH_ABS);
460 	}
461 }
462 
463 static void
464 sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
465 		     u64 min_hit, struct callchain_param *param __maybe_unused)
466 {
467 	__sort_chain_graph_abs(&chain_root->node, min_hit);
468 	rb_root->rb_node = chain_root->node.rb_root.rb_node;
469 }
470 
471 static void __sort_chain_graph_rel(struct callchain_node *node,
472 				   double min_percent)
473 {
474 	struct rb_node *n;
475 	struct callchain_node *child;
476 	u64 min_hit;
477 
478 	node->rb_root = RB_ROOT;
479 	min_hit = ceil(node->children_hit * min_percent);
480 
481 	n = rb_first(&node->rb_root_in);
482 	while (n) {
483 		child = rb_entry(n, struct callchain_node, rb_node_in);
484 		n = rb_next(n);
485 
486 		__sort_chain_graph_rel(child, min_percent);
487 		if (callchain_cumul_hits(child) >= min_hit)
488 			rb_insert_callchain(&node->rb_root, child,
489 					    CHAIN_GRAPH_REL);
490 	}
491 }
492 
493 static void
494 sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
495 		     u64 min_hit __maybe_unused, struct callchain_param *param)
496 {
497 	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
498 	rb_root->rb_node = chain_root->node.rb_root.rb_node;
499 }
500 
501 int callchain_register_param(struct callchain_param *param)
502 {
503 	switch (param->mode) {
504 	case CHAIN_GRAPH_ABS:
505 		param->sort = sort_chain_graph_abs;
506 		break;
507 	case CHAIN_GRAPH_REL:
508 		param->sort = sort_chain_graph_rel;
509 		break;
510 	case CHAIN_FLAT:
511 	case CHAIN_FOLDED:
512 		param->sort = sort_chain_flat;
513 		break;
514 	case CHAIN_NONE:
515 	default:
516 		return -1;
517 	}
518 	return 0;
519 }
520 
521 /*
522  * Create a child for a parent. If inherit_children, then the new child
523  * will become the new parent of its parent's children.
524  */
525 static struct callchain_node *
526 create_child(struct callchain_node *parent, bool inherit_children)
527 {
528 	struct callchain_node *new;
529 
530 	new = zalloc(sizeof(*new));
531 	if (!new) {
532 		perror("not enough memory to create child for code path tree");
533 		return NULL;
534 	}
535 	new->parent = parent;
536 	INIT_LIST_HEAD(&new->val);
537 	INIT_LIST_HEAD(&new->parent_val);
538 
539 	if (inherit_children) {
540 		struct rb_node *n;
541 		struct callchain_node *child;
542 
543 		new->rb_root_in = parent->rb_root_in;
544 		parent->rb_root_in = RB_ROOT;
545 
546 		n = rb_first(&new->rb_root_in);
547 		while (n) {
548 			child = rb_entry(n, struct callchain_node, rb_node_in);
549 			child->parent = new;
550 			n = rb_next(n);
551 		}
552 
553 		/* make it the first child */
554 		rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
555 		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
556 	}
557 
558 	return new;
559 }
560 
561 
562 /*
563  * Fill the node with callchain values
564  */
565 static int
566 fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
567 {
568 	struct callchain_cursor_node *cursor_node;
569 
570 	node->val_nr = cursor->nr - cursor->pos;
571 	if (!node->val_nr)
572 		pr_warning("Warning: empty node in callchain tree\n");
573 
574 	cursor_node = callchain_cursor_current(cursor);
575 
576 	while (cursor_node) {
577 		struct callchain_list *call;
578 
579 		call = zalloc(sizeof(*call));
580 		if (!call) {
581 			perror("not enough memory for the code path tree");
582 			return -1;
583 		}
584 		call->ip = cursor_node->ip;
585 		call->ms = cursor_node->ms;
586 		map__get(call->ms.map);
587 		call->srcline = cursor_node->srcline;
588 
589 		if (cursor_node->branch) {
590 			call->branch_count = 1;
591 
592 			if (cursor_node->branch_from) {
593 				/*
594 				 * branch_from is set elsewhere, which implies
595 				 * that this is the "to" side of a branch.
596 				 */
597 				call->brtype_stat.branch_to = true;
598 
599 				if (cursor_node->branch_flags.predicted)
600 					call->predicted_count = 1;
601 
602 				if (cursor_node->branch_flags.abort)
603 					call->abort_count = 1;
604 
605 				branch_type_count(&call->brtype_stat,
606 						  &cursor_node->branch_flags,
607 						  cursor_node->branch_from,
608 						  cursor_node->ip);
609 			} else {
610 				/*
611 				 * It's "from" of a branch
612 				 */
613 				call->brtype_stat.branch_to = false;
614 				call->cycles_count =
615 					cursor_node->branch_flags.cycles;
616 				call->iter_count = cursor_node->nr_loop_iter;
617 				call->iter_cycles = cursor_node->iter_cycles;
618 			}
619 		}
620 
621 		list_add_tail(&call->list, &node->val);
622 
623 		callchain_cursor_advance(cursor);
624 		cursor_node = callchain_cursor_current(cursor);
625 	}
626 	return 0;
627 }
628 
629 static struct callchain_node *
630 add_child(struct callchain_node *parent,
631 	  struct callchain_cursor *cursor,
632 	  u64 period)
633 {
634 	struct callchain_node *new;
635 
636 	new = create_child(parent, false);
637 	if (new == NULL)
638 		return NULL;
639 
640 	if (fill_node(new, cursor) < 0) {
641 		struct callchain_list *call, *tmp;
642 
643 		list_for_each_entry_safe(call, tmp, &new->val, list) {
644 			list_del_init(&call->list);
645 			map__zput(call->ms.map);
646 			free(call);
647 		}
648 		free(new);
649 		return NULL;
650 	}
651 
652 	new->children_hit = 0;
653 	new->hit = period;
654 	new->children_count = 0;
655 	new->count = 1;
656 	return new;
657 }
658 
659 enum match_result {
660 	MATCH_ERROR  = -1,
661 	MATCH_EQ,
662 	MATCH_LT,
663 	MATCH_GT,
664 };
665 
666 static enum match_result match_chain_strings(const char *left,
667 					     const char *right)
668 {
669 	enum match_result ret = MATCH_EQ;
670 	int cmp;
671 
672 	if (left && right)
673 		cmp = strcmp(left, right);
674 	else if (!left && right)
675 		cmp = 1;
676 	else if (left && !right)
677 		cmp = -1;
678 	else
679 		return MATCH_ERROR;
680 
681 	if (cmp != 0)
682 		ret = cmp < 0 ? MATCH_LT : MATCH_GT;
683 
684 	return ret;
685 }
686 
687 /*
688  * We need to always use relative addresses because we're aggregating
689  * callchains from multiple threads, i.e. different address spaces, so
690  * comparing absolute addresses makes no sense as a symbol in a DSO may end up
691  * in a different address when used in a different binary or even the same
692  * binary but with some sort of address randomization technique, thus we need
693  * to compare just relative addresses. -acme
694  */
695 static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
696 						   struct map *right_map, u64 right_ip)
697 {
698 	struct dso *left_dso = left_map ? left_map->dso : NULL;
699 	struct dso *right_dso = right_map ? right_map->dso : NULL;
700 
701 	if (left_dso != right_dso)
702 		return left_dso < right_dso ? MATCH_LT : MATCH_GT;
703 
704 	if (left_ip != right_ip)
705 		return left_ip < right_ip ? MATCH_LT : MATCH_GT;
706 
707 	return MATCH_EQ;
708 }
709 
710 static enum match_result match_chain(struct callchain_cursor_node *node,
711 				     struct callchain_list *cnode)
712 {
713 	enum match_result match = MATCH_ERROR;
714 
715 	switch (callchain_param.key) {
716 	case CCKEY_SRCLINE:
717 		match = match_chain_strings(cnode->srcline, node->srcline);
718 		if (match != MATCH_ERROR)
719 			break;
720 		/* otherwise fall-back to symbol-based comparison below */
721 		__fallthrough;
722 	case CCKEY_FUNCTION:
723 		if (node->ms.sym && cnode->ms.sym) {
724 			/*
725 			 * Compare inlined frames based on their symbol name
726 			 * because different inlined frames will have the same
727 			 * symbol start. Otherwise do a faster comparison based
728 			 * on the symbol start address.
729 			 */
730 			if (cnode->ms.sym->inlined || node->ms.sym->inlined) {
731 				match = match_chain_strings(cnode->ms.sym->name,
732 							    node->ms.sym->name);
733 				if (match != MATCH_ERROR)
734 					break;
735 			} else {
736 				match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
737 								  node->ms.map, node->ms.sym->start);
738 				break;
739 			}
740 		}
741 		/* otherwise fall-back to IP-based comparison below */
742 		__fallthrough;
743 	case CCKEY_ADDRESS:
744 	default:
745 		match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->ms.map, node->ip);
746 		break;
747 	}
748 
749 	if (match == MATCH_EQ && node->branch) {
750 		cnode->branch_count++;
751 
752 		if (node->branch_from) {
753 			/*
754 			 * It's "to" of a branch
755 			 */
756 			cnode->brtype_stat.branch_to = true;
757 
758 			if (node->branch_flags.predicted)
759 				cnode->predicted_count++;
760 
761 			if (node->branch_flags.abort)
762 				cnode->abort_count++;
763 
764 			branch_type_count(&cnode->brtype_stat,
765 					  &node->branch_flags,
766 					  node->branch_from,
767 					  node->ip);
768 		} else {
769 			/*
770 			 * It's "from" of a branch
771 			 */
772 			cnode->brtype_stat.branch_to = false;
773 			cnode->cycles_count += node->branch_flags.cycles;
774 			cnode->iter_count += node->nr_loop_iter;
775 			cnode->iter_cycles += node->iter_cycles;
776 			cnode->from_count++;
777 		}
778 	}
779 
780 	return match;
781 }
782 
783 /*
784  * Split the parent in two parts (a new child is created) and
785  * give a part of its callchain to the created child.
786  * Then create another child to host the given callchain of the new branch.
787  */
788 static int
789 split_add_child(struct callchain_node *parent,
790 		struct callchain_cursor *cursor,
791 		struct callchain_list *to_split,
792 		u64 idx_parents, u64 idx_local, u64 period)
793 {
794 	struct callchain_node *new;
795 	struct list_head *old_tail;
796 	unsigned int idx_total = idx_parents + idx_local;
797 
798 	/* split */
799 	new = create_child(parent, true);
800 	if (new == NULL)
801 		return -1;
802 
803 	/* split the callchain and move a part to the new child */
804 	old_tail = parent->val.prev;
805 	list_del_range(&to_split->list, old_tail);
806 	new->val.next = &to_split->list;
807 	new->val.prev = old_tail;
808 	to_split->list.prev = &new->val;
809 	old_tail->next = &new->val;
810 
811 	/* split the hits */
812 	new->hit = parent->hit;
813 	new->children_hit = parent->children_hit;
814 	parent->children_hit = callchain_cumul_hits(new);
815 	new->val_nr = parent->val_nr - idx_local;
816 	parent->val_nr = idx_local;
817 	new->count = parent->count;
818 	new->children_count = parent->children_count;
819 	parent->children_count = callchain_cumul_counts(new);
820 
821 	/* create a new child for the new branch if any */
822 	if (idx_total < cursor->nr) {
823 		struct callchain_node *first;
824 		struct callchain_list *cnode;
825 		struct callchain_cursor_node *node;
826 		struct rb_node *p, **pp;
827 
828 		parent->hit = 0;
829 		parent->children_hit += period;
830 		parent->count = 0;
831 		parent->children_count += 1;
832 
833 		node = callchain_cursor_current(cursor);
834 		new = add_child(parent, cursor, period);
835 		if (new == NULL)
836 			return -1;
837 
838 		/*
839 		 * This is the second child since we moved the parent's children
840 		 * to the new (first) child above.
841 		 */
842 		p = parent->rb_root_in.rb_node;
843 		first = rb_entry(p, struct callchain_node, rb_node_in);
844 		cnode = list_first_entry(&first->val, struct callchain_list,
845 					 list);
846 
847 		if (match_chain(node, cnode) == MATCH_LT)
848 			pp = &p->rb_left;
849 		else
850 			pp = &p->rb_right;
851 
852 		rb_link_node(&new->rb_node_in, p, pp);
853 		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
854 	} else {
855 		parent->hit = period;
856 		parent->count = 1;
857 	}
858 	return 0;
859 }
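
/*
 * Illustration: if the node currently holds A->B->C and the cursor brings
 * in A->B->D, the node is split after B. The original node keeps A->B, a
 * first child inherits C (plus the former grandchildren), and a second
 * child is created to hold D.
 */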
860 
861 static enum match_result
862 append_chain(struct callchain_node *root,
863 	     struct callchain_cursor *cursor,
864 	     u64 period);
865 
866 static int
867 append_chain_children(struct callchain_node *root,
868 		      struct callchain_cursor *cursor,
869 		      u64 period)
870 {
871 	struct callchain_node *rnode;
872 	struct callchain_cursor_node *node;
873 	struct rb_node **p = &root->rb_root_in.rb_node;
874 	struct rb_node *parent = NULL;
875 
876 	node = callchain_cursor_current(cursor);
877 	if (!node)
878 		return -1;
879 
880 	/* lookup in the children */
881 	while (*p) {
882 		enum match_result ret;
883 
884 		parent = *p;
885 		rnode = rb_entry(parent, struct callchain_node, rb_node_in);
886 
887 		/* If at least the first entry matches, rely on the children */
888 		ret = append_chain(rnode, cursor, period);
889 		if (ret == MATCH_EQ)
890 			goto inc_children_hit;
891 		if (ret == MATCH_ERROR)
892 			return -1;
893 
894 		if (ret == MATCH_LT)
895 			p = &parent->rb_left;
896 		else
897 			p = &parent->rb_right;
898 	}
899 	/* nothing in children, add to the current node */
900 	rnode = add_child(root, cursor, period);
901 	if (rnode == NULL)
902 		return -1;
903 
904 	rb_link_node(&rnode->rb_node_in, parent, p);
905 	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
906 
907 inc_children_hit:
908 	root->children_hit += period;
909 	root->children_count++;
910 	return 0;
911 }
912 
913 static enum match_result
914 append_chain(struct callchain_node *root,
915 	     struct callchain_cursor *cursor,
916 	     u64 period)
917 {
918 	struct callchain_list *cnode;
919 	u64 start = cursor->pos;
920 	bool found = false;
921 	u64 matches;
922 	enum match_result cmp = MATCH_ERROR;
923 
924 	/*
925 	 * Lookup in the current node.
926 	 * If we have a symbol, compare against its start address so we match
927 	 * anywhere inside a function, unless function
928 	 * mode is disabled.
929 	 */
930 	list_for_each_entry(cnode, &root->val, list) {
931 		struct callchain_cursor_node *node;
932 
933 		node = callchain_cursor_current(cursor);
934 		if (!node)
935 			break;
936 
937 		cmp = match_chain(node, cnode);
938 		if (cmp != MATCH_EQ)
939 			break;
940 
941 		found = true;
942 
943 		callchain_cursor_advance(cursor);
944 	}
945 
946 	/* no match, relay to the parent */
947 	if (!found) {
948 		WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
949 		return cmp;
950 	}
951 
952 	matches = cursor->pos - start;
953 
954 	/* we match only a part of the node. Split it and add the new chain */
955 	if (matches < root->val_nr) {
956 		if (split_add_child(root, cursor, cnode, start, matches,
957 				    period) < 0)
958 			return MATCH_ERROR;
959 
960 		return MATCH_EQ;
961 	}
962 
963 	/* we match 100% of the path, increment the hit */
964 	if (matches == root->val_nr && cursor->pos == cursor->nr) {
965 		root->hit += period;
966 		root->count++;
967 		return MATCH_EQ;
968 	}
969 
970 	/* We match the node and still have a part remaining */
971 	if (append_chain_children(root, cursor, period) < 0)
972 		return MATCH_ERROR;
973 
974 	return MATCH_EQ;
975 }
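
/*
 * To summarize the cases above: a partial match of this node's value list
 * splits the node, a complete match with an exhausted cursor just bumps the
 * hit count, and a complete match with cursor entries remaining recurses
 * into the children.
 */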
976 
977 int callchain_append(struct callchain_root *root,
978 		     struct callchain_cursor *cursor,
979 		     u64 period)
980 {
981 	if (!cursor->nr)
982 		return 0;
983 
984 	callchain_cursor_commit(cursor);
985 
986 	if (append_chain_children(&root->node, cursor, period) < 0)
987 		return -1;
988 
989 	if (cursor->nr > root->max_depth)
990 		root->max_depth = cursor->nr;
991 
992 	return 0;
993 }
994 
995 static int
996 merge_chain_branch(struct callchain_cursor *cursor,
997 		   struct callchain_node *dst, struct callchain_node *src)
998 {
999 	struct callchain_cursor_node **old_last = cursor->last;
1000 	struct callchain_node *child;
1001 	struct callchain_list *list, *next_list;
1002 	struct rb_node *n;
1003 	int old_pos = cursor->nr;
1004 	int err = 0;
1005 
1006 	list_for_each_entry_safe(list, next_list, &src->val, list) {
1007 		callchain_cursor_append(cursor, list->ip, &list->ms,
1008 					false, NULL, 0, 0, 0, list->srcline);
1009 		list_del_init(&list->list);
1010 		map__zput(list->ms.map);
1011 		free(list);
1012 	}
1013 
1014 	if (src->hit) {
1015 		callchain_cursor_commit(cursor);
1016 		if (append_chain_children(dst, cursor, src->hit) < 0)
1017 			return -1;
1018 	}
1019 
1020 	n = rb_first(&src->rb_root_in);
1021 	while (n) {
1022 		child = container_of(n, struct callchain_node, rb_node_in);
1023 		n = rb_next(n);
1024 		rb_erase(&child->rb_node_in, &src->rb_root_in);
1025 
1026 		err = merge_chain_branch(cursor, dst, child);
1027 		if (err)
1028 			break;
1029 
1030 		free(child);
1031 	}
1032 
1033 	cursor->nr = old_pos;
1034 	cursor->last = old_last;
1035 
1036 	return err;
1037 }
1038 
1039 int callchain_merge(struct callchain_cursor *cursor,
1040 		    struct callchain_root *dst, struct callchain_root *src)
1041 {
1042 	return merge_chain_branch(cursor, &dst->node, &src->node);
1043 }
1044 
1045 int callchain_cursor_append(struct callchain_cursor *cursor,
1046 			    u64 ip, struct map_symbol *ms,
1047 			    bool branch, struct branch_flags *flags,
1048 			    int nr_loop_iter, u64 iter_cycles, u64 branch_from,
1049 			    const char *srcline)
1050 {
1051 	struct callchain_cursor_node *node = *cursor->last;
1052 
1053 	if (!node) {
1054 		node = calloc(1, sizeof(*node));
1055 		if (!node)
1056 			return -ENOMEM;
1057 
1058 		*cursor->last = node;
1059 	}
1060 
1061 	node->ip = ip;
1062 	map__zput(node->ms.map);
1063 	node->ms = *ms;
1064 	map__get(node->ms.map);
1065 	node->branch = branch;
1066 	node->nr_loop_iter = nr_loop_iter;
1067 	node->iter_cycles = iter_cycles;
1068 	node->srcline = srcline;
1069 
1070 	if (flags)
1071 		memcpy(&node->branch_flags, flags,
1072 			sizeof(struct branch_flags));
1073 
1074 	node->branch_from = branch_from;
1075 	cursor->nr++;
1076 
1077 	cursor->last = &node->next;
1078 
1079 	return 0;
1080 }
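
/*
 * Note: cursor nodes are allocated lazily and reused; a new node is only
 * calloc'ed when the cursor grows beyond what earlier callchains needed,
 * and callchain_cursor_reset() below keeps the already allocated nodes
 * around as a cache.
 */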
1081 
1082 int sample__resolve_callchain(struct perf_sample *sample,
1083 			      struct callchain_cursor *cursor, struct symbol **parent,
1084 			      struct evsel *evsel, struct addr_location *al,
1085 			      int max_stack)
1086 {
1087 	if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
1088 		return 0;
1089 
1090 	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
1091 	    perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
1092 		return thread__resolve_callchain(al->thread, cursor, evsel, sample,
1093 						 parent, al, max_stack);
1094 	}
1095 	return 0;
1096 }
1097 
1098 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
1099 {
1100 	if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
1101 		!symbol_conf.show_branchflag_count)
1102 		return 0;
1103 	return callchain_append(he->callchain, &callchain_cursor, sample->period);
1104 }
1105 
1106 int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
1107 			bool hide_unresolved)
1108 {
1109 	al->maps = node->ms.maps;
1110 	al->map = node->ms.map;
1111 	al->sym = node->ms.sym;
1112 	al->srcline = node->srcline;
1113 	al->addr = node->ip;
1114 
1115 	if (al->sym == NULL) {
1116 		if (hide_unresolved)
1117 			return 0;
1118 		if (al->map == NULL)
1119 			goto out;
1120 	}
1121 
1122 	if (al->maps == &al->maps->machine->kmaps) {
1123 		if (machine__is_host(al->maps->machine)) {
1124 			al->cpumode = PERF_RECORD_MISC_KERNEL;
1125 			al->level = 'k';
1126 		} else {
1127 			al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
1128 			al->level = 'g';
1129 		}
1130 	} else {
1131 		if (machine__is_host(al->maps->machine)) {
1132 			al->cpumode = PERF_RECORD_MISC_USER;
1133 			al->level = '.';
1134 		} else if (perf_guest) {
1135 			al->cpumode = PERF_RECORD_MISC_GUEST_USER;
1136 			al->level = 'u';
1137 		} else {
1138 			al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
1139 			al->level = 'H';
1140 		}
1141 	}
1142 
1143 out:
1144 	return 1;
1145 }
1146 
1147 char *callchain_list__sym_name(struct callchain_list *cl,
1148 			       char *bf, size_t bfsize, bool show_dso)
1149 {
1150 	bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1151 	bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
1152 	int printed;
1153 
1154 	if (cl->ms.sym) {
1155 		const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
1156 
1157 		if (show_srcline && cl->srcline)
1158 			printed = scnprintf(bf, bfsize, "%s %s%s",
1159 					    cl->ms.sym->name, cl->srcline,
1160 					    inlined);
1161 		else
1162 			printed = scnprintf(bf, bfsize, "%s%s",
1163 					    cl->ms.sym->name, inlined);
1164 	} else
1165 		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
1166 
1167 	if (show_dso)
1168 		scnprintf(bf + printed, bfsize - printed, " %s",
1169 			  cl->ms.map ?
1170 			  cl->ms.map->dso->short_name :
1171 			  "unknown");
1172 
1173 	return bf;
1174 }
1175 
1176 char *callchain_node__scnprintf_value(struct callchain_node *node,
1177 				      char *bf, size_t bfsize, u64 total)
1178 {
1179 	double percent = 0.0;
1180 	u64 period = callchain_cumul_hits(node);
1181 	unsigned count = callchain_cumul_counts(node);
1182 
1183 	if (callchain_param.mode == CHAIN_FOLDED) {
1184 		period = node->hit;
1185 		count = node->count;
1186 	}
1187 
1188 	switch (callchain_param.value) {
1189 	case CCVAL_PERIOD:
1190 		scnprintf(bf, bfsize, "%"PRIu64, period);
1191 		break;
1192 	case CCVAL_COUNT:
1193 		scnprintf(bf, bfsize, "%u", count);
1194 		break;
1195 	case CCVAL_PERCENT:
1196 	default:
1197 		if (total)
1198 			percent = period * 100.0 / total;
1199 		scnprintf(bf, bfsize, "%.2f%%", percent);
1200 		break;
1201 	}
1202 	return bf;
1203 }
1204 
1205 int callchain_node__fprintf_value(struct callchain_node *node,
1206 				 FILE *fp, u64 total)
1207 {
1208 	double percent = 0.0;
1209 	u64 period = callchain_cumul_hits(node);
1210 	unsigned count = callchain_cumul_counts(node);
1211 
1212 	if (callchain_param.mode == CHAIN_FOLDED) {
1213 		period = node->hit;
1214 		count = node->count;
1215 	}
1216 
1217 	switch (callchain_param.value) {
1218 	case CCVAL_PERIOD:
1219 		return fprintf(fp, "%"PRIu64, period);
1220 	case CCVAL_COUNT:
1221 		return fprintf(fp, "%u", count);
1222 	case CCVAL_PERCENT:
1223 	default:
1224 		if (total)
1225 			percent = period * 100.0 / total;
1226 		return percent_color_fprintf(fp, "%.2f%%", percent);
1227 	}
1228 	return 0;
1229 }
1230 
1231 static void callchain_counts_value(struct callchain_node *node,
1232 				   u64 *branch_count, u64 *predicted_count,
1233 				   u64 *abort_count, u64 *cycles_count)
1234 {
1235 	struct callchain_list *clist;
1236 
1237 	list_for_each_entry(clist, &node->val, list) {
1238 		if (branch_count)
1239 			*branch_count += clist->branch_count;
1240 
1241 		if (predicted_count)
1242 			*predicted_count += clist->predicted_count;
1243 
1244 		if (abort_count)
1245 			*abort_count += clist->abort_count;
1246 
1247 		if (cycles_count)
1248 			*cycles_count += clist->cycles_count;
1249 	}
1250 }
1251 
1252 static int callchain_node_branch_counts_cumul(struct callchain_node *node,
1253 					      u64 *branch_count,
1254 					      u64 *predicted_count,
1255 					      u64 *abort_count,
1256 					      u64 *cycles_count)
1257 {
1258 	struct callchain_node *child;
1259 	struct rb_node *n;
1260 
1261 	n = rb_first(&node->rb_root_in);
1262 	while (n) {
1263 		child = rb_entry(n, struct callchain_node, rb_node_in);
1264 		n = rb_next(n);
1265 
1266 		callchain_node_branch_counts_cumul(child, branch_count,
1267 						   predicted_count,
1268 						   abort_count,
1269 						   cycles_count);
1270 
1271 		callchain_counts_value(child, branch_count,
1272 				       predicted_count, abort_count,
1273 				       cycles_count);
1274 	}
1275 
1276 	return 0;
1277 }
1278 
1279 int callchain_branch_counts(struct callchain_root *root,
1280 			    u64 *branch_count, u64 *predicted_count,
1281 			    u64 *abort_count, u64 *cycles_count)
1282 {
1283 	if (branch_count)
1284 		*branch_count = 0;
1285 
1286 	if (predicted_count)
1287 		*predicted_count = 0;
1288 
1289 	if (abort_count)
1290 		*abort_count = 0;
1291 
1292 	if (cycles_count)
1293 		*cycles_count = 0;
1294 
1295 	return callchain_node_branch_counts_cumul(&root->node,
1296 						  branch_count,
1297 						  predicted_count,
1298 						  abort_count,
1299 						  cycles_count);
1300 }
1301 
1302 static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1303 {
1304 	int printed;
1305 
1306 	printed = scnprintf(bf, bfsize, "%s%s:%" PRIu64 "", (idx) ? " " : " (", str, value);
1307 
1308 	return printed;
1309 }
1310 
1311 static int count_float_printf(int idx, const char *str, float value,
1312 			      char *bf, int bfsize, float threshold)
1313 {
1314 	int printed;
1315 
1316 	if (threshold != 0.0 && value < threshold)
1317 		return 0;
1318 
1319 	printed = scnprintf(bf, bfsize, "%s%s:%.1f%%", (idx) ? " " : " (", str, value);
1320 
1321 	return printed;
1322 }
1323 
1324 static int branch_to_str(char *bf, int bfsize,
1325 			 u64 branch_count, u64 predicted_count,
1326 			 u64 abort_count,
1327 			 struct branch_type_stat *brtype_stat)
1328 {
1329 	int printed, i = 0;
1330 
1331 	printed = branch_type_str(brtype_stat, bf, bfsize);
1332 	if (printed)
1333 		i++;
1334 
1335 	if (predicted_count < branch_count) {
1336 		printed += count_float_printf(i++, "predicted",
1337 				predicted_count * 100.0 / branch_count,
1338 				bf + printed, bfsize - printed, 0.0);
1339 	}
1340 
1341 	if (abort_count) {
1342 		printed += count_float_printf(i++, "abort",
1343 				abort_count * 100.0 / branch_count,
1344 				bf + printed, bfsize - printed, 0.1);
1345 	}
1346 
1347 	if (i)
1348 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1349 
1350 	return printed;
1351 }
1352 
1353 static int branch_from_str(char *bf, int bfsize,
1354 			   u64 branch_count,
1355 			   u64 cycles_count, u64 iter_count,
1356 			   u64 iter_cycles, u64 from_count)
1357 {
1358 	int printed = 0, i = 0;
1359 	u64 cycles, v = 0;
1360 
1361 	cycles = cycles_count / branch_count;
1362 	if (cycles) {
1363 		printed += count_pri64_printf(i++, "cycles",
1364 				cycles,
1365 				bf + printed, bfsize - printed);
1366 	}
1367 
1368 	if (iter_count && from_count) {
1369 		v = iter_count / from_count;
1370 		if (v) {
1371 			printed += count_pri64_printf(i++, "iter",
1372 					v, bf + printed, bfsize - printed);
1373 
1374 			printed += count_pri64_printf(i++, "avg_cycles",
1375 					iter_cycles / iter_count,
1376 					bf + printed, bfsize - printed);
1377 		}
1378 	}
1379 
1380 	if (i)
1381 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1382 
1383 	return printed;
1384 }
1385 
1386 static int counts_str_build(char *bf, int bfsize,
1387 			     u64 branch_count, u64 predicted_count,
1388 			     u64 abort_count, u64 cycles_count,
1389 			     u64 iter_count, u64 iter_cycles,
1390 			     u64 from_count,
1391 			     struct branch_type_stat *brtype_stat)
1392 {
1393 	int printed;
1394 
1395 	if (branch_count == 0)
1396 		return scnprintf(bf, bfsize, " (calltrace)");
1397 
1398 	if (brtype_stat->branch_to) {
1399 		printed = branch_to_str(bf, bfsize, branch_count,
1400 				predicted_count, abort_count, brtype_stat);
1401 	} else {
1402 		printed = branch_from_str(bf, bfsize, branch_count,
1403 				cycles_count, iter_count, iter_cycles,
1404 				from_count);
1405 	}
1406 
1407 	if (!printed)
1408 		bf[0] = 0;
1409 
1410 	return printed;
1411 }
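
/*
 * Illustrative results: " (calltrace)" when there is no branch information,
 * something like " (predicted:92.0% abort:0.1%)" for the "to" side of a
 * branch (possibly preceded by the branch type summary), and something like
 * " (cycles:2 iter:10 avg_cycles:3)" for the "from" side.
 */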
1412 
1413 static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1414 				   u64 branch_count, u64 predicted_count,
1415 				   u64 abort_count, u64 cycles_count,
1416 				   u64 iter_count, u64 iter_cycles,
1417 				   u64 from_count,
1418 				   struct branch_type_stat *brtype_stat)
1419 {
1420 	char str[256];
1421 
1422 	counts_str_build(str, sizeof(str), branch_count,
1423 			 predicted_count, abort_count, cycles_count,
1424 			 iter_count, iter_cycles, from_count, brtype_stat);
1425 
1426 	if (fp)
1427 		return fprintf(fp, "%s", str);
1428 
1429 	return scnprintf(bf, bfsize, "%s", str);
1430 }
1431 
1432 int callchain_list_counts__printf_value(struct callchain_list *clist,
1433 					FILE *fp, char *bf, int bfsize)
1434 {
1435 	u64 branch_count, predicted_count;
1436 	u64 abort_count, cycles_count;
1437 	u64 iter_count, iter_cycles;
1438 	u64 from_count;
1439 
1440 	branch_count = clist->branch_count;
1441 	predicted_count = clist->predicted_count;
1442 	abort_count = clist->abort_count;
1443 	cycles_count = clist->cycles_count;
1444 	iter_count = clist->iter_count;
1445 	iter_cycles = clist->iter_cycles;
1446 	from_count = clist->from_count;
1447 
1448 	return callchain_counts_printf(fp, bf, bfsize, branch_count,
1449 				       predicted_count, abort_count,
1450 				       cycles_count, iter_count, iter_cycles,
1451 				       from_count, &clist->brtype_stat);
1452 }
1453 
1454 static void free_callchain_node(struct callchain_node *node)
1455 {
1456 	struct callchain_list *list, *tmp;
1457 	struct callchain_node *child;
1458 	struct rb_node *n;
1459 
1460 	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
1461 		list_del_init(&list->list);
1462 		map__zput(list->ms.map);
1463 		free(list);
1464 	}
1465 
1466 	list_for_each_entry_safe(list, tmp, &node->val, list) {
1467 		list_del_init(&list->list);
1468 		map__zput(list->ms.map);
1469 		free(list);
1470 	}
1471 
1472 	n = rb_first(&node->rb_root_in);
1473 	while (n) {
1474 		child = container_of(n, struct callchain_node, rb_node_in);
1475 		n = rb_next(n);
1476 		rb_erase(&child->rb_node_in, &node->rb_root_in);
1477 
1478 		free_callchain_node(child);
1479 		free(child);
1480 	}
1481 }
1482 
1483 void free_callchain(struct callchain_root *root)
1484 {
1485 	if (!symbol_conf.use_callchain)
1486 		return;
1487 
1488 	free_callchain_node(&root->node);
1489 }
1490 
1491 static u64 decay_callchain_node(struct callchain_node *node)
1492 {
1493 	struct callchain_node *child;
1494 	struct rb_node *n;
1495 	u64 child_hits = 0;
1496 
1497 	n = rb_first(&node->rb_root_in);
1498 	while (n) {
1499 		child = container_of(n, struct callchain_node, rb_node_in);
1500 
1501 		child_hits += decay_callchain_node(child);
1502 		n = rb_next(n);
1503 	}
1504 
1505 	node->hit = (node->hit * 7) / 8;
1506 	node->children_hit = child_hits;
1507 
1508 	return node->hit;
1509 }
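
/*
 * Each decay pass keeps 7/8 of a node's hits, so chains that stop getting
 * samples fade out over successive decays (e.g. in perf top's periodic
 * refresh).
 */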
1510 
1511 void decay_callchain(struct callchain_root *root)
1512 {
1513 	if (!symbol_conf.use_callchain)
1514 		return;
1515 
1516 	decay_callchain_node(&root->node);
1517 }
1518 
1519 int callchain_node__make_parent_list(struct callchain_node *node)
1520 {
1521 	struct callchain_node *parent = node->parent;
1522 	struct callchain_list *chain, *new;
1523 	LIST_HEAD(head);
1524 
1525 	while (parent) {
1526 		list_for_each_entry_reverse(chain, &parent->val, list) {
1527 			new = malloc(sizeof(*new));
1528 			if (new == NULL)
1529 				goto out;
1530 			*new = *chain;
1531 			new->has_children = false;
1532 			map__get(new->ms.map);
1533 			list_add_tail(&new->list, &head);
1534 		}
1535 		parent = parent->parent;
1536 	}
1537 
1538 	list_for_each_entry_safe_reverse(chain, new, &head, list)
1539 		list_move_tail(&chain->list, &node->parent_val);
1540 
1541 	if (!list_empty(&node->parent_val)) {
1542 		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
1543 		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
1544 
1545 		chain = list_first_entry(&node->val, struct callchain_list, list);
1546 		chain->has_children = false;
1547 	}
1548 	return 0;
1549 
1550 out:
1551 	list_for_each_entry_safe(chain, new, &head, list) {
1552 		list_del_init(&chain->list);
1553 		map__zput(chain->ms.map);
1554 		free(chain);
1555 	}
1556 	return -ENOMEM;
1557 }
1558 
1559 int callchain_cursor__copy(struct callchain_cursor *dst,
1560 			   struct callchain_cursor *src)
1561 {
1562 	int rc = 0;
1563 
1564 	callchain_cursor_reset(dst);
1565 	callchain_cursor_commit(src);
1566 
1567 	while (true) {
1568 		struct callchain_cursor_node *node;
1569 
1570 		node = callchain_cursor_current(src);
1571 		if (node == NULL)
1572 			break;
1573 
1574 		rc = callchain_cursor_append(dst, node->ip, &node->ms,
1575 					     node->branch, &node->branch_flags,
1576 					     node->nr_loop_iter,
1577 					     node->iter_cycles,
1578 					     node->branch_from, node->srcline);
1579 		if (rc)
1580 			break;
1581 
1582 		callchain_cursor_advance(src);
1583 	}
1584 
1585 	return rc;
1586 }
1587 
1588 /*
1589  * Initialize a cursor before adding entries inside, but keep
1590  * the previously allocated entries as a cache.
1591  */
1592 void callchain_cursor_reset(struct callchain_cursor *cursor)
1593 {
1594 	struct callchain_cursor_node *node;
1595 
1596 	cursor->nr = 0;
1597 	cursor->last = &cursor->first;
1598 
1599 	for (node = cursor->first; node != NULL; node = node->next)
1600 		map__zput(node->ms.map);
1601 }
1602 
1603 void callchain_param_setup(u64 sample_type)
1604 {
1605 	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
1606 		if ((sample_type & PERF_SAMPLE_REGS_USER) &&
1607 		    (sample_type & PERF_SAMPLE_STACK_USER)) {
1608 			callchain_param.record_mode = CALLCHAIN_DWARF;
1609 			dwarf_callchain_users = true;
1610 		} else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
1611 			callchain_param.record_mode = CALLCHAIN_LBR;
1612 		else
1613 			callchain_param.record_mode = CALLCHAIN_FP;
1614 	}
1615 }
1616 
1617 static bool chain_match(struct callchain_list *base_chain,
1618 			struct callchain_list *pair_chain)
1619 {
1620 	enum match_result match;
1621 
1622 	match = match_chain_strings(base_chain->srcline,
1623 				    pair_chain->srcline);
1624 	if (match != MATCH_ERROR)
1625 		return match == MATCH_EQ;
1626 
1627 	match = match_chain_dso_addresses(base_chain->ms.map,
1628 					  base_chain->ip,
1629 					  pair_chain->ms.map,
1630 					  pair_chain->ip);
1631 
1632 	return match == MATCH_EQ;
1633 }
1634 
1635 bool callchain_cnode_matched(struct callchain_node *base_cnode,
1636 			     struct callchain_node *pair_cnode)
1637 {
1638 	struct callchain_list *base_chain, *pair_chain;
1639 	bool match = false;
1640 
1641 	pair_chain = list_first_entry(&pair_cnode->val,
1642 				      struct callchain_list,
1643 				      list);
1644 
1645 	list_for_each_entry(base_chain, &base_cnode->val, list) {
1646 		if (&pair_chain->list == &pair_cnode->val)
1647 			return false;
1648 
1649 		if (!base_chain->srcline || !pair_chain->srcline) {
1650 			pair_chain = list_next_entry(pair_chain, list);
1651 			continue;
1652 		}
1653 
1654 		match = chain_match(base_chain, pair_chain);
1655 		if (!match)
1656 			return false;
1657 
1658 		pair_chain = list_next_entry(pair_chain, list);
1659 	}
1660 
1661 	/*
1662 	 * Say chain1 is ABC and chain2 is ABCD: we consider them
1663 	 * not fully matched.
1664 	 */
1665 	if (pair_chain && (&pair_chain->list != &pair_cnode->val))
1666 		return false;
1667 
1668 	return match;
1669 }
1670 
1671 static u64 count_callchain_hits(struct hist_entry *he)
1672 {
1673 	struct rb_root *root = &he->sorted_chain;
1674 	struct rb_node *rb_node = rb_first(root);
1675 	struct callchain_node *node;
1676 	u64 chain_hits = 0;
1677 
1678 	while (rb_node) {
1679 		node = rb_entry(rb_node, struct callchain_node, rb_node);
1680 		chain_hits += node->hit;
1681 		rb_node = rb_next(rb_node);
1682 	}
1683 
1684 	return chain_hits;
1685 }
1686 
1687 u64 callchain_total_hits(struct hists *hists)
1688 {
1689 	struct rb_node *next = rb_first_cached(&hists->entries);
1690 	u64 chain_hits = 0;
1691 
1692 	while (next) {
1693 		struct hist_entry *he = rb_entry(next, struct hist_entry,
1694 						 rb_node);
1695 
1696 		chain_hits += count_callchain_hits(he);
1697 		next = rb_next(&he->rb_node);
1698 	}
1699 
1700 	return chain_hits;
1701 }
1702 
1703 s64 callchain_avg_cycles(struct callchain_node *cnode)
1704 {
1705 	struct callchain_list *chain;
1706 	s64 cycles = 0;
1707 
1708 	list_for_each_entry(chain, &cnode->val, list) {
1709 		if (chain->srcline && chain->branch_count)
1710 			cycles += chain->cycles_count / chain->branch_count;
1711 	}
1712 
1713 	return cycles;
1714 }
1715