1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3  *
4  * Parts came from builtin-annotate.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 
10 #include <errno.h>
11 #include <inttypes.h>
12 #include "util.h"
13 #include "ui/ui.h"
14 #include "sort.h"
15 #include "build-id.h"
16 #include "color.h"
17 #include "cache.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "annotate.h"
21 #include "evsel.h"
22 #include "block-range.h"
23 #include "string2.h"
24 #include "arch/common.h"
25 #include <regex.h>
26 #include <pthread.h>
27 #include <linux/bitops.h>
28 #include <linux/kernel.h>
29 #include <sys/utsname.h>
30 
31 #include "sane_ctype.h"
32 
33 const char 	*disassembler_style;
34 const char	*objdump_path;
35 static regex_t	 file_lineno;
36 
37 static struct ins_ops *ins__find(struct arch *arch, const char *name);
38 static void ins__sort(struct arch *arch);
39 static int disasm_line__parse(char *line, const char **namep, char **rawp);
40 
41 struct arch {
42 	const char	*name;
43 	struct ins	*instructions;
44 	size_t		nr_instructions;
45 	size_t		nr_instructions_allocated;
46 	struct ins_ops  *(*associate_instruction_ops)(struct arch *arch, const char *name);
47 	bool		sorted_instructions;
48 	bool		initialized;
49 	void		*priv;
50 	unsigned int	model;
51 	unsigned int	family;
52 	int		(*init)(struct arch *arch);
53 	bool		(*ins_is_fused)(struct arch *arch, const char *ins1,
54 					const char *ins2);
55 	int		(*cpuid_parse)(struct arch *arch, char *cpuid);
56 	struct		{
57 		char comment_char;
58 		char skip_functions_char;
59 	} objdump;
60 };
61 
62 static struct ins_ops call_ops;
63 static struct ins_ops dec_ops;
64 static struct ins_ops jump_ops;
65 static struct ins_ops mov_ops;
66 static struct ins_ops nop_ops;
67 static struct ins_ops lock_ops;
68 static struct ins_ops ret_ops;
69 
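/*
 * The per-arch instruction table either points at a static array defined in
 * the arch/<arch>/annotate include below, or at a heap-allocated table grown
 * here in chunks of 128 entries. The first time a static table needs to grow
 * it is copied to the heap so that new entries can be appended to it.
 */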
70 static int arch__grow_instructions(struct arch *arch)
71 {
72 	struct ins *new_instructions;
73 	size_t new_nr_allocated;
74 
75 	if (arch->nr_instructions_allocated == 0 && arch->instructions)
76 		goto grow_from_non_allocated_table;
77 
78 	new_nr_allocated = arch->nr_instructions_allocated + 128;
79 	new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
80 	if (new_instructions == NULL)
81 		return -1;
82 
83 out_update_instructions:
84 	arch->instructions = new_instructions;
85 	arch->nr_instructions_allocated = new_nr_allocated;
86 	return 0;
87 
88 grow_from_non_allocated_table:
89 	new_nr_allocated = arch->nr_instructions + 128;
90 	new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
91 	if (new_instructions == NULL)
92 		return -1;
93 
94 	memcpy(new_instructions, arch->instructions, arch->nr_instructions * sizeof(struct ins));
95 	goto out_update_instructions;
96 }
97 
98 static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
99 {
100 	struct ins *ins;
101 
102 	if (arch->nr_instructions == arch->nr_instructions_allocated &&
103 	    arch__grow_instructions(arch))
104 		return -1;
105 
106 	ins = &arch->instructions[arch->nr_instructions];
107 	ins->name = strdup(name);
108 	if (!ins->name)
109 		return -1;
110 
111 	ins->ops  = ops;
112 	arch->nr_instructions++;
113 
114 	ins__sort(arch);
115 	return 0;
116 }
117 
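/*
 * Pull in the per-arch instruction tables and init/associate callbacks that
 * the architectures[] table below refers to.
 */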
118 #include "arch/arm/annotate/instructions.c"
119 #include "arch/arm64/annotate/instructions.c"
120 #include "arch/x86/annotate/instructions.c"
121 #include "arch/powerpc/annotate/instructions.c"
122 #include "arch/s390/annotate/instructions.c"
123 
124 static struct arch architectures[] = {
125 	{
126 		.name = "arm",
127 		.init = arm__annotate_init,
128 	},
129 	{
130 		.name = "arm64",
131 		.init = arm64__annotate_init,
132 	},
133 	{
134 		.name = "x86",
135 		.instructions = x86__instructions,
136 		.nr_instructions = ARRAY_SIZE(x86__instructions),
137 		.ins_is_fused = x86__ins_is_fused,
138 		.cpuid_parse = x86__cpuid_parse,
139 		.objdump =  {
140 			.comment_char = '#',
141 		},
142 	},
143 	{
144 		.name = "powerpc",
145 		.init = powerpc__annotate_init,
146 	},
147 	{
148 		.name = "s390",
149 		.init = s390__annotate_init,
150 		.objdump =  {
151 			.comment_char = '#',
152 		},
153 	},
154 };
155 
156 static void ins__delete(struct ins_operands *ops)
157 {
158 	if (ops == NULL)
159 		return;
160 	zfree(&ops->source.raw);
161 	zfree(&ops->source.name);
162 	zfree(&ops->target.raw);
163 	zfree(&ops->target.name);
164 }
165 
166 static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
167 			      struct ins_operands *ops)
168 {
169 	return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
170 }
171 
172 int ins__scnprintf(struct ins *ins, char *bf, size_t size,
173 		  struct ins_operands *ops)
174 {
175 	if (ins->ops->scnprintf)
176 		return ins->ops->scnprintf(ins, bf, size, ops);
177 
178 	return ins__raw_scnprintf(ins, bf, size, ops);
179 }
180 
181 bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
182 {
183 	if (!arch || !arch->ins_is_fused)
184 		return false;
185 
186 	return arch->ins_is_fused(arch, ins1, ins2);
187 }
188 
189 static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
190 {
191 	char *endptr, *tok, *name;
192 
193 	ops->target.addr = strtoull(ops->raw, &endptr, 16);
194 
195 	name = strchr(endptr, '<');
196 	if (name == NULL)
197 		goto indirect_call;
198 
199 	name++;
200 
201 	if (arch->objdump.skip_functions_char &&
202 	    strchr(name, arch->objdump.skip_functions_char))
203 		return -1;
204 
205 	tok = strchr(name, '>');
206 	if (tok == NULL)
207 		return -1;
208 
209 	*tok = '\0';
210 	ops->target.name = strdup(name);
211 	*tok = '>';
212 
213 	return ops->target.name == NULL ? -1 : 0;
214 
215 indirect_call:
216 	tok = strchr(endptr, '*');
217 	if (tok == NULL) {
218 		struct symbol *sym = map__find_symbol(map, map->map_ip(map, ops->target.addr));
219 		if (sym != NULL)
220 			ops->target.name = strdup(sym->name);
221 		else
222 			ops->target.addr = 0;
223 		return 0;
224 	}
225 
226 	ops->target.addr = strtoull(tok + 1, NULL, 16);
227 	return 0;
228 }
229 
230 static int call__scnprintf(struct ins *ins, char *bf, size_t size,
231 			   struct ins_operands *ops)
232 {
233 	if (ops->target.name)
234 		return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
235 
236 	if (ops->target.addr == 0)
237 		return ins__raw_scnprintf(ins, bf, size, ops);
238 
239 	return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
240 }
241 
242 static struct ins_ops call_ops = {
243 	.parse	   = call__parse,
244 	.scnprintf = call__scnprintf,
245 };
246 
247 bool ins__is_call(const struct ins *ins)
248 {
249 	return ins->ops == &call_ops;
250 }
251 
252 static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
253 {
254 	const char *s = strchr(ops->raw, '+');
255 	const char *c = strchr(ops->raw, ',');
256 
257 	/*
258 	 * skip over up to 2 possible operands to get to the address, e.g.:
259 	 * tbnz	 w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
260 	 */
261 	if (c++ != NULL) {
262 		ops->target.addr = strtoull(c, NULL, 16);
263 		if (!ops->target.addr) {
264 			c = strchr(c, ',');
265 			if (c++ != NULL)
266 				ops->target.addr = strtoull(c, NULL, 16);
267 		}
268 	} else {
269 		ops->target.addr = strtoull(ops->raw, NULL, 16);
270 	}
271 
272 	if (s++ != NULL) {
273 		ops->target.offset = strtoull(s, NULL, 16);
274 		ops->target.offset_avail = true;
275 	} else {
276 		ops->target.offset_avail = false;
277 	}
278 
279 	return 0;
280 }
281 
282 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
283 			   struct ins_operands *ops)
284 {
285 	const char *c = strchr(ops->raw, ',');
286 
287 	if (!ops->target.addr || ops->target.offset < 0)
288 		return ins__raw_scnprintf(ins, bf, size, ops);
289 
290 	if (c != NULL) {
291 		const char *c2 = strchr(c + 1, ',');
292 
293 		/* check for 3-op insn */
294 		if (c2 != NULL)
295 			c = c2;
296 		c++;
297 
298 		/* mirror arch objdump's space-after-comma style */
299 		if (*c == ' ')
300 			c++;
301 	}
302 
303 	return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
304 			 ins->name, c ? c - ops->raw : 0, ops->raw,
305 			 ops->target.offset);
306 }
307 
308 static struct ins_ops jump_ops = {
309 	.parse	   = jump__parse,
310 	.scnprintf = jump__scnprintf,
311 };
312 
313 bool ins__is_jump(const struct ins *ins)
314 {
315 	return ins->ops == &jump_ops;
316 }
317 
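/*
 * Parse an objdump comment like "# 70afe0 <_DYNAMIC+0x2f8>" following a
 * rip-relative operand, extracting the target address and symbol name.
 */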
318 static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
319 {
320 	char *endptr, *name, *t;
321 
322 	if (strstr(raw, "(%rip)") == NULL)
323 		return 0;
324 
325 	*addrp = strtoull(comment, &endptr, 16);
326 	if (endptr == comment)
327 		return 0;
328 	name = strchr(endptr, '<');
329 	if (name == NULL)
330 		return -1;
331 
332 	name++;
333 
334 	t = strchr(name, '>');
335 	if (t == NULL)
336 		return 0;
337 
338 	*t = '\0';
339 	*namep = strdup(name);
340 	*t = '>';
341 
342 	return 0;
343 }
344 
345 static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
346 {
347 	ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
348 	if (ops->locked.ops == NULL)
349 		return 0;
350 
351 	if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
352 		goto out_free_ops;
353 
354 	ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
355 
356 	if (ops->locked.ins.ops == NULL)
357 		goto out_free_ops;
358 
359 	if (ops->locked.ins.ops->parse &&
360 	    ops->locked.ins.ops->parse(arch, ops->locked.ops, map) < 0)
361 		goto out_free_ops;
362 
363 	return 0;
364 
365 out_free_ops:
366 	zfree(&ops->locked.ops);
367 	return 0;
368 }
369 
370 static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
371 			   struct ins_operands *ops)
372 {
373 	int printed;
374 
375 	if (ops->locked.ins.ops == NULL)
376 		return ins__raw_scnprintf(ins, bf, size, ops);
377 
378 	printed = scnprintf(bf, size, "%-6s ", ins->name);
379 	return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
380 					size - printed, ops->locked.ops);
381 }
382 
383 static void lock__delete(struct ins_operands *ops)
384 {
385 	struct ins *ins = &ops->locked.ins;
386 
387 	if (ins->ops && ins->ops->free)
388 		ins->ops->free(ops->locked.ops);
389 	else
390 		ins__delete(ops->locked.ops);
391 
392 	zfree(&ops->locked.ops);
393 	zfree(&ops->target.raw);
394 	zfree(&ops->target.name);
395 }
396 
397 static struct ins_ops lock_ops = {
398 	.free	   = lock__delete,
399 	.parse	   = lock__parse,
400 	.scnprintf = lock__scnprintf,
401 };
402 
403 static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused)
404 {
405 	char *s = strchr(ops->raw, ','), *target, *comment, prev;
406 
407 	if (s == NULL)
408 		return -1;
409 
410 	*s = '\0';
411 	ops->source.raw = strdup(ops->raw);
412 	*s = ',';
413 
414 	if (ops->source.raw == NULL)
415 		return -1;
416 
417 	target = ++s;
418 	comment = strchr(s, arch->objdump.comment_char);
419 
420 	if (comment != NULL)
421 		s = comment - 1;
422 	else
423 		s = strchr(s, '\0') - 1;
424 
425 	while (s > target && isspace(s[0]))
426 		--s;
427 	s++;
428 	prev = *s;
429 	*s = '\0';
430 
431 	ops->target.raw = strdup(target);
432 	*s = prev;
433 
434 	if (ops->target.raw == NULL)
435 		goto out_free_source;
436 
437 	if (comment == NULL)
438 		return 0;
439 
440 	comment = ltrim(comment);
441 	comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
442 	comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
443 
444 	return 0;
445 
446 out_free_source:
447 	zfree(&ops->source.raw);
448 	return -1;
449 }
450 
451 static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
452 			   struct ins_operands *ops)
453 {
454 	return scnprintf(bf, size, "%-6s %s,%s", ins->name,
455 			 ops->source.name ?: ops->source.raw,
456 			 ops->target.name ?: ops->target.raw);
457 }
458 
459 static struct ins_ops mov_ops = {
460 	.parse	   = mov__parse,
461 	.scnprintf = mov__scnprintf,
462 };
463 
464 static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
465 {
466 	char *target, *comment, *s, prev;
467 
468 	target = s = ops->raw;
469 
470 	while (s[0] != '\0' && !isspace(s[0]))
471 		++s;
472 	prev = *s;
473 	*s = '\0';
474 
475 	ops->target.raw = strdup(target);
476 	*s = prev;
477 
478 	if (ops->target.raw == NULL)
479 		return -1;
480 
481 	comment = strchr(s, arch->objdump.comment_char);
482 	if (comment == NULL)
483 		return 0;
484 
485 	comment = ltrim(comment);
486 	comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
487 
488 	return 0;
489 }
490 
491 static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
492 			   struct ins_operands *ops)
493 {
494 	return scnprintf(bf, size, "%-6s %s", ins->name,
495 			 ops->target.name ?: ops->target.raw);
496 }
497 
498 static struct ins_ops dec_ops = {
499 	.parse	   = dec__parse,
500 	.scnprintf = dec__scnprintf,
501 };
502 
503 static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
504 			  struct ins_operands *ops __maybe_unused)
505 {
506 	return scnprintf(bf, size, "%-6s", "nop");
507 }
508 
509 static struct ins_ops nop_ops = {
510 	.scnprintf = nop__scnprintf,
511 };
512 
513 static struct ins_ops ret_ops = {
514 	.scnprintf = ins__raw_scnprintf,
515 };
516 
517 bool ins__is_ret(const struct ins *ins)
518 {
519 	return ins->ops == &ret_ops;
520 }
521 
522 bool ins__is_lock(const struct ins *ins)
523 {
524 	return ins->ops == &lock_ops;
525 }
526 
527 static int ins__key_cmp(const void *name, const void *insp)
528 {
529 	const struct ins *ins = insp;
530 
531 	return strcmp(name, ins->name);
532 }
533 
534 static int ins__cmp(const void *a, const void *b)
535 {
536 	const struct ins *ia = a;
537 	const struct ins *ib = b;
538 
539 	return strcmp(ia->name, ib->name);
540 }
541 
542 static void ins__sort(struct arch *arch)
543 {
544 	const int nmemb = arch->nr_instructions;
545 
546 	qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
547 }
548 
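/*
 * Instruction lookup: the table is sorted by name on first use and searched
 * with bsearch(); ins__find() falls back to the arch's
 * associate_instruction_ops() hook for names that are not in the table.
 */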
549 static struct ins_ops *__ins__find(struct arch *arch, const char *name)
550 {
551 	struct ins *ins;
552 	const int nmemb = arch->nr_instructions;
553 
554 	if (!arch->sorted_instructions) {
555 		ins__sort(arch);
556 		arch->sorted_instructions = true;
557 	}
558 
559 	ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
560 	return ins ? ins->ops : NULL;
561 }
562 
563 static struct ins_ops *ins__find(struct arch *arch, const char *name)
564 {
565 	struct ins_ops *ops = __ins__find(arch, name);
566 
567 	if (!ops && arch->associate_instruction_ops)
568 		ops = arch->associate_instruction_ops(arch, name);
569 
570 	return ops;
571 }
572 
573 static int arch__key_cmp(const void *name, const void *archp)
574 {
575 	const struct arch *arch = archp;
576 
577 	return strcmp(name, arch->name);
578 }
579 
580 static int arch__cmp(const void *a, const void *b)
581 {
582 	const struct arch *aa = a;
583 	const struct arch *ab = b;
584 
585 	return strcmp(aa->name, ab->name);
586 }
587 
588 static void arch__sort(void)
589 {
590 	const int nmemb = ARRAY_SIZE(architectures);
591 
592 	qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
593 }
594 
595 static struct arch *arch__find(const char *name)
596 {
597 	const int nmemb = ARRAY_SIZE(architectures);
598 	static bool sorted;
599 
600 	if (!sorted) {
601 		arch__sort();
602 		sorted = true;
603 	}
604 
605 	return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
606 }
607 
608 int symbol__alloc_hist(struct symbol *sym)
609 {
610 	struct annotation *notes = symbol__annotation(sym);
611 	size_t size = symbol__size(sym);
612 	size_t sizeof_sym_hist;
613 
614 	/*
615 	 * Add a buffer of one element for zero length symbols.
616 	 * When a sample is taken from the first instruction of a
617 	 * zero length symbol, perf still resolves it and
618 	 * shows the symbol name in perf report, allowing it
619 	 * to be annotated.
620 	 */
621 	if (size == 0)
622 		size = 1;
623 
624 	/* Check for overflow when calculating sizeof_sym_hist */
625 	if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry))
626 		return -1;
627 
628 	sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));
629 
630 	/* Check for overflow in zalloc argument */
631 	if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
632 				/ symbol_conf.nr_events)
633 		return -1;
634 
635 	notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
636 	if (notes->src == NULL)
637 		return -1;
638 	notes->src->sizeof_sym_hist = sizeof_sym_hist;
639 	notes->src->nr_histograms   = symbol_conf.nr_events;
640 	INIT_LIST_HEAD(&notes->src->source);
641 	return 0;
642 }
643 
644 /* The cycles histogram is lazily allocated. */
645 static int symbol__alloc_hist_cycles(struct symbol *sym)
646 {
647 	struct annotation *notes = symbol__annotation(sym);
648 	const size_t size = symbol__size(sym);
649 
650 	notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
651 	if (notes->src->cycles_hist == NULL)
652 		return -1;
653 	return 0;
654 }
655 
656 void symbol__annotate_zero_histograms(struct symbol *sym)
657 {
658 	struct annotation *notes = symbol__annotation(sym);
659 
660 	pthread_mutex_lock(&notes->lock);
661 	if (notes->src != NULL) {
662 		memset(notes->src->histograms, 0,
663 		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
664 		if (notes->src->cycles_hist)
665 			memset(notes->src->cycles_hist, 0,
666 				symbol__size(sym) * sizeof(struct cyc_hist));
667 	}
668 	pthread_mutex_unlock(&notes->lock);
669 }
670 
671 static int __symbol__account_cycles(struct annotation *notes,
672 				    u64 start,
673 				    unsigned offset, unsigned cycles,
674 				    unsigned have_start)
675 {
676 	struct cyc_hist *ch;
677 
678 	ch = notes->src->cycles_hist;
679 	/*
680 	 * For now we can only account one basic block per
681 	 * final jump. But multiple could be overlapping.
682 	 * Always account the longest one. So when
683 	 * a shorter one has already been seen, throw it away.
684 	 *
685 	 * We separately always account the full cycles.
686 	 */
687 	ch[offset].num_aggr++;
688 	ch[offset].cycles_aggr += cycles;
689 
690 	if (!have_start && ch[offset].have_start)
691 		return 0;
692 	if (ch[offset].num) {
693 		if (have_start && (!ch[offset].have_start ||
694 				   ch[offset].start > start)) {
695 			ch[offset].have_start = 0;
696 			ch[offset].cycles = 0;
697 			ch[offset].num = 0;
698 			if (ch[offset].reset < 0xffff)
699 				ch[offset].reset++;
700 		} else if (have_start &&
701 			   ch[offset].start < start)
702 			return 0;
703 	}
704 	ch[offset].have_start = have_start;
705 	ch[offset].start = start;
706 	ch[offset].cycles += cycles;
707 	ch[offset].num++;
708 	return 0;
709 }
710 
711 static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
712 				      struct annotation *notes, int evidx, u64 addr,
713 				      struct perf_sample *sample)
714 {
715 	unsigned offset;
716 	struct sym_hist *h;
717 
718 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
719 
720 	if ((addr < sym->start || addr >= sym->end) &&
721 	    (addr != sym->end || sym->start != sym->end)) {
722 		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
723 		       __func__, __LINE__, sym->name, sym->start, addr, sym->end);
724 		return -ERANGE;
725 	}
726 
727 	offset = addr - sym->start;
728 	h = annotation__histogram(notes, evidx);
729 	h->nr_samples++;
730 	h->addr[offset].nr_samples++;
731 	h->period += sample->period;
732 	h->addr[offset].period += sample->period;
733 
734 	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
735 		  ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
736 		  sym->start, sym->name, addr, addr - sym->start, evidx,
737 		  h->addr[offset].nr_samples, h->addr[offset].period);
738 	return 0;
739 }
740 
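/*
 * Histograms are allocated lazily: the per-event sample histograms on the
 * first sample for the symbol, and the cycles histogram only when cycle
 * accounting is requested.
 */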
741 static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
742 {
743 	struct annotation *notes = symbol__annotation(sym);
744 
745 	if (notes->src == NULL) {
746 		if (symbol__alloc_hist(sym) < 0)
747 			return NULL;
748 	}
749 	if (!notes->src->cycles_hist && cycles) {
750 		if (symbol__alloc_hist_cycles(sym) < 0)
751 			return NULL;
752 	}
753 	return notes;
754 }
755 
756 static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
757 				    int evidx, u64 addr,
758 				    struct perf_sample *sample)
759 {
760 	struct annotation *notes;
761 
762 	if (sym == NULL)
763 		return 0;
764 	notes = symbol__get_annotation(sym, false);
765 	if (notes == NULL)
766 		return -ENOMEM;
767 	return __symbol__inc_addr_samples(sym, map, notes, evidx, addr, sample);
768 }
769 
770 static int symbol__account_cycles(u64 addr, u64 start,
771 				  struct symbol *sym, unsigned cycles)
772 {
773 	struct annotation *notes;
774 	unsigned offset;
775 
776 	if (sym == NULL)
777 		return 0;
778 	notes = symbol__get_annotation(sym, true);
779 	if (notes == NULL)
780 		return -ENOMEM;
781 	if (addr < sym->start || addr >= sym->end)
782 		return -ERANGE;
783 
784 	if (start) {
785 		if (start < sym->start || start >= sym->end)
786 			return -ERANGE;
787 		if (start >= addr)
788 			start = 0;
789 	}
790 	offset = addr - sym->start;
791 	return __symbol__account_cycles(notes,
792 					start ? start - sym->start : 0,
793 					offset, cycles,
794 					!!start);
795 }
796 
797 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
798 				    struct addr_map_symbol *start,
799 				    unsigned cycles)
800 {
801 	u64 saddr = 0;
802 	int err;
803 
804 	if (!cycles)
805 		return 0;
806 
807 	/*
808 	 * Only set start when IPC can be computed. We can only
809 	 * compute it when the basic block is completely in a single
810 	 * function.
811 	 * Special case the situation where the jump is elsewhere,
812 	 * but it starts at the function start.
813 	 */
814 	if (start &&
815 		(start->sym == ams->sym ||
816 		 (ams->sym &&
817 		   start->addr == ams->sym->start + ams->map->start)))
818 		saddr = start->al_addr;
819 	if (saddr == 0)
820 		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
821 			ams->addr,
822 			start ? start->addr : 0,
823 			ams->sym ? ams->sym->start + ams->map->start : 0,
824 			saddr);
825 	err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
826 	if (err)
827 		pr_debug2("account_cycles failed %d\n", err);
828 	return err;
829 }
830 
831 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
832 				 int evidx)
833 {
834 	return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr, sample);
835 }
836 
837 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
838 				 int evidx, u64 ip)
839 {
840 	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip, sample);
841 }
842 
843 static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
844 {
845 	dl->ins.ops = ins__find(arch, dl->ins.name);
846 
847 	if (!dl->ins.ops)
848 		return;
849 
850 	if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, map) < 0)
851 		dl->ins.ops = NULL;
852 }
853 
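/*
 * Split an objdump line into the instruction name and its raw operands,
 * e.g. "  mov    %rsi,%rdi" yields name "mov" and raw "%rsi,%rdi".
 */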
854 static int disasm_line__parse(char *line, const char **namep, char **rawp)
855 {
856 	char tmp, *name = ltrim(line);
857 
858 	if (name[0] == '\0')
859 		return -1;
860 
861 	*rawp = name + 1;
862 
863 	while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
864 		++*rawp;
865 
866 	tmp = (*rawp)[0];
867 	(*rawp)[0] = '\0';
868 	*namep = strdup(name);
869 
870 	if (*namep == NULL)
871 		goto out;
872 
873 	(*rawp)[0] = tmp;
874 	*rawp = ltrim(*rawp);
875 
876 	return 0;
877 
878 out:
879 	return -1;
880 }
881 
882 static struct disasm_line *disasm_line__new(s64 offset, char *line,
883 					    size_t privsize, int line_nr,
884 					    struct arch *arch,
885 					    struct map *map)
886 {
887 	struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
888 
889 	if (dl != NULL) {
890 		dl->offset = offset;
891 		dl->line = strdup(line);
892 		dl->line_nr = line_nr;
893 		if (dl->line == NULL)
894 			goto out_delete;
895 
896 		if (offset != -1) {
897 			if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
898 				goto out_free_line;
899 
900 			disasm_line__init_ins(dl, arch, map);
901 		}
902 	}
903 
904 	return dl;
905 
906 out_free_line:
907 	zfree(&dl->line);
908 out_delete:
909 	free(dl);
910 	return NULL;
911 }
912 
913 void disasm_line__free(struct disasm_line *dl)
914 {
915 	zfree(&dl->line);
916 	if (dl->ins.ops && dl->ins.ops->free)
917 		dl->ins.ops->free(&dl->ops);
918 	else
919 		ins__delete(&dl->ops);
920 	free((void *)dl->ins.name);
921 	dl->ins.name = NULL;
922 	free(dl);
923 }
924 
925 int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
926 {
927 	if (raw || !dl->ins.ops)
928 		return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
929 
930 	return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
931 }
932 
933 static void disasm__add(struct list_head *head, struct disasm_line *line)
934 {
935 	list_add_tail(&line->node, head);
936 }
937 
938 struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
939 {
940 	list_for_each_entry_continue(pos, head, node)
941 		if (pos->offset >= 0)
942 			return pos;
943 
944 	return NULL;
945 }
946 
947 double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
948 			    s64 end, const char **path, struct sym_hist_entry *sample)
949 {
950 	struct source_line *src_line = notes->src->lines;
951 	double percent = 0.0;
952 
953 	sample->nr_samples = sample->period = 0;
954 
955 	if (src_line) {
956 		size_t sizeof_src_line = sizeof(*src_line) +
957 				sizeof(src_line->samples) * (src_line->nr_pcnt - 1);
958 
959 		while (offset < end) {
960 			src_line = (void *)notes->src->lines +
961 					(sizeof_src_line * offset);
962 
963 			if (*path == NULL)
964 				*path = src_line->path;
965 
966 			percent += src_line->samples[evidx].percent;
967 			sample->nr_samples += src_line->samples[evidx].nr;
968 			offset++;
969 		}
970 	} else {
971 		struct sym_hist *h = annotation__histogram(notes, evidx);
972 		unsigned int hits = 0;
973 		u64 period = 0;
974 
975 		while (offset < end) {
976 			hits   += h->addr[offset].nr_samples;
977 			period += h->addr[offset].period;
978 			++offset;
979 		}
980 
981 		if (h->nr_samples) {
982 			sample->period	   = period;
983 			sample->nr_samples = hits;
984 			percent = 100.0 * hits / h->nr_samples;
985 		}
986 	}
987 
988 	return percent;
989 }
990 
991 static const char *annotate__address_color(struct block_range *br)
992 {
993 	double cov = block_range__coverage(br);
994 
995 	if (cov >= 0) {
996 		/* mark red for >75% coverage */
997 		if (cov > 0.75)
998 			return PERF_COLOR_RED;
999 
1000 		/* mark dull for <1% coverage */
1001 		if (cov < 0.01)
1002 			return PERF_COLOR_NORMAL;
1003 	}
1004 
1005 	return PERF_COLOR_MAGENTA;
1006 }
1007 
1008 static const char *annotate__asm_color(struct block_range *br)
1009 {
1010 	double cov = block_range__coverage(br);
1011 
1012 	if (cov >= 0) {
1013 		/* mark dull for <1% coverage */
1014 		if (cov < 0.01)
1015 			return PERF_COLOR_NORMAL;
1016 	}
1017 
1018 	return PERF_COLOR_BLUE;
1019 }
1020 
1021 static void annotate__branch_printf(struct block_range *br, u64 addr)
1022 {
1023 	bool emit_comment = true;
1024 
1025 	if (!br)
1026 		return;
1027 
1028 #if 1
1029 	if (br->is_target && br->start == addr) {
1030 		struct block_range *branch = br;
1031 		double p;
1032 
1033 		/*
1034 		 * Find matching branch to our target.
1035 		 */
1036 		while (!branch->is_branch)
1037 			branch = block_range__next(branch);
1038 
1039 		p = 100 *(double)br->entry / branch->coverage;
1040 
1041 		if (p > 0.1) {
1042 			if (emit_comment) {
1043 				emit_comment = false;
1044 				printf("\t#");
1045 			}
1046 
1047 			/*
1048 			 * The percentage of coverage joined at this target in relation
1049 			 * to the next branch.
1050 			 */
1051 			printf(" +%.2f%%", p);
1052 		}
1053 	}
1054 #endif
1055 	if (br->is_branch && br->end == addr) {
1056 		double p = 100*(double)br->taken / br->coverage;
1057 
1058 		if (p > 0.1) {
1059 			if (emit_comment) {
1060 				emit_comment = false;
1061 				printf("\t#");
1062 			}
1063 
1064 			/*
1065 			 * The percentage of coverage leaving at this branch, and
1066 			 * its prediction ratio.
1067 			 */
1068 			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred  / br->taken);
1069 		}
1070 	}
1071 }
1072 
1073 
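/*
 * Print one annotated line: for real instructions compute the per-event
 * percentage of samples covering [offset, next instruction), filter by
 * min_pcnt/max_lines, and print the (possibly colored) source path, address
 * and disassembly, plus branch coverage where available.
 */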
1074 static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
1075 		      struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
1076 		      int max_lines, struct disasm_line *queue)
1077 {
1078 	static const char *prev_line;
1079 	static const char *prev_color;
1080 
1081 	if (dl->offset != -1) {
1082 		const char *path = NULL;
1083 		double percent, max_percent = 0.0;
1084 		double *ppercents = &percent;
1085 		struct sym_hist_entry sample;
1086 		struct sym_hist_entry *psamples = &sample;
1087 		int i, nr_percent = 1;
1088 		const char *color;
1089 		struct annotation *notes = symbol__annotation(sym);
1090 		s64 offset = dl->offset;
1091 		const u64 addr = start + offset;
1092 		struct disasm_line *next;
1093 		struct block_range *br;
1094 
1095 		next = disasm__get_next_ip_line(&notes->src->source, dl);
1096 
1097 		if (perf_evsel__is_group_event(evsel)) {
1098 			nr_percent = evsel->nr_members;
1099 			ppercents = calloc(nr_percent, sizeof(double));
1100 			psamples = calloc(nr_percent, sizeof(struct sym_hist_entry));
1101 			if (ppercents == NULL || psamples == NULL) {
1102 				return -1;
1103 			}
1104 		}
1105 
1106 		for (i = 0; i < nr_percent; i++) {
1107 			percent = disasm__calc_percent(notes,
1108 					notes->src->lines ? i : evsel->idx + i,
1109 					offset,
1110 					next ? next->offset : (s64) len,
1111 					&path, &sample);
1112 
1113 			ppercents[i] = percent;
1114 			psamples[i] = sample;
1115 			if (percent > max_percent)
1116 				max_percent = percent;
1117 		}
1118 
1119 		if (max_percent < min_pcnt)
1120 			return -1;
1121 
1122 		if (max_lines && printed >= max_lines)
1123 			return 1;
1124 
1125 		if (queue != NULL) {
1126 			list_for_each_entry_from(queue, &notes->src->source, node) {
1127 				if (queue == dl)
1128 					break;
1129 				disasm_line__print(queue, sym, start, evsel, len,
1130 						    0, 0, 1, NULL);
1131 			}
1132 		}
1133 
1134 		color = get_percent_color(max_percent);
1135 
1136 		/*
1137 		 * Also color the filename and line if needed, with
1138 		 * the same color as the percentage. Don't print it
1139 		 * twice for nearby colored addresses with the same filename:line
1140 		 */
1141 		if (path) {
1142 			if (!prev_line || strcmp(prev_line, path)
1143 				       || color != prev_color) {
1144 				color_fprintf(stdout, color, " %s", path);
1145 				prev_line = path;
1146 				prev_color = color;
1147 			}
1148 		}
1149 
1150 		for (i = 0; i < nr_percent; i++) {
1151 			percent = ppercents[i];
1152 			sample = psamples[i];
1153 			color = get_percent_color(percent);
1154 
1155 			if (symbol_conf.show_total_period)
1156 				color_fprintf(stdout, color, " %11" PRIu64,
1157 					      sample.period);
1158 			else if (symbol_conf.show_nr_samples)
1159 				color_fprintf(stdout, color, " %7" PRIu64,
1160 					      sample.nr_samples);
1161 			else
1162 				color_fprintf(stdout, color, " %7.2f", percent);
1163 		}
1164 
1165 		printf(" :	");
1166 
1167 		br = block_range__find(addr);
1168 		color_fprintf(stdout, annotate__address_color(br), "  %" PRIx64 ":", addr);
1169 		color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
1170 		annotate__branch_printf(br, addr);
1171 		printf("\n");
1172 
1173 		if (ppercents != &percent)
1174 			free(ppercents);
1175 
1176 		if (psamples != &sample)
1177 			free(psamples);
1178 
1179 	} else if (max_lines && printed >= max_lines)
1180 		return 1;
1181 	else {
1182 		int width = symbol_conf.show_total_period ? 12 : 8;
1183 
1184 		if (queue)
1185 			return -1;
1186 
1187 		if (perf_evsel__is_group_event(evsel))
1188 			width *= evsel->nr_members;
1189 
1190 		if (!*dl->line)
1191 			printf(" %*s:\n", width, " ");
1192 		else
1193 			printf(" %*s:	%s\n", width, " ", dl->line);
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 /*
1200  * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
1201  * which looks like the following:
1202  *
1203  *  0000000000415500 <_init>:
1204  *    415500:       sub    $0x8,%rsp
1205  *    415504:       mov    0x2f5ad5(%rip),%rax        # 70afe0 <_DYNAMIC+0x2f8>
1206  *    41550b:       test   %rax,%rax
1207  *    41550e:       je     415515 <_init+0x15>
1208  *    415510:       callq  416e70 <__gmon_start__@plt>
1209  *    415515:       add    $0x8,%rsp
1210  *    415519:       retq
1211  *
1212  * it will be parsed and saved into struct disasm_line as
1213  *  <offset>       <name>  <ops.raw>
1214  *
1215  * The offset will be a relative offset from the start of the symbol and -1
1216  * means that it's not a disassembly line, so it should be treated differently.
1217  * The ops.raw part will be parsed further according to the type of the instruction.
1218  */
1219 static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
1220 				      struct arch *arch,
1221 				      FILE *file, size_t privsize,
1222 				      int *line_nr)
1223 {
1224 	struct annotation *notes = symbol__annotation(sym);
1225 	struct disasm_line *dl;
1226 	char *line = NULL, *parsed_line, *tmp, *tmp2;
1227 	size_t line_len;
1228 	s64 line_ip, offset = -1;
1229 	regmatch_t match[2];
1230 
1231 	if (getline(&line, &line_len, file) < 0)
1232 		return -1;
1233 
1234 	if (!line)
1235 		return -1;
1236 
1237 	line_ip = -1;
1238 	parsed_line = rtrim(line);
1239 
1240 	/* /filename:linenr ? Save line number and ignore. */
1241 	if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
1242 		*line_nr = atoi(parsed_line + match[1].rm_so);
1243 		return 0;
1244 	}
1245 
1246 	tmp = ltrim(parsed_line);
1247 	if (*tmp) {
1248 		/*
1249 		 * Parse hex addresses followed by ':'
1250 		 */
1251 		line_ip = strtoull(tmp, &tmp2, 16);
1252 		if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
1253 			line_ip = -1;
1254 	}
1255 
1256 	if (line_ip != -1) {
1257 		u64 start = map__rip_2objdump(map, sym->start),
1258 		    end = map__rip_2objdump(map, sym->end);
1259 
1260 		offset = line_ip - start;
1261 		if ((u64)line_ip < start || (u64)line_ip >= end)
1262 			offset = -1;
1263 		else
1264 			parsed_line = tmp2 + 1;
1265 	}
1266 
1267 	dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
1268 	free(line);
1269 	(*line_nr)++;
1270 
1271 	if (dl == NULL)
1272 		return -1;
1273 
1274 	if (!disasm_line__has_offset(dl)) {
1275 		dl->ops.target.offset = dl->ops.target.addr -
1276 					map__rip_2objdump(map, sym->start);
1277 		dl->ops.target.offset_avail = true;
1278 	}
1279 
1280 	/* kcore has no symbols, so add the call target name */
1281 	if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
1282 		struct addr_map_symbol target = {
1283 			.map = map,
1284 			.addr = dl->ops.target.addr,
1285 		};
1286 
1287 		if (!map_groups__find_ams(&target) &&
1288 		    target.sym->start == target.al_addr)
1289 			dl->ops.target.name = strdup(target.sym->name);
1290 	}
1291 
1292 	disasm__add(&notes->src->source, dl);
1293 
1294 	return 0;
1295 }
1296 
1297 static __attribute__((constructor)) void symbol__init_regexpr(void)
1298 {
1299 	regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
1300 }
1301 
1302 static void delete_last_nop(struct symbol *sym)
1303 {
1304 	struct annotation *notes = symbol__annotation(sym);
1305 	struct list_head *list = &notes->src->source;
1306 	struct disasm_line *dl;
1307 
1308 	while (!list_empty(list)) {
1309 		dl = list_entry(list->prev, struct disasm_line, node);
1310 
1311 		if (dl->ins.ops) {
1312 			if (dl->ins.ops != &nop_ops)
1313 				return;
1314 		} else {
1315 			if (!strstr(dl->line, " nop ") &&
1316 			    !strstr(dl->line, " nopl ") &&
1317 			    !strstr(dl->line, " nopw "))
1318 				return;
1319 		}
1320 
1321 		list_del(&dl->node);
1322 		disasm_line__free(dl);
1323 	}
1324 }
1325 
1326 int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
1327 			      int errnum, char *buf, size_t buflen)
1328 {
1329 	struct dso *dso = map->dso;
1330 
1331 	BUG_ON(buflen == 0);
1332 
1333 	if (errnum >= 0) {
1334 		str_error_r(errnum, buf, buflen);
1335 		return 0;
1336 	}
1337 
1338 	switch (errnum) {
1339 	case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
1340 		char bf[SBUILD_ID_SIZE + 15] = " with build id ";
1341 		char *build_id_msg = NULL;
1342 
1343 		if (dso->has_build_id) {
1344 			build_id__sprintf(dso->build_id,
1345 					  sizeof(dso->build_id), bf + 15);
1346 			build_id_msg = bf;
1347 		}
1348 		scnprintf(buf, buflen,
1349 			  "No vmlinux file%s\nwas found in the path.\n\n"
1350 			  "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
1351 			  "Please use:\n\n"
1352 			  "  perf buildid-cache -vu vmlinux\n\n"
1353 			  "or:\n\n"
1354 			  "  --vmlinux vmlinux\n", build_id_msg ?: "");
1355 	}
1356 		break;
1357 	default:
1358 		scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
1359 		break;
1360 	}
1361 
1362 	return 0;
1363 }
1364 
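/*
 * Pick the file to disassemble: prefer the build-id cache entry for the DSO
 * and fall back to the DSO's long_name when no usable cached file (or only a
 * kallsyms link) is found.
 */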
1365 static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
1366 {
1367 	char linkname[PATH_MAX];
1368 	char *build_id_filename;
1369 	char *build_id_path = NULL;
1370 	char *pos;
1371 
1372 	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
1373 	    !dso__is_kcore(dso))
1374 		return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
1375 
1376 	build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
1377 	if (build_id_filename) {
1378 		__symbol__join_symfs(filename, filename_size, build_id_filename);
1379 		free(build_id_filename);
1380 	} else {
1381 		if (dso->has_build_id)
1382 			return ENOMEM;
1383 		goto fallback;
1384 	}
1385 
1386 	build_id_path = strdup(filename);
1387 	if (!build_id_path)
1388 		return -1;
1389 
1390 	/*
1391 	 * The old style build-id cache has a name of XX/XXXXXXX.., while
1392 	 * the new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
1393 	 * Extract the build-id part of the dirname in the new style only.
1394 	 */
1395 	pos = strrchr(build_id_path, '/');
1396 	if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
1397 		dirname(build_id_path);
1398 
1399 	if (dso__is_kcore(dso) ||
1400 	    readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
1401 	    strstr(linkname, DSO__NAME_KALLSYMS) ||
1402 	    access(filename, R_OK)) {
1403 fallback:
1404 		/*
1405 		 * If we don't have build-ids or the build-id file isn't in the
1406 		 * cache, or is just a kallsyms file, well, let's hope that this
1407 		 * DSO is the same as when 'perf record' ran.
1408 		 */
1409 		__symbol__join_symfs(filename, filename_size, dso->long_name);
1410 	}
1411 
1412 	free(build_id_path);
1413 	return 0;
1414 }
1415 
1416 static const char *annotate__norm_arch(const char *arch_name)
1417 {
1418 	struct utsname uts;
1419 
1420 	if (!arch_name) { /* Assume we are annotating locally. */
1421 		if (uname(&uts) < 0)
1422 			return NULL;
1423 		arch_name = uts.machine;
1424 	}
1425 	return normalize_arch((char *)arch_name);
1426 }
1427 
1428 int symbol__disassemble(struct symbol *sym, struct map *map,
1429 			const char *arch_name, size_t privsize,
1430 			struct arch **parch, char *cpuid)
1431 {
1432 	struct dso *dso = map->dso;
1433 	char *command;
1434 	struct arch *arch = NULL;
1435 	FILE *file;
1436 	char symfs_filename[PATH_MAX];
1437 	struct kcore_extract kce;
1438 	bool delete_extract = false;
1439 	int stdout_fd[2];
1440 	int lineno = 0;
1441 	int nline;
1442 	pid_t pid;
1443 	int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
1444 
1445 	if (err)
1446 		return err;
1447 
1448 	arch_name = annotate__norm_arch(arch_name);
1449 	if (!arch_name)
1450 		return -1;
1451 
1452 	arch = arch__find(arch_name);
1453 	if (arch == NULL)
1454 		return -ENOTSUP;
1455 
1456 	if (parch)
1457 		*parch = arch;
1458 
1459 	if (arch->init) {
1460 		err = arch->init(arch);
1461 		if (err) {
1462 			pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
1463 			return err;
1464 		}
1465 	}
1466 
1467 	if (arch->cpuid_parse && cpuid)
1468 		arch->cpuid_parse(arch, cpuid);
1469 
1470 	pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
1471 		 symfs_filename, sym->name, map->unmap_ip(map, sym->start),
1472 		 map->unmap_ip(map, sym->end));
1473 
1474 	pr_debug("annotating [%p] %30s : [%p] %30s\n",
1475 		 dso, dso->long_name, sym, sym->name);
1476 
1477 	if (dso__is_kcore(dso)) {
1478 		kce.kcore_filename = symfs_filename;
1479 		kce.addr = map__rip_2objdump(map, sym->start);
1480 		kce.offs = sym->start;
1481 		kce.len = sym->end - sym->start;
1482 		if (!kcore_extract__create(&kce)) {
1483 			delete_extract = true;
1484 			strlcpy(symfs_filename, kce.extract_filename,
1485 				sizeof(symfs_filename));
1486 		}
1487 	} else if (dso__needs_decompress(dso)) {
1488 		char tmp[KMOD_DECOMP_LEN];
1489 
1490 		if (dso__decompress_kmodule_path(dso, symfs_filename,
1491 						 tmp, sizeof(tmp)) < 0)
1492 			goto out;
1493 
1494 		strcpy(symfs_filename, tmp);
1495 	}
1496 
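	/*
	 * Build the objdump command: disassemble only this symbol's address
	 * range (-d -l), optionally interleaving source (-S) and suppressing
	 * raw instruction bytes, demangling names (-C), then filter out the
	 * "<file>:" header lines and expand tabs.
	 */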
1497 	err = asprintf(&command,
1498 		 "%s %s%s --start-address=0x%016" PRIx64
1499 		 " --stop-address=0x%016" PRIx64
1500 		 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
1501 		 objdump_path ? objdump_path : "objdump",
1502 		 disassembler_style ? "-M " : "",
1503 		 disassembler_style ? disassembler_style : "",
1504 		 map__rip_2objdump(map, sym->start),
1505 		 map__rip_2objdump(map, sym->end),
1506 		 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
1507 		 symbol_conf.annotate_src ? "-S" : "",
1508 		 symfs_filename, symfs_filename);
1509 
1510 	if (err < 0) {
1511 		pr_err("Failure allocating memory for the command to run\n");
1512 		goto out_remove_tmp;
1513 	}
1514 
1515 	pr_debug("Executing: %s\n", command);
1516 
1517 	err = -1;
1518 	if (pipe(stdout_fd) < 0) {
1519 		pr_err("Failure creating the pipe to run %s\n", command);
1520 		goto out_free_command;
1521 	}
1522 
1523 	pid = fork();
1524 	if (pid < 0) {
1525 		pr_err("Failure forking to run %s\n", command);
1526 		goto out_close_stdout;
1527 	}
1528 
1529 	if (pid == 0) {
1530 		close(stdout_fd[0]);
1531 		dup2(stdout_fd[1], 1);
1532 		close(stdout_fd[1]);
1533 		execl("/bin/sh", "sh", "-c", command, NULL);
1534 		perror(command);
1535 		exit(-1);
1536 	}
1537 
1538 	close(stdout_fd[1]);
1539 
1540 	file = fdopen(stdout_fd[0], "r");
1541 	if (!file) {
1542 		pr_err("Failure creating FILE stream for %s\n", command);
1543 		/*
1544 		 * If we were using debug info we should retry with
1545 		 * the original binary.
1546 		 */
1547 		goto out_free_command;
1548 	}
1549 
1550 	nline = 0;
1551 	while (!feof(file)) {
1552 		/*
1553 		 * The source code line number (lineno) needs to be kept
1554 		 * across calls to symbol__parse_objdump_line(), so that it
1555 		 * can be associated with the instructions until the next one.
1556 		 * See disasm_line__new() and struct disasm_line::line_nr.
1557 		 */
1558 		if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
1559 			    &lineno) < 0)
1560 			break;
1561 		nline++;
1562 	}
1563 
1564 	if (nline == 0)
1565 		pr_err("No output from %s\n", command);
1566 
1567 	/*
1568 	 * kallsyms does not have symbol sizes, so there may be a nop at the end.
1569 	 * Remove it.
1570 	 */
1571 	if (dso__is_kcore(dso))
1572 		delete_last_nop(sym);
1573 
1574 	fclose(file);
1575 	err = 0;
1576 out_free_command:
1577 	free(command);
1578 out_remove_tmp:
1579 	close(stdout_fd[0]);
1580 
1581 	if (dso__needs_decompress(dso))
1582 		unlink(symfs_filename);
1583 
1584 	if (delete_extract)
1585 		kcore_extract__delete(&kce);
1586 out:
1587 	return err;
1588 
1589 out_close_stdout:
1590 	close(stdout_fd[1]);
1591 	goto out_free_command;
1592 }
1593 
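/*
 * Collect source lines into an rbtree keyed by file:line path; when the same
 * path is seen again its per-event percentages are summed.
 */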
1594 static void insert_source_line(struct rb_root *root, struct source_line *src_line)
1595 {
1596 	struct source_line *iter;
1597 	struct rb_node **p = &root->rb_node;
1598 	struct rb_node *parent = NULL;
1599 	int i, ret;
1600 
1601 	while (*p != NULL) {
1602 		parent = *p;
1603 		iter = rb_entry(parent, struct source_line, node);
1604 
1605 		ret = strcmp(iter->path, src_line->path);
1606 		if (ret == 0) {
1607 			for (i = 0; i < src_line->nr_pcnt; i++)
1608 				iter->samples[i].percent_sum += src_line->samples[i].percent;
1609 			return;
1610 		}
1611 
1612 		if (ret < 0)
1613 			p = &(*p)->rb_left;
1614 		else
1615 			p = &(*p)->rb_right;
1616 	}
1617 
1618 	for (i = 0; i < src_line->nr_pcnt; i++)
1619 		src_line->samples[i].percent_sum = src_line->samples[i].percent;
1620 
1621 	rb_link_node(&src_line->node, parent, p);
1622 	rb_insert_color(&src_line->node, root);
1623 }
1624 
1625 static int cmp_source_line(struct source_line *a, struct source_line *b)
1626 {
1627 	int i;
1628 
1629 	for (i = 0; i < a->nr_pcnt; i++) {
1630 		if (a->samples[i].percent_sum == b->samples[i].percent_sum)
1631 			continue;
1632 		return a->samples[i].percent_sum > b->samples[i].percent_sum;
1633 	}
1634 
1635 	return 0;
1636 }
1637 
1638 static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
1639 {
1640 	struct source_line *iter;
1641 	struct rb_node **p = &root->rb_node;
1642 	struct rb_node *parent = NULL;
1643 
1644 	while (*p != NULL) {
1645 		parent = *p;
1646 		iter = rb_entry(parent, struct source_line, node);
1647 
1648 		if (cmp_source_line(src_line, iter))
1649 			p = &(*p)->rb_left;
1650 		else
1651 			p = &(*p)->rb_right;
1652 	}
1653 
1654 	rb_link_node(&src_line->node, parent, p);
1655 	rb_insert_color(&src_line->node, root);
1656 }
1657 
1658 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1659 {
1660 	struct source_line *src_line;
1661 	struct rb_node *node;
1662 
1663 	node = rb_first(src_root);
1664 	while (node) {
1665 		struct rb_node *next;
1666 
1667 		src_line = rb_entry(node, struct source_line, node);
1668 		next = rb_next(node);
1669 		rb_erase(node, src_root);
1670 
1671 		__resort_source_line(dest_root, src_line);
1672 		node = next;
1673 	}
1674 }
1675 
1676 static void symbol__free_source_line(struct symbol *sym, int len)
1677 {
1678 	struct annotation *notes = symbol__annotation(sym);
1679 	struct source_line *src_line = notes->src->lines;
1680 	size_t sizeof_src_line;
1681 	int i;
1682 
1683 	sizeof_src_line = sizeof(*src_line) +
1684 			  (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
1685 
1686 	for (i = 0; i < len; i++) {
1687 		free_srcline(src_line->path);
1688 		src_line = (void *)src_line + sizeof_src_line;
1689 	}
1690 
1691 	zfree(&notes->src->lines);
1692 }
1693 
1694 /* Get the filename:line for the colored entries */
1695 static int symbol__get_source_line(struct symbol *sym, struct map *map,
1696 				   struct perf_evsel *evsel,
1697 				   struct rb_root *root, int len)
1698 {
1699 	u64 start;
1700 	int i, k;
1701 	int evidx = evsel->idx;
1702 	struct source_line *src_line;
1703 	struct annotation *notes = symbol__annotation(sym);
1704 	struct sym_hist *h = annotation__histogram(notes, evidx);
1705 	struct rb_root tmp_root = RB_ROOT;
1706 	int nr_pcnt = 1;
1707 	u64 nr_samples = h->nr_samples;
1708 	size_t sizeof_src_line = sizeof(struct source_line);
1709 
1710 	if (perf_evsel__is_group_event(evsel)) {
1711 		for (i = 1; i < evsel->nr_members; i++) {
1712 			h = annotation__histogram(notes, evidx + i);
1713 			nr_samples += h->nr_samples;
1714 		}
1715 		nr_pcnt = evsel->nr_members;
1716 		sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
1717 	}
1718 
1719 	if (!nr_samples)
1720 		return 0;
1721 
1722 	src_line = notes->src->lines = calloc(len, sizeof_src_line);
1723 	if (!notes->src->lines)
1724 		return -1;
1725 
1726 	start = map__rip_2objdump(map, sym->start);
1727 
1728 	for (i = 0; i < len; i++) {
1729 		u64 offset;
1730 		double percent_max = 0.0;
1731 
1732 		src_line->nr_pcnt = nr_pcnt;
1733 
1734 		for (k = 0; k < nr_pcnt; k++) {
1735 			double percent = 0.0;
1736 
1737 			h = annotation__histogram(notes, evidx + k);
1738 			nr_samples = h->addr[i].nr_samples;
1739 			if (h->nr_samples)
1740 				percent = 100.0 * nr_samples / h->nr_samples;
1741 
1742 			if (percent > percent_max)
1743 				percent_max = percent;
1744 			src_line->samples[k].percent = percent;
1745 			src_line->samples[k].nr = nr_samples;
1746 		}
1747 
1748 		if (percent_max <= 0.5)
1749 			goto next;
1750 
1751 		offset = start + i;
1752 		src_line->path = get_srcline(map->dso, offset, NULL,
1753 					     false, true);
1754 		insert_source_line(&tmp_root, src_line);
1755 
1756 	next:
1757 		src_line = (void *)src_line + sizeof_src_line;
1758 	}
1759 
1760 	resort_source_line(root, &tmp_root);
1761 	return 0;
1762 }
1763 
1764 static void print_summary(struct rb_root *root, const char *filename)
1765 {
1766 	struct source_line *src_line;
1767 	struct rb_node *node;
1768 
1769 	printf("\nSorted summary for file %s\n", filename);
1770 	printf("----------------------------------------------\n\n");
1771 
1772 	if (RB_EMPTY_ROOT(root)) {
1773 		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1774 		return;
1775 	}
1776 
1777 	node = rb_first(root);
1778 	while (node) {
1779 		double percent, percent_max = 0.0;
1780 		const char *color;
1781 		char *path;
1782 		int i;
1783 
1784 		src_line = rb_entry(node, struct source_line, node);
1785 		for (i = 0; i < src_line->nr_pcnt; i++) {
1786 			percent = src_line->samples[i].percent_sum;
1787 			color = get_percent_color(percent);
1788 			color_fprintf(stdout, color, " %7.2f", percent);
1789 
1790 			if (percent > percent_max)
1791 				percent_max = percent;
1792 		}
1793 
1794 		path = src_line->path;
1795 		color = get_percent_color(percent_max);
1796 		color_fprintf(stdout, color, " %s\n", path);
1797 
1798 		node = rb_next(node);
1799 	}
1800 }
1801 
1802 static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
1803 {
1804 	struct annotation *notes = symbol__annotation(sym);
1805 	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
1806 	u64 len = symbol__size(sym), offset;
1807 
1808 	for (offset = 0; offset < len; ++offset)
1809 		if (h->addr[offset].nr_samples != 0)
1810 			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1811 			       sym->start + offset, h->addr[offset].nr_samples);
1812 	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
1813 }
1814 
1815 int symbol__annotate_printf(struct symbol *sym, struct map *map,
1816 			    struct perf_evsel *evsel, bool full_paths,
1817 			    int min_pcnt, int max_lines, int context)
1818 {
1819 	struct dso *dso = map->dso;
1820 	char *filename;
1821 	const char *d_filename;
1822 	const char *evsel_name = perf_evsel__name(evsel);
1823 	struct annotation *notes = symbol__annotation(sym);
1824 	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
1825 	struct disasm_line *pos, *queue = NULL;
1826 	u64 start = map__rip_2objdump(map, sym->start);
1827 	int printed = 2, queue_len = 0;
1828 	int more = 0;
1829 	u64 len;
1830 	int width = symbol_conf.show_total_period ? 12 : 8;
1831 	int graph_dotted_len;
1832 
1833 	filename = strdup(dso->long_name);
1834 	if (!filename)
1835 		return -ENOMEM;
1836 
1837 	if (full_paths)
1838 		d_filename = filename;
1839 	else
1840 		d_filename = basename(filename);
1841 
1842 	len = symbol__size(sym);
1843 
1844 	if (perf_evsel__is_group_event(evsel))
1845 		width *= evsel->nr_members;
1846 
1847 	graph_dotted_len = printf(" %-*.*s|	Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
1848 				  width, width, symbol_conf.show_total_period ? "Period" :
1849 				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
1850 				  d_filename, evsel_name, h->nr_samples);
1851 
1852 	printf("%-*.*s----\n",
1853 	       graph_dotted_len, graph_dotted_len, graph_dotted_line);
1854 
1855 	if (verbose > 0)
1856 		symbol__annotate_hits(sym, evsel);
1857 
1858 	list_for_each_entry(pos, &notes->src->source, node) {
1859 		if (context && queue == NULL) {
1860 			queue = pos;
1861 			queue_len = 0;
1862 		}
1863 
1864 		switch (disasm_line__print(pos, sym, start, evsel, len,
1865 					    min_pcnt, printed, max_lines,
1866 					    queue)) {
1867 		case 0:
1868 			++printed;
1869 			if (context) {
1870 				printed += queue_len;
1871 				queue = NULL;
1872 				queue_len = 0;
1873 			}
1874 			break;
1875 		case 1:
1876 			/* filtered by max_lines */
1877 			++more;
1878 			break;
1879 		case -1:
1880 		default:
1881 			/*
1882 			 * Filtered by min_pcnt, or non-IP lines when
1883 			 * context != 0
1884 			 */
1885 			if (!context)
1886 				break;
1887 			if (queue_len == context)
1888 				queue = list_entry(queue->node.next, typeof(*queue), node);
1889 			else
1890 				++queue_len;
1891 			break;
1892 		}
1893 	}
1894 
1895 	free(filename);
1896 
1897 	return more;
1898 }
1899 
1900 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
1901 {
1902 	struct annotation *notes = symbol__annotation(sym);
1903 	struct sym_hist *h = annotation__histogram(notes, evidx);
1904 
1905 	memset(h, 0, notes->src->sizeof_sym_hist);
1906 }
1907 
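/*
 * Decay the histogram: scale every address's sample count to 7/8 of its value
 * and recompute the total, so that old samples gradually age out.
 */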
1908 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
1909 {
1910 	struct annotation *notes = symbol__annotation(sym);
1911 	struct sym_hist *h = annotation__histogram(notes, evidx);
1912 	int len = symbol__size(sym), offset;
1913 
1914 	h->nr_samples = 0;
1915 	for (offset = 0; offset < len; ++offset) {
1916 		h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
1917 		h->nr_samples += h->addr[offset].nr_samples;
1918 	}
1919 }
1920 
1921 void disasm__purge(struct list_head *head)
1922 {
1923 	struct disasm_line *pos, *n;
1924 
1925 	list_for_each_entry_safe(pos, n, head, node) {
1926 		list_del(&pos->node);
1927 		disasm_line__free(pos);
1928 	}
1929 }
1930 
1931 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1932 {
1933 	size_t printed;
1934 
1935 	if (dl->offset == -1)
1936 		return fprintf(fp, "%s\n", dl->line);
1937 
1938 	printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);
1939 
1940 	if (dl->ops.raw[0] != '\0') {
1941 		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1942 				   dl->ops.raw);
1943 	}
1944 
1945 	return printed + fprintf(fp, "\n");
1946 }
1947 
1948 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1949 {
1950 	struct disasm_line *pos;
1951 	size_t printed = 0;
1952 
1953 	list_for_each_entry(pos, head, node)
1954 		printed += disasm_line__fprintf(pos, fp);
1955 
1956 	return printed;
1957 }
1958 
1959 int symbol__tty_annotate(struct symbol *sym, struct map *map,
1960 			 struct perf_evsel *evsel, bool print_lines,
1961 			 bool full_paths, int min_pcnt, int max_lines)
1962 {
1963 	struct dso *dso = map->dso;
1964 	struct rb_root source_line = RB_ROOT;
1965 	u64 len;
1966 
1967 	if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
1968 				0, NULL, NULL) < 0)
1969 		return -1;
1970 
1971 	len = symbol__size(sym);
1972 
1973 	if (print_lines) {
1974 		srcline_full_filename = full_paths;
1975 		symbol__get_source_line(sym, map, evsel, &source_line, len);
1976 		print_summary(&source_line, dso->long_name);
1977 	}
1978 
1979 	symbol__annotate_printf(sym, map, evsel, full_paths,
1980 				min_pcnt, max_lines, 0);
1981 	if (print_lines)
1982 		symbol__free_source_line(sym, len);
1983 
1984 	disasm__purge(&symbol__annotation(sym)->src->source);
1985 
1986 	return 0;
1987 }
1988 
1989 bool ui__has_annotation(void)
1990 {
1991 	return use_browser == 1 && perf_hpp_list.sym;
1992 }
1993