#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

/*
 * vsnprintf() wrapper for the:
 *
 *   -t, --field-separator
 *
 * option, which selects a special separator character and disables space
 * padding.  Every occurrence of that separator in the formatted output
 * (symbol names and the like) is replaced with a '.', so the separator
 * itself never appears inside a field.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

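/*
 * Order entries when one or both keys are missing: 0 if both are NULL,
 * negative if only the left key is NULL, positive otherwise.
 */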
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the comm strings, not the comm pointers */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the comm strings, not the comm pointers */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

static char *hist_entry__get_srcline(struct hist_entry *he)
{
	struct map *map = he->ms.map;

	if (!map)
		return SRCLINE_UNKNOWN;

	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
			   he->ms.sym, true);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__get_srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from) {
		struct map *map = left->branch_info->from.map;
		if (!map)
			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
							  left->branch_info->from.al_addr),
					left->branch_info->from.sym, true);
	}
	if (!right->branch_info->srcline_from) {
		struct map *map = right->branch_info->from.map;
		if (!map)
			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
							  right->branch_info->from.al_addr),
					right->branch_info->from.sym, true);
	}
	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to) {
		struct map *map = left->branch_info->to.map;
		if (!map)
			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
							  left->branch_info->to.al_addr),
					left->branch_info->to.sym, true);
	}
	if (!right->branch_info->srcline_to) {
		struct map *map = right->branch_info->to.map;
		if (!map)
			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
							  right->branch_info->to.al_addr),
					right->branch_info->to.sym, true);
	}
	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info) return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

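/* Average sample weight: total weight divided by the number of events. */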
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

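/*
 * Printable names for the transaction flag bits.  Entries marked with
 * skip_for_len are not counted when hist_entry__transaction_len() estimates
 * the column width.
 */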
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

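/*
 * Print the names of all transaction flags that are set, followed by the
 * abort code as ":<hex>" when one is present.
 */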
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

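/* Tables mapping --sort key names to their sort_entry implementations. */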
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

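/*
 * Columns backed directly by a perf_hpp_fmt (overhead, samples, period)
 * rather than by a sort_entry.
 */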
struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

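/* Generate perf_hpp__is_<key>_entry() helpers for the sort keys listed below. */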
#define MK_SORT_ENTRY_CHK(key) \
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
{ \
	struct hpp_sort_entry *hse; \
 \
	if (!perf_hpp__is_sort_entry(fmt)) \
		return false; \
 \
	hse = container_of(fmt, struct hpp_sort_entry, hpp); \
	return hse->se == &sort_ ## key ; \
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)

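/* Two hpp formats are equal if they wrap the same sort_entry. */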
static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

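/*
 * Wrap a sort_dimension in a perf_hpp_fmt so the generic hists output code
 * can drive the sort_entry callbacks for comparing, collapsing, sorting and
 * printing.
 */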
1622 static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension * sd,int level)1623 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1624 {
1625 struct hpp_sort_entry *hse;
1626
1627 hse = malloc(sizeof(*hse));
1628 if (hse == NULL) {
1629 pr_err("Memory allocation failed\n");
1630 return NULL;
1631 }
1632
1633 hse->se = sd->entry;
1634 hse->hpp.name = sd->entry->se_header;
1635 hse->hpp.header = __sort__hpp_header;
1636 hse->hpp.width = __sort__hpp_width;
1637 hse->hpp.entry = __sort__hpp_entry;
1638 hse->hpp.color = NULL;
1639
1640 hse->hpp.cmp = __sort__hpp_cmp;
1641 hse->hpp.collapse = __sort__hpp_collapse;
1642 hse->hpp.sort = __sort__hpp_sort;
1643 hse->hpp.equal = __sort__hpp_equal;
1644 hse->hpp.free = hse_free;
1645
1646 INIT_LIST_HEAD(&hse->hpp.list);
1647 INIT_LIST_HEAD(&hse->hpp.sort_list);
1648 hse->hpp.elide = false;
1649 hse->hpp.len = 0;
1650 hse->hpp.user_len = 0;
1651 hse->hpp.level = level;
1652
1653 return hse;
1654 }
1655
hpp_free(struct perf_hpp_fmt * fmt)1656 static void hpp_free(struct perf_hpp_fmt *fmt)
1657 {
1658 free(fmt);
1659 }
1660
__hpp_dimension__alloc_hpp(struct hpp_dimension * hd,int level)1661 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1662 int level)
1663 {
1664 struct perf_hpp_fmt *fmt;
1665
1666 fmt = memdup(hd->fmt, sizeof(*fmt));
1667 if (fmt) {
1668 INIT_LIST_HEAD(&fmt->list);
1669 INIT_LIST_HEAD(&fmt->sort_list);
1670 fmt->free = hpp_free;
1671 fmt->level = level;
1672 }
1673
1674 return fmt;
1675 }
1676
hist_entry__filter(struct hist_entry * he,int type,const void * arg)1677 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1678 {
1679 struct perf_hpp_fmt *fmt;
1680 struct hpp_sort_entry *hse;
1681 int ret = -1;
1682 int r;
1683
1684 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1685 if (!perf_hpp__is_sort_entry(fmt))
1686 continue;
1687
1688 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1689 if (hse->se->se_filter == NULL)
1690 continue;
1691
1692 /*
1693 * hist entry is filtered if any of sort key in the hpp list
1694 * is applied. But it should skip non-matched filter types.
1695 */
1696 r = hse->se->se_filter(he, type, arg);
1697 if (r >= 0) {
1698 if (ret < 0)
1699 ret = 0;
1700 ret |= r;
1701 }
1702 }
1703
1704 return ret;
1705 }
1706
__sort_dimension__add_hpp_sort(struct sort_dimension * sd,struct perf_hpp_list * list,int level)1707 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1708 struct perf_hpp_list *list,
1709 int level)
1710 {
1711 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1712
1713 if (hse == NULL)
1714 return -1;
1715
1716 perf_hpp_list__register_sort_field(list, &hse->hpp);
1717 return 0;
1718 }
1719
__sort_dimension__add_hpp_output(struct sort_dimension * sd,struct perf_hpp_list * list)1720 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1721 struct perf_hpp_list *list)
1722 {
1723 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1724
1725 if (hse == NULL)
1726 return -1;
1727
1728 perf_hpp_list__column_register(list, &hse->hpp);
1729 return 0;
1730 }
1731
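/*
 * A dynamic entry is a sort key created at runtime from a tracepoint
 * field (for example a --sort key like 'sched:sched_switch.prev_pid').
 * It either pretty-prints the field from the parsed trace output or,
 * with the /raw option, dumps the raw field value.
 */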
1732 struct hpp_dynamic_entry {
1733 struct perf_hpp_fmt hpp;
1734 struct perf_evsel *evsel;
1735 struct format_field *field;
1736 unsigned dynamic_len;
1737 bool raw_trace;
1738 };
1739
1740 static int hde_width(struct hpp_dynamic_entry *hde)
1741 {
1742 if (!hde->hpp.len) {
1743 int len = hde->dynamic_len;
1744 int namelen = strlen(hde->field->name);
1745 int fieldlen = hde->field->size;
1746
1747 if (namelen > len)
1748 len = namelen;
1749
1750 if (!(hde->field->flags & FIELD_IS_STRING)) {
1751 /* length needed to print the value as a hex number */
1752 fieldlen = hde->field->size * 2 + 2;
1753 }
1754 if (fieldlen > len)
1755 len = fieldlen;
1756
1757 hde->hpp.len = len;
1758 }
1759 return hde->hpp.len;
1760 }
1761
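/*
 * Scan the pretty-printed trace output, a space-separated list of
 * 'name=value' items (e.g. "prev_comm=swapper/0 prev_pid=0 ..."), for
 * this entry's field and record the widest value seen so the column
 * can be sized to fit.
 */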
1762 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1763 struct hist_entry *he)
1764 {
1765 char *str, *pos;
1766 struct format_field *field = hde->field;
1767 size_t namelen;
1768 bool last = false;
1769
1770 if (hde->raw_trace)
1771 return;
1772
1773 /* parse pretty print result and update max length */
1774 if (!he->trace_output)
1775 he->trace_output = get_trace_output(he);
1776
1777 namelen = strlen(field->name);
1778 str = he->trace_output;
1779
1780 while (str) {
1781 pos = strchr(str, ' ');
1782 if (pos == NULL) {
1783 last = true;
1784 pos = str + strlen(str);
1785 }
1786
1787 if (!strncmp(str, field->name, namelen)) {
1788 size_t len;
1789
1790 str += namelen + 1;
1791 len = pos - str;
1792
1793 if (len > hde->dynamic_len)
1794 hde->dynamic_len = len;
1795 break;
1796 }
1797
1798 if (last)
1799 str = NULL;
1800 else
1801 str = pos + 1;
1802 }
1803 }
1804
1805 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1806 struct hists *hists __maybe_unused,
1807 int line __maybe_unused,
1808 int *span __maybe_unused)
1809 {
1810 struct hpp_dynamic_entry *hde;
1811 size_t len = fmt->user_len;
1812
1813 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1814
1815 if (!len)
1816 len = hde_width(hde);
1817
1818 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1819 }
1820
1821 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1822 struct perf_hpp *hpp __maybe_unused,
1823 struct hists *hists __maybe_unused)
1824 {
1825 struct hpp_dynamic_entry *hde;
1826 size_t len = fmt->user_len;
1827
1828 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1829
1830 if (!len)
1831 len = hde_width(hde);
1832
1833 return len;
1834 }
1835
1836 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1837 {
1838 struct hpp_dynamic_entry *hde;
1839
1840 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1841
1842 return hists_to_evsel(hists) == hde->evsel;
1843 }
1844
1845 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1846 struct hist_entry *he)
1847 {
1848 struct hpp_dynamic_entry *hde;
1849 size_t len = fmt->user_len;
1850 char *str, *pos;
1851 struct format_field *field;
1852 size_t namelen;
1853 bool last = false;
1854 int ret;
1855
1856 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1857
1858 if (!len)
1859 len = hde_width(hde);
1860
1861 if (hde->raw_trace)
1862 goto raw_field;
1863
1864 if (!he->trace_output)
1865 he->trace_output = get_trace_output(he);
1866
1867 field = hde->field;
1868 namelen = strlen(field->name);
1869 str = he->trace_output;
1870
1871 while (str) {
1872 pos = strchr(str, ' ');
1873 if (pos == NULL) {
1874 last = true;
1875 pos = str + strlen(str);
1876 }
1877
1878 if (!strncmp(str, field->name, namelen)) {
1879 str += namelen + 1;
1880 str = strndup(str, pos - str);
1881
1882 if (str == NULL)
1883 return scnprintf(hpp->buf, hpp->size,
1884 "%*.*s", len, len, "ERROR");
1885 break;
1886 }
1887
1888 if (last)
1889 str = NULL;
1890 else
1891 str = pos + 1;
1892 }
1893
1894 if (str == NULL) {
1895 struct trace_seq seq;
1896 raw_field:
1897 trace_seq_init(&seq);
1898 pevent_print_field(&seq, he->raw_data, hde->field);
1899 str = seq.buffer;
1900 }
1901
1902 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1903 free(str);
1904 return ret;
1905 }
1906
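/*
 * Compare two entries by the raw bytes of the tracepoint field.  For
 * dynamic (__data_loc) fields the recorded descriptor packs the payload
 * offset in the low 16 bits and its length in the high 16 bits.  Called
 * with b == NULL just to update the column width.
 */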
1907 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1908 struct hist_entry *a, struct hist_entry *b)
1909 {
1910 struct hpp_dynamic_entry *hde;
1911 struct format_field *field;
1912 unsigned offset, size;
1913
1914 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1915
1916 if (b == NULL) {
1917 update_dynamic_len(hde, a);
1918 return 0;
1919 }
1920
1921 field = hde->field;
1922 if (field->flags & FIELD_IS_DYNAMIC) {
1923 unsigned long long dyn;
1924
1925 pevent_read_number_field(field, a->raw_data, &dyn);
1926 offset = dyn & 0xffff;
1927 size = (dyn >> 16) & 0xffff;
1928
1929 /* record max width for output */
1930 if (size > hde->dynamic_len)
1931 hde->dynamic_len = size;
1932 } else {
1933 offset = field->offset;
1934 size = field->size;
1935 }
1936
1937 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1938 }
1939
1940 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1941 {
1942 return fmt->cmp == __sort__hde_cmp;
1943 }
1944
1945 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1946 {
1947 struct hpp_dynamic_entry *hde_a;
1948 struct hpp_dynamic_entry *hde_b;
1949
1950 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
1951 return false;
1952
1953 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
1954 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
1955
1956 return hde_a->field == hde_b->field;
1957 }
1958
1959 static void hde_free(struct perf_hpp_fmt *fmt)
1960 {
1961 struct hpp_dynamic_entry *hde;
1962
1963 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1964 free(hde);
1965 }
1966
1967 static struct hpp_dynamic_entry *
1968 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
1969 int level)
1970 {
1971 struct hpp_dynamic_entry *hde;
1972
1973 hde = malloc(sizeof(*hde));
1974 if (hde == NULL) {
1975 pr_debug("Memory allocation failed\n");
1976 return NULL;
1977 }
1978
1979 hde->evsel = evsel;
1980 hde->field = field;
1981 hde->dynamic_len = 0;
1982
1983 hde->hpp.name = field->name;
1984 hde->hpp.header = __sort__hde_header;
1985 hde->hpp.width = __sort__hde_width;
1986 hde->hpp.entry = __sort__hde_entry;
1987 hde->hpp.color = NULL;
1988
1989 hde->hpp.cmp = __sort__hde_cmp;
1990 hde->hpp.collapse = __sort__hde_cmp;
1991 hde->hpp.sort = __sort__hde_cmp;
1992 hde->hpp.equal = __sort__hde_equal;
1993 hde->hpp.free = hde_free;
1994
1995 INIT_LIST_HEAD(&hde->hpp.list);
1996 INIT_LIST_HEAD(&hde->hpp.sort_list);
1997 hde->hpp.elide = false;
1998 hde->hpp.len = 0;
1999 hde->hpp.user_len = 0;
2000 hde->hpp.level = level;
2001
2002 return hde;
2003 }
2004
2005 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2006 {
2007 struct perf_hpp_fmt *new_fmt = NULL;
2008
2009 if (perf_hpp__is_sort_entry(fmt)) {
2010 struct hpp_sort_entry *hse, *new_hse;
2011
2012 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2013 new_hse = memdup(hse, sizeof(*hse));
2014 if (new_hse)
2015 new_fmt = &new_hse->hpp;
2016 } else if (perf_hpp__is_dynamic_entry(fmt)) {
2017 struct hpp_dynamic_entry *hde, *new_hde;
2018
2019 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2020 new_hde = memdup(hde, sizeof(*hde));
2021 if (new_hde)
2022 new_fmt = &new_hde->hpp;
2023 } else {
2024 new_fmt = memdup(fmt, sizeof(*fmt));
2025 }
2026
/* new_fmt stays NULL if memdup() failed above */
if (new_fmt) {
2027 INIT_LIST_HEAD(&new_fmt->list);
2028 INIT_LIST_HEAD(&new_fmt->sort_list);
}
2029
2030 return new_fmt;
2031 }
2032
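/*
 * Split a --sort token of the form [<event>.]<field>[/<option>] in
 * place: '.' separates the (optional) event name from the field name
 * and '/' introduces an option such as 'raw'.
 */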
2033 static int parse_field_name(char *str, char **event, char **field, char **opt)
2034 {
2035 char *event_name, *field_name, *opt_name;
2036
2037 event_name = str;
2038 field_name = strchr(str, '.');
2039
2040 if (field_name) {
2041 *field_name++ = '\0';
2042 } else {
2043 event_name = NULL;
2044 field_name = str;
2045 }
2046
2047 opt_name = strchr(field_name, '/');
2048 if (opt_name)
2049 *opt_name++ = '\0';
2050
2051 *event = event_name;
2052 *field = field_name;
2053 *opt = opt_name;
2054
2055 return 0;
2056 }
2057
2058 /* Find the matching evsel using a given event name. The event name can be:
2059 * 1. '%' + event index (e.g. '%1' for first event)
2060 * 2. full event name (e.g. sched:sched_switch)
2061 * 3. partial event name (should not contain ':')
2062 */
2063 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
2064 {
2065 struct perf_evsel *evsel = NULL;
2066 struct perf_evsel *pos;
2067 bool full_name;
2068
2069 /* case 1 */
2070 if (event_name[0] == '%') {
2071 int nr = strtol(event_name+1, NULL, 0);
2072
2073 if (nr > evlist->nr_entries)
2074 return NULL;
2075
2076 evsel = perf_evlist__first(evlist);
2077 while (--nr > 0)
2078 evsel = perf_evsel__next(evsel);
2079
2080 return evsel;
2081 }
2082
2083 full_name = !!strchr(event_name, ':');
2084 evlist__for_each_entry(evlist, pos) {
2085 /* case 2 */
2086 if (full_name && !strcmp(pos->name, event_name))
2087 return pos;
2088 /* case 3 */
2089 if (!full_name && strstr(pos->name, event_name)) {
2090 if (evsel) {
2091 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2092 event_name, evsel->name, pos->name);
2093 return NULL;
2094 }
2095 evsel = pos;
2096 }
2097 }
2098
2099 return evsel;
2100 }
2101
2102 static int __dynamic_dimension__add(struct perf_evsel *evsel,
2103 struct format_field *field,
2104 bool raw_trace, int level)
2105 {
2106 struct hpp_dynamic_entry *hde;
2107
2108 hde = __alloc_dynamic_entry(evsel, field, level);
2109 if (hde == NULL)
2110 return -ENOMEM;
2111
2112 hde->raw_trace = raw_trace;
2113
2114 perf_hpp__register_sort_field(&hde->hpp);
2115 return 0;
2116 }
2117
2118 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2119 {
2120 int ret;
2121 struct format_field *field;
2122
2123 field = evsel->tp_format->format.fields;
2124 while (field) {
2125 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2126 if (ret < 0)
2127 return ret;
2128
2129 field = field->next;
2130 }
2131 return 0;
2132 }
2133
2134 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2135 int level)
2136 {
2137 int ret;
2138 struct perf_evsel *evsel;
2139
2140 evlist__for_each_entry(evlist, evsel) {
2141 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2142 continue;
2143
2144 ret = add_evsel_fields(evsel, raw_trace, level);
2145 if (ret < 0)
2146 return ret;
2147 }
2148 return 0;
2149 }
2150
2151 static int add_all_matching_fields(struct perf_evlist *evlist,
2152 char *field_name, bool raw_trace, int level)
2153 {
2154 int ret = -ESRCH;
2155 struct perf_evsel *evsel;
2156 struct format_field *field;
2157
2158 evlist__for_each_entry(evlist, evsel) {
2159 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2160 continue;
2161
2162 field = pevent_find_any_field(evsel->tp_format, field_name);
2163 if (field == NULL)
2164 continue;
2165
2166 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2167 if (ret < 0)
2168 break;
2169 }
2170 return ret;
2171 }
2172
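/*
 * Add tracepoint field(s) as dynamic sort keys.  Accepted forms of
 * 'tok' are: 'trace_fields' (every field of every tracepoint event),
 * '<field>' (that field in any tracepoint that has it),
 * '<event>.<field>' and '<event>.*', each optionally suffixed with
 * '/raw' to skip pretty-printing (e.g. 'sched:sched_switch.prev_comm',
 * shown here purely as an illustration).
 */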
2173 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2174 int level)
2175 {
2176 char *str, *event_name, *field_name, *opt_name;
2177 struct perf_evsel *evsel;
2178 struct format_field *field;
2179 bool raw_trace = symbol_conf.raw_trace;
2180 int ret = 0;
2181
2182 if (evlist == NULL)
2183 return -ENOENT;
2184
2185 str = strdup(tok);
2186 if (str == NULL)
2187 return -ENOMEM;
2188
2189 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2190 ret = -EINVAL;
2191 goto out;
2192 }
2193
2194 if (opt_name) {
2195 if (strcmp(opt_name, "raw")) {
2196 pr_debug("unsupported field option %s\n", opt_name);
2197 ret = -EINVAL;
2198 goto out;
2199 }
2200 raw_trace = true;
2201 }
2202
2203 if (!strcmp(field_name, "trace_fields")) {
2204 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2205 goto out;
2206 }
2207
2208 if (event_name == NULL) {
2209 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2210 goto out;
2211 }
2212
2213 evsel = find_evsel(evlist, event_name);
2214 if (evsel == NULL) {
2215 pr_debug("Cannot find event: %s\n", event_name);
2216 ret = -ENOENT;
2217 goto out;
2218 }
2219
2220 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2221 pr_debug("%s is not a tracepoint event\n", event_name);
2222 ret = -EINVAL;
2223 goto out;
2224 }
2225
2226 if (!strcmp(field_name, "*")) {
2227 ret = add_evsel_fields(evsel, raw_trace, level);
2228 } else {
2229 field = pevent_find_any_field(evsel->tp_format, field_name);
2230 if (field == NULL) {
2231 pr_debug("Cannot find event field for %s.%s\n",
2232 event_name, field_name);
2233 ret = -ENOENT;
goto out;	/* free 'str' instead of leaking it */
2234 }
2235
2236 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2237 }
2238
2239 out:
2240 free(str);
2241 return ret;
2242 }
2243
2244 static int __sort_dimension__add(struct sort_dimension *sd,
2245 struct perf_hpp_list *list,
2246 int level)
2247 {
2248 if (sd->taken)
2249 return 0;
2250
2251 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2252 return -1;
2253
2254 if (sd->entry->se_collapse)
2255 list->need_collapse = 1;
2256
2257 sd->taken = 1;
2258
2259 return 0;
2260 }
2261
2262 static int __hpp_dimension__add(struct hpp_dimension *hd,
2263 struct perf_hpp_list *list,
2264 int level)
2265 {
2266 struct perf_hpp_fmt *fmt;
2267
2268 if (hd->taken)
2269 return 0;
2270
2271 fmt = __hpp_dimension__alloc_hpp(hd, level);
2272 if (!fmt)
2273 return -1;
2274
2275 hd->taken = 1;
2276 perf_hpp_list__register_sort_field(list, fmt);
2277 return 0;
2278 }
2279
2280 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2281 struct sort_dimension *sd)
2282 {
2283 if (sd->taken)
2284 return 0;
2285
2286 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2287 return -1;
2288
2289 sd->taken = 1;
2290 return 0;
2291 }
2292
2293 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2294 struct hpp_dimension *hd)
2295 {
2296 struct perf_hpp_fmt *fmt;
2297
2298 if (hd->taken)
2299 return 0;
2300
2301 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2302 if (!fmt)
2303 return -1;
2304
2305 hd->taken = 1;
2306 perf_hpp_list__column_register(list, fmt);
2307 return 0;
2308 }
2309
2310 int hpp_dimension__add_output(unsigned col)
2311 {
2312 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2313 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2314 }
2315
2316 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2317 struct perf_evlist *evlist,
2318 int level)
2319 {
2320 unsigned int i;
2321
2322 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2323 struct sort_dimension *sd = &common_sort_dimensions[i];
2324
2325 if (strncasecmp(tok, sd->name, strlen(tok)))
2326 continue;
2327
2328 if (sd->entry == &sort_parent) {
2329 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2330 if (ret) {
2331 char err[BUFSIZ];
2332
2333 regerror(ret, &parent_regex, err, sizeof(err));
2334 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2335 return -EINVAL;
2336 }
2337 list->parent = 1;
2338 } else if (sd->entry == &sort_sym) {
2339 list->sym = 1;
2340 /*
2341 * perf diff displays the performance difference between
2342 * two or more perf.data files. Those files could come
2343 * from different binaries, so we should compare symbol
2344 * names rather than addresses.
2345 */
2346 if (sort__mode == SORT_MODE__DIFF)
2347 sd->entry->se_collapse = sort__sym_sort;
2348
2349 } else if (sd->entry == &sort_dso) {
2350 list->dso = 1;
2351 } else if (sd->entry == &sort_socket) {
2352 list->socket = 1;
2353 } else if (sd->entry == &sort_thread) {
2354 list->thread = 1;
2355 } else if (sd->entry == &sort_comm) {
2356 list->comm = 1;
2357 }
2358
2359 return __sort_dimension__add(sd, list, level);
2360 }
2361
2362 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2363 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2364
2365 if (strncasecmp(tok, hd->name, strlen(tok)))
2366 continue;
2367
2368 return __hpp_dimension__add(hd, list, level);
2369 }
2370
2371 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2372 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2373
2374 if (strncasecmp(tok, sd->name, strlen(tok)))
2375 continue;
2376
2377 if (sort__mode != SORT_MODE__BRANCH)
2378 return -EINVAL;
2379
2380 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2381 list->sym = 1;
2382
2383 __sort_dimension__add(sd, list, level);
2384 return 0;
2385 }
2386
2387 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2388 struct sort_dimension *sd = &memory_sort_dimensions[i];
2389
2390 if (strncasecmp(tok, sd->name, strlen(tok)))
2391 continue;
2392
2393 if (sort__mode != SORT_MODE__MEMORY)
2394 return -EINVAL;
2395
2396 if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
2397 return -EINVAL;
2398
2399 if (sd->entry == &sort_mem_daddr_sym)
2400 list->sym = 1;
2401
2402 __sort_dimension__add(sd, list, level);
2403 return 0;
2404 }
2405
2406 if (!add_dynamic_entry(evlist, tok, level))
2407 return 0;
2408
2409 return -ESRCH;
2410 }
2411
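/*
 * Tokenize the sort key string on "{}, " and add each key.  Keys inside
 * a '{...}' group share the same level, while each key (or group)
 * outside braces starts a new level; the level is later used to nest
 * entries in hierarchy output.
 */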
2412 static int setup_sort_list(struct perf_hpp_list *list, char *str,
2413 struct perf_evlist *evlist)
2414 {
2415 char *tmp, *tok;
2416 int ret = 0;
2417 int level = 0;
2418 int next_level = 1;
2419 bool in_group = false;
2420
2421 do {
2422 tok = str;
2423 tmp = strpbrk(str, "{}, ");
2424 if (tmp) {
2425 if (in_group)
2426 next_level = level;
2427 else
2428 next_level = level + 1;
2429
2430 if (*tmp == '{')
2431 in_group = true;
2432 else if (*tmp == '}')
2433 in_group = false;
2434
2435 *tmp = '\0';
2436 str = tmp + 1;
2437 }
2438
2439 if (*tok) {
2440 ret = sort_dimension__add(list, tok, evlist, level);
2441 if (ret == -EINVAL) {
2442 if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
2443 error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2444 else
2445 error("Invalid --sort key: `%s'", tok);
2446 break;
2447 } else if (ret == -ESRCH) {
2448 error("Unknown --sort key: `%s'", tok);
2449 break;
2450 }
2451 }
2452
2453 level = next_level;
2454 } while (tmp);
2455
2456 return ret;
2457 }
2458
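/*
 * Pick the default sort order for the current sort mode; the array
 * below is indexed by 'enum sort_mode'.  When every event in the
 * evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT (and to
 * "trace_fields" if symbol_conf.raw_trace is set).
 */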
2459 static const char *get_default_sort_order(struct perf_evlist *evlist)
2460 {
2461 const char *default_sort_orders[] = {
2462 default_sort_order,
2463 default_branch_sort_order,
2464 default_mem_sort_order,
2465 default_top_sort_order,
2466 default_diff_sort_order,
2467 default_tracepoint_sort_order,
2468 };
2469 bool use_trace = true;
2470 struct perf_evsel *evsel;
2471
2472 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2473
2474 if (evlist == NULL)
2475 goto out_no_evlist;
2476
2477 evlist__for_each_entry(evlist, evsel) {
2478 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2479 use_trace = false;
2480 break;
2481 }
2482 }
2483
2484 if (use_trace) {
2485 sort__mode = SORT_MODE__TRACEPOINT;
2486 if (symbol_conf.raw_trace)
2487 return "trace_fields";
2488 }
2489 out_no_evlist:
2490 return default_sort_orders[sort__mode];
2491 }
2492
2493 static int setup_sort_order(struct perf_evlist *evlist)
2494 {
2495 char *new_sort_order;
2496
2497 /*
2498 * Append '+'-prefixed sort order to the default sort
2499 * order string.
2500 */
2501 if (!sort_order || is_strict_order(sort_order))
2502 return 0;
2503
2504 if (sort_order[1] == '\0') {
2505 error("Invalid --sort key: `+'");
2506 return -EINVAL;
2507 }
2508
2509 /*
2510 * We allocate a new sort_order string but never free it,
2511 * because it is referenced throughout the rest of the code.
2512 */
2513 if (asprintf(&new_sort_order, "%s,%s",
2514 get_default_sort_order(evlist), sort_order + 1) < 0) {
2515 error("Not enough memory to set up --sort");
2516 return -ENOMEM;
2517 }
2518
2519 sort_order = new_sort_order;
2520 return 0;
2521 }
2522
2523 /*
2524 * Adds the 'pre,' prefix to 'str' if 'pre' is
2525 * not already part of 'str'.
2526 */
2527 static char *prefix_if_not_in(const char *pre, char *str)
2528 {
2529 char *n;
2530
2531 if (!str || strstr(str, pre))
2532 return str;
2533
2534 if (asprintf(&n, "%s,%s", pre, str) < 0)
2535 return NULL;
2536
2537 free(str);
2538 return n;
2539 }
2540
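/*
 * Keep the traditional Overhead column(s) by prefixing the sort keys
 * with 'overhead' (and 'overhead_children' when cumulative callchains
 * are enabled) unless the keys already contain them.  perf diff does
 * not use the default overhead columns, so it is skipped.
 */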
2541 static char *setup_overhead(char *keys)
2542 {
2543 if (sort__mode == SORT_MODE__DIFF)
2544 return keys;
2545
2546 keys = prefix_if_not_in("overhead", keys);
2547
2548 if (symbol_conf.cumulate_callchain)
2549 keys = prefix_if_not_in("overhead_children", keys);
2550
2551 return keys;
2552 }
2553
2554 static int __setup_sorting(struct perf_evlist *evlist)
2555 {
2556 char *str;
2557 const char *sort_keys;
2558 int ret = 0;
2559
2560 ret = setup_sort_order(evlist);
2561 if (ret)
2562 return ret;
2563
2564 sort_keys = sort_order;
2565 if (sort_keys == NULL) {
2566 if (is_strict_order(field_order)) {
2567 /*
2568 * If user specified field order but no sort order,
2569 * we'll honor it and not add default sort orders.
2570 */
2571 return 0;
2572 }
2573
2574 sort_keys = get_default_sort_order(evlist);
2575 }
2576
2577 str = strdup(sort_keys);
2578 if (str == NULL) {
2579 error("Not enough memory to setup sort keys");
2580 return -ENOMEM;
2581 }
2582
2583 /*
2584 * Prepend overhead fields for backward compatibility.
2585 */
2586 if (!is_strict_order(field_order)) {
2587 str = setup_overhead(str);
2588 if (str == NULL) {
2589 error("Not enough memory to setup overhead keys");
2590 return -ENOMEM;
2591 }
2592 }
2593
2594 ret = setup_sort_list(&perf_hpp_list, str, evlist);
2595
2596 free(str);
2597 return ret;
2598 }
2599
2600 void perf_hpp__set_elide(int idx, bool elide)
2601 {
2602 struct perf_hpp_fmt *fmt;
2603 struct hpp_sort_entry *hse;
2604
2605 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2606 if (!perf_hpp__is_sort_entry(fmt))
2607 continue;
2608
2609 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2610 if (hse->se->se_width_idx == idx) {
2611 fmt->elide = elide;
2612 break;
2613 }
2614 }
2615 }
2616
2617 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2618 {
2619 if (list && strlist__nr_entries(list) == 1) {
2620 if (fp != NULL)
2621 fprintf(fp, "# %s: %s\n", list_name,
2622 strlist__entry(list, 0)->s);
2623 return true;
2624 }
2625 return false;
2626 }
2627
2628 static bool get_elide(int idx, FILE *output)
2629 {
2630 switch (idx) {
2631 case HISTC_SYMBOL:
2632 return __get_elide(symbol_conf.sym_list, "symbol", output);
2633 case HISTC_DSO:
2634 return __get_elide(symbol_conf.dso_list, "dso", output);
2635 case HISTC_COMM:
2636 return __get_elide(symbol_conf.comm_list, "comm", output);
2637 default:
2638 break;
2639 }
2640
2641 if (sort__mode != SORT_MODE__BRANCH)
2642 return false;
2643
2644 switch (idx) {
2645 case HISTC_SYMBOL_FROM:
2646 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2647 case HISTC_SYMBOL_TO:
2648 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2649 case HISTC_DSO_FROM:
2650 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2651 case HISTC_DSO_TO:
2652 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2653 default:
2654 break;
2655 }
2656
2657 return false;
2658 }
2659
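/*
 * Elide a column when its corresponding filter list has exactly one
 * entry, since the value would then be identical on every line.  If
 * that would elide every sort column, un-elide them all so some output
 * remains.
 */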
2660 void sort__setup_elide(FILE *output)
2661 {
2662 struct perf_hpp_fmt *fmt;
2663 struct hpp_sort_entry *hse;
2664
2665 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2666 if (!perf_hpp__is_sort_entry(fmt))
2667 continue;
2668
2669 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2670 fmt->elide = get_elide(hse->se->se_width_idx, output);
2671 }
2672
2673 /*
2674 * It makes no sense to elide all of the sort entries.
2675 * Just revert them so they show up again.
2676 */
2677 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2678 if (!perf_hpp__is_sort_entry(fmt))
2679 continue;
2680
2681 if (!fmt->elide)
2682 return;
2683 }
2684
2685 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2686 if (!perf_hpp__is_sort_entry(fmt))
2687 continue;
2688
2689 fmt->elide = false;
2690 }
2691 }
2692
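/*
 * Resolve a --fields token against the common, hpp, branch-stack and
 * memory dimension tables and register the match as an output column.
 */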
2693 int output_field_add(struct perf_hpp_list *list, char *tok)
2694 {
2695 unsigned int i;
2696
2697 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2698 struct sort_dimension *sd = &common_sort_dimensions[i];
2699
2700 if (strncasecmp(tok, sd->name, strlen(tok)))
2701 continue;
2702
2703 return __sort_dimension__add_output(list, sd);
2704 }
2705
2706 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2707 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2708
2709 if (strncasecmp(tok, hd->name, strlen(tok)))
2710 continue;
2711
2712 return __hpp_dimension__add_output(list, hd);
2713 }
2714
2715 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2716 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2717
2718 if (strncasecmp(tok, sd->name, strlen(tok)))
2719 continue;
2720
2721 return __sort_dimension__add_output(list, sd);
2722 }
2723
2724 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2725 struct sort_dimension *sd = &memory_sort_dimensions[i];
2726
2727 if (strncasecmp(tok, sd->name, strlen(tok)))
2728 continue;
2729
2730 return __sort_dimension__add_output(list, sd);
2731 }
2732
2733 return -ESRCH;
2734 }
2735
2736 static int setup_output_list(struct perf_hpp_list *list, char *str)
2737 {
2738 char *tmp, *tok;
2739 int ret = 0;
2740
2741 for (tok = strtok_r(str, ", ", &tmp);
2742 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2743 ret = output_field_add(list, tok);
2744 if (ret == -EINVAL) {
2745 error("Invalid --fields key: `%s'", tok);
2746 break;
2747 } else if (ret == -ESRCH) {
2748 error("Unknown --fields key: `%s'", tok);
2749 break;
2750 }
2751 }
2752
2753 return ret;
2754 }
2755
2756 void reset_dimensions(void)
2757 {
2758 unsigned int i;
2759
2760 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2761 common_sort_dimensions[i].taken = 0;
2762
2763 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2764 hpp_sort_dimensions[i].taken = 0;
2765
2766 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2767 bstack_sort_dimensions[i].taken = 0;
2768
2769 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2770 memory_sort_dimensions[i].taken = 0;
2771 }
2772
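/*
 * A "strict" order string replaces the defaults; one starting with '+'
 * only appends to them (see setup_sort_order and __setup_output_field).
 */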
2773 bool is_strict_order(const char *order)
2774 {
2775 return order && (*order != '+');
2776 }
2777
2778 static int __setup_output_field(void)
2779 {
2780 char *str, *strp;
2781 int ret = -EINVAL;
2782
2783 if (field_order == NULL)
2784 return 0;
2785
2786 strp = str = strdup(field_order);
2787 if (str == NULL) {
2788 error("Not enough memory to setup output fields");
2789 return -ENOMEM;
2790 }
2791
2792 if (!is_strict_order(field_order))
2793 strp++;
2794
2795 if (!strlen(strp)) {
2796 error("Invalid --fields key: `+'");
2797 goto out;
2798 }
2799
2800 ret = setup_output_list(&perf_hpp_list, strp);
2801
2802 out:
2803 free(str);
2804 return ret;
2805 }
2806
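/*
 * Main entry point for sort/output setup: parse the sort keys, add the
 * implicit 'parent' key when a non-default parent pattern is used, set
 * up the default output columns (except for perf diff), parse the
 * output fields, and finally sync sort keys and output fields both ways.
 */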
2807 int setup_sorting(struct perf_evlist *evlist)
2808 {
2809 int err;
2810
2811 err = __setup_sorting(evlist);
2812 if (err < 0)
2813 return err;
2814
2815 if (parent_pattern != default_parent_pattern) {
2816 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
2817 if (err < 0)
2818 return err;
2819 }
2820
2821 reset_dimensions();
2822
2823 /*
2824 * perf diff doesn't use default hpp output fields.
2825 */
2826 if (sort__mode != SORT_MODE__DIFF)
2827 perf_hpp__init();
2828
2829 err = __setup_output_field();
2830 if (err < 0)
2831 return err;
2832
2833 /* copy sort keys to output fields */
2834 perf_hpp__setup_output_field(&perf_hpp_list);
2835 /* and then copy output fields to sort keys */
2836 perf_hpp__append_sort_keys(&perf_hpp_list);
2837
2838 /* setup hists-specific output fields */
2839 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
2840 return -1;
2841
2842 return 0;
2843 }
2844
2845 void reset_output_field(void)
2846 {
2847 perf_hpp_list.need_collapse = 0;
2848 perf_hpp_list.parent = 0;
2849 perf_hpp_list.sym = 0;
2850 perf_hpp_list.dso = 0;
2851
2852 field_order = NULL;
2853 sort_order = NULL;
2854
2855 reset_dimensions();
2856 perf_hpp__reset_output_field(&perf_hpp_list);
2857 }
2858