#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

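/*
 * Print one field of a hist entry using print_fn.  For event groups,
 * also print the matching field of every group member, zero-filling
 * members that have no samples so the columns stay aligned.
 */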
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle
				 * which have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at the end which have
			 * no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size since that is where the
	 * caller expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

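/*
 * Width-aware wrapper around __hpp__fmt: honor a user-specified column
 * width if set, and reserve room for the leading space plus the "%"
 * sign when printing percentages.  With a field separator the width is
 * irrelevant, so a minimal width of 1 is used.
 */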
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

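/*
 * Accumulated ("children") variant: prints "N/A" unless callchain
 * accumulation is enabled.
 */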
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

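/*
 * Compare two hist entries by a field.  For event groups, break ties
 * by comparing the same field of each group member in index order.
 */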
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

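/*
 * Accumulated variant: only meaningful with cumulated callchains.
 * On equal periods within the same thread, order by callchain depth
 * so that callers land above callees.
 */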
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
	}
	return ret;
}

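/*
 * Default column width and header callbacks shared by all hpp columns.
 * The width grows to fit grouped events and never shrinks below the
 * column name.
 */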
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct perf_evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

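/*
 * snprintf-style callbacks passed to __hpp__fmt: the color variant
 * pulls the width and percent value out of the varargs so it can pick
 * a color based on the percentage.  Both clamp the return value to the
 * buffer size.
 */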
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

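/*
 * Macro templates generating, per column type, the field getter and
 * the color/entry print and sort callbacks.  _type names the column,
 * _field selects the member of the entry's stat (or stat_acc) to read.
 */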
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}


#define HPP_PERCENT_FNS(_type, _field)					\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)				\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

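/* Instantiate the print/sort callbacks for each default column. */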
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

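/*
 * Set up the default output columns according to the current
 * configuration (cumulated callchains, CPU utilization, sample and
 * period counts), unless the user specified an explicit field order.
 */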
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to set
	 * up the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del(&format->list);
}

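/*
 * Drop the accumulated "Children" column and rename "Self" back to
 * "Overhead" when callchain accumulation is canceled.
 */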
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

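/*
 * Mirror the sort keys into the output field list (and the output
 * fields into the sort keys, below), skipping entries that are
 * already present.
 */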
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

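/*
 * Width of the leading overhead columns only, i.e. everything before
 * the first sort or dynamic entry.
 */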
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

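/*
 * Reset a column to its default width; sort entries have their own
 * reset handler and dynamic entries keep their width.
 */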
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

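/*
 * Apply user-specified column widths, given as a comma-separated list
 * matching the output field order.
 */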
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

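/*
 * Attach a copy of fmt to the per-level hpp list of this hists,
 * creating the level node on first use.  A level is only marked to be
 * skipped when every format attached to it should be skipped.
 */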
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

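/*
 * In hierarchy mode, build the per-level format lists for every
 * evsel's hists from the shared sort list.
 */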
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}