// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-events.h"
#include "tests/tests.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>

struct sample {
	u32 cpu;
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .cpu = 3, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .cpu = 0, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .cpu = 1, .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};

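/*
 * Feed the fake samples above into 'hists': resolve each sample against the
 * fake machine and add it through the normal hist_entry_iter path, keeping
 * the resolved thread/map/sym so put_fake_samples() can drop the map
 * references later.
 */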
static int add_hist_entries(struct hists *hists, struct machine *machine)
{
	struct addr_location al;
	struct evsel *evsel = hists_to_evsel(hists);
	struct perf_sample sample = { .period = 100, };
	size_t i;

	addr_location__init(&al);
	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		struct hist_entry_iter iter = {
			.evsel = evsel,
			.sample = &sample,
			.ops = &hist_iter_normal,
			.hide_unresolved = false,
		};

		sample.cpumode = PERF_RECORD_MISC_USER;
		sample.cpu = fake_samples[i].cpu;
		sample.pid = fake_samples[i].pid;
		sample.tid = fake_samples[i].pid;
		sample.ip = fake_samples[i].ip;

		if (machine__resolve(machine, &al, &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
					 NULL) < 0) {
			goto out;
		}

		fake_samples[i].thread = al.thread;
		map__put(fake_samples[i].map);
		fake_samples[i].map = map__get(al.map);
		fake_samples[i].sym = al.sym;
	}

	addr_location__exit(&al);
	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	addr_location__exit(&al);
	return TEST_FAIL;
}

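/*
 * Remove all hist entries between test cases, taking them out of both the
 * output tree and the input/collapsed tree before freeing them.
 */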
static void del_hist_entries(struct hists *hists)
{
	struct hist_entry *he;
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;
	struct rb_node *node;

	if (hists__has(hists, need_collapse))
		root_in = &hists->entries_collapsed;
	else
		root_in = hists->entries_in;

	root_out = &hists->entries;

	while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
		node = rb_first_cached(root_out);

		he = rb_entry(node, struct hist_entry, rb_node);
		rb_erase_cached(node, root_out);
		rb_erase_cached(&he->rb_node_in, root_in);
		hist_entry__delete(he);
	}
}

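/* Drop the map references taken in add_hist_entries(). */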
static void put_fake_samples(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		map__put(fake_samples[i].map);
		fake_samples[i].map = NULL;
	}
}

typedef int (*test_fn_t)(struct evsel *, struct machine *);

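/* shorthand accessors for the hist_entry fields checked below */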
#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (map__dso(he->ms.map)->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (thread__tid(he->thread))

/* default sort keys (no field) */
static int test1(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root_cached *root;
	struct rb_node *node;

	field_order = NULL;
	sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object  Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%     bash  bash           [.] main
	 *   10.00%     bash  bash           [.] xmalloc
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%     perf  [kernel]       [k] schedule
	 *   10.00%     perf  libc           [.] free
	 *   10.00%     perf  libc           [.] malloc
	 *   10.00%     perf  perf           [.] cmd_record
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first_cached(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "xmalloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "schedule") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "free") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "malloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "cmd_record") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* mixed fields and sort keys */
static int test2(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root_cached *root;
	struct rb_node *node;

	field_order = "overhead,cpu";
	sort_order = "pid";

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * Overhead  CPU  Command:  Pid
	 * ========  ===  =============
	 *   30.00%    1  perf :  100
	 *   10.00%    0  perf :  100
	 *   10.00%    2  perf :  100
	 *   20.00%    2  perf :  200
	 *   10.00%    0  bash :  300
	 *   10.00%    1  bash :  300
	 *   10.00%    3  bash :  300
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first_cached(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 100 && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* fields only (no sort key) */
static int test3(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root_cached *root;
	struct rb_node *node;

	field_order = "comm,overhead,dso";
	sort_order = NULL;

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * Command  Overhead  Shared Object
	 * =======  ========  =============
	 *    bash    20.00%  bash
	 *    bash    10.00%  [kernel]
	 *    perf    30.00%  perf
	 *    perf    20.00%  [kernel]
	 *    perf    20.00%  libc
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first_cached(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			he->stat.period == 300);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			he->stat.period == 200);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* handle duplicate 'dso' field */
static int test4(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root_cached *root;
	struct rb_node *node;

	field_order = "dso,sym,comm,overhead,dso";
	sort_order = "sym";

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * Shared Object  Symbol          Command  Overhead
	 * =============  ==============  =======  ========
	 *  perf          [.] cmd_record  perf       10.00%
	 *  libc          [.] free        perf       10.00%
	 *  bash          [.] main        bash       10.00%
	 *  perf          [.] main        perf       20.00%
	 *  libc          [.] malloc      perf       10.00%
	 *  [kernel]      [k] page_fault  bash       10.00%
	 *  [kernel]      [k] page_fault  perf       10.00%
	 *  [kernel]      [k] schedule    perf       10.00%
	 *  bash          [.] xmalloc     bash       10.00%
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first_cached(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "libc") && !strcmp(SYM(he), "free") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "bash") && !strcmp(SYM(he), "main") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "main") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "libc") && !strcmp(SYM(he), "malloc") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "schedule") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "bash") && !strcmp(SYM(he), "xmalloc") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* full sort keys w/o overhead field */
static int test5(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root_cached *root;
	struct rb_node *node;

	field_order = "cpu,pid,comm,dso,sym";
	sort_order = "dso,pid";

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * CPU  Command:  Pid  Command  Shared Object  Symbol
	 * ===  =============  =======  =============  ==============
	 *   0     perf:  100     perf  [kernel]       [k] schedule
	 *   2     perf:  200     perf  [kernel]       [k] page_fault
	 *   1     bash:  300     bash  [kernel]       [k] page_fault
	 *   0     bash:  300     bash  bash           [.] xmalloc
	 *   3     bash:  300     bash  bash           [.] main
	 *   1     perf:  100     perf  libc           [.] malloc
	 *   2     perf:  100     perf  libc           [.] free
	 *   1     perf:  100     perf  perf           [.] cmd_record
	 *   1     perf:  100     perf  perf           [.] main
	 *   2     perf:  200     perf  perf           [.] main
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first_cached(root);
	he = rb_entry(node, struct hist_entry, rb_node);

	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "schedule") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 200 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "xmalloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 3 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "malloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "free") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "cmd_record") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 200 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

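/*
 * Build an evlist with a single cpu-clock event, set up the fake machine
 * (threads, DSOs, maps and symbols) and run each test case against it.
 */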
static int test__hists_output(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int err = TEST_FAIL;
	struct machines machines;
	struct machine *machine;
	struct evsel *evsel;
	struct evlist *evlist = evlist__new();
	size_t i;
	test_fn_t testcases[] = {
		test1,
		test2,
		test3,
		test4,
		test5,
	};

	TEST_ASSERT_VAL("No memory", evlist);

	err = parse_event(evlist, "cpu-clock");
	if (err)
		goto out;
	err = TEST_FAIL;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	evsel = evlist__first(evlist);

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		err = testcases[i](evsel, machine);
		if (err < 0)
			break;
	}

out:
	/* tear down everything */
	evlist__delete(evlist);
	machines__exit(&machines);
	put_fake_samples();

	return err;
}

DEFINE_SUITE("Sort output of hist entries", hists_output);