#!/usr/bin/env python

"""
compare.py - versatile benchmark output compare tool
"""

import argparse
from argparse import ArgumentParser
import sys
import unittest

import gbench
from gbench import util, report
from gbench.util import *
15
def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities
    """
    kind1, _ = classify_input_file(in1)
    kind2, _ = classify_input_file(in2)
    out_file = find_benchmark_flag('--benchmark_out=', flags)
    out_fmt = find_benchmark_flag('--benchmark_out_format=', flags)

    # Both inputs are executables: a shared --benchmark_out would be written
    # twice, with the second run clobbering the first.
    both_executables = (kind1 == IT_Executable) and (kind2 == IT_Executable)
    if both_executables and out_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % out_file)

    # Both inputs are pre-recorded JSON: nothing will be executed, so any
    # extra benchmark flags are silently meaningless.
    both_json = (kind1 == IT_JSON) and (kind2 == IT_JSON)
    if both_json and flags:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")

    # The comparison machinery only understands JSON output.
    if out_fmt is not None and out_fmt != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
               " is not supported.") % out_fmt)
        sys.exit(1)
def create_parser():
    """Build the command-line parser.

    Top-level flags control display, color, and the Mann-Whitney U test;
    three subcommands ('benchmarks', 'filters', 'benchmarksfiltered')
    select what is being compared.
    """
    p = ArgumentParser(
        description='versatile benchmark output compare tool')

    p.add_argument(
        '-a', '--display_aggregates_only',
        dest='display_aggregates_only',
        action="store_true",
        help="If there are repetitions, by default, we display everything - the"
             " actual runs, and the aggregates computed. Sometimes, it is "
             "desirable to only view the aggregates. E.g. when there are a lot "
             "of repetitions. Do note that only the display is affected. "
             "Internally, all the actual runs are still used, e.g. for U test.")

    p.add_argument(
        '--no-color',
        dest='color',
        default=True,
        action="store_false",
        help="Do not use colors in the terminal output")

    # U-test options live in their own (anonymous) argument group.
    utest_group = p.add_argument_group()
    utest_group.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
    default_alpha = 0.05
    utest_group.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=default_alpha,
        type=float,
        help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") % default_alpha)

    modes = p.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    # Mode 1: compare the complete output of two benchmarks.
    bench_mode = modes.add_parser(
        'benchmarks',
        help='The most simple use-case, compare all the output of these two benchmarks')
    group = bench_mode.add_argument_group(
        'baseline', 'The benchmark baseline')
    group.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    group = bench_mode.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    group.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    bench_mode.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    # Mode 2: compare two filters applied to a single benchmark.
    filt_mode = modes.add_parser(
        'filters', help='Compare filter one with the filter two of benchmark')
    group = filt_mode.add_argument_group(
        'baseline', 'The benchmark baseline')
    group.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    group.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    group = filt_mode.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    group.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    filt_mode.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    # Mode 3: compare a filter of one benchmark against a filter of another.
    cross_mode = modes.add_parser(
        'benchmarksfiltered',
        help='Compare filter one of first benchmark with filter two of the second benchmark')
    group = cross_mode.add_argument_group(
        'baseline', 'The benchmark baseline')
    group.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    group.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    group = cross_mode.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    group.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, that will be compared against the baseline')
    group.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    cross_mode.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return p
def main():
    """Tool entry point.

    Parses the command line, resolves the selected mode into a
    (baseline, contender, filters) tuple, runs or loads both benchmark
    inputs, and prints a difference report to stdout.

    Exits with status 1 when no mode was given or an unsupported
    output format was requested (via check_inputs).
    """
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        # FIX: use sys.exit, not the interactive-only exit() builtin
        # (exit() comes from the site module and may be absent, e.g. under -S).
        sys.exit(1)
    # argparse.REMAINDER swallows all trailing options into
    # benchmark_options, so nothing should ever be left unparsed.
    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        # Same input file on both sides; only the filters differ.
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen: add_subparsers restricts 'mode' to known values
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        sys.exit(1)  # FIX: was exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)

    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    # Diff and output
    output_lines = gbench.report.generate_difference_report(
        json1, json2, args.display_aggregates_only,
        args.utest, args.utest_alpha, args.color)
    print(description)
    for ln in output_lines:
        print(ln)
class TestParser(unittest.TestCase):
    """Checks that create_parser() wires up flags and subcommands correctly."""

    def setUp(self):
        self.parser = create_parser()
        # Reference JSON fixtures shipped next to this script.
        inputs_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(inputs_dir, 'test1_run1.json')
        self.testInput1 = os.path.join(inputs_dir, 'test1_run2.json')

    def _parse(self, *argv):
        # Run the shared parser over the given command line.
        return self.parser.parse_args(list(argv))

    def test_benchmarks_basic(self):
        parsed = self._parse('benchmarks', self.testInput0, self.testInput1)
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self._parse(
            '--no-utest', 'benchmarks', self.testInput0, self.testInput1)
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self._parse(
            '-a', 'benchmarks', self.testInput0, self.testInput1)
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self._parse(
            '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1)
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self._parse(
            '--no-utest', '--alpha=0.314', 'benchmarks',
            self.testInput0, self.testInput1)
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self._parse(
            'benchmarks', self.testInput0, self.testInput1, 'd')
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self._parse(
            'benchmarks', self.testInput0, self.testInput1, '--', 'e')
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self._parse('filters', self.testInput0, 'c', 'd')
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self._parse('filters', self.testInput0, 'c', 'd', 'e')
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self._parse('filters', self.testInput0, 'c', 'd', '--', 'f')
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self._parse(
            'benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e')
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self._parse(
            'benchmarksfiltered', self.testInput0, 'c',
            self.testInput1, 'e', 'f')
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self._parse(
            'benchmarksfiltered', self.testInput0, 'c',
            self.testInput1, 'e', '--', 'g')
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
    # Swap the next two lines to run the parser unit tests instead.
    # unittest.main()
    main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;