#!/usr/bin/env python
"""
compare_bench.py - Compare two benchmarks or their results and report the
                   difference.
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *

def check_inputs(in1, in2, flags):
    """
    Perform checking on the user-provided inputs and diagnose any abnormalities.
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks, causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing --benchmark flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py'"
               " is not supported.") % output_type)
        sys.exit(1)


def main():
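    # Two positional inputs (each a benchmark executable or a JSON results
    # file), plus a trailing catch-all for Google Benchmark flags such as
    # --benchmark_filter.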
    parser = ArgumentParser(
        description='compare the results of two benchmarks')
    parser.add_argument(
        'test1', metavar='test1', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'test2', metavar='test2', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables'
    )
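    # argparse.REMAINDER gathers everything after the two positional inputs so
    # it can be forwarded verbatim to benchmark executables; check_inputs()
    # warns that these flags have no effect when both inputs are JSON.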
    args, unknown_args = parser.parse_known_args()
    # Unpack the parsed positional arguments
    test1 = args.test1[0]
    test2 = args.test2[0]
    if unknown_args:
        # should never happen
        print("Unrecognized positional arguments: '%s'"
              % unknown_args)
        sys.exit(1)
    benchmark_options = args.benchmark_options
    check_inputs(test1, test2, benchmark_options)
    # Run the benchmarks and report the results
    json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
    json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options)
    output_lines = gbench.report.generate_difference_report(json1, json2)
    print('Comparing %s to %s' % (test1, test2))
    for ln in output_lines:
        print(ln)


if __name__ == '__main__':
    main()