1"""report.py - Utilities for reporting statistics about benchmark results
2"""
3import os
4
5class BenchmarkColor(object):
6    def __init__(self, name, code):
7        self.name = name
8        self.code = code
9
10    def __repr__(self):
11        return '%s%r' % (self.__class__.__name__,
12                         (self.name, self.code))
13
14    def __format__(self, format):
15        return self.code
16
17# Benchmark Colors Enumeration
18BC_NONE = BenchmarkColor('NONE', '')
19BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
20BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
21BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
22BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
23BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
24BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
25BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
26BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
27BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
28BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
29
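# Because __format__ returns the escape code, a BenchmarkColor drops straight
# into str.format. For illustration (results shown for reference):
#
#   '{}FAILED{}'.format(BC_FAIL, BC_ENDC)  # -> '\033[91mFAILED\033[0m'
#   '{}FAILED{}'.format(BC_NONE, BC_NONE)  # -> 'FAILED'
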
def color_format(use_color, fmt_str, *args, **kwargs):
    """
    Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
    'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
    is False then all color codes in 'args' and 'kwargs' are replaced with
    the empty string.
    """
    assert use_color is True or use_color is False
    if not use_color:
        args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
                for arg in args]
        kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
                  for key, arg in kwargs.items()}
    return fmt_str.format(*args, **kwargs)

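# Example use of color_format: the same call yields colored or plain text
# depending on 'use_color' (results shown for reference):
#
#   color_format(True, '{}warn{}', BC_WARNING, BC_ENDC)   # -> '\033[93mwarn\033[0m'
#   color_format(False, '{}warn{}', BC_WARNING, BC_ENDC)  # -> 'warn'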

def find_longest_name(benchmark_list):
    """
    Return the length of the longest benchmark name in a given list of
    benchmark JSON objects.
    """
    longest_name = 1
    for bc in benchmark_list:
        if len(bc['name']) > longest_name:
            longest_name = len(bc['name'])
    return longest_name

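# For example, find_longest_name([{'name': 'BM_Foo'}, {'name': 'BM_LongerName'}])
# returns 13, the length of 'BM_LongerName'.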

def calculate_change(old_val, new_val):
    """
    Return a float representing the decimal change between old_val and new_val.
    """
    if old_val == 0 and new_val == 0:
        return 0.0
    if old_val == 0:
        # Avoid dividing by zero: fall back to the symmetric relative
        # difference, which stays finite and saturates at +/-2.0.
        return float(new_val - old_val) / (float(old_val + new_val) / 2)
    return float(new_val - old_val) / abs(old_val)

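# A few concrete values (worked out from the formulas above):
#
#   calculate_change(10, 15)  # -> +0.5 (new value is 50% larger)
#   calculate_change(10, 5)   # -> -0.5 (new value is 50% smaller)
#   calculate_change(0, 7)    # -> +2.0 (zero baseline, symmetric fallback)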

def generate_difference_report(json1, json2, use_color=True):
    """
    Calculate and report the difference between each test of two benchmark
    runs specified as 'json1' and 'json2'.
    """
    first_col_width = find_longest_name(json1['benchmarks']) + 5
    def find_test(name):
        for b in json2['benchmarks']:
            if b['name'] == name:
                return b
        return None
    first_line = "{:<{}s}     Time           CPU           Old           New".format(
        'Benchmark', first_col_width)
    output_strs = [first_line, '-' * len(first_line)]
    for bn in json1['benchmarks']:
        other_bench = find_test(bn['name'])
        if not other_bench:
            continue

        # Red for a meaningful regression (more than 5% slower), cyan for a
        # meaningful improvement (more than 7% faster), white in between.
        def get_color(res):
            if res > 0.05:
                return BC_FAIL
            elif res > -0.07:
                return BC_WHITE
            else:
                return BC_CYAN
        fmt_str = "{}{:<{}s}{endc}    {}{:+.2f}{endc}         {}{:+.2f}{endc}         {:4d}         {:4d}"
        tres = calculate_change(bn['real_time'], other_bench['real_time'])
        cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
        output_strs += [color_format(use_color, fmt_str,
            BC_HEADER, bn['name'], first_col_width,
            get_color(tres), tres, get_color(cpures), cpures,
            bn['cpu_time'], other_bench['cpu_time'],
            endc=BC_ENDC)]
    return output_strs

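# A minimal sketch of the input shape this function expects. The field names
# are the ones read above ('benchmarks', 'name', 'real_time', 'cpu_time');
# the benchmark name and values here are hypothetical:
#
#   run1 = {'benchmarks': [{'name': 'BM_Example', 'real_time': 10, 'cpu_time': 10}]}
#   run2 = {'benchmarks': [{'name': 'BM_Example', 'real_time': 15, 'cpu_time': 12}]}
#   for line in generate_difference_report(run1, run2, use_color=False):
#       print(line)
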
###############################################################################
# Unit tests

import unittest

class TestReportDifference(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
        testOutput1 = os.path.join(testInputs, 'test1_run1.json')
        testOutput2 = os.path.join(testInputs, 'test1_run2.json')
        with open(testOutput1, 'r') as f:
            json1 = json.load(f)
        with open(testOutput2, 'r') as f:
            json2 = json.load(f)
        return json1, json2

    def test_basic(self):
        expect_lines = [
            ['BM_SameTimes', '+0.00', '+0.00'],
            ['BM_2xFaster', '-0.50', '-0.50'],
            ['BM_2xSlower', '+1.00', '+1.00'],
            ['BM_10PercentFaster', '-0.10', '-0.10'],
            ['BM_10PercentSlower', '+0.10', '+0.10']
        ]
        json1, json2 = self.load_results()
        # Skip the header line and its underline before comparing.
        output_lines = generate_difference_report(json1, json2, use_color=False)[2:]
        print(output_lines)
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            # Each row holds five fields: name, time change, CPU change,
            # old CPU time, new CPU time.
            self.assertEqual(len(parts), 5)
            # The expected values cover only the first three fields.
            self.assertEqual(parts[:3], expect_lines[i])


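class TestColorFormat(unittest.TestCase):
    # Not part of the original suite: a small sanity check that color_format
    # keeps color codes when use_color is True and strips them otherwise.
    def test_use_color_toggle(self):
        fmt = '{}ok{endc}'
        self.assertEqual(color_format(False, fmt, BC_CYAN, endc=BC_ENDC), 'ok')
        self.assertEqual(color_format(True, fmt, BC_CYAN, endc=BC_ENDC),
                         '\033[96mok\033[0m')

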
if __name__ == '__main__':
    unittest.main()