# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Automated performance regression detection tool for ChromeOS perf tests.

   Refer to the instructions on how to use this tool at
   https://sites.google.com/a/chromium.org/dev/perf-regression-detection.
"""

import logging
import os
import re

import common
from autotest_lib.client.common_lib import utils


class TraceNotFound(RuntimeError):
    """Raised when no expectation is defined for a trace."""
    pass


def divide(x, y):
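    """Return x / y as a float, or positive infinity if y is zero."""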
    if y == 0:
        return float('inf')
    return float(x) / y


class perf_expectation_checker(object):
    """Check performance results against expectations."""
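    #
    # Typical usage sketch (the test name and values below are only
    # illustrative, not taken from a real run):
    #
    #   checker = perf_expectation_checker('desktopui_PyAutoPerfTests')
    #   report = checker.compare_multiple_traces(
    #       {'milliseconds_NewTabCalendar': 1231.0})
    #   if report['regress']:
    #       # handle regressions
    #       ...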

    def __init__(self, test_name, board=None,
                 expectation_file_path=None):
        """Initialize a perf expectation checker.

           @param test_name: the name of the performance test;
               will be used to load the expectation.
           @param board: an alternative board name; will be used
               to load the expectation. Defaults to the board name
               in /etc/lsb-release.
           @param expectation_file_path: an alternative expectation file.
               Defaults to perf_expectations.json in the same folder
               as this file.
        """
        self._expectations = {}
        if expectation_file_path:
            self._expectation_file_path = expectation_file_path
        else:
            self._expectation_file_path = os.path.abspath(
                os.path.join(os.path.dirname(__file__),
                    'perf_expectations.json'))
        self._board = board or utils.get_current_board()
        self._test_name = test_name
        assert self._board, 'Failed to get board name.'
        assert self._test_name, (
               'You must specify a test name when initializing'
               ' perf_expectation_checker.')
        self._load_perf_expectations_file()

    def _load_perf_expectations_file(self):
        """Load perf expectation file."""
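        # The expectation file is plain JSON. Based on the lookups in
        # compare_one_trace(), each entry is keyed by
        # '<board>/<test_name>/<trace>' and holds 'regress' and 'improve'
        # thresholds plus an optional 'better' hint, e.g. (illustrative
        # values only, not from a real perf_expectations.json):
        #
        #   {"lumpy/desktopui_PyAutoPerfTests/milliseconds_NewTabCalendar":
        #       {"improve": 900.0, "regress": 1200.0, "better": "lower"}}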
        try:
            expectation_file = open(self._expectation_file_path)
        except IOError, e:
            logging.error('I/O Error reading expectations %s(%s): %s',
                          self._expectation_file_path, e.errno, e.strerror)
            raise e
        # Must import here to make it work with autotest.
        import json
        try:
            self._expectations = json.load(expectation_file)
        except ValueError, e:
            # ValueError from json has no errno/strerror, so log the
            # exception itself.
            logging.error('ValueError parsing expectations %s: %s',
                          self._expectation_file_path, e)
            raise e
        finally:
            expectation_file.close()

        if not self._expectations:
            # Will skip checking the perf values against expectations
            # when no expectation is defined.
            logging.info('No expectation data found in %s.',
                         self._expectation_file_path)
            return

    def compare_one_trace(self, trace, trace_perf_value):
        """Compare a performance value of a trace with the expectation.

        @param trace: the name of the trace.
        @param trace_perf_value: the performance value of the trace.
        @return a tuple like one of the below:
            ('regress', 2.3), ('improve', 3.2), ('accept', None)
            where the float number is the regress/improve ratio, and the
            ratio is None when the value is within the accepted range.
        @raises TraceNotFound: if no expectation is defined for the trace.
        """
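        # Worked example (hypothetical thresholds): for a lower-is-better
        # trace with improve=900 and regress=1200, a measured value of 1320
        # gives divide(1320, 1200) - 1 = 0.10, i.e. ('regress', 0.10), while
        # a value of 810 gives 1 - divide(810, 900) = 0.10, i.e.
        # ('improve', 0.10).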
        perf_key = '/'.join([self._board, self._test_name, trace])
        if perf_key not in self._expectations:
            raise TraceNotFound('Expectation for trace %s not defined' % trace)
        perf_data = self._expectations[perf_key]
        regress = float(perf_data['regress'])
        improve = float(perf_data['improve'])
        if (('better' in perf_data and perf_data['better'] == 'lower') or
            ('better' not in perf_data and regress > improve)):
            # The "lower is better" case.
            if trace_perf_value < improve:
                ratio = 1 - divide(trace_perf_value, improve)
                return 'improve', ratio
            elif trace_perf_value > regress:
                ratio = divide(trace_perf_value, regress) - 1
                return 'regress', ratio
        else:
            # The "higher is better" case.
            if trace_perf_value > improve:
                ratio = divide(trace_perf_value, improve) - 1
                return 'improve', ratio
            elif trace_perf_value < regress:
                ratio = 1 - divide(trace_perf_value, regress)
                return 'regress', ratio
        return 'accept', None

    def compare_multiple_traces(self, perf_results):
        """Compare multiple traces with corresponding expectations.

        @param perf_results: a dictionary from trace name to value in float,
            e.g. {"milliseconds_NewTabCalendar": 1231.000000,
                  "milliseconds_NewTabDocs": 889.000000}.

        @return a dictionary of regressions, improvements, and acceptances
            in the format below:
            {'regress': [('trace_1', 2.35), ('trace_2', 2.83)...],
             'improve': [('trace_3', 2.55), ('trace_4', 52.33)...],
             'accept':  [('trace_5', None), ('trace_6', None)...]}
            where the float number is the regress/improve ratio and the
            ratio for accepted traces is None.
        """
        ret_val = {'regress': [], 'improve': [], 'accept': []}
        for trace in perf_results:
            try:
                # (key, ratio) is like ('regress', 2.83).
                key, ratio = self.compare_one_trace(trace, perf_results[trace])
                ret_val[key].append((trace, ratio))
            except TraceNotFound:
                logging.debug(
                    'Skip checking %s/%s/%s, expectation not defined.',
                    self._board, self._test_name, trace)
        return ret_val