# Copyright (C) 2012 Google Inc. All rights reserved.
# Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import errno
import logging
import math
import os
import re
import signal
import socket
import subprocess
import sys
import time

from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.port.driver import DriverInput
from webkitpy.layout_tests.port.driver import DriverOutput

DEFAULT_TEST_RUNNER_COUNT = 4

_log = logging.getLogger(__name__)


class PerfTestMetric(object):
    def __init__(self, metric, unit=None, iterations=None):
        # FIXME: Fix runner.js to report correct metric names
        self._iterations = iterations or []
        self._unit = unit or self.metric_to_unit(metric)
        self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric

    def name(self):
        return self._metric

    def has_values(self):
        return bool(self._iterations)

    def append_group(self, group_values):
        assert isinstance(group_values, list)
        self._iterations.append(group_values)

    def grouped_iteration_values(self):
        return self._iterations

    def flattened_iteration_values(self):
        return [value for group_values in self._iterations for value in group_values]

    def unit(self):
        return self._unit

    @staticmethod
    def metric_to_unit(metric):
        assert metric in ('Time', 'Malloc', 'JSHeap')
        return 'ms' if metric == 'Time' else 'bytes'

    @staticmethod
    def time_unit_to_metric(unit):
        return {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[unit]

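# Illustrative usage sketch (comment only; not part of the original file).
# Each runner invocation contributes one group of values, and statistics are
# later computed over the flattened list:
#
#   metric = PerfTestMetric('Time')            # unit defaults to 'ms'
#   metric.append_group([1504.0, 1512.0])      # values from the first run
#   metric.append_group([1498.0, 1509.0])      # values from the second run
#   metric.flattened_iteration_values()        # [1504.0, 1512.0, 1498.0, 1509.0]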
class PerfTest(object):

    def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
        self._port = port
        self._test_name = test_name
        self._test_path = test_path
        self._description = None
        self._metrics = {}
        self._ordered_metrics_name = []
        self._test_runner_count = test_runner_count

    def test_name(self):
        return self._test_name

    def test_name_without_file_extension(self):
        return re.sub(r'\.\w+$', '', self.test_name())

    def test_path(self):
        return self._test_path

    def description(self):
        return self._description

    def prepare(self, time_out_ms):
        return True

    def _create_driver(self):
        return self._port.create_driver(worker_number=0, no_timeout=True)

    def run(self, time_out_ms):
        for _ in xrange(self._test_runner_count):
            driver = self._create_driver()
            try:
                if not self._run_with_driver(driver, time_out_ms):
                    return None
            finally:
                driver.stop()

        should_log = not self._port.get_option('profile')
        if should_log and self._description:
            _log.info('DESCRIPTION: %s' % self._description)

        results = {}
        for metric_name in self._ordered_metrics_name:
            metric = self._metrics[metric_name]
            results[metric.name()] = metric.grouped_iteration_values()
            if should_log:
                legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
                self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
                    metric.flattened_iteration_values(), metric.unit())

        return results

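    # Note (comment only; not in the original file): run() starts a fresh driver
    # for each of the test_runner_count invocations, so with the default of 4
    # the returned mapping typically has four groups per metric, e.g.
    #
    #   {'Time': [[...], [...], [...], [...]]}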
    @staticmethod
    def log_statistics(test_name, values, unit):
        sorted_values = sorted(values)

        # Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
        square_sum = 0
        mean = 0
        for i, value in enumerate(sorted_values):
            delta = value - mean
            sweep = i + 1.0
            mean += delta / sweep
            square_sum += delta * (value - mean)

        middle = int(len(sorted_values) / 2)
        median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2.0
        stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0

        _log.info('RESULT %s= %s %s' % (test_name, mean, unit))
        _log.info('median= %s %s, stdev= %s %s, min= %s %s, max= %s %s' %
            (median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit))

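    # Worked example (comment only; not in the original file): for values
    # [3, 5, 10] the Knuth loop above proceeds as
    #   i=0: mean = 3.0, square_sum = 0.0
    #   i=1: mean = 4.0, square_sum = 2.0
    #   i=2: mean = 6.0, square_sum = 26.0
    # giving stdev = sqrt(26.0 / 2) ~= 3.61, which matches the two-pass
    # sample standard deviation.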
    _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
    _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):')
    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
    _console_regex = re.compile(r'^CONSOLE (MESSAGE|WARNING):')

    def _run_with_driver(self, driver, time_out_ms):
        output = self.run_single(driver, self.test_path(), time_out_ms)
        self._filter_output(output)
        if self.run_failed(output):
            return False

        current_metric = None
        for line in re.split('\n', output.text):
            description_match = self._description_regex.match(line)
            metric_match = self._metrics_regex.match(line)
            score = self._score_regex.match(line)
            console_match = self._console_regex.match(line)

            if description_match:
                self._description = description_match.group('description')
            elif metric_match:
                current_metric = metric_match.group('metric').replace(' ', '')
            elif score:
                if score.group('key') != 'values':
                    continue

                metric = self._ensure_metrics(current_metric, score.group('unit'))
                metric.append_group([float(value) for value in score.group('value').split(', ')])
            elif console_match:
                # Ignore console messages such as deprecation warnings.
                continue
            else:
                _log.error('ERROR: ' + line)
                return False

        return True

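    # Illustrative sketch (comment only; the numbers are made up) of the
    # runner.js output the regexes above consume:
    #
    #   Description: Measures layout performance.
    #   Time:
    #   values 1504.0, 1512.0, 1498.0, 1509.0 ms
    #   avg 1505.75 ms
    #   median 1506.5 ms
    #
    # Only the 'values' lines feed a PerfTestMetric; the pre-aggregated
    # statistics lines are skipped, and log_statistics() recomputes the
    # statistics over the values from all runner invocations.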
    def _ensure_metrics(self, metric_name, unit=None):
        if metric_name not in self._metrics:
            self._metrics[metric_name] = PerfTestMetric(metric_name, unit)
            self._ordered_metrics_name.append(metric_name)
        return self._metrics[metric_name]

    def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
        return driver.run_test(DriverInput(test_path, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test, args=[]), stop_when_done=False)

    def run_failed(self, output):
        if output.error:
            _log.error('error: %s\n%s' % (self.test_name(), output.error))

        if output.text is None:
            pass
        elif output.timeout:
            _log.error('timeout: %s' % self.test_name())
        elif output.crash:
            _log.error('crash: %s' % self.test_name())
        else:
            return False

        return True

    @staticmethod
    def _should_ignore_line(regexps, line):
        if not line:
            return True
        for regexp in regexps:
            if regexp.search(line):
                return True
        return False

    _lines_to_ignore_in_stderr = [
        re.compile(r'^Unknown option:'),
        re.compile(r'^\[WARNING:proxy_service.cc'),
        re.compile(r'^\[INFO:'),
        # These stderr messages come from content_shell on Linux.
        re.compile(r'INFO:SkFontHost_fontconfig.cpp'),
        re.compile(r'Running without the SUID sandbox'),
        # crbug.com/345229
        re.compile(r'InitializeSandbox\(\) called with multiple threads in process gpu-process')]

    _lines_to_ignore_in_parser_result = [
        re.compile(r'^\s*Running \d+ times$'),
        re.compile(r'^\s*Ignoring warm-up '),
        re.compile(r'^\s*Info:'),
        re.compile(r'^\s*\d+(\.\d+)?(\s*(runs\/s|ms|fps))?$'),
        # The following handle existing tests like Dromaeo.
        re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)""")),
        # The following is for html5.html.
        re.compile(re.escape("""Blocked access to external URL http://www.whatwg.org/specs/web-apps/current-work/""")),
        re.compile(r"CONSOLE MESSAGE: (line \d+: )?Blocked script execution in '[A-Za-z0-9\-\.:]+' because the document's frame is sandboxed and the 'allow-scripts' permission is not set."),
        re.compile(r"CONSOLE MESSAGE: (line \d+: )?Not allowed to load local resource"),
        # Dromaeo reports values for subtests. Ignore them for now.
        re.compile(r'(?P<name>.+): \[(?P<values>(\d+(\.\d+)?,\s+)*\d+(\.\d+)?)\]'),
    ]

    def _filter_output(self, output):
        if output.error:
            output.error = '\n'.join([line for line in re.split('\n', output.error) if not self._should_ignore_line(self._lines_to_ignore_in_stderr, line)])
        if output.text:
            output.text = '\n'.join([line for line in re.split('\n', output.text) if not self._should_ignore_line(self._lines_to_ignore_in_parser_result, line)])

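# Illustrative sketch (comment only; the sample lines are made up):
#
#   regexps = PerfTest._lines_to_ignore_in_parser_result
#   PerfTest._should_ignore_line(regexps, 'Running 20 times')     # True
#   PerfTest._should_ignore_line(regexps, 'values 1.5, 2.5 ms')   # False
#
# _filter_output() applies these lists so that only meaningful lines reach
# the parsing loop in _run_with_driver().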
class SingleProcessPerfTest(PerfTest):
    def __init__(self, port, test_name, test_path, test_runner_count=1):
        super(SingleProcessPerfTest, self).__init__(port, test_name, test_path, test_runner_count)


class ChromiumStylePerfTest(PerfTest):
    _chromium_style_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')

    def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
        super(ChromiumStylePerfTest, self).__init__(port, test_name, test_path, test_runner_count)

    def run(self, time_out_ms):
        driver = self._create_driver()
        try:
            output = self.run_single(driver, self.test_path(), time_out_ms)
        finally:
            driver.stop()

        self._filter_output(output)
        if self.run_failed(output):
            return None

        return self.parse_and_log_output(output)

    def parse_and_log_output(self, output):
        test_failed = False
        results = {}
        for line in re.split('\n', output.text):
            result_line = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
            if result_line:
                # FIXME: Store the unit
                results[result_line.group('name').replace(' ', '')] = float(result_line.group('value'))
                _log.info(line)
            elif line:
                test_failed = True
                _log.error(line)
        return results if results and not test_failed else None

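# Illustrative sketch (comment only; the numbers are made up): chromium-style
# tests print pre-aggregated results, one metric per line, e.g.
#
#   RESULT inspector-heap-snapshot: taking-snapshot= 1427.5 ms
#
# which parse_and_log_output() stores as
# {'inspector-heap-snapshot:taking-snapshot': 1427.5}; the unit is matched
# but dropped for now (see the FIXME above).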
class PerfTestFactory(object):

    _pattern_map = [
        (re.compile(r'^Dromaeo/'), SingleProcessPerfTest),
        (re.compile(r'^inspector/'), ChromiumStylePerfTest),
    ]

    @classmethod
    def create_perf_test(cls, port, test_name, path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
        for (pattern, test_class) in cls._pattern_map:
            if pattern.match(test_name):
                return test_class(port, test_name, path, test_runner_count)
        return PerfTest(port, test_name, path, test_runner_count)

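# Illustrative usage sketch (comment only; 'port' is a webkitpy Port object
# created elsewhere): the factory dispatches on the test name prefix.
#
#   test = PerfTestFactory.create_perf_test(port, 'inspector/timeline.html', path)
#   # -> ChromiumStylePerfTest; names without a matching prefix fall back to
#   #    the generic PerfTest.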