# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run WebKit's performance tests and report the results."""

import os
import json
import logging
import optparse
import time
import datetime

from webkitpy.common import find_files
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.config.urls import view_source_url
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
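    """Collects and runs WebKit performance tests, writes the results to a JSON
    file and an HTML results page, and optionally uploads the JSON to a test
    results server."""
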
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()


    @staticmethod
    def _parse_args(args=None):
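        """Parse the performance-test runner's command-line options and return
        the (options, args) pair produced by optparse."""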
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--android",
                action="store_const", const='android', dest='platform', help='Alias for --platform=android'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do not generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-drt-flag", action="append",
                default=[], help="Additional command line flag to pass to DumpRenderTree. "
                     "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name", type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option("--content-shell", action="store_true",
                help="Use Content Shell instead of DumpRenderTree"),
            optparse.make_option("--repeat", default=1, type="int",
                help="Specify number of times to run test set (default: 1)."),
            optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
                help="Specify number of times to invoke test runner for each performance test."),
            ]
        return optparse.OptionParser(option_list=perf_option_list).parse_args(args)

    def _collect_tests(self):
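        """Return PerfTest objects for the .html/.svg files under the perf tests
        directory that match the requested paths (all tests when no paths are
        given), skipping tests in the port's Skipped list unless --force is used."""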
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warn('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=self._options.test_runner_count)
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
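        """Check the build, then collect and run the tests once per --repeat,
        generating the results JSON/HTML page and optionally uploading the JSON
        to the results server. Returns the number of failed tests, or a negative
        EXIT_CODE_* constant on error."""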
        needs_http = self._port.requires_http_server()

        class FakePrinter(object):
            def write_update(self, msg):
                print msg

            def write_throttled_update(self, msg):
                pass

        if self._port.check_build(needs_http=needs_http, printer=FakePrinter()):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        run_count = 0
        repeat = self._options.repeat
        while (run_count < repeat):
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            try:
                if needs_http:
                    self._start_http_servers()
                unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            finally:
                if needs_http:
                    self._stop_http_servers()

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'

    def _generate_results(self):
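        """Write the results JSON file (merged with any existing results unless
        --reset-results) and the accompanying HTML results page. Returns a
        negative EXIT_CODE_* constant on failure, None on success."""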
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
        results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
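        """Build the results dictionary: per-repository revision information and
        build metadata at the top level, plus a nested 'tests' tree whose leaves
        carry a 'metrics' dictionary of {metric name: {'current': iteration values}}."""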
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            for metric_name, iteration_values in metrics.iteritems():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {'current': iteration_values}
                    else:
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
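        """Fold the bot's slave configuration JSON into the results dictionary as
        'builder<Key>' entries. Returns the updated dictionary, or None if the
        file is missing or cannot be parsed."""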
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None

    def _merge_outputs_if_needed(self, output_json_path, output):
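        """Return the new results wrapped in a list, appended to any results already
        stored in the output JSON file unless --reset-results was given. Returns
        None if the existing file cannot be parsed."""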
        if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception, error:
            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
        return None

    def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
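        """Upload the JSON file to https://<test_results_server><host_path> and
        verify that the server responded with either a bare 'OK' or a JSON body
        whose 'status' is 'OK'. Returns True on success, False otherwise."""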
        url = "https://%s%s" % (test_results_server, host_path)
        uploader = file_uploader(url, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file to %s in 120s: %s" % (url, error))
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            try:
                parsed_response = json.loads('\n'.join(response_body))
            except:
                _log.error("Uploaded JSON to %s but got a bad response:" % url)
                for line in response_body:
                    _log.error(line)
                return False
            if parsed_response.get('status') != 'OK':
                _log.error("Uploaded JSON to %s but got an error:" % url)
                _log.error(json.dumps(parsed_response, indent=4))
                return False

        _log.info("JSON file uploaded to %s." % url)
        return True

    def _run_tests_set(self, tests):
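        """Run each test in order, collecting (test, metrics) pairs on self._results
        for tests that produced metrics. Returns the number of failed tests."""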
        result_count = len(tests)
        failures = 0
        self._results = []

        for i, test in enumerate(tests):
            _log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
            start_time = time.time()
            metrics = test.run(self._options.time_out_ms)
            if metrics:
                self._results.append((test, metrics))
            else:
                failures += 1
                _log.error('FAILED')

            _log.info('Finished: %f s' % (time.time() - start_time))
            _log.info('')

        return failures
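
# Illustrative usage sketch -- an assumption added for documentation, not part of
# the original module (in a WebKit checkout a wrapper script normally drives this
# runner). It simply forwards the command-line flags defined in _parse_args above.
if __name__ == '__main__':
    import sys
    runner = PerfTestsRunner(args=sys.argv[1:])
    # run() returns the number of failed tests, or a negative EXIT_CODE_* constant.
    sys.exit(runner.run())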