# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
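"""Command-line front end for running WebKit layout tests.

This module parses the run-webkit-tests command line, builds the appropriate
Port object, hands the actual test run off to Manager, and prints results in
a buildbot-friendly format.
"""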

import logging
import optparse
import os
import sys
import traceback

from webkitpy.common.host import Host
from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import configuration_options, platform_options
from webkitpy.layout_tests.views import buildbot_results
from webkitpy.layout_tests.views import printing


_log = logging.getLogger(__name__)


def main(argv, stdout, stderr):
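    """Runs the layout tests and returns an exit code from test_run_results.

    argv is the command line (without the program name); stdout receives the
    buildbot-style result summary, and stderr receives error messages and
    run-time logging.
    """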
    options, args = parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    if options.lint_test_files:
        from webkitpy.layout_tests.lint_test_expectations import run_checks
        return run_checks(host, options, stderr)

    try:
        port = host.port_factory.get(options.platform, options)
    except NotImplementedError as e:
        # FIXME: is this the best way to handle unsupported port names?
        print >> stderr, str(e)
        return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS

    try:
        run_details = run(port, options, args, stderr)
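        # Print buildbot-style results only for runs that produced usable results:
        # skip hard errors (other than an intentional early exit) and runs that
        # the user interrupted.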
        if ((run_details.exit_code not in test_run_results.ERROR_CODES or
             run_details.exit_code == test_run_results.EARLY_EXIT_STATUS) and
            not run_details.initial_results.keyboard_interrupted):
            bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
            bot_printer.print_results(run_details)

        return run_details.exit_code
    # We still need to handle KeyboardInterrupt, at least for webkitpy unittest cases.
    except KeyboardInterrupt:
        return test_run_results.INTERRUPTED_EXIT_STATUS
    except test_run_results.TestRunException as e:
        print >> stderr, e.msg
        return e.code
    except BaseException as e:
        if isinstance(e, Exception):
            print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
            traceback.print_exc(file=stderr)
        return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS


def parse_args(args):
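    """Builds the optparse option groups and parses args.

    Returns the (options, args) tuple produced by optparse.
    """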
    option_group_definitions = []

    option_group_definitions.append(("Platform Options", platform_options()))
    option_group_definitions.append(("Configuration Options", configuration_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    option_group_definitions.append(("Android-specific Options", [
        optparse.make_option("--adb-device",
            action="append", default=[],
            help="Run Android layout tests on these devices."),

        # FIXME: Flip this to be off by default once we can log the device setup more cleanly.
        optparse.make_option("--no-android-logging",
            action="store_false", dest='android_logging', default=True,
            help="Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging)."),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save generated results as new baselines "
                 "into the *most-specific-platform* directory, overwriting whatever's "
                 "already there. Equivalent to --reset-results --add-platform-exceptions"),
        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset expectations to the "
                 "generated results in their existing location."),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),

        # FIXME: we should support a comma-separated list with --pixel-test-directory as well.
        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
            help="A directory in which tests are allowed to run as pixel tests. "
                 "Specify multiple times to add multiple directories. "
                 "This option implies --pixel-tests. If specified, only tests located "
                 "in one of the listed directories will be executed as pixel tests. "
                 "Some ports may ignore this option while others can have a default "
                 "value that can be overridden here."),

        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                 "Note: When using this option, you might miss new crashes "
                 "in these tests."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to the driver. "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative driver binary to use"),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory in which to search for test "
                 "baselines (will take precedence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                 "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--compare-port", action="store", default=None,
            help="Use the specified port's baselines first"),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                 "are done"),
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
        optparse.make_option("--smoke", action="store_true",
            help="Run just the SmokeTests"),
        optparse.make_option("--no-smoke", dest="smoke", action="store_false",
            help="Do not run just the SmokeTests"),
    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the build is up-to-date (default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option("--wrapper",
            help="Wrapper command to insert before invocations of "
                 "the driver; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="Directories or tests to ignore (may specify multiple times)"),
        optparse.make_option("--ignore-flaky-tests", action="store",
            help=("Control whether tests that are flaky on the bots get ignored. "
                "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
        optparse.make_option("--ignore-builder-category", action="store",
            help=("The category of builders to use with the --ignore-flaky-tests "
                "option ('layout' or 'deps').")),
        optparse.make_option("--test-list", action="append",
            help="Read the list of tests to run from a file", metavar="FILE"),
        optparse.make_option("--skipped", action="store", default=None,
            help=("Control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        optparse.make_option("--order", action="store", default="natural",
            help=("Determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or the test list, "
                  "'natural' == use the natural order (default), "
                  "'random-seeded' == randomize the test order using a fixed seed, "
                  "'random' == randomize the test order.")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth chunk of length l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        optparse.make_option("--batch-size",
            help=("Run the tests in batches of n; after every n tests, "
                  "the driver is relaunched."), type="int", default=None),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="DEPRECATED, same as --batch-size=1 --verbose"),
        optparse.make_option("--child-processes",
            help="Number of drivers to run in parallel."),
        # FIXME: Display the default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="Run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes or timeouts instead of "
            "running all tests"),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            help="Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),

        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Pass the given environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
        optparse.make_option("--driver-logging", action="store_true",
            help="Print detailed logging of the driver/content_shell"),
        optparse.make_option("--disable-breakpad", action="store_true",
            help="Don't use breakpad to symbolize unexpected crashes."),
        optparse.make_option("--use-apache", action="store_true",
            help="Use Apache instead of lighttpd (default is port-specific)."),
        optparse.make_option("--no-use-apache", action="store_false", dest="use_apache",
            help="Use lighttpd instead of Apache (default is port-specific)."),
        optparse.make_option("--enable-leak-detection", action="store_true",
            help="Enable leak detection of DOM objects."),
        optparse.make_option("--enable-sanitizer", action="store_true",
            help="Only alert on sanitizer-related errors and crashes"),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files", action="store_true",
            default=False, help=("Makes sure the test files parse for all "
                                 "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--builder-name", default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

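    # Build the parser from the grouped definitions so --help output stays
    # organized by option group.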
    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)


def _set_up_derived_options(port, options, args):
    """Sets option values that depend on other option values."""
    if not options.child_processes:
        options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
                                                 str(port.default_child_processes()))
    if not options.max_locked_shards:
        options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
                                                       str(port.default_max_locked_shards())))

    if not options.configuration:
        options.configuration = port.default_configuration()

    if options.pixel_tests is None:
        options.pixel_tests = port.default_pixel_tests()

    if not options.time_out_ms:
        options.time_out_ms = str(port.default_timeout_ms())

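    # The timeout used for tests expected to be slow is five times the regular
    # per-test timeout.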
    options.slow_time_out_ms = str(5 * int(options.time_out_ms))

    if options.additional_platform_directory:
        additional_platform_directories = []
        for path in options.additional_platform_directory:
            additional_platform_directories.append(port.host.filesystem.abspath(path))
        options.additional_platform_directory = additional_platform_directories

    if options.new_baseline:
        options.reset_results = True
        options.add_platform_exceptions = True

    if options.pixel_test_directories:
        options.pixel_tests = True
        verified_dirs = set()
        pixel_test_directories = options.pixel_test_directories
        for directory in pixel_test_directories:
            # FIXME: we should support specifying the directories all the ways we support it for additional
            # arguments specifying which tests and directories to run. We should also move the logic for that
            # to Port.
            filesystem = port.host.filesystem
            if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
                _log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
            else:
                verified_dirs.add(directory)

        options.pixel_test_directories = list(verified_dirs)

    if options.run_singly:
        options.batch_size = 1
        options.verbose = True

    if not args and not options.test_list and options.smoke is None:
        options.smoke = port.default_smoke_test_only()
    if options.smoke:
        if not args and not options.test_list and options.retry_failures is None:
            # Retry failures by default if we're doing just a smoke test (no additional tests).
            options.retry_failures = True

        if not options.test_list:
            options.test_list = []
        options.test_list.append(port.host.filesystem.join(port.layout_tests_dir(), 'SmokeTests'))
        if not options.skipped:
            options.skipped = 'always'

    if not options.skipped:
        options.skipped = 'default'


def run(port, options, args, logging_stream):
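    """Sets up logging, runs the tests via Manager, and returns the run details."""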
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)

    try:
        printer = printing.Printer(port, options, logging_stream, logger=logger)

        _set_up_derived_options(port, options, args)
        manager = Manager(port, options, printer)
        printer.print_config(port.results_directory())

        run_details = manager.run(args)
        _log.debug("Testing completed. Exit status: %d" % run_details.exit_code)
        return run_details
    finally:
        printer.cleanup()


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))