# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import logging
import optparse
import os
import sys
import traceback

from webkitpy.common.host import Host
from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import configuration_options, platform_options
from webkitpy.layout_tests.views import buildbot_results
from webkitpy.layout_tests.views import printing


_log = logging.getLogger(__name__)


def main(argv, stdout, stderr):
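    """Run the layout tests described by argv and return an exit status.

    Parses the command line, builds the appropriate Port, and delegates to
    run(). A buildbot-style summary is printed to stdout, while errors and
    log output go to stderr.
    """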
    options, args = parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    if options.lint_test_files:
        from webkitpy.layout_tests.lint_test_expectations import lint
        return lint(host, options, stderr)

    try:
        port = host.port_factory.get(options.platform, options)
    except NotImplementedError as e:
        # FIXME: is this the best way to handle unsupported port names?
        print >> stderr, str(e)
        return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS

    try:
        run_details = run(port, options, args, stderr)
        if run_details.exit_code not in test_run_results.ERROR_CODES and not run_details.initial_results.keyboard_interrupted:
            bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
            bot_printer.print_results(run_details)

        return run_details.exit_code
    # We still need to handle KeyboardInterrupt, at least for the webkitpy unittest cases.
    except KeyboardInterrupt:
        return test_run_results.INTERRUPTED_EXIT_STATUS
    except test_run_results.TestRunException as e:
        print >> stderr, e.msg
        return e.code
    except BaseException as e:
        if isinstance(e, Exception):
            print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
            traceback.print_exc(file=stderr)
        return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS


def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(("Configuration options", configuration_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    option_group_definitions.append(("Android-specific Options", [
        optparse.make_option("--adb-device",
            action="append", default=[],
            help="Run Android layout tests on these devices."),

        # FIXME: Flip this to be off by default once we can log the device setup more cleanly.
        optparse.make_option("--no-android-logging",
            action="store_false", dest='android_logging', default=True,
            help="Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging)"),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"),
        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset expectations to the "
                "generated results in their existing location."),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),

        # FIXME: we should support a comma separated list with --pixel-test-directory as well.
        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
            help="A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."),

        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to the driver. "
                "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative driver binary to use"),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory in which to look for test "
                "baselines (will take precedence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--compare-port", action="store", default=None,
            help="Use the specified port's baselines first"),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                "are done"),
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
        optparse.make_option("--smoke", action="store_true",
            help="Run just the SmokeTests"),
        optparse.make_option("--no-smoke", dest="smoke", action="store_false",
            help="Do not run just the SmokeTests"),
    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the build is up-to-date (default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                "the driver; option is split on whitespace before "
                "running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or tests to ignore (may specify multiple times)"),
        optparse.make_option("--ignore-flaky-tests", action="store",
            help=("Control whether tests that are flaky on the bots get ignored. "
                  "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                  "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                  "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
        optparse.make_option("--ignore-builder-category", action="store",
            help=("The category of builders to use with the --ignore-flaky-tests "
                  "option ('layout' or 'deps').")),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        optparse.make_option("--skipped", action="store", default=None,
            help=("control how tests marked SKIP are run. "
                  "'default' == Skip tests unless explicitly listed on the command line, "
                  "'ignore' == Run them anyway, "
                  "'only' == only run the SKIP tests, "
                  "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        optparse.make_option("--order", action="store", default="natural",
            help=("determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or test list, "
                  "'natural' == use the natural order (default), "
                  "'random-seeded' == randomize the test order using a fixed seed, "
                  "'random' == randomize the test order.")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth chunk of length l, "
                  "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
            "the nth of m parts, of the layout tests")),
        optparse.make_option("--batch-size",
            help=("Run the tests in batches (n); after every n tests, "
                  "the driver is relaunched."), type="int", default=None),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="DEPRECATED, same as --batch-size=1 --verbose"),
        optparse.make_option("--child-processes",
            help="Number of drivers to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
                "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes (or timeouts) instead of "
                "running all tests"),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            help="Re-try any tests that produce unexpected results. Default is to not retry "
                "if an explicit list of tests is passed to run-webkit-tests."),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),

        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Passes the given environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
        optparse.make_option("--driver-logging", action="store_true",
            help="Print detailed logging of the driver/content_shell"),
        optparse.make_option("--disable-breakpad", action="store_true",
            help="Don't use breakpad to symbolize unexpected crashes."),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files", action="store_true",
            default=False, help=("Makes sure the test files parse for all "
                "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--builder-name", default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script, e.g. WebKit.")),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)


def _set_up_derived_options(port, options, args):
    """Sets the options values that depend on other options values."""
    if not options.child_processes:
        options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
                                                 str(port.default_child_processes()))
    if not options.max_locked_shards:
        options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
                                                       str(port.default_max_locked_shards())))

    if not options.configuration:
        options.configuration = port.default_configuration()

    if options.pixel_tests is None:
        options.pixel_tests = port.default_pixel_tests()

    if not options.time_out_ms:
        options.time_out_ms = str(port.default_timeout_ms())

    options.slow_time_out_ms = str(5 * int(options.time_out_ms))

    if options.additional_platform_directory:
        additional_platform_directories = []
        for path in options.additional_platform_directory:
            additional_platform_directories.append(port.host.filesystem.abspath(path))
        options.additional_platform_directory = additional_platform_directories

    if options.new_baseline:
        options.reset_results = True
        options.add_platform_exceptions = True

    if options.pixel_test_directories:
        options.pixel_tests = True
        verified_dirs = set()
        pixel_test_directories = options.pixel_test_directories
        for directory in pixel_test_directories:
            # FIXME: we should support specifying the directories all the ways we support it for additional
            # arguments specifying which tests and directories to run. We should also move the logic for that
            # to Port.
            filesystem = port.host.filesystem
            if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
                _log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
            else:
                verified_dirs.add(directory)

        options.pixel_test_directories = list(verified_dirs)

    if options.run_singly:
        options.batch_size = 1
        options.verbose = True

    if not args and not options.test_list and options.smoke is None:
        options.smoke = port.default_smoke_test_only()
    if options.smoke:
        if not args and not options.test_list and options.retry_failures is None:
            # Retry failures by default if we're doing just a smoke test (no additional tests).
            options.retry_failures = True

        if not options.test_list:
            options.test_list = []
        options.test_list.append(port.host.filesystem.join(port.layout_tests_dir(), 'SmokeTests'))
        if not options.skipped:
            options.skipped = 'always'

    if not options.skipped:
        options.skipped = 'default'


def run(port, options, args, logging_stream):
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)

    try:
        printer = printing.Printer(port, options, logging_stream, logger=logger)

        _set_up_derived_options(port, options, args)
        manager = Manager(port, options, printer)
        printer.print_config(port.results_directory())

        run_details = manager.run(args)
        _log.debug("Testing completed, Exit status: %d" % run_details.exit_code)
        return run_details
    finally:
        printer.cleanup()


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))