1# Copyright 2012 the V8 project authors. All rights reserved. 2# Redistribution and use in source and binary forms, with or without 3# modification, are permitted provided that the following conditions are 4# met: 5# 6# * Redistributions of source code must retain the above copyright 7# notice, this list of conditions and the following disclaimer. 8# * Redistributions in binary form must reproduce the above 9# copyright notice, this list of conditions and the following 10# disclaimer in the documentation and/or other materials provided 11# with the distribution. 12# * Neither the name of Google Inc. nor the names of its 13# contributors may be used to endorse or promote products derived 14# from this software without specific prior written permission. 15# 16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import copy
import os
import re
import shlex

from testrunner.outproc import base as outproc
from testrunner.local import command
from testrunner.local import statusfile
from testrunner.local import utils
from testrunner.local.variants import ALL_VARIANT_FLAGS
from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_VARIANT
from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE
from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG


# Matches "// Flags: ..." annotations in test sources; group 1 captures the
# flags string to be split and passed to the shell.
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")

# Patterns for additional resource files on Android. Files that are not covered
# by one of the other patterns below will be specified in the resources section.
RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
# Pattern to auto-detect files to push on Android for statements like:
# load("path/to/file.js")
# d8.file.execute("path/to/file.js")
LOAD_PATTERN = re.compile(
    r"(?:execute|load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
# Pattern to auto-detect files to push on Android for statements like:
# import foobar from "path/to/file.js"
# import {foo, bar} from "path/to/file.js"
# export {"foo" as "bar"} from "path/to/file.js"
MODULE_FROM_RESOURCES_PATTERN = re.compile(
    r"(?:import|export).*?from\s*\(?['\"]([^'\"]+)['\"]",
    re.MULTILINE | re.DOTALL)
# Pattern to detect files to push on Android for statements like:
# import "path/to/file.js"
# import("module.mjs").catch()...
MODULE_IMPORT_RESOURCES_PATTERN = re.compile(
    r"import\s*\(?['\"]([^'\"]+)['\"]",
    re.MULTILINE | re.DOTALL)
# Pattern to detect and strip test262 frontmatter from tests to prevent false
# positives for MODULE_FROM_RESOURCES_PATTERN and
# MODULE_IMPORT_RESOURCES_PATTERN above.
TEST262_FRONTMATTER_PATTERN = re.compile(r"/\*---.*?---\*/", re.DOTALL)

TIMEOUT_LONG = "long"

def read_file(file):
  """Returns the full content of |file| decoded as ISO-8859-1.

  Latin-1 maps every byte to a code point, so reading never raises a decode
  error even on files containing stray binary content.
  """
  with open(file, encoding='ISO-8859-1') as f:
    return f.read()

class TestCase(object):
  """A single test with its expected outcomes and the command to run it.

  Suite-specific subclasses customize behavior by overriding the partial
  _get_* methods (e.g. _get_suite_flags, _get_source_path).
  """

  def __init__(self, suite, path, name, test_config):
    self.suite = suite  # TestSuite object

    self.path = path  # string, e.g. 'div-mod', 'test-api/foo'
    self.name = name  # string that identifies test in the status file

    self.variant = None  # name of the used testing variant
    self.variant_flags = []  # list of strings, flags specific to this test

    # Fields used by the test processors.
    self.origin = None  # Test that this test is subtest of.
    self.processor = None  # Processor that created this subtest.
    self.procid = '%s/%s' % (self.suite.name, self.name)  # unique id
    self.keep_output = False  # Can output of this test be dropped

    # Test config contains information needed to build the command.
    self._test_config = test_config
    self._random_seed = None  # Overrides test config value if not None

    # Outcomes
    self._statusfile_outcomes = None
    self._expected_outcomes = None
    self._checked_flag_contradictions = False
    self._statusfile_flags = None
    self.expected_failure_reason = None

    self._prepare_outcomes()

  def create_subtest(self, processor, subtest_id, variant=None, flags=None,
                     keep_output=False, random_seed=None):
    """Returns a shallow copy of this test customized by a processor.

    The copy references this test via |origin| and extends the unique
    |procid| with |subtest_id|. When a variant is assigned, outcomes are
    re-read since status-file entries can be variant-specific.
    """
    subtest = copy.copy(self)
    subtest.origin = self
    subtest.processor = processor
    subtest.procid += '.%s' % subtest_id
    subtest.keep_output |= keep_output
    if random_seed:
      subtest._random_seed = random_seed
    if flags:
      # Copy-on-write: the original's list must not be mutated.
      subtest.variant_flags = subtest.variant_flags + flags
    if variant is not None:
      assert self.variant is None
      subtest.variant = variant
      subtest._prepare_outcomes()
    return subtest

  def _prepare_outcomes(self, force_update=True):
    """Reads status-file entries, splitting them into flags and outcomes."""
    if force_update or self._statusfile_outcomes is None:
      def is_flag(outcome):
        return outcome.startswith('--')
      def not_flag(outcome):
        return not is_flag(outcome)

      outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
      self._statusfile_outcomes = list(filter(not_flag, outcomes))
      self._statusfile_flags = list(filter(is_flag, outcomes))
    self._expected_outcomes = (
        self._parse_status_file_outcomes(self._statusfile_outcomes))

  def _parse_status_file_outcomes(self, outcomes):
    """Maps status-file outcomes to the test's expected-outcome list."""
    if (statusfile.FAIL_SLOPPY in outcomes and
        '--use-strict' not in self.variant_flags):
      return outproc.OUTCOMES_FAIL

    expected_outcomes = []
    if (statusfile.FAIL in outcomes or
        statusfile.FAIL_OK in outcomes):
      expected_outcomes.append(statusfile.FAIL)
    if statusfile.CRASH in outcomes:
      expected_outcomes.append(statusfile.CRASH)

    # Do not add PASS if there is nothing else. Empty outcomes are converted to
    # the global [PASS].
    if expected_outcomes and statusfile.PASS in outcomes:
      expected_outcomes.append(statusfile.PASS)

    # Avoid creating multiple instances of a list with a single FAIL.
    if expected_outcomes == outproc.OUTCOMES_FAIL:
      return outproc.OUTCOMES_FAIL
    return expected_outcomes or outproc.OUTCOMES_PASS

  def allow_timeouts(self):
    """Additionally allows TIMEOUT as an expected outcome."""
    if self.expected_outcomes == outproc.OUTCOMES_PASS:
      self._expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
    elif self.expected_outcomes == outproc.OUTCOMES_FAIL:
      self._expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
    elif statusfile.TIMEOUT not in self.expected_outcomes:
      self._expected_outcomes = (
          self.expected_outcomes + [statusfile.TIMEOUT])

  def allow_pass(self):
    """Additionally allows PASS as an expected outcome."""
    if self.expected_outcomes == outproc.OUTCOMES_TIMEOUT:
      self._expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
    elif self.expected_outcomes == outproc.OUTCOMES_FAIL:
      self._expected_outcomes = outproc.OUTCOMES_FAIL_OR_PASS
    elif statusfile.PASS not in self.expected_outcomes:
      self._expected_outcomes = (
          self.expected_outcomes + [statusfile.PASS])

  @property
  def expected_outcomes(self):
    """Returns the expected outcomes, lazily checking (once) whether the
    combined flags of this test contradict each other or the build/variant.

    On a detected contradiction, outcomes are forced to FAIL and
    |expected_failure_reason| records the violated rule.
    """
    def is_flag(maybe_flag):
      return maybe_flag.startswith("--")  # Best-effort heuristic.

    # Filter to flags, e.g.: ["--foo", "3", "--bar"] -> ["--foo", "--bar"].
    def filter_flags(normalized_flags):
      return [f for f in normalized_flags if is_flag(f)]

    def normalize_flag(flag):
      return flag.replace("_", "-").replace("--no-", "--no")

    def normalize_flags(flags):
      return [normalize_flag(flag) for flag in filter_flags(flags)]

    # Note this can get it wrong if the flag name starts with the characters
    # "--no" where "no" is part of the flag name, e.g. "--nobodys-perfect".
    # In that case the negation "--bodys-perfect" would be returned. This is
    # a weakness we accept and hope to never run into.
    def negate_flag(normalized_flag):
      return ("--" + normalized_flag[4:] if normalized_flag.startswith("--no")
              else "--no" + normalized_flag[2:])

    def negate_flags(normalized_flags):
      return [negate_flag(flag) for flag in normalized_flags]

    def has_flag(conflicting_flag, flags):
      # A trailing "*" in the conflicting flag acts as a prefix wildcard.
      conflicting_flag = normalize_flag(conflicting_flag)
      if conflicting_flag in flags:
        return True
      if conflicting_flag.endswith("*"):
        return any(flag.startswith(conflicting_flag[:-1]) for flag in flags)
      return False

    def check_flags(incompatible_flags, actual_flags, rule):
      for incompatible_flag in incompatible_flags:
        if has_flag(incompatible_flag, actual_flags):
          self._statusfile_outcomes = outproc.OUTCOMES_FAIL
          self._expected_outcomes = outproc.OUTCOMES_FAIL
          self.expected_failure_reason = ("Rule " + rule + " in " +
              "tools/testrunner/local/variants.py expected a flag " +
              "contradiction error with " + incompatible_flag + ".")

    if not self._checked_flag_contradictions:
      self._checked_flag_contradictions = True

      file_specific_flags = (self._get_source_flags() + self._get_suite_flags()
                             + self._get_statusfile_flags())
      file_specific_flags = normalize_flags(file_specific_flags)
      extra_flags = normalize_flags(self._get_extra_flags())

      # Contradiction: flags contains both a flag --foo and its negation
      # --no-foo.
      if self.variant in ALL_VARIANT_FLAGS:
        for flags in ALL_VARIANT_FLAGS[self.variant]:
          all_flags = (file_specific_flags + extra_flags
                       + normalize_flags(flags))
          check_flags(negate_flags(all_flags), all_flags, "Flag negations")

      # Contradiction: flags specified through the "Flags:" annotation are
      # incompatible with the variant.
      if self.variant in INCOMPATIBLE_FLAGS_PER_VARIANT:
        check_flags(INCOMPATIBLE_FLAGS_PER_VARIANT[self.variant],
                    file_specific_flags,
                    "INCOMPATIBLE_FLAGS_PER_VARIANT[\""+self.variant+"\"]")

      # Contradiction: flags specified through the "Flags:" annotation are
      # incompatible with the build.
      for variable, incompatible_flags in (
          INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE.items()):
        if self.suite.statusfile.variables[variable]:
          check_flags(incompatible_flags, file_specific_flags,
                      "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\""+variable+"\"]")

      # Contradiction: flags passed through --extra-flags are incompatible.
      for extra_flag, incompatible_flags in (
          INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG.items()):
        if has_flag(extra_flag, extra_flags):
          check_flags(incompatible_flags, file_specific_flags,
                      "INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG[\""+extra_flag+"\"]")
    return self._expected_outcomes

  @property
  def do_skip(self):
    return (statusfile.SKIP in self._statusfile_outcomes and
            not self.suite.test_config.run_skipped)

  @property
  def is_heavy(self):
    return statusfile.HEAVY in self._statusfile_outcomes

  @property
  def is_slow(self):
    # Heavy tests are implicitly treated as slow as well.
    return self.is_heavy or statusfile.SLOW in self._statusfile_outcomes

  @property
  def is_fail_ok(self):
    return statusfile.FAIL_OK in self._statusfile_outcomes

  @property
  def is_pass_or_fail(self):
    return (statusfile.PASS in self._statusfile_outcomes and
            statusfile.FAIL in self._statusfile_outcomes and
            statusfile.CRASH not in self._statusfile_outcomes)

  @property
  def is_fail(self):
    return (statusfile.FAIL in self._statusfile_outcomes and
            statusfile.PASS not in self._statusfile_outcomes)

  @property
  def only_standard_variant(self):
    return statusfile.NO_VARIANTS in self._statusfile_outcomes

  def get_command(self):
    """Assembles and returns the Command object used to run this test."""
    params = self._get_cmd_params()
    env = self._get_cmd_env()
    shell = self.get_shell()
    if utils.IsWindows():
      shell += '.exe'
    shell_flags = self._get_shell_flags()
    timeout = self._get_timeout(params)
    return self._create_cmd(shell, shell_flags + params, env, timeout)

  def _get_cmd_params(self):
    """Gets command parameters and combines them in the following order:
      - files [empty by default]
      - random seed
      - mode flags (based on chosen mode)
      - extra flags (from command line)
      - user flags (variant/fuzzer flags)
      - source flags (from source code) [empty by default]
      - test-suite flags
      - statusfile flags

    The best way to modify how parameters are created is to only override
    methods for getting partial parameters.
    """
    return (
        self._get_files_params() +
        self._get_random_seed_flags() +
        self._get_mode_flags() +
        self._get_extra_flags() +
        self._get_variant_flags() +
        self._get_source_flags() +
        self._get_suite_flags() +
        self._get_statusfile_flags()
    )

  def _get_cmd_env(self):
    return {}

  def _get_files_params(self):
    return []

  def _get_timeout_param(self):
    return None

  def _get_random_seed_flags(self):
    return ['--random-seed=%d' % self.random_seed]

  @property
  def random_seed(self):
    # A per-subtest seed (if set) overrides the test-config-wide seed.
    return self._random_seed or self._test_config.random_seed

  def _get_extra_flags(self):
    return self._test_config.extra_flags

  def _get_variant_flags(self):
    return self.variant_flags

  def _get_statusfile_flags(self):
    """Gets runtime flags from a status file.

    Every outcome that starts with "--" is a flag.
    """
    return self._statusfile_flags

  def _get_mode_flags(self):
    return self._test_config.mode_flags

  def _get_source_flags(self):
    return []

  def _get_suite_flags(self):
    return []

  def _get_shell_flags(self):
    return []

  def _get_timeout(self, params):
    """Returns the timeout in seconds, scaled for slow configurations."""
    timeout = self._test_config.timeout
    if "--stress-opt" in params:
      timeout *= 4
    if "--jitless" in params:
      timeout *= 2
    if "--no-opt" in params:
      timeout *= 2
    if "--noenable-vfp3" in params:
      timeout *= 2
    if self._get_timeout_param() == TIMEOUT_LONG:
      timeout *= 10
    if self.is_slow:
      timeout *= 4
    return timeout

  def get_shell(self):
    raise NotImplementedError()

  def _get_suffix(self):
    return '.js'

  def _create_cmd(self, shell, params, env, timeout):
    return command.Command(
        cmd_prefix=self._test_config.command_prefix,
        shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
        args=params,
        env=env,
        timeout=timeout,
        verbose=self._test_config.verbose,
        resources_func=self._get_resources,
        handle_sigterm=True,
    )

  def _parse_source_flags(self, source=None):
    """Extracts flags from "// Flags:" annotations in the test source."""
    source = source or self.get_source()
    flags = []
    for match in re.findall(FLAGS_PATTERN, source):
      flags += shlex.split(match.strip())
    return flags

  def is_source_available(self):
    return self._get_source_path() is not None

  def get_source(self):
    return read_file(self._get_source_path())

  def _get_source_path(self):
    return None

  def _get_resources(self):
    """Returns a list of absolute paths with additional files needed by the
    test case.

    Used to push additional files to Android devices.
    """
    return []

  def skip_predictable(self):
    """Returns True if the test case is not suitable for predictable testing."""
    return True

  @property
  def output_proc(self):
    if self.expected_outcomes is outproc.OUTCOMES_PASS:
      return outproc.DEFAULT
    return outproc.OutProc(self.expected_outcomes)

  def __cmp__(self, other):
    # Make sure that test cases are sorted correctly if sorted without
    # key function. But using a key function is preferred for speed.
    # NOTE(review): __cmp__ is not invoked by Python 3 sorting; kept for the
    # explicit key-function path documented above.
    def cmp(x, y):
      return (x > y) - (x < y)
    return cmp(
        (self.suite.name, self.name, self.variant),
        (other.suite.name, other.name, other.variant)
    )

  def __str__(self):
    return self.suite.name + '/' + self.name


class D8TestCase(TestCase):
  """A test executed by the d8 shell."""

  def get_shell(self):
    return "d8"

  def _get_shell_flags(self):
    return ['--test']

  def _get_resources_for_file(self, file):
    """Returns for a given file a list of absolute paths of files needed by the
    given file.
    """
    source = read_file(file)
    result = []
    def add_path(path):
      result.append(os.path.abspath(path.replace('/', os.path.sep)))
    def add_import_path(import_path):
      add_path(os.path.normpath(
          os.path.join(os.path.dirname(file), import_path)))
    def strip_test262_frontmatter(text):
      return TEST262_FRONTMATTER_PATTERN.sub('', text)
    for match in RESOURCES_PATTERN.finditer(source):
      # There are several resources per line. Relative to base dir.
      for path in match.group(1).strip().split():
        add_path(path)
    # Strip test262 frontmatter before looking for load() and import/export
    # statements.
    source = strip_test262_frontmatter(source)
    for match in LOAD_PATTERN.finditer(source):
      # Files in load statements are relative to base dir.
      add_path(match.group(1))
    # Imported files are relative to the file importing them.
    for match in MODULE_FROM_RESOURCES_PATTERN.finditer(source):
      add_import_path(match.group(1))
    for match in MODULE_IMPORT_RESOURCES_PATTERN.finditer(source):
      add_import_path(match.group(1))
    return result

  def _get_resources(self):
    """Returns the list of files needed by a test case."""
    if not self._get_source_path():
      return []
    result = set()
    to_check = [self._get_source_path()]
    # Recurse over all files until reaching a fixpoint.
    while to_check:
      next_resource = to_check.pop()
      result.add(next_resource)
      for resource in self._get_resources_for_file(next_resource):
        # Only add files that exist on disk. The patterns we check for give
        # some false positives otherwise.
        if resource not in result and os.path.exists(resource):
          to_check.append(resource)
    return sorted(result)

  def skip_predictable(self):
    """Returns True if the test case is not suitable for predictable testing."""
    return (statusfile.FAIL in self.expected_outcomes or
            self.output_proc.negative)