Lines Matching +full:- +full:- +full:fail +full:- +full:env +full:- +full:changed

1 #!/usr/bin/env python
92 # ---------------------------------------------
93 # --- P r o g r e s s I n d i c a t o r s ---
94 # ---------------------------------------------
132 # Spawn N-1 threads and then use this thread as the last one.
133 # That way -j1 avoids threading altogether which is a nice fallback
135 for i in range(tasks - 1):
143 # Use a timeout so that signals (ctrl-c) will be processed.
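These two comments describe the scheduling pattern: spawn N-1 worker threads, run the last task loop on the calling thread, and join with a timeout so Ctrl-C is still delivered. A minimal sketch of that pattern, with run_single and tasks as assumed names:

  import threading

  def run_parallel(tasks, run_single):
      threads = []
      # Spawn N-1 threads and use the current thread as the last one,
      # so -j1 avoids threading altogether.
      for _ in range(tasks - 1):
          thread = threading.Thread(target=run_single, args=(True,))
          threads.append(thread)
          thread.start()
      run_single(False)
      for thread in threads:
          # join() with a timeout keeps the main thread interruptible,
          # so signals such as Ctrl-C are processed instead of blocking forever.
          while thread.is_alive():
              thread.join(timeout=1000000)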
176 # See https://smartos.org/bugview/OS-2767
183 case.duration = (datetime.now() - start)
198 self.remaining -= 1
225 print("--- stderr ---")
228 print("--- stdout ---")
232 print("--- %s ---" % PrintCrashed(failed.output.exit_code))
234 print("--- TIMEOUT ---")
259 outcome = 'FAIL'
294 logger.info(' stack: |-')
313 # Print test name as (for example) "parallel/test-assert". Tests that are
317 command = output.command[-1]
322 self.severity = 'fail'
336 self.severity = 'fail'
362 logger.info(' ---')
383 # Print test name as (for example) "parallel/test-assert". Tests that are
387 command = output.command[-1]
436 print("--- %s ---" % PrintCrashed(output.output.exit_code))
438 print("--- TIMEOUT ---")
441 if length and (len(str) > (length - 3)):
442 return str[:(length-3)] + "..."
448 elapsed = time.time() - self.start_time
451 'remaining': (((self.total - self.remaining) * 100) // self.total),
467 …s)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]:…
481 … 'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
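Both status lines above are dict-based %-templates (the first with ANSI color codes, the second plain). A small illustration with made-up values of how the plain template renders:

  status_line = "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s"
  print(status_line % {
      'mins': 1, 'secs': 5, 'remaining': 42,
      'passed': 120, 'failed': 3, 'test': 'parallel/test-assert',
  })
  # [01:05|%  42|+ 120|-   3]: parallel/test-assert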
503 # -------------------------
504 # --- F r a m e w o r k ---
505 # -------------------------
544 def RunCommand(self, command, env):
549 env,
563 # Tests can leave the tty in non-blocking mode. If the test runner
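The comment above is cut off by the match listing; the remedy it alludes to is switching the standard descriptors back to blocking mode before printing. A sketch of that approach on POSIX systems (not taken verbatim from this file):

  import sys

  if sys.platform != 'win32':
      import fcntl
      import os
      for fd in (0, 1, 2):
          # Clear O_NONBLOCK so later writes to the tty cannot fail with EAGAIN.
          flags = fcntl.fcntl(fd, fcntl.F_GETFL)
          fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)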
590 outcome = FAIL
599 # Timed out tests will have exit_code -signal.SIGTERM.
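That comment relies on the subprocess convention that a child killed by signal N reports returncode -N, which is also what the CRASHED formatter further down prints. A small hedged example:

  import signal

  def describe_exit(code):
      # Popen.returncode is -N when the child died from signal N, so a test
      # terminated on timeout shows up as -signal.SIGTERM.
      if code < 0:
          return "CRASHED (Signal: %d)" % -code
      return "exit code %d" % code

  print(describe_exit(-signal.SIGTERM))  # CRASHED (Signal: 15) on POSIX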
626 SEM_INVALID_VALUE = -1
643 # looked at post-mortem, which helps for investigating failures that are
668 # Compute the end time - if the process crosses this limit we
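The truncated comment describes the timeout mechanism: an absolute end time is computed up front and the child is killed once it is crossed. A simplified sketch of that loop, with illustrative names:

  import subprocess
  import time

  def run_with_timeout(args, timeout):
      process = subprocess.Popen(args)
      # Compute the end time; if the process crosses this limit we assume it
      # hung, terminate it, and report the run as timed out.
      end_time = time.time() + timeout if timeout else None
      timed_out = False
      while process.poll() is None:
          if end_time is not None and time.time() >= end_time:
              process.terminate()
              timed_out = True
              break
          time.sleep(0.1)
      return process.wait(), timed_out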
712 def Execute(args, context, timeout=None, env=None, disable_core_files=False, stdin=None):
716 if env is None:
717 env = {}
729 for key, value in env.items():
747 env = env_copy,
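The Execute() fragments above show the environment handling: the caller's env is overlaid onto a copy of os.environ instead of replacing it, so PATH and friends survive. A condensed sketch of that pattern (env_copy is implied by the fragments, not shown in full here):

  import os
  import subprocess

  def execute(args, env=None):
      if env is None:
          env = {}
      # Start from the current environment, then let caller entries override it.
      env_copy = os.environ.copy()
      for key, value in env.items():
          env_copy[key] = value
      return subprocess.call(args, env=env_copy)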
920 # -------------------------------------------
921 # --- T e s t C o n f i g u r a t i o n ---
922 # -------------------------------------------
927 FAIL = 'fail'
945 def Evaluate(self, env, defs):
954 def GetOutcomes(self, env, defs):
955 if self.name in env: return set([env[self.name]])
964 def GetOutcomes(self, env, defs):
966 return defs[self.name].GetOutcomes(env, defs)
978 def Evaluate(self, env, defs):
980 return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
984 inter = self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
988 return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
990 def GetOutcomes(self, env, defs):
992 return self.left.GetOutcomes(env, defs) | self.right.GetOutcomes(env, defs)
994 if self.right.Evaluate(env, defs):
995 return self.left.GetOutcomes(env, defs)
1000 return self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
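These fragments come from the status-file expression classes: Evaluate() combines boolean results with and/or, while GetOutcomes() combines sets of expected outcomes, using union for 'or' and intersection for 'and'. A reduced sketch of that shape (class and operator spellings are assumptions):

  class Variable(object):
      def __init__(self, name):
          self.name = name
      def GetOutcomes(self, env, defs):
          # A name bound in env contributes that binding; otherwise nothing.
          if self.name in env:
              return set([env[self.name]])
          return set()

  class Operation(object):
      def __init__(self, left, op, right):
          self.left, self.op, self.right = left, op, right
      def GetOutcomes(self, env, defs):
          if self.op == '||':
              # 'or': everything either side can produce.
              return self.left.GetOutcomes(env, defs) | self.right.GetOutcomes(env, defs)
          # 'and': only outcomes both sides agree on.
          return self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)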
1025 return self.index < self.length + (length - 1)
1186 def ClassifyTests(self, cases, env):
1187 sections = [ s for s in self.sections if s.condition.Evaluate(env, self.defs) ]
1193 outcomes_list = [ r.GetOutcomes(env, self.defs) for r in matches ]
1225 def GetOutcomes(self, env, defs):
1226 return self.value.GetOutcomes(env, defs)
1240 PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w_.\-/]+)$')
1280 # ---------------
1281 # --- M a i n ---
1282 # ---------------
1290 result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
1292 result.add_option("-v", "--verbose", help="Verbose output",
1294 result.add_option('--logfile', dest='logfile',
1296 result.add_option("-p", "--progress",
1299 result.add_option("--report", help="Print a summary of the tests to be run",
1301 result.add_option("-s", "--suite", help="A test suite",
1303 result.add_option("-t", "--timeout", help="Timeout in seconds",
1305 result.add_option("--arch", help='The architecture to run tests for',
1307 result.add_option("--snapshot", help="Run the tests with snapshot turned on",
1309 result.add_option("--special-command", default=None)
1310 result.add_option("--node-args", dest="node_args", help="Args to pass through to Node",
1312 result.add_option("--expect-fail", dest="expect_fail",
1313 help="Expect test cases to fail", default=False, action="store_true")
1314 result.add_option("--valgrind", help="Run tests through valgrind",
1316 result.add_option("--worker", help="Run parallel tests inside a worker context",
1318 result.add_option("--check-deopts", help="Check tests for permanent deoptimizations",
1320 result.add_option("--cat", help="Print the source of the tests",
1322 result.add_option("--flaky-tests",
1325 result.add_option("--skip-tests",
1326 help="Tests that should not be executed (comma-separated)",
1328 result.add_option("--warn-unused", help="Report unused rules",
1330 result.add_option("-j", help="The number of parallel tasks to run",
1332 result.add_option("-J", help="Run tasks in parallel on all cores",
1334 result.add_option("--time", help="Print timing information after running",
1336 result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
1338 result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
1340 result.add_option("--shell", help="Path to node executable", default=None)
1341 result.add_option("--store-unexpected-output",
1344 result.add_option("--no-store-unexpected-output",
1347 result.add_option("-r", "--run",
1348 …help="Divide the tests in m groups (interleaved) and run tests from group n (--run=n,m with n < m)…
1350 result.add_option('--temp-dir',
1352 result.add_option('--test-root',
1354 result.add_option('--repeat',
1357 result.add_option('--abort-on-timeout',
1360 result.add_option("--type",
1377 print("The run argument must be two comma-separated integers.")
1397 print("Unknown flaky-tests mode %s" % options.flaky_tests)
1406 * %(fail_ok)4d tests are expected to fail that we won't fix
1407 * %(fail)4d tests are expected to fail that we should fix\
1438 path = path[:-3]
1440 path = path[:-4]
1444 if (not value) or (value.find('@') == -1):
1479 return "CRASHED (Signal: %d)" % -code
1483 # default JavaScript test-run, e.g., internet/ requires a network connection,
1491 'js-native-api',
1492 'node-api',
1494 'tick-processor',
1495 'v8-updates'
1503 subsystem_regex = re.compile(r'^[a-zA-Z-]*$')
1505 mapped_args = ["*/test*-%s-*" % arg if check(arg) else arg for arg in args]
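The list comprehension turns a bare subsystem name into a glob over the test directories while leaving explicit paths alone. A small worked example, reusing the regex above as check():

  import re

  subsystem_regex = re.compile(r'^[a-zA-Z-]*$')
  check = subsystem_regex.match
  args = ['fs', 'parallel/test-assert']
  mapped_args = ["*/test*-%s-*" % arg if check(arg) else arg for arg in args]
  print(mapped_args)  # ['*/test*-fs-*', 'parallel/test-assert']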
1516 ssl_ver = Execute([vm, '-p', 'process.versions.openssl'], context).stdout
1533 fh = logging.FileHandler(options.logfile, encoding='utf-8', mode='w')
1547 # Check for --valgrind option. If enabled, we overwrite the special
1548 # command flag with a command that uses the run-valgrind.py script.
1550 run_valgrind = join(workspace, "tools", "run-valgrind.py")
1551 options.special_command = "python -u " + run_valgrind + " @"
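The trailing "@" is the placeholder that the runner later replaces with the actual test command (see the value.find('@') check earlier in this listing). A simplified sketch of that substitution; the real code splits the wrapper with shlex:

  def get_special_command_processor(value):
      # '@' marks where the real test command is spliced into the wrapper.
      if (not value) or (value.find('@') == -1):
          return lambda args: args
      pos = value.find('@')
      prefix = value[:pos].split()
      suffix = value[pos + 1:].split()
      return lambda args: prefix + args + suffix

  wrap = get_special_command_processor("python -u tools/run-valgrind.py @")
  print(wrap(["node", "test/parallel/test-assert.js"]))
  # ['python', '-u', 'tools/run-valgrind.py', 'node', 'test/parallel/test-assert.js']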
1554 options.node_args.append("--trace-opt")
1555 options.node_args.append("--trace-file-names")
1556 # --always-opt is needed because many tests do not run long enough for the
1558 options.node_args.append("--always-opt")
1562 run_worker = join(workspace, "tools", "run-worker.js")
1597 archEngineContext = Execute([vm, "-p", "process.arch"], context)
1603 env = {
1611 cases, unused_rules = config.ClassifyTests(test_list, env)
1622 '-p', 'process.features.inspector'], context)
1627 '-p', 'process.versions.openssl'], context)
1638 print("--- begin source: %s ---" % test.GetLabel())
1641 print("--- end source: %s ---" % test.GetLabel())
1675 'skipped': len(all_cases) - len(cases_to_run),
1677 'fail_ok': len([t for t in cases_to_run if t.outcomes == set([FAIL, OKAY])]),
1678 'fail': len([t for t in cases_to_run if t.outcomes == set([FAIL])])
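The report buckets cases by comparing each expected-outcome set against the constants: exactly {FAIL} counts as a failure that should be fixed, while {FAIL, OKAY} counts as an accepted failure, matching the summary text at lines 1406-1407. A tiny hedged example (OKAY's value is assumed):

  FAIL = 'fail'
  OKAY = 'okay'  # assumed value; only the set comparison matters here

  class Case(object):
      def __init__(self, outcomes):
          self.outcomes = outcomes

  cases_to_run = [Case(set([FAIL])), Case(set([FAIL, OKAY])), Case(set(['pass']))]
  fail = len([t for t in cases_to_run if t.outcomes == set([FAIL])])
  fail_ok = len([t for t in cases_to_run if t.outcomes == set([FAIL, OKAY])])
  print(fail, fail_ok)  # 1 1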
1683 # silent errors if this file is changed to list the tests in a way that
1700 duration = time.time() - start
1709 sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))