1#!/usr/bin/env python 2# 3# Copyright 2008 the V8 project authors. All rights reserved. 4# Redistribution and use in source and binary forms, with or without 5# modification, are permitted provided that the following conditions are 6# met: 7# 8# * Redistributions of source code must retain the above copyright 9# notice, this list of conditions and the following disclaimer. 10# * Redistributions in binary form must reproduce the above 11# copyright notice, this list of conditions and the following 12# disclaimer in the documentation and/or other materials provided 13# with the distribution. 14# * Neither the name of Google Inc. nor the names of its 15# contributors may be used to endorse or promote products derived 16# from this software without specific prior written permission. 17# 18# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import print_function
import logging
import optparse
import os
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
import multiprocessing
import errno
import copy


if sys.version_info >= (3, 5):
  # Python >= 3.5: load testcfg.py modules via importlib.
  from importlib import machinery, util
  def get_module(name, path):
    """Load and return module `name` found in directory `path`."""
    loader_details = (machinery.SourceFileLoader, machinery.SOURCE_SUFFIXES)
    spec = machinery.FileFinder(path, loader_details).find_spec(name)
    module = util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
else:
  # Legacy path: the deprecated imp module for Python 2 / early Python 3.
  import imp
  def get_module(name, path):
    """Load and return module `name` found in directory `path`."""
    file = None
    try:
      (file, pathname, description) = imp.find_module(name, [path])
      return imp.load_module(name, file, pathname, description)
    finally:
      if file:
        file.close()


from io import open
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
try:
  from queue import Queue, Empty   # Python 3
except ImportError:
  from Queue import Queue, Empty   # Python 2

from functools import reduce

try:
  from urllib.parse import unquote # Python 3
except ImportError:
  from urllib import unquote       # Python 2


logger = logging.getLogger('testrunner')
# Matches a "# SKIP <reason>" marker in a test's stdout.
skip_regex = re.compile(r'# SKIP\S*\s+(.*)', re.IGNORECASE)

VERBOSE = False

os.umask(0o022)
# Blank out NODE_OPTIONS so the host environment cannot influence tests.
os.environ['NODE_OPTIONS'] = ''

# ---------------------------------------------
# --- P r o g r e s s   I n d i c a t o r s ---
# ---------------------------------------------


class ProgressIndicator(object):
  """Base class that runs the queued test cases on worker threads and
  reports progress via the AboutToRun/HasRun/Starting/Done hooks that
  subclasses implement."""

  def __init__(self, cases, flaky_tests_mode, measure_flakiness):
    self.cases = cases
    self.serial_id = 0
    self.flaky_tests_mode = flaky_tests_mode
    self.measure_flakiness = measure_flakiness
    # Cases marked parallel may run concurrently; the rest run only on
    # the main worker (thread_id 0).
    self.parallel_queue = Queue(len(cases))
    self.sequential_queue = Queue(len(cases))
    for case in cases:
      if case.parallel:
        self.parallel_queue.put_nowait(case)
      else:
        self.sequential_queue.put_nowait(case)
    self.succeeded = 0
    self.remaining = len(cases)
    self.total = len(cases)
    self.failed = [ ]
    self.flaky_failed = [ ]
    self.crashed = 0
    # Protects the counters above and serializes the AboutToRun/HasRun hooks.
    self.lock = threading.Lock()
    self.shutdown_event = threading.Event()

  def GetFailureOutput(self, failure):
    """Format one failed TestOutput as a human-readable report string."""
    output = []
    if failure.output.stderr:
      output += ["--- stderr ---" ]
      output += [failure.output.stderr.strip()]
    if failure.output.stdout:
      output += ["--- stdout ---"]
      output += [failure.output.stdout.strip()]
    output += ["Command: %s" % EscapeCommand(failure.command)]
    if failure.HasCrashed():
      # NOTE(review): PrintCrashed is defined elsewhere in this file (not
      # visible in this chunk).
      output += ["--- %s ---" % PrintCrashed(failure.output.exit_code)]
    if failure.HasTimedOut():
      output += ["--- TIMEOUT ---"]
    output = "\n".join(output)
    return output

  def PrintFailureOutput(self, failure):
    print(self.GetFailureOutput(failure))

  def PrintFailureHeader(self, test):
    if test.IsNegative():
      negative_marker = '[negative] '
    else:
      negative_marker = ''
    print("=== %(label)s %(negative)s===" % {
      'label': test.GetLabel(),
      'negative': negative_marker
    })
    print("Path: %s" % "/".join(test.path))

  def Run(self, tasks):
    """Run all queued cases on `tasks` workers; returns True iff no failures."""
    self.Starting()
    threads = []
    # Spawn N-1 threads and then use this thread as the last one.
    # That way -j1 avoids threading altogether which is a nice fallback
    # in case of threading problems.
    for i in range(tasks - 1):
      thread = threading.Thread(target=self.RunSingle, args=[True, i + 1])
      threads.append(thread)
      thread.start()
    try:
      self.RunSingle(False, 0)
      # Wait for the remaining threads
      for thread in threads:
        # Use a timeout so that signals (ctrl-c) will be processed.
        thread.join(timeout=1000000)
    except (KeyboardInterrupt, SystemExit):
      self.shutdown_event.set()
    except Exception:
      # If there's an exception we schedule an interruption for any
      # remaining threads.
      self.shutdown_event.set()
      # ...and then reraise the exception to bail out
      raise
    self.Done()
    return not self.failed

  def RunSingle(self, parallel, thread_id):
    """Worker loop: drain the parallel queue, then (main worker only) the
    sequential queue, recording each outcome under self.lock."""
    while not self.shutdown_event.is_set():
      try:
        test = self.parallel_queue.get_nowait()
      except Empty:
        if parallel:
          return
        try:
          test = self.sequential_queue.get_nowait()
        except Empty:
          return
      case = test
      case.thread_id = thread_id
      self.lock.acquire()
      case.serial_id = self.serial_id
      self.serial_id += 1
      self.AboutToRun(case)
      self.lock.release()
      try:
        start = datetime.now()
        output = case.Run()
        # SmartOS has a bug that causes unexpected ECONNREFUSED errors.
        # See https://smartos.org/bugview/OS-2767
        # If ECONNREFUSED on SmartOS, retry the test one time.
        if (output.UnexpectedOutput() and
            sys.platform == 'sunos5' and
            'ECONNREFUSED' in output.output.stderr):
          output = case.Run()
          output.diagnostic.append('ECONNREFUSED received, test retried')
        case.duration = (datetime.now() - start)
      except IOError:
        return
      if self.shutdown_event.is_set():
        return
      self.lock.acquire()
      if output.UnexpectedOutput():
        # FLAKY/DONTCARE/KEEP_RETRYING are module-level constants defined
        # later in this file.
        if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
          self.flaky_failed.append(output)
        elif FLAKY in output.test.outcomes and self.flaky_tests_mode == KEEP_RETRYING:
          for _ in range(99):
            if not case.Run().UnexpectedOutput():
              self.flaky_failed.append(output)
              break
          else:
            # If after 100 tries, the test is not passing, it's not flaky.
            self.failed.append(output)
        else:
          self.failed.append(output)
          if output.HasCrashed():
            self.crashed += 1
          if self.measure_flakiness:
            outputs = [case.Run() for _ in range(self.measure_flakiness)]
            # +1s are there because the test already failed once at this point.
            print(" failed {} out of {}".format(len([i for i in outputs if i.UnexpectedOutput()]) + 1, self.measure_flakiness + 1))
      else:
        self.succeeded += 1
      self.remaining -= 1
      self.HasRun(output)
      self.lock.release()


def EscapeCommand(command):
  """Join a command argv into a printable string, quoting spaced parts."""
  parts = []
  for part in command:
    if ' ' in part:
      # Escape spaces. We may need to escape more characters for this
      # to work properly.
      parts.append('"%s"' % part)
    else:
      parts.append(part)
  return " ".join(parts)


class SimpleProgressIndicator(ProgressIndicator):
  """Plain-text indicator: a summary header, then per-failure reports."""

  def Starting(self):
    print('Running %i tests' % len(self.cases))

  def Done(self):
    print()
    for failed in self.failed:
      self.PrintFailureHeader(failed.test)
      self.PrintFailureOutput(failed)
    if len(self.failed) == 0:
      print("===")
      print("=== All tests succeeded")
      print("===")
    else:
      print()
      print("===")
      print("=== %i tests failed" % len(self.failed))
      if self.crashed > 0:
        print("=== %i tests CRASHED" % self.crashed)
      print("===")


class VerboseProgressIndicator(SimpleProgressIndicator):
  """Prints a line when each test starts and when it finishes."""

  def AboutToRun(self, case):
    print('Starting %s...' % case.GetLabel())
    sys.stdout.flush()
  def HasRun(self, output):
    # Report the per-test outcome; CRASH/FAIL are upper-cased to stand out.
    if output.UnexpectedOutput():
      if output.HasCrashed():
        outcome = 'CRASH'
      else:
        outcome = 'FAIL'
    else:
      outcome = 'pass'
    print('Done running %s: %s' % (output.test.GetLabel(), outcome))


class DotsProgressIndicator(SimpleProgressIndicator):
  """One character per test: '.' pass, 'F' fail, 'C' crash, 'T' timeout."""

  def AboutToRun(self, case):
    pass

  def HasRun(self, output):
    total = self.succeeded + len(self.failed)
    # Wrap the dot line every 50 tests.
    if (total > 1) and (total % 50 == 1):
      sys.stdout.write('\n')
    if output.UnexpectedOutput():
      if output.HasCrashed():
        sys.stdout.write('C')
        sys.stdout.flush()
      elif output.HasTimedOut():
        sys.stdout.write('T')
        sys.stdout.flush()
      else:
        sys.stdout.write('F')
        sys.stdout.flush()
    else:
      sys.stdout.write('.')
      sys.stdout.flush()

class ActionsAnnotationProgressIndicator(DotsProgressIndicator):
  """Dots output plus GitHub Actions '::error' annotations for failures."""

  def GetAnnotationInfo(self, test, output):
    """Best-effort extraction of (file, line, col) from a JS stack trace."""
    traceback = output.stdout + output.stderr
    find_full_path = re.search(r' +at .*\(.*%s:([0-9]+):([0-9]+)' % test.file, traceback)
    col = line = 0
    if find_full_path:
      line, col = map(int, find_full_path.groups())
    root_path = abspath(join(dirname(__file__), '../')) + os.sep
    filename = test.file.replace(root_path, "")
    return filename, line, col

  def PrintFailureOutput(self, failure):
    output = self.GetFailureOutput(failure)
    filename, line, column = self.GetAnnotationInfo(failure.test, failure.output)
    # %0A is the escaped newline understood by the Actions annotation format.
    print("::error file=%s,line=%d,col=%d::%s" % (filename, line, column, output.replace('\n', '%0A')))

class TapProgressIndicator(SimpleProgressIndicator):
  """Emits TAP version 13 output via the module logger."""

  def _printDiagnostic(self):
    # Emit the YAML diagnostic block for the most recent test.
    logger.info('  severity: %s', self.severity)
    self.exitcode and logger.info('  exitcode: %s', self.exitcode)
    logger.info('  stack: |-')

    for l in self.traceback.splitlines():
      logger.info('    ' + l)

  def Starting(self):
    logger.info('TAP version 13')
    logger.info('1..%i' % len(self.cases))
    self._done = 0

  def AboutToRun(self, case):
    pass

  def HasRun(self, output):
    self._done += 1
    self.traceback = ''
    self.severity = 'ok'
    self.exitcode = ''

    # Print test name as (for example) "parallel/test-assert". Tests that are
    # scraped from the addons documentation are all named test.js, making it
    # hard to decipher what test is running when only the filename is printed.
    prefix = abspath(join(dirname(__file__), '../test')) + os.sep
    command = output.command[-1]
    # NOTE(review): NormalizePath is defined elsewhere in this file (not
    # visible in this chunk).
    command = NormalizePath(command, prefix)

    if output.UnexpectedOutput():
      status_line = 'not ok %i %s' % (self._done, command)
      self.severity = 'fail'
      self.exitcode = output.output.exit_code
      self.traceback = output.output.stdout + output.output.stderr

      if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
        status_line = status_line + ' # TODO : Fix flaky test'
        self.severity = 'flaky'

      logger.info(status_line)

      if output.HasCrashed():
        self.severity = 'crashed'

      elif output.HasTimedOut():
        self.severity = 'fail'

    else:
      skip = skip_regex.search(output.output.stdout)
      if skip:
        logger.info(
          'ok %i %s # skip %s' % (self._done, command, skip.group(1)))
      else:
        status_line = 'ok %i %s' % (self._done, command)
        if FLAKY in output.test.outcomes:
          status_line = status_line + ' # TODO : Fix flaky test'
        logger.info(status_line)

      if output.diagnostic:
        self.severity = 'ok'
        if isinstance(output.diagnostic, list):
          self.traceback = '\n'.join(output.diagnostic)
        else:
          self.traceback = output.diagnostic


    duration = output.test.duration

    # total_seconds() was added in 2.7
    total_seconds = (duration.microseconds +
      (duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6

    # duration_ms is measured in seconds and is read as such by TAP parsers.
    # It should read as "duration including ms" rather than "duration in ms"
    logger.info('  ---')
    logger.info('  duration_ms: %d.%d' %
      (total_seconds, duration.microseconds / 1000))
    if self.severity != 'ok' or self.traceback != '':
      if output.HasTimedOut():
        self.traceback = 'timeout\n' + output.output.stdout + output.output.stderr
      self._printDiagnostic()
    logger.info('  ...')

  def Done(self):
    pass

class DeoptsCheckProgressIndicator(SimpleProgressIndicator):
  """Scans test stdout for V8 aborted/disabled-optimization messages."""

  def Starting(self):
    pass

  def AboutToRun(self, case):
    pass

  def HasRun(self, output):
    # Print test name as (for example) "parallel/test-assert". Tests that are
    # scraped from the addons documentation are all named test.js, making it
    # hard to decipher what test is running when only the filename is printed.
    prefix = abspath(join(dirname(__file__), '../test')) + os.sep
    command = output.command[-1]
    command = NormalizePath(command, prefix)

    stdout = output.output.stdout.strip()
    printed_file = False
    for line in stdout.splitlines():
      if (
        (line.startswith("[aborted optimiz") or line.startswith("[disabled optimiz")) and
        ("because:" in line or "reason:" in line)
      ):
        if not printed_file:
          printed_file = True
          print('==== %s ====' % command)
          self.failed.append(output)
        print('  %s' % line)

  def Done(self):
    pass


class CompactProgressIndicator(ProgressIndicator):
  """Single-line status display; subclasses supply the format templates."""

  def __init__(self, cases, flaky_tests_mode, measure_flakiness, templates):
    super(CompactProgressIndicator, self).__init__(cases, flaky_tests_mode, measure_flakiness)
    self.templates = templates
    self.last_status_length = 0
    self.start_time = time.time()

  def Starting(self):
    pass

  def Done(self):
    self.PrintProgress('Done\n')

  def AboutToRun(self, case):
    self.PrintProgress(case.GetLabel())

  def HasRun(self, output):
    if output.UnexpectedOutput():
      self.ClearLine(self.last_status_length)
      self.PrintFailureHeader(output.test)
      stdout = output.output.stdout.strip()
      if len(stdout):
        print(self.templates['stdout'] % stdout)
      stderr = output.output.stderr.strip()
      if len(stderr):
        print(self.templates['stderr'] % stderr)
      print("Command: %s" % EscapeCommand(output.command))
      if output.HasCrashed():
        print("--- %s ---" % PrintCrashed(output.output.exit_code))
      if output.HasTimedOut():
        print("--- TIMEOUT ---")
output.UnexpectedOutput(): 469 self.ClearLine(self.last_status_length) 470 self.PrintFailureHeader(output.test) 471 stdout = output.output.stdout.strip() 472 if len(stdout): 473 print(self.templates['stdout'] % stdout) 474 stderr = output.output.stderr.strip() 475 if len(stderr): 476 print(self.templates['stderr'] % stderr) 477 print("Command: %s" % EscapeCommand(output.command)) 478 if output.HasCrashed(): 479 print("--- %s ---" % PrintCrashed(output.output.exit_code)) 480 if output.HasTimedOut(): 481 print("--- TIMEOUT ---") 482 483 def Truncate(self, str, length): 484 if length and (len(str) > (length - 3)): 485 return str[:(length-3)] + "..." 486 else: 487 return str 488 489 def PrintProgress(self, name): 490 self.ClearLine(self.last_status_length) 491 elapsed = time.time() - self.start_time 492 status = self.templates['status_line'] % { 493 'passed': self.succeeded, 494 'remaining': (((self.total - self.remaining) * 100) // self.total), 495 'failed': len(self.failed), 496 'test': name, 497 'mins': int(elapsed) / 60, 498 'secs': int(elapsed) % 60 499 } 500 status = self.Truncate(status, 78) 501 self.last_status_length = len(status) 502 print(status, end='') 503 sys.stdout.flush() 504 505 506class ColorProgressIndicator(CompactProgressIndicator): 507 508 def __init__(self, cases, flaky_tests_mode, measure_flakiness): 509 templates = { 510 'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s", 511 'stdout': "\033[1m%s\033[0m", 512 'stderr': "\033[31m%s\033[0m", 513 } 514 super(ColorProgressIndicator, self).__init__(cases, flaky_tests_mode, measure_flakiness, templates) 515 516 def ClearLine(self, last_line_length): 517 print("\033[1K\r", end='') 518 519 520class MonochromeProgressIndicator(CompactProgressIndicator): 521 522 def __init__(self, cases, flaky_tests_mode, measure_flakiness): 523 templates = { 524 'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 
4d|-%(failed) 4d]: %(test)s", 525 'stdout': '%s', 526 'stderr': '%s', 527 'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"), 528 'max_length': 78 529 } 530 super(MonochromeProgressIndicator, self).__init__(cases, flaky_tests_mode, measure_flakiness, templates) 531 532 def ClearLine(self, last_line_length): 533 print(("\r" + (" " * last_line_length) + "\r"), end='') 534 535 536PROGRESS_INDICATORS = { 537 'verbose': VerboseProgressIndicator, 538 'dots': DotsProgressIndicator, 539 'actions': ActionsAnnotationProgressIndicator, 540 'color': ColorProgressIndicator, 541 'tap': TapProgressIndicator, 542 'mono': MonochromeProgressIndicator, 543 'deopts': DeoptsCheckProgressIndicator 544} 545 546 547# ------------------------- 548# --- F r a m e w o r k --- 549# ------------------------- 550 551 552class CommandOutput(object): 553 554 def __init__(self, exit_code, timed_out, stdout, stderr): 555 self.exit_code = exit_code 556 self.timed_out = timed_out 557 self.stdout = stdout 558 self.stderr = stderr 559 self.failed = None 560 561 562class TestCase(object): 563 564 def __init__(self, context, path, arch, mode): 565 self.path = path 566 self.context = context 567 self.duration = None 568 self.arch = arch 569 self.mode = mode 570 self.parallel = False 571 self.disable_core_files = False 572 self.serial_id = 0 573 self.thread_id = 0 574 575 def IsNegative(self): 576 return self.context.expect_fail 577 578 def DidFail(self, output): 579 if output.failed is None: 580 output.failed = self.IsFailureOutput(output) 581 return output.failed 582 583 def IsFailureOutput(self, output): 584 return output.exit_code != 0 585 586 def GetSource(self): 587 return "(no source available)" 588 589 def RunCommand(self, command, env): 590 full_command = self.context.processor(command) 591 output = Execute(full_command, 592 self.context, 593 self.context.GetTimeout(self.mode), 594 env, 595 disable_core_files = self.disable_core_files) 596 return TestOutput(self, 597 
full_command, 598 output, 599 self.context.store_unexpected_output) 600 601 def Run(self): 602 try: 603 result = self.RunCommand(self.GetCommand(), { 604 "TEST_SERIAL_ID": "%d" % self.serial_id, 605 "TEST_THREAD_ID": "%d" % self.thread_id, 606 "TEST_PARALLEL" : "%d" % self.parallel 607 }) 608 finally: 609 # Tests can leave the tty in non-blocking mode. If the test runner 610 # tries to print to stdout/stderr after that and the tty buffer is 611 # full, it'll die with a EAGAIN OSError. Ergo, put the tty back in 612 # blocking mode before proceeding. 613 if sys.platform != 'win32': 614 from fcntl import fcntl, F_GETFL, F_SETFL 615 from os import O_NONBLOCK 616 for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL)) 617 618 return result 619 620 621class TestOutput(object): 622 623 def __init__(self, test, command, output, store_unexpected_output): 624 self.test = test 625 self.command = command 626 self.output = output 627 self.store_unexpected_output = store_unexpected_output 628 self.diagnostic = [] 629 630 def UnexpectedOutput(self): 631 if self.HasCrashed(): 632 outcome = CRASH 633 elif self.HasTimedOut(): 634 outcome = TIMEOUT 635 elif self.HasFailed(): 636 outcome = FAIL 637 else: 638 outcome = PASS 639 return not outcome in self.test.outcomes 640 641 def HasCrashed(self): 642 if utils.IsWindows(): 643 return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code) 644 else: 645 # Timed out tests will have exit_code -signal.SIGTERM. 
646 if self.output.timed_out: 647 return False 648 return self.output.exit_code < 0 649 650 def HasTimedOut(self): 651 return self.output.timed_out 652 653 def HasFailed(self): 654 execution_failed = self.test.DidFail(self.output) 655 if self.test.IsNegative(): 656 return not execution_failed 657 else: 658 return execution_failed 659 660 661def KillProcessWithID(pid, signal_to_send=signal.SIGTERM): 662 if utils.IsWindows(): 663 os.popen('taskkill /T /F /PID %d' % pid) 664 else: 665 os.kill(pid, signal_to_send) 666 667 668MAX_SLEEP_TIME = 0.1 669INITIAL_SLEEP_TIME = 0.0001 670SLEEP_TIME_FACTOR = 1.25 671 672SEM_INVALID_VALUE = -1 673SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h 674 675def Win32SetErrorMode(mode): 676 prev_error_mode = SEM_INVALID_VALUE 677 try: 678 import ctypes 679 prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode) 680 except ImportError: 681 pass 682 return prev_error_mode 683 684 685def KillTimedOutProcess(context, pid): 686 signal_to_send = signal.SIGTERM 687 if context.abort_on_timeout: 688 # Using SIGABRT here allows the OS to generate a core dump that can be 689 # looked at post-mortem, which helps for investigating failures that are 690 # difficult to reproduce. 691 signal_to_send = signal.SIGABRT 692 KillProcessWithID(pid, signal_to_send) 693 694 695def RunProcess(context, timeout, args, **rest): 696 if context.verbose: print("#", " ".join(args)) 697 popen_args = args 698 prev_error_mode = SEM_INVALID_VALUE 699 if utils.IsWindows(): 700 if context.suppress_dialogs: 701 # Try to change the error mode to avoid dialogs on fatal errors. Don't 702 # touch any existing error mode flags by merging the existing error mode. 703 # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx. 
704 error_mode = SEM_NOGPFAULTERRORBOX 705 prev_error_mode = Win32SetErrorMode(error_mode) 706 Win32SetErrorMode(error_mode | prev_error_mode) 707 708 process = subprocess.Popen( 709 args = popen_args, 710 **rest 711 ) 712 if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE: 713 Win32SetErrorMode(prev_error_mode) 714 # Compute the end time - if the process crosses this limit we 715 # consider it timed out. 716 if timeout is None: end_time = None 717 else: end_time = time.time() + timeout 718 timed_out = False 719 # Repeatedly check the exit code from the process in a 720 # loop and keep track of whether or not it times out. 721 exit_code = None 722 sleep_time = INITIAL_SLEEP_TIME 723 724 while exit_code is None: 725 if (not end_time is None) and (time.time() >= end_time): 726 # Kill the process and wait for it to exit. 727 KillTimedOutProcess(context, process.pid) 728 exit_code = process.wait() 729 timed_out = True 730 else: 731 exit_code = process.poll() 732 time.sleep(sleep_time) 733 sleep_time = sleep_time * SLEEP_TIME_FACTOR 734 if sleep_time > MAX_SLEEP_TIME: 735 sleep_time = MAX_SLEEP_TIME 736 return (process, exit_code, timed_out) 737 738 739def PrintError(str): 740 sys.stderr.write(str) 741 sys.stderr.write('\n') 742 743 744def CheckedUnlink(name): 745 while True: 746 try: 747 os.unlink(name) 748 except OSError as e: 749 # On Windows unlink() fails if another process (typically a virus scanner 750 # or the indexing service) has the file open. Those processes keep a 751 # file open for a short time only, so yield and try again; it'll succeed. 
752 if sys.platform == 'win32' and e.errno == errno.EACCES: 753 time.sleep(0) 754 continue 755 PrintError("os.unlink() " + str(e)) 756 break 757 758def Execute(args, context, timeout=None, env=None, disable_core_files=False, stdin=None): 759 (fd_out, outname) = tempfile.mkstemp() 760 (fd_err, errname) = tempfile.mkstemp() 761 762 if env is None: 763 env = {} 764 env_copy = os.environ.copy() 765 766 # Remove NODE_PATH 767 if "NODE_PATH" in env_copy: 768 del env_copy["NODE_PATH"] 769 770 # Remove NODE_REPL_EXTERNAL_MODULE 771 if "NODE_REPL_EXTERNAL_MODULE" in env_copy: 772 del env_copy["NODE_REPL_EXTERNAL_MODULE"] 773 774 # Extend environment 775 for key, value in env.items(): 776 env_copy[key] = value 777 778 preexec_fn = None 779 780 if disable_core_files and not utils.IsWindows(): 781 def disableCoreFiles(): 782 import resource 783 resource.setrlimit(resource.RLIMIT_CORE, (0,0)) 784 preexec_fn = disableCoreFiles 785 786 (process, exit_code, timed_out) = RunProcess( 787 context, 788 timeout, 789 args = args, 790 stdin = stdin, 791 stdout = fd_out, 792 stderr = fd_err, 793 env = env_copy, 794 preexec_fn = preexec_fn 795 ) 796 os.close(fd_out) 797 os.close(fd_err) 798 output = open(outname, encoding='utf8').read() 799 errors = open(errname, encoding='utf8').read() 800 CheckedUnlink(outname) 801 CheckedUnlink(errname) 802 803 return CommandOutput(exit_code, timed_out, output, errors) 804 805 806def CarCdr(path): 807 if len(path) == 0: 808 return (None, [ ]) 809 else: 810 return (path[0], path[1:]) 811 812 813class TestConfiguration(object): 814 def __init__(self, context, root, section): 815 self.context = context 816 self.root = root 817 self.section = section 818 819 def Contains(self, path, file): 820 if len(path) > len(file): 821 return False 822 for i in range(len(path)): 823 if not path[i].match(NormalizePath(file[i])): 824 return False 825 return True 826 827 def GetTestStatus(self, sections, defs): 828 status_file = join(self.root, '%s.status' % self.section) 
class TestSuite(object):
  """Named collection of tests; base class for test repositories."""

  def __init__(self, name):
    self.name = name

  def GetName(self):
    return self.name


class TestRepository(TestSuite):
  """A test directory whose behavior is described by a testcfg.py module."""

  def __init__(self, path):
    normalized_path = abspath(path)
    super(TestRepository, self).__init__(basename(normalized_path))
    self.path = normalized_path
    self.is_loaded = False
    self.config = None

  def GetConfiguration(self, context):
    # Load testcfg.py lazily and cache the resulting configuration object.
    if self.is_loaded:
      return self.config
    self.is_loaded = True

    module = get_module('testcfg', self.path)
    self.config = module.GetConfiguration(context, self.path)
    if hasattr(self.config, 'additional_flags'):
      self.config.additional_flags += context.node_args
    else:
      self.config.additional_flags = context.node_args
    return self.config

  def GetBuildRequirements(self, path, context):
    return self.GetConfiguration(context).GetBuildRequirements()

  def AddTestsToList(self, result, current_path, path, context, arch, mode):
    tests = self.GetConfiguration(context).ListTests(current_path, path,
                                                     arch, mode)
    result += tests
    # --repeat support: append deep copies so each run has its own case
    # objects.
    for i in range(1, context.repeat):
      result += copy.deepcopy(tests)

  def GetTestStatus(self, context, sections, defs):
    self.GetConfiguration(context).GetTestStatus(sections, defs)


class LiteralTestSuite(TestSuite):
  """The root suite: a literal list of sub-repositories."""

  def __init__(self, tests_repos, test_root):
    super(LiteralTestSuite, self).__init__('root')
    self.tests_repos = tests_repos
    self.test_root = test_root

  def GetBuildRequirements(self, path, context):
    (name, rest) = CarCdr(path)
    result = [ ]
    for test in self.tests_repos:
      if not name or name.match(test.GetName()):
        result += test.GetBuildRequirements(rest, context)
    return result

  def ListTests(self, current_path, path, context, arch, mode):
    (name, rest) = CarCdr(path)
    result = [ ]
    for test in self.tests_repos:
      test_name = test.GetName()
      if not name or name.match(test_name):
        full_path = current_path + [test_name]
        test.AddTestsToList(result, full_path, path, context, arch, mode)
    result.sort(key=lambda x: x.GetName())
    return result

  def GetTestStatus(self, context, sections, defs):
    # Just read the test configuration from root_path/root.status.
    root = TestConfiguration(context, self.test_root, 'root')
    root.GetTestStatus(sections, defs)
    for tests_repos in self.tests_repos:
      tests_repos.GetTestStatus(context, sections, defs)


# Timeout multipliers per architecture and build mode.
TIMEOUT_SCALEFACTOR = {
    'armv6' : { 'debug' : 12, 'release' : 3 }, # The ARM buildbots are slow.
    'arm'   : { 'debug' :  8, 'release' : 2 },
    'ia32'  : { 'debug' :  4, 'release' : 1 },
    'ppc'   : { 'debug' :  4, 'release' : 1 },
    's390'  : { 'debug' :  4, 'release' : 1 } }


class Context(object):
  """Immutable run-wide settings shared by every test case."""

  def __init__(self, workspace, verbose, vm, args, expect_fail,
               timeout, processor, suppress_dialogs,
               store_unexpected_output, repeat, abort_on_timeout):
    self.workspace = workspace
    self.verbose = verbose
    self.vm = vm
    self.node_args = args
    self.expect_fail = expect_fail
    self.timeout = timeout
    self.processor = processor
    self.suppress_dialogs = suppress_dialogs
    self.store_unexpected_output = store_unexpected_output
    self.repeat = repeat
    self.abort_on_timeout = abort_on_timeout
    self.v8_enable_inspector = True
    self.node_has_crypto = True

  def GetVm(self, arch, mode):
    """Return the path to the node executable for `arch`/`mode`."""
    if self.vm is not None:
      return self.vm
    if arch == 'none':
      name = 'out/Debug/node' if mode == 'debug' else 'out/Release/node'
    else:
      name = 'out/%s.%s/node' % (arch, mode)

    # Currently GYP does not support output_dir for MSVS.
    # http://code.google.com/p/gyp/issues/detail?id=40
    # It will put the builds into Release/node.exe or Debug/node.exe
    if utils.IsWindows():
      if not exists(name + '.exe'):
        name = name.replace('out/', '')
      name = os.path.abspath(name + '.exe')

    if not exists(name):
      raise ValueError('Could not find executable. Should be ' + name)

    return name

  def GetTimeout(self, mode):
    # NOTE(review): ARCH_GUESS is a module-level global defined elsewhere in
    # this file (presumably from utils.GuessArchitecture() — confirm).
    return self.timeout * TIMEOUT_SCALEFACTOR[ARCH_GUESS or 'ia32'][mode]

def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode, measure_flakiness):
  """Run `cases_to_run` with the named progress indicator on `tasks` workers."""
  progress = PROGRESS_INDICATORS[progress](cases_to_run, flaky_tests_mode, measure_flakiness)
  return progress.Run(tasks)

# -------------------------------------------
# --- T e s t   C o n f i g u r a t i o n ---
# -------------------------------------------


# Outcome and mode constants used by .status files and the runner.
RUN = 'run'
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
FLAKY = 'flaky'
DONTCARE = 'dontcare'
KEEP_RETRYING = 'keep_retrying'

class Expression(object):
  """Base class for parsed .status file condition expressions."""
  pass


class Constant(Expression):

  def __init__(self, value):
    self.value = value

  def Evaluate(self, env, defs):
    return self.value


class Variable(Expression):
  """A $variable reference; its outcome set is its value in `env`."""

  def __init__(self, name):
    self.name = name

  def GetOutcomes(self, env, defs):
    if self.name in env: return set([env[self.name]])
    else: return set()


class Outcome(Expression):
  """A bare word: either a definition from `defs` or a literal outcome."""

  def __init__(self, name):
    self.name = name

  def GetOutcomes(self, env, defs):
    if self.name in defs:
      return defs[self.name].GetOutcomes(env, defs)
    else:
      return set([self.name])


class Operation(Expression):
  """A binary operation: '&&', '||'/',', '==', or 'if'."""

  def __init__(self, left, op, right):
    self.left = left
    self.op = op
    self.right = right

  def Evaluate(self, env, defs):
    if self.op == '||' or self.op == ',':
      return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
    elif self.op == 'if':
      return False
    elif self.op == '==':
      inter = self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
      return bool(inter)
    else:
      assert self.op == '&&'
      return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)

  def GetOutcomes(self, env, defs):
    if self.op == '||' or self.op == ',':
      return self.left.GetOutcomes(env, defs) | self.right.GetOutcomes(env, defs)
    elif self.op == 'if':
      if self.right.Evaluate(env, defs):
        return self.left.GetOutcomes(env, defs)
      else:
        return set()
    else:
      assert self.op == '&&'
      return self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
def IsAlpha(str):
  """True when `str` consists only of letters, digits and underscores."""
  for char in str:
    if not (char.isalpha() or char.isdigit() or char == '_'):
      return False
  return True


class Tokenizer(object):
  """A simple string tokenizer that chops expressions into variables,
  parens and operators"""

  def __init__(self, expr):
    self.index = 0
    self.expr = expr
    self.length = len(expr)
    self.tokens = None

  def Current(self, length = 1):
    if not self.HasMore(length): return ""
    return self.expr[self.index:self.index+length]

  def HasMore(self, length = 1):
    # Fixed off-by-one: the old form (self.index < self.length + (length - 1))
    # claimed `length` characters were available even at the final character,
    # making Current(2) return a one-character slice near the end of input.
    return self.index + (length - 1) < self.length

  def Advance(self, count = 1):
    self.index = self.index + count

  def AddToken(self, token):
    self.tokens.append(token)

  def SkipSpaces(self):
    while self.HasMore() and self.Current().isspace():
      self.Advance()

  def Tokenize(self):
    """Return the token list, or None if the expression is malformed."""
    self.tokens = [ ]
    while self.HasMore():
      self.SkipSpaces()
      if not self.HasMore():
        return None
      if self.Current() == '(':
        self.AddToken('(')
        self.Advance()
      elif self.Current() == ')':
        self.AddToken(')')
        self.Advance()
      elif self.Current() == '$':
        self.AddToken('$')
        self.Advance()
      elif self.Current() == ',':
        self.AddToken(',')
        self.Advance()
      elif IsAlpha(self.Current()):
        buf = ""
        while self.HasMore() and IsAlpha(self.Current()):
          buf += self.Current()
          self.Advance()
        self.AddToken(buf)
      elif self.Current(2) == '&&':
        self.AddToken('&&')
        self.Advance(2)
      elif self.Current(2) == '||':
        self.AddToken('||')
        self.Advance(2)
      elif self.Current(2) == '==':
        self.AddToken('==')
        self.Advance(2)
      else:
        return None
    return self.tokens


class Scanner(object):
  """A simple scanner that can serve out tokens from a given list"""

  def __init__(self, tokens):
    self.tokens = tokens
    self.length = len(tokens)
    self.index = 0

  def HasMore(self):
    return self.index < self.length

  def Current(self):
    return self.tokens[self.index]

  def Advance(self):
    self.index = self.index + 1


def ParseAtomicExpression(scan):
  """Parse a constant, outcome word, $variable or parenthesized expression."""
  if scan.Current() == "true":
    scan.Advance()
    return Constant(True)
  elif scan.Current() == "false":
    scan.Advance()
    return Constant(False)
  elif IsAlpha(scan.Current()):
    name = scan.Current()
    scan.Advance()
    return Outcome(name.lower())
  elif scan.Current() == '$':
    scan.Advance()
    if not IsAlpha(scan.Current()):
      return None
    name = scan.Current()
    scan.Advance()
    return Variable(name.lower())
  elif scan.Current() == '(':
    scan.Advance()
    result = ParseLogicalExpression(scan)
    if (not result) or (scan.Current() != ')'):
      return None
    scan.Advance()
    return result
  else:
    return None


BINARIES = ['==']
def ParseOperatorExpression(scan):
  """Parse a chain of '==' comparisons (right-associative)."""
  left = ParseAtomicExpression(scan)
  if not left: return None
  while scan.HasMore() and (scan.Current() in BINARIES):
    op = scan.Current()
    scan.Advance()
    right = ParseOperatorExpression(scan)
    if not right:
      return None
    left = Operation(left, op, right)
  return left
BINARIES): 1174 op = scan.Current() 1175 scan.Advance() 1176 right = ParseOperatorExpression(scan) 1177 if not right: 1178 return None 1179 left = Operation(left, op, right) 1180 return left 1181 1182 1183def ParseConditionalExpression(scan): 1184 left = ParseOperatorExpression(scan) 1185 if not left: return None 1186 while scan.HasMore() and (scan.Current() == 'if'): 1187 scan.Advance() 1188 right = ParseOperatorExpression(scan) 1189 if not right: 1190 return None 1191 left= Operation(left, 'if', right) 1192 return left 1193 1194 1195LOGICALS = ["&&", "||", ","] 1196def ParseLogicalExpression(scan): 1197 left = ParseConditionalExpression(scan) 1198 if not left: return None 1199 while scan.HasMore() and (scan.Current() in LOGICALS): 1200 op = scan.Current() 1201 scan.Advance() 1202 right = ParseConditionalExpression(scan) 1203 if not right: 1204 return None 1205 left = Operation(left, op, right) 1206 return left 1207 1208 1209def ParseCondition(expr): 1210 """Parses a logical expression into an Expression object""" 1211 tokens = Tokenizer(expr).Tokenize() 1212 if not tokens: 1213 print("Malformed expression: '%s'" % expr) 1214 return None 1215 scan = Scanner(tokens) 1216 ast = ParseLogicalExpression(scan) 1217 if not ast: 1218 print("Malformed expression: '%s'" % expr) 1219 return None 1220 if scan.HasMore(): 1221 print("Malformed expression: '%s'" % expr) 1222 return None 1223 return ast 1224 1225 1226class Configuration(object): 1227 """The parsed contents of a configuration file""" 1228 1229 def __init__(self, sections, defs): 1230 self.sections = sections 1231 self.defs = defs 1232 1233 def ClassifyTests(self, cases, env): 1234 sections = [ s for s in self.sections if s.condition.Evaluate(env, self.defs) ] 1235 all_rules = reduce(list.__add__, [s.rules for s in sections], []) 1236 unused_rules = set(all_rules) 1237 result = [] 1238 for case in cases: 1239 matches = [ r for r in all_rules if r.Contains(case.path) ] 1240 outcomes_list = [ r.GetOutcomes(env, 
self.defs) for r in matches ] 1241 outcomes = reduce(set.union, outcomes_list, set()) 1242 unused_rules.difference_update(matches) 1243 case.outcomes = set(outcomes) or set([PASS]) 1244 # slow tests may also just pass. 1245 if SLOW in case.outcomes: 1246 case.outcomes.add(PASS) 1247 result.append(case) 1248 return result, unused_rules 1249 1250 1251class Section(object): 1252 """A section of the configuration file. Sections are enabled or 1253 disabled prior to running the tests, based on their conditions""" 1254 1255 def __init__(self, condition): 1256 self.condition = condition 1257 self.rules = [ ] 1258 1259 def AddRule(self, rule): 1260 self.rules.append(rule) 1261 1262 1263class Rule(object): 1264 """A single rule that specifies the expected outcome for a single 1265 test.""" 1266 1267 def __init__(self, raw_path, path, value): 1268 self.raw_path = raw_path 1269 self.path = path 1270 self.value = value 1271 1272 def GetOutcomes(self, env, defs): 1273 return self.value.GetOutcomes(env, defs) 1274 1275 def Contains(self, path): 1276 if len(self.path) > len(path): 1277 return False 1278 for i in range(len(self.path)): 1279 if not self.path[i].match(path[i]): 1280 return False 1281 return True 1282 1283 1284HEADER_PATTERN = re.compile(r'\[([^]]+)\]') 1285RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)') 1286DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$') 1287PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w_.\-/]+)$') 1288 1289 1290def ReadConfigurationInto(path, sections, defs): 1291 current_section = Section(Constant(True)) 1292 sections.append(current_section) 1293 prefix = [] 1294 for line in utils.ReadLinesFrom(path): 1295 header_match = HEADER_PATTERN.match(line) 1296 if header_match: 1297 condition_str = header_match.group(1).strip() 1298 condition = ParseCondition(condition_str) 1299 new_section = Section(condition) 1300 sections.append(new_section) 1301 current_section = new_section 1302 continue 1303 rule_match = RULE_PATTERN.match(line) 1304 if 
rule_match: 1305 path = prefix + SplitPath(rule_match.group(1).strip()) 1306 value_str = rule_match.group(2).strip() 1307 value = ParseCondition(value_str) 1308 if not value: 1309 return False 1310 current_section.AddRule(Rule(rule_match.group(1), path, value)) 1311 continue 1312 def_match = DEF_PATTERN.match(line) 1313 if def_match: 1314 name = def_match.group(1).lower() 1315 value = ParseCondition(def_match.group(2).strip()) 1316 if not value: 1317 return False 1318 defs[name] = value 1319 continue 1320 prefix_match = PREFIX_PATTERN.match(line) 1321 if prefix_match: 1322 prefix = SplitPath(prefix_match.group(1).strip()) 1323 continue 1324 raise Exception("Malformed line: '%s'." % line) 1325 1326 1327# --------------- 1328# --- M a i n --- 1329# --------------- 1330 1331 1332ARCH_GUESS = utils.GuessArchitecture() 1333 1334 1335def BuildOptions(): 1336 result = optparse.OptionParser() 1337 result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)", 1338 default='release') 1339 result.add_option("-v", "--verbose", help="Verbose output", 1340 default=False, action="store_true") 1341 result.add_option('--logfile', dest='logfile', 1342 help='write test output to file. 
NOTE: this only applies the tap progress indicator') 1343 result.add_option("-p", "--progress", 1344 help="The style of progress indicator (%s)" % ", ".join(PROGRESS_INDICATORS.keys()), 1345 choices=list(PROGRESS_INDICATORS.keys()), default="mono") 1346 result.add_option("--report", help="Print a summary of the tests to be run", 1347 default=False, action="store_true") 1348 result.add_option("-s", "--suite", help="A test suite", 1349 default=[], action="append") 1350 result.add_option("-t", "--timeout", help="Timeout in seconds", 1351 default=120, type="int") 1352 result.add_option("--arch", help='The architecture to run tests for', 1353 default='none') 1354 result.add_option("--snapshot", help="Run the tests with snapshot turned on", 1355 default=False, action="store_true") 1356 result.add_option("--special-command", default=None) 1357 result.add_option("--node-args", dest="node_args", help="Args to pass through to Node", 1358 default=[], action="append") 1359 result.add_option("--expect-fail", dest="expect_fail", 1360 help="Expect test cases to fail", default=False, action="store_true") 1361 result.add_option("--valgrind", help="Run tests through valgrind", 1362 default=False, action="store_true") 1363 result.add_option("--worker", help="Run parallel tests inside a worker context", 1364 default=False, action="store_true") 1365 result.add_option("--check-deopts", help="Check tests for permanent deoptimizations", 1366 default=False, action="store_true") 1367 result.add_option("--cat", help="Print the source of the tests", 1368 default=False, action="store_true") 1369 result.add_option("--flaky-tests", 1370 help="Regard tests marked as flaky (run|skip|dontcare|keep_retrying)", 1371 default="run") 1372 result.add_option("--measure-flakiness", 1373 help="When a test fails, re-run it x number of times", 1374 default=0, type="int") 1375 result.add_option("--skip-tests", 1376 help="Tests that should not be executed (comma-separated)", 1377 default="") 1378 
result.add_option("--warn-unused", help="Report unused rules", 1379 default=False, action="store_true") 1380 result.add_option("-j", help="The number of parallel tasks to run", 1381 default=1, type="int") 1382 result.add_option("-J", help="Run tasks in parallel on all cores", 1383 default=False, action="store_true") 1384 result.add_option("--time", help="Print timing information after running", 1385 default=False, action="store_true") 1386 result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests", 1387 dest="suppress_dialogs", default=True, action="store_true") 1388 result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests", 1389 dest="suppress_dialogs", action="store_false") 1390 result.add_option("--shell", help="Path to node executable", default=None) 1391 result.add_option("--store-unexpected-output", 1392 help="Store the temporary JS files from tests that fails", 1393 dest="store_unexpected_output", default=True, action="store_true") 1394 result.add_option("--no-store-unexpected-output", 1395 help="Deletes the temporary JS files from tests that fails", 1396 dest="store_unexpected_output", action="store_false") 1397 result.add_option("-r", "--run", 1398 help="Divide the tests in m groups (interleaved) and run tests from group n (--run=n,m with n < m)", 1399 default="") 1400 result.add_option('--temp-dir', 1401 help='Optional path to change directory used for tests', default=False) 1402 result.add_option('--test-root', 1403 help='Optional path to change test directory', dest='test_root', default=None) 1404 result.add_option('--repeat', 1405 help='Number of times to repeat given tests', 1406 default=1, type="int") 1407 result.add_option('--abort-on-timeout', 1408 help='Send SIGABRT instead of SIGTERM to kill processes that time out', 1409 default=False, action="store_true", dest="abort_on_timeout") 1410 result.add_option("--type", 1411 help="Type of build (simple, fips, coverage)", 1412 
default=None) 1413 return result 1414 1415 1416def ProcessOptions(options): 1417 global VERBOSE 1418 VERBOSE = options.verbose 1419 options.arch = options.arch.split(',') 1420 options.mode = options.mode.split(',') 1421 options.run = options.run.split(',') 1422 # Split at commas and filter out all the empty strings. 1423 options.skip_tests = [test for test in options.skip_tests.split(',') if test] 1424 if options.run == [""]: 1425 options.run = None 1426 elif len(options.run) != 2: 1427 print("The run argument must be two comma-separated integers.") 1428 return False 1429 else: 1430 try: 1431 options.run = [int(level) for level in options.run] 1432 except ValueError: 1433 print("Could not parse the integers from the run argument.") 1434 return False 1435 if options.run[0] < 0 or options.run[1] < 0: 1436 print("The run argument cannot have negative integers.") 1437 return False 1438 if options.run[0] >= options.run[1]: 1439 print("The test group to run (n) must be smaller than number of groups (m).") 1440 return False 1441 if options.J: 1442 # inherit JOBS from environment if provided. some virtualised systems 1443 # tends to exaggerate the number of available cpus/cores. 
1444 cores = os.environ.get('JOBS') 1445 options.j = int(cores) if cores is not None else multiprocessing.cpu_count() 1446 if options.flaky_tests not in [RUN, SKIP, DONTCARE, KEEP_RETRYING]: 1447 print("Unknown flaky-tests mode %s" % options.flaky_tests) 1448 return False 1449 return True 1450 1451 1452REPORT_TEMPLATE = """\ 1453Total: %(total)i tests 1454 * %(skipped)4d tests will be skipped 1455 * %(pass)4d tests are expected to pass 1456 * %(fail_ok)4d tests are expected to fail that we won't fix 1457 * %(fail)4d tests are expected to fail that we should fix\ 1458""" 1459 1460 1461class Pattern(object): 1462 1463 def __init__(self, pattern): 1464 self.pattern = pattern 1465 self.compiled = None 1466 1467 def match(self, str): 1468 if not self.compiled: 1469 pattern = "^" + self.pattern.replace('*', '.*') + "$" 1470 self.compiled = re.compile(pattern) 1471 return self.compiled.match(str) 1472 1473 def __str__(self): 1474 return self.pattern 1475 1476 1477def SplitPath(path_arg): 1478 stripped = [c.strip() for c in path_arg.split('/')] 1479 return [Pattern(s) for s in stripped if len(s) > 0] 1480 1481def NormalizePath(path, prefix='test/'): 1482 # strip the extra path information of the specified test 1483 prefix = prefix.replace('\\', '/') 1484 path = path.replace('\\', '/') 1485 if path.startswith(prefix): 1486 path = path[len(prefix):] 1487 if path.endswith('.js'): 1488 path = path[:-3] 1489 elif path.endswith('.mjs'): 1490 path = path[:-4] 1491 return path 1492 1493def GetSpecialCommandProcessor(value): 1494 if (not value) or (value.find('@') == -1): 1495 def ExpandCommand(args): 1496 return args 1497 return ExpandCommand 1498 else: 1499 prefix, _, suffix = value.partition('@') 1500 prefix = unquote(prefix).split() 1501 suffix = unquote(suffix).split() 1502 def ExpandCommand(args): 1503 return prefix + args + suffix 1504 return ExpandCommand 1505 1506def GetSuites(test_root): 1507 def IsSuite(path): 1508 return isdir(path) and exists(join(path, 'testcfg.py')) 
1509 return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ] 1510 1511 1512def FormatTime(d): 1513 millis = round(d * 1000) % 1000 1514 return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis) 1515 1516 1517def FormatTimedelta(td): 1518 if hasattr(td, 'total_seconds'): 1519 d = td.total_seconds() 1520 else: # python2.6 compat 1521 d = td.seconds + (td.microseconds / 10.0**6) 1522 return FormatTime(d) 1523 1524 1525def PrintCrashed(code): 1526 if utils.IsWindows(): 1527 return "CRASHED" 1528 else: 1529 return "CRASHED (Signal: %d)" % -code 1530 1531 1532# these suites represent special cases that should not be run as part of the 1533# default JavaScript test-run, e.g., internet/ requires a network connection, 1534# addons/ requires compilation. 1535IGNORED_SUITES = [ 1536 'addons', 1537 'benchmark', 1538 'doctool', 1539 'embedding', 1540 'internet', 1541 'js-native-api', 1542 'node-api', 1543 'pummel', 1544 'tick-processor', 1545 'v8-updates' 1546] 1547 1548 1549def ArgsToTestPaths(test_root, args, suites): 1550 if len(args) == 0 or 'default' in args: 1551 def_suites = [s for s in suites if s not in IGNORED_SUITES] 1552 args = [a for a in args if a != 'default'] + def_suites 1553 subsystem_regex = re.compile(r'^[a-zA-Z-]*$') 1554 check = lambda arg: subsystem_regex.match(arg) and (arg not in suites) 1555 mapped_args = ["*/test*-%s-*" % arg if check(arg) else arg for arg in args] 1556 paths = [SplitPath(NormalizePath(a)) for a in mapped_args] 1557 return paths 1558 1559 1560def get_env_type(vm, options_type, context): 1561 if options_type is not None: 1562 env_type = options_type 1563 else: 1564 # 'simple' is the default value for 'env_type'. 
1565 env_type = 'simple' 1566 ssl_ver = Execute([vm, '-p', 'process.versions.openssl'], context).stdout 1567 if 'fips' in ssl_ver: 1568 env_type = 'fips' 1569 return env_type 1570 1571 1572def Main(): 1573 parser = BuildOptions() 1574 (options, args) = parser.parse_args() 1575 if not ProcessOptions(options): 1576 parser.print_help() 1577 return 1 1578 1579 ch = logging.StreamHandler(sys.stdout) 1580 logger.addHandler(ch) 1581 logger.setLevel(logging.INFO) 1582 if options.logfile: 1583 fh = logging.FileHandler(options.logfile, encoding='utf-8', mode='w') 1584 logger.addHandler(fh) 1585 1586 workspace = abspath(join(dirname(sys.argv[0]), '..')) 1587 test_root = join(workspace, 'test') 1588 if options.test_root is not None: 1589 test_root = options.test_root 1590 suites = GetSuites(test_root) 1591 repositories = [TestRepository(join(test_root, name)) for name in suites] 1592 repositories += [TestRepository(a) for a in options.suite] 1593 1594 root = LiteralTestSuite(repositories, test_root) 1595 paths = ArgsToTestPaths(test_root, args, suites) 1596 1597 # Check for --valgrind option. If enabled, we overwrite the special 1598 # command flag with a command that uses the run-valgrind.py script. 1599 if options.valgrind: 1600 run_valgrind = join(workspace, "tools", "run-valgrind.py") 1601 options.special_command = "python -u " + run_valgrind + " @" 1602 1603 if options.check_deopts: 1604 options.node_args.append("--trace-opt") 1605 options.node_args.append("--trace-file-names") 1606 # --always-opt is needed because many tests do not run long enough for the 1607 # optimizer to kick in, so this flag will force it to run. 
1608 options.node_args.append("--always-opt") 1609 options.progress = "deopts" 1610 1611 if options.worker: 1612 run_worker = join(workspace, "tools", "run-worker.js") 1613 options.node_args.append(run_worker) 1614 1615 processor = GetSpecialCommandProcessor(options.special_command) 1616 1617 context = Context(workspace, 1618 VERBOSE, 1619 options.shell, 1620 options.node_args, 1621 options.expect_fail, 1622 options.timeout, 1623 processor, 1624 options.suppress_dialogs, 1625 options.store_unexpected_output, 1626 options.repeat, 1627 options.abort_on_timeout) 1628 1629 # Get status for tests 1630 sections = [ ] 1631 defs = { } 1632 root.GetTestStatus(context, sections, defs) 1633 config = Configuration(sections, defs) 1634 1635 # List the tests 1636 all_cases = [ ] 1637 all_unused = [ ] 1638 unclassified_tests = [ ] 1639 globally_unused_rules = None 1640 for path in paths: 1641 for arch in options.arch: 1642 for mode in options.mode: 1643 vm = context.GetVm(arch, mode) 1644 if not exists(vm): 1645 print("Can't find shell executable: '%s'" % vm) 1646 continue 1647 archEngineContext = Execute([vm, "-p", "process.arch"], context) 1648 vmArch = archEngineContext.stdout.rstrip() 1649 if archEngineContext.exit_code != 0 or vmArch == "undefined": 1650 print("Can't determine the arch of: '%s'" % vm) 1651 print(archEngineContext.stderr.rstrip()) 1652 continue 1653 env = { 1654 'mode': mode, 1655 'system': utils.GuessOS(), 1656 'arch': vmArch, 1657 'type': get_env_type(vm, options.type, context), 1658 } 1659 test_list = root.ListTests([], path, context, arch, mode) 1660 unclassified_tests += test_list 1661 cases, unused_rules = config.ClassifyTests(test_list, env) 1662 if globally_unused_rules is None: 1663 globally_unused_rules = set(unused_rules) 1664 else: 1665 globally_unused_rules = ( 1666 globally_unused_rules.intersection(unused_rules)) 1667 all_cases += cases 1668 all_unused.append(unused_rules) 1669 1670 # We want to skip the inspector tests if node was built 
without the inspector. 1671 has_inspector = Execute([vm, 1672 '-p', 'process.features.inspector'], context) 1673 if has_inspector.stdout.rstrip() == 'false': 1674 context.v8_enable_inspector = False 1675 1676 has_crypto = Execute([vm, 1677 '-p', 'process.versions.openssl'], context) 1678 if has_crypto.stdout.rstrip() == 'undefined': 1679 context.node_has_crypto = False 1680 1681 if options.cat: 1682 visited = set() 1683 for test in unclassified_tests: 1684 key = tuple(test.path) 1685 if key in visited: 1686 continue 1687 visited.add(key) 1688 print("--- begin source: %s ---" % test.GetLabel()) 1689 source = test.GetSource().strip() 1690 print(source) 1691 print("--- end source: %s ---" % test.GetLabel()) 1692 return 0 1693 1694 if options.warn_unused: 1695 for rule in globally_unused_rules: 1696 print("Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])) 1697 1698 tempdir = os.environ.get('NODE_TEST_DIR') or options.temp_dir 1699 if tempdir: 1700 os.environ['NODE_TEST_DIR'] = tempdir 1701 try: 1702 os.makedirs(tempdir) 1703 except OSError as exception: 1704 if exception.errno != errno.EEXIST: 1705 print("Could not create the temporary directory", options.temp_dir) 1706 sys.exit(1) 1707 1708 def should_keep(case): 1709 if any((s in case.file) for s in options.skip_tests): 1710 return False 1711 elif SKIP in case.outcomes: 1712 return False 1713 elif (options.flaky_tests == SKIP) and (set([SLOW, FLAKY]) & case.outcomes): 1714 return False 1715 else: 1716 return True 1717 1718 cases_to_run = [ 1719 test_case for test_case in all_cases if should_keep(test_case) 1720 ] 1721 1722 if options.report: 1723 print(REPORT_TEMPLATE % { 1724 'total': len(all_cases), 1725 'skipped': len(all_cases) - len(cases_to_run), 1726 'pass': len([t for t in cases_to_run if PASS in t.outcomes]), 1727 'fail_ok': len([t for t in cases_to_run if t.outcomes == set([FAIL, OKAY])]), 1728 'fail': len([t for t in cases_to_run if t.outcomes == set([FAIL])]) 1729 }) 1730 1731 if 
options.run is not None: 1732 # Must ensure the list of tests is sorted before selecting, to avoid 1733 # silent errors if this file is changed to list the tests in a way that 1734 # can be different in different machines 1735 cases_to_run.sort(key=lambda c: (c.arch, c.mode, c.file)) 1736 cases_to_run = [ cases_to_run[i] for i 1737 in range(options.run[0], 1738 len(cases_to_run), 1739 options.run[1]) ] 1740 if len(cases_to_run) == 0: 1741 print("No tests to run.") 1742 return 1 1743 else: 1744 try: 1745 start = time.time() 1746 if RunTestCases(cases_to_run, options.progress, options.j, options.flaky_tests, options.measure_flakiness): 1747 result = 0 1748 else: 1749 result = 1 1750 duration = time.time() - start 1751 except KeyboardInterrupt: 1752 print("Interrupted") 1753 return 1 1754 1755 if options.time: 1756 # Write the times to stderr to make it easy to separate from the 1757 # test output. 1758 print() 1759 sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration)) 1760 timed_tests = [ t for t in cases_to_run if not t.duration is None ] 1761 timed_tests.sort(key=lambda x: x.duration) 1762 for i, entry in enumerate(timed_tests[:20], start=1): 1763 t = FormatTimedelta(entry.duration) 1764 sys.stderr.write("%4i (%s) %s\n" % (i, t, entry.GetLabel())) 1765 1766 return result 1767 1768 1769if __name__ == '__main__': 1770 sys.exit(Main()) 1771