• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2#
3# Copyright 2008 the V8 project authors. All rights reserved.
4# Redistribution and use in source and binary forms, with or without
5# modification, are permitted provided that the following conditions are
6# met:
7#
8#     * Redistributions of source code must retain the above copyright
9#       notice, this list of conditions and the following disclaimer.
10#     * Redistributions in binary form must reproduce the above
11#       copyright notice, this list of conditions and the following
12#       disclaimer in the documentation and/or other materials provided
13#       with the distribution.
14#     * Neither the name of Google Inc. nor the names of its
15#       contributors may be used to endorse or promote products derived
16#       from this software without specific prior written permission.
17#
18# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31from __future__ import print_function
32from typing import Dict
33import logging
34import optparse
35import os
36import re
37import signal
38import subprocess
39import sys
40import tempfile
41import time
42import threading
43import utils
44import multiprocessing
45import errno
46import copy
47import io
48
49
if sys.version_info >= (3, 5):
  from importlib import machinery, util
  def get_module(name, path):
    """Load and return Python module `name` from directory `path`.

    Uses importlib (the modern loader API available since 3.5).
    """
    loader_details = (machinery.SourceFileLoader, machinery.SOURCE_SUFFIXES)
    spec = machinery.FileFinder(path, loader_details).find_spec(name)
    module = util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
else:
  import imp
  def get_module(name, path):
    """Load and return Python module `name` from directory `path` (pre-3.5 `imp` API)."""
    file = None
    try:
      (file, pathname, description) = imp.find_module(name, [path])
      return imp.load_module(name, file, pathname, description)
    finally:
      # imp.find_module returns an open file handle; always close it.
      if file:
        file.close()
68
69
70from io import open
71from os.path import join, dirname, abspath, basename, isdir, exists
72from datetime import datetime, timedelta
73try:
74    from queue import Queue, Empty  # Python 3
75except ImportError:
76    from Queue import Queue, Empty  # Python 2
77
78from functools import reduce
79
80try:
81  from urllib.parse import unquote    # Python 3
82except ImportError:
83  from urllib import unquote          # Python 2
84
85
logger = logging.getLogger('testrunner')
# Matches a "# SKIP <reason>" marker in a test's stdout; group(1) is the reason.
skip_regex = re.compile(r'# SKIP\S*\s+(.*)', re.IGNORECASE)

VERBOSE = False

# Give files created by tests predictable permissions.
os.umask(0o022)
# Clear NODE_OPTIONS so ambient settings cannot influence test results.
os.environ['NODE_OPTIONS'] = ''
93
94# ---------------------------------------------
95# --- P r o g r e s s   I n d i c a t o r s ---
96# ---------------------------------------------
97
98
class ProgressIndicator(object):
  """Base class for progress reporters.

  Owns the parallel/sequential work queues, the worker threads and the
  shared pass/fail counters.  Subclasses implement Starting(),
  AboutToRun(), HasRun() and Done() to render the output.
  """

  def __init__(self, cases, flaky_tests_mode, measure_flakiness):
    self.cases = cases
    self.serial_id = 0
    self.flaky_tests_mode = flaky_tests_mode
    self.measure_flakiness = measure_flakiness
    # Split cases so parallel-safe tests can be claimed by any worker while
    # the remainder run only on the main thread (see RunSingle).
    self.parallel_queue = Queue(len(cases))
    self.sequential_queue = Queue(len(cases))
    for case in cases:
      if case.parallel:
        self.parallel_queue.put_nowait(case)
      else:
        self.sequential_queue.put_nowait(case)
    self.succeeded = 0
    self.remaining = len(cases)
    self.total = len(cases)
    self.failed = [ ]
    self.flaky_failed = [ ]
    self.crashed = 0
    # Protects the counters/lists above and serial_id across workers.
    self.lock = threading.Lock()
    # Set to tell every worker to stop picking up new cases (ctrl-c / error).
    self.shutdown_event = threading.Event()

  def GetFailureOutput(self, failure):
    """Format one failure: streams, the command line, crash/timeout markers."""
    output = []
    if failure.output.stderr:
      output += ["--- stderr ---" ]
      output += [failure.output.stderr.strip()]
    if failure.output.stdout:
      output += ["--- stdout ---"]
      output += [failure.output.stdout.strip()]
    output += ["Command: %s" % EscapeCommand(failure.command)]
    if failure.HasCrashed():
      output += ["--- %s ---" % PrintCrashed(failure.output.exit_code)]
    if failure.HasTimedOut():
      output += ["--- TIMEOUT ---"]
    output = "\n".join(output)
    return output

  def PrintFailureOutput(self, failure):
    print(self.GetFailureOutput(failure))

  def PrintFailureHeader(self, test):
    """Print the '=== label ===' banner and path for a failed test."""
    if test.IsNegative():
      negative_marker = '[negative] '
    else:
      negative_marker = ''
    print("=== %(label)s %(negative)s===" % {
      'label': test.GetLabel(),
      'negative': negative_marker
    })
    print("Path: %s" % "/".join(test.path))

  def Run(self, tasks) -> Dict:
    """Run all queued cases on `tasks` threads.

    Returns {'allPassed': bool, 'failed': [TestOutput, ...]}.
    """
    self.Starting()
    threads = []
    # Spawn N-1 threads and then use this thread as the last one.
    # That way -j1 avoids threading altogether which is a nice fallback
    # in case of threading problems.
    for i in range(tasks - 1):
      thread = threading.Thread(target=self.RunSingle, args=[True, i + 1])
      threads.append(thread)
      thread.start()
    try:
      self.RunSingle(False, 0)
      # Wait for the remaining threads
      for thread in threads:
        # Use a timeout so that signals (ctrl-c) will be processed.
        thread.join(timeout=1000000)
    except (KeyboardInterrupt, SystemExit):
      self.shutdown_event.set()
    except Exception:
      # If there's an exception we schedule an interruption for any
      # remaining threads.
      self.shutdown_event.set()
      # ...and then reraise the exception to bail out
      raise
    self.Done()
    return {
      'allPassed': not self.failed,
      'failed': self.failed,
    }

  def RunSingle(self, parallel, thread_id):
    """Worker loop: drain the parallel queue, then (main thread only) the
    sequential queue, recording every result under `self.lock`.
    """
    while not self.shutdown_event.is_set():
      try:
        test = self.parallel_queue.get_nowait()
      except Empty:
        # Only the main (non-parallel) worker may run sequential cases.
        if parallel:
          return
        try:
          test = self.sequential_queue.get_nowait()
        except Empty:
          return
      case = test
      case.thread_id = thread_id
      self.lock.acquire()
      case.serial_id = self.serial_id
      self.serial_id += 1
      self.AboutToRun(case)
      self.lock.release()
      try:
        start = datetime.now()
        output = case.Run()
        # SmartOS has a bug that causes unexpected ECONNREFUSED errors.
        # See https://smartos.org/bugview/OS-2767
        # If ECONNREFUSED on SmartOS, retry the test one time.
        if (output.UnexpectedOutput() and
          sys.platform == 'sunos5' and
          'ECONNREFUSED' in output.output.stderr):
            output = case.Run()
            output.diagnostic.append('ECONNREFUSED received, test retried')
        case.duration = (datetime.now() - start)
      except IOError:
        return
      if self.shutdown_event.is_set():
        return
      self.lock.acquire()
      if output.UnexpectedOutput():
        if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
          self.flaky_failed.append(output)
        elif FLAKY in output.test.outcomes and self.flaky_tests_mode == KEEP_RETRYING:
          # Re-run up to 99 more times; one pass marks the test as flaky.
          for _ in range(99):
            if not case.Run().UnexpectedOutput():
              self.flaky_failed.append(output)
              break
          else:
            # If after 100 tries, the test is not passing, it's not flaky.
            self.failed.append(output)
        else:
          self.failed.append(output)
          if output.HasCrashed():
            self.crashed += 1
          if self.measure_flakiness:
            # Re-run the failing test N more times to estimate flakiness.
            outputs = [case.Run() for _ in range(self.measure_flakiness)]
            # +1s are there because the test already failed once at this point.
            print(" failed %d out of %d" % (len([i for i in outputs if i.UnexpectedOutput()]) + 1, self.measure_flakiness + 1))
      else:
        self.succeeded += 1
      self.remaining -= 1
      self.HasRun(output)
      self.lock.release()
242
def EscapeCommand(command):
  """Render an argv list as one shell-like string.

  Arguments containing a space are wrapped in double quotes.  Note that no
  other shell metacharacters are escaped.
  """
  quoted = ['"%s"' % arg if ' ' in arg else arg for arg in command]
  return " ".join(quoted)
253
254
class SimpleProgressIndicator(ProgressIndicator):
  """Plain-text reporter: a banner at start, a full summary at the end."""

  def Starting(self):
    print('Running %i tests' % len(self.cases))

  def Done(self):
    print()
    for failure in self.failed:
      self.PrintFailureHeader(failure.test)
      self.PrintFailureOutput(failure)
    if not self.failed:
      print("===")
      print("=== All tests succeeded")
      print("===")
      return
    print()
    print("===")
    print("=== %i tests failed" % len(self.failed))
    if self.crashed > 0:
      print("=== %i tests CRASHED" % self.crashed)
    print("===")
277
class VerboseProgressIndicator(SimpleProgressIndicator):
  """Prints one line when each test starts and one when it finishes."""

  def AboutToRun(self, case):
    print('Starting %s...' % case.GetLabel())
    sys.stdout.flush()

  def HasRun(self, output):
    if not output.UnexpectedOutput():
      outcome = 'pass'
    elif output.HasCrashed():
      outcome = 'CRASH'
    else:
      outcome = 'FAIL'
    print('Done running %s: %s' % (output.test.GetLabel(), outcome))
293
294
class DotsProgressIndicator(SimpleProgressIndicator):
  """One character per test: '.' for pass, C/T/F for crash/timeout/failure."""

  def AboutToRun(self, case):
    pass

  def HasRun(self, output):
    completed = self.succeeded + len(self.failed)
    # Start a new row every 50 results.
    if completed > 1 and completed % 50 == 1:
      sys.stdout.write('\n')
    if not output.UnexpectedOutput():
      mark = '.'
    elif output.HasCrashed():
      mark = 'C'
    elif output.HasTimedOut():
      mark = 'T'
    else:
      mark = 'F'
    sys.stdout.write(mark)
    sys.stdout.flush()
317
class ActionsAnnotationProgressIndicator(DotsProgressIndicator):
  """Dots reporter that also emits GitHub Actions ::error annotations."""

  def GetAnnotationInfo(self, test, output):
    """Extract (filename, line, col) of the failure from its traceback."""
    trace_text = output.stdout + output.stderr
    match = re.search(r' +at .*\(.*%s:([0-9]+):([0-9]+)' % test.file, trace_text)
    line = col = 0
    if match:
      line, col = (int(group) for group in match.groups())
    root_path = abspath(join(dirname(__file__), '../')) + os.sep
    return test.file.replace(root_path, ""), line, col

  def PrintFailureOutput(self, failure):
    body = self.GetFailureOutput(failure)
    filename, line, column = self.GetAnnotationInfo(failure.test, failure.output)
    # Annotations are single-line; newlines must be URL-encoded as %0A.
    print("::error file=%s,line=%d,col=%d::%s" % (filename, line, column, body.replace('\n', '%0A')))
333
class TapProgressIndicator(SimpleProgressIndicator):
  """Reports results in TAP (Test Anything Protocol) version 13 via `logger`."""

  def _printDiagnostic(self):
    # Emit the YAML diagnostic block for the result just reported, using the
    # per-result state (severity / exitcode / traceback) set up by HasRun.
    logger.info('  severity: %s', self.severity)
    self.exitcode and logger.info('  exitcode: %s', self.exitcode)
    logger.info('  stack: |-')

    for l in self.traceback.splitlines():
      logger.info('    ' + l)

  def Starting(self):
    logger.info('TAP version 13')
    logger.info('1..%i' % len(self.cases))
    # Count of results reported so far; doubles as the TAP test number.
    self._done = 0

  def AboutToRun(self, case):
    pass

  def HasRun(self, output):
    """Log one 'ok'/'not ok' line plus a YAML diagnostic block."""
    self._done += 1
    self.traceback = ''
    self.severity = 'ok'
    self.exitcode = ''

    # Print test name as (for example) "parallel/test-assert".  Tests that are
    # scraped from the addons documentation are all named test.js, making it
    # hard to decipher what test is running when only the filename is printed.
    prefix = abspath(join(dirname(__file__), '../test')) + os.sep
    command = output.command[-1]
    command = NormalizePath(command, prefix)

    if output.UnexpectedOutput():
      status_line = 'not ok %i %s' % (self._done, command)
      self.severity = 'fail'
      self.exitcode = output.output.exit_code
      self.traceback = output.output.stdout + output.output.stderr

      if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
        status_line = status_line + ' # TODO : Fix flaky test'
        self.severity = 'flaky'

      logger.info(status_line)

      if output.HasCrashed():
        self.severity = 'crashed'

      elif output.HasTimedOut():
        self.severity = 'fail'

    else:
      # A passing test may still carry a "# SKIP" marker in its stdout.
      skip = skip_regex.search(output.output.stdout)
      if skip:
        logger.info(
          'ok %i %s # skip %s' % (self._done, command, skip.group(1)))
      else:
        status_line = 'ok %i %s' % (self._done, command)
        if FLAKY in output.test.outcomes:
          status_line = status_line + ' # TODO : Fix flaky test'
        logger.info(status_line)

      if output.diagnostic:
        self.severity = 'ok'
        if isinstance(output.diagnostic, list):
          self.traceback = '\n'.join(output.diagnostic)
        else:
          self.traceback = output.diagnostic


    duration = output.test.duration
    logger.info('  ---')
    logger.info('  duration_ms: %.5f' % (duration  / timedelta(milliseconds=1)))
    if self.severity != 'ok' or self.traceback != '':
      # Timed-out tests get a traceback prefixed with 'timeout'.
      if output.HasTimedOut():
        self.traceback = 'timeout\n' + output.output.stdout + output.output.stderr
      self._printDiagnostic()
    logger.info('  ...')

  def Done(self):
    pass
413
class DeoptsCheckProgressIndicator(SimpleProgressIndicator):
  """Scans each test's stdout for V8 deopt messages instead of reporting results."""

  def Starting(self):
    pass

  def AboutToRun(self, case):
    pass

  def HasRun(self, output):
    # Print test name as (for example) "parallel/test-assert".  Tests that are
    # scraped from the addons documentation are all named test.js, making it
    # hard to decipher what test is running when only the filename is printed.
    prefix = abspath(join(dirname(__file__), '../test')) + os.sep
    command = NormalizePath(output.command[-1], prefix)

    header_printed = False
    for line in output.output.stdout.strip().splitlines():
      is_deopt = (line.startswith("[aborted optimiz") or
                  line.startswith("[disabled optimiz"))
      has_reason = "because:" in line or "reason:" in line
      if is_deopt and has_reason:
        if not header_printed:
          # First deopt line for this test: print its header and record it.
          header_printed = True
          print('==== %s ====' % command)
          self.failed.append(output)
        print('  %s' % line)

  def Done(self):
    pass
445
446
class CompactProgressIndicator(ProgressIndicator):
  """Single-line, in-place progress display (base for color/mono variants).

  `templates` supplies the 'status_line', 'stdout' and 'stderr' format
  strings used by the concrete subclasses.
  """

  def __init__(self, cases, flaky_tests_mode, measure_flakiness, templates):
    super(CompactProgressIndicator, self).__init__(cases, flaky_tests_mode, measure_flakiness)
    self.templates = templates
    self.last_status_length = 0
    self.start_time = time.time()

  def Starting(self):
    pass

  def Done(self):
    self.PrintProgress('Done\n')

  def AboutToRun(self, case):
    self.PrintProgress(case.GetLabel())

  def HasRun(self, output):
    """On failure, clear the status line and print the full failure details."""
    if output.UnexpectedOutput():
      self.ClearLine(self.last_status_length)
      self.PrintFailureHeader(output.test)
      stdout = output.output.stdout.strip()
      if len(stdout):
        print(self.templates['stdout'] % stdout)
      stderr = output.output.stderr.strip()
      if len(stderr):
        print(self.templates['stderr'] % stderr)
      print("Command: %s" % EscapeCommand(output.command))
      if output.HasCrashed():
        print("--- %s ---" % PrintCrashed(output.output.exit_code))
      if output.HasTimedOut():
        print("--- TIMEOUT ---")
      print("\n") # Two blank lines between failures, for visual separation

  def Truncate(self, text, length):
    """Clip `text` to at most `length` characters, ending with an ellipsis."""
    if length and (len(text) > (length - 3)):
      return text[:(length - 3)] + "..."
    else:
      return text

  def PrintProgress(self, name):
    """Redraw the status line: elapsed time, % done, pass/fail counts, test name."""
    self.ClearLine(self.last_status_length)
    elapsed = time.time() - self.start_time
    status = self.templates['status_line'] % {
      'passed': self.succeeded,
      'remaining': (((self.total - self.remaining) * 100) // self.total),
      'failed': len(self.failed),
      'test': name,
      # Floor division: true division (a Python 2 leftover) yields a float.
      'mins': int(elapsed) // 60,
      'secs': int(elapsed) % 60
    }
    status = self.Truncate(status, 78)
    self.last_status_length = len(status)
    print(status, end='')
    sys.stdout.flush()
502
503
class ColorProgressIndicator(CompactProgressIndicator):
  """Compact indicator that renders the status line with ANSI colors."""

  def __init__(self, cases, flaky_tests_mode, measure_flakiness):
    super(ColorProgressIndicator, self).__init__(
        cases, flaky_tests_mode, measure_flakiness,
        {
          'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
          'stdout': "\033[1m%s\033[0m",
          'stderr': "\033[31m%s\033[0m",
        })

  def ClearLine(self, last_line_length):
    # ANSI "erase to start of line", then carriage return.
    print("\033[1K\r", end='')
516
517
class MonochromeProgressIndicator(CompactProgressIndicator):
  """Compact indicator for terminals without ANSI color support."""

  def __init__(self, cases, flaky_tests_mode, measure_flakiness):
    super(MonochromeProgressIndicator, self).__init__(
        cases, flaky_tests_mode, measure_flakiness,
        {
          'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
          'stdout': '%s',
          'stderr': '%s',
          'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
          'max_length': 78
        })

  def ClearLine(self, last_line_length):
    # Overwrite the previous status with spaces, then return to column 0.
    blank = " " * last_line_length
    print("\r" + blank + "\r", end='')
532
533
# Maps a progress-mode name (command-line value) to its indicator class;
# consumed by RunTestCases().
PROGRESS_INDICATORS = {
  'verbose': VerboseProgressIndicator,
  'dots': DotsProgressIndicator,
  'actions': ActionsAnnotationProgressIndicator,
  'color': ColorProgressIndicator,
  'tap': TapProgressIndicator,
  'mono': MonochromeProgressIndicator,
  'deopts': DeoptsCheckProgressIndicator
}
543
544
545# -------------------------
546# --- F r a m e w o r k ---
547# -------------------------
548
549
class CommandOutput(object):
  """Raw result of one subprocess run: exit code, timeout flag and streams."""

  def __init__(self, exit_code, timed_out, stdout, stderr):
    self.exit_code = exit_code
    self.timed_out = timed_out
    self.stdout = stdout
    self.stderr = stderr
    # Lazily set by TestCase.DidFail(); None means "not classified yet".
    self.failed = None
558
559
class TestCase(object):
  """A single runnable test.

  NOTE(review): subclasses are expected to provide GetCommand() and set
  self.config (read in RunCommand for the per-section timeout) — neither is
  defined in this base class; confirm against the testcfg subclasses.
  """

  def __init__(self, context, path, arch, mode):
    self.path = path                  # test path components
    self.context = context
    self.duration = None              # timedelta, set after the case has run
    self.arch = arch
    self.mode = mode
    self.parallel = False             # True if the case may run on any worker
    self.disable_core_files = False
    self.serial_id = 0                # assigned by ProgressIndicator.RunSingle
    self.thread_id = 0

  def IsNegative(self):
    # A negative test is one that is expected to fail (see TestOutput.HasFailed).
    return self.context.expect_fail

  def DidFail(self, output):
    # Cache the failure classification on the output object.
    if output.failed is None:
      output.failed = self.IsFailureOutput(output)
    return output.failed

  def IsFailureOutput(self, output):
    return output.exit_code != 0

  def GetSource(self):
    return "(no source available)"

  def RunCommand(self, command, env):
    """Execute `command` (after context processing) and wrap the result."""
    full_command = self.context.processor(command)
    output = Execute(full_command,
                     self.context,
                     self.context.GetTimeout(self.mode, self.config.section),
                     env,
                     disable_core_files = self.disable_core_files)
    return TestOutput(self,
                      full_command,
                      output,
                      self.context.store_unexpected_output)

  def Run(self):
    """Run the test, exposing its scheduling ids to the child via env vars."""
    try:
      result = self.RunCommand(self.GetCommand(), {
        "TEST_SERIAL_ID": "%d" % self.serial_id,
        "TEST_THREAD_ID": "%d" % self.thread_id,
        "TEST_PARALLEL" : "%d" % self.parallel
      })
    finally:
      # Tests can leave the tty in non-blocking mode. If the test runner
      # tries to print to stdout/stderr after that and the tty buffer is
      # full, it'll die with a EAGAIN OSError. Ergo, put the tty back in
      # blocking mode before proceeding.
      if sys.platform != 'win32':
        from fcntl import fcntl, F_GETFL, F_SETFL
        from os import O_NONBLOCK
        for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))

    return result
617
618
class TestOutput(object):
  """Pairs a TestCase with its CommandOutput and classifies the result."""

  def __init__(self, test, command, output, store_unexpected_output):
    self.test = test
    self.command = command
    self.output = output              # the CommandOutput from Execute()
    self.store_unexpected_output = store_unexpected_output
    self.diagnostic = []              # extra notes (e.g. retry messages)

  def UnexpectedOutput(self):
    """True when the classified outcome is not listed in test.outcomes."""
    if self.HasCrashed():
      outcome = CRASH
    elif self.HasTimedOut():
      outcome = TIMEOUT
    elif self.HasFailed():
      outcome = FAIL
    else:
      outcome = PASS
    return not outcome in self.test.outcomes

  def HasCrashed(self):
    if utils.IsWindows():
      # NOTE(review): appears to treat NTSTATUS-style exit codes (high bit
      # set, no ordinary status bits) as crashes — confirm against the
      # Windows exit-code conventions.
      return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
    else:
      # Timed out tests will have exit_code -signal.SIGTERM.
      if self.output.timed_out:
        return False
      # On POSIX a negative exit code means the process died from a signal.
      return self.output.exit_code < 0

  def HasTimedOut(self):
    return self.output.timed_out

  def HasFailed(self):
    """Failure classification, inverted for negative tests (which must fail)."""
    execution_failed = self.test.DidFail(self.output)
    if self.test.IsNegative():
      return not execution_failed
    else:
      return execution_failed
657
658
def KillProcessWithID(pid, signal_to_send=signal.SIGTERM):
  """Terminate process `pid` with `signal_to_send` (whole tree on Windows)."""
  if not utils.IsWindows():
    os.kill(pid, signal_to_send)
    return
  # taskkill /T kills the process tree; /F forces termination.
  os.popen('taskkill /T /F /PID %d' % pid)
664
665
# Polling backoff used by RunProcess while waiting for a child to exit.
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25

SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h

def Win32SetErrorMode(mode):
  """Set the Win32 process error mode; return the previous mode.

  Returns SEM_INVALID_VALUE when the mode cannot be changed (e.g. not on
  Windows, or ctypes is unavailable).
  """
  prev_error_mode = SEM_INVALID_VALUE
  try:
    import ctypes
    prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
  # `ctypes` imports fine on every platform, but `ctypes.windll` only exists
  # on Windows — elsewhere the lookup raises AttributeError, which the old
  # `except ImportError` did not catch.
  except (ImportError, AttributeError):
    pass
  return prev_error_mode
681
682
def KillTimedOutProcess(context, pid):
  """Kill a timed-out test process.

  With --abort-on-timeout, SIGABRT is used instead of SIGTERM so the OS can
  produce a core dump for post-mortem analysis of hard-to-reproduce hangs.
  """
  sig = signal.SIGABRT if context.abort_on_timeout else signal.SIGTERM
  KillProcessWithID(pid, sig)
691
692
def RunProcess(context, timeout, args, **rest):
  """Spawn `args` via subprocess and poll until it exits or times out.

  Returns (process, exit_code, timed_out).  On timeout the child is killed
  through KillTimedOutProcess and its final exit code is collected.
  """
  if context.verbose: print("#", " ".join(args))
  popen_args = args
  prev_error_mode = SEM_INVALID_VALUE
  if utils.IsWindows():
    if context.suppress_dialogs:
      # Try to change the error mode to avoid dialogs on fatal errors. Don't
      # touch any existing error mode flags by merging the existing error mode.
      # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
      error_mode = SEM_NOGPFAULTERRORBOX
      prev_error_mode = Win32SetErrorMode(error_mode)
      Win32SetErrorMode(error_mode | prev_error_mode)

  process = subprocess.Popen(
    args = popen_args,
    **rest
  )
  # Restore the pre-spawn error mode so the runner itself is unaffected.
  if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
    Win32SetErrorMode(prev_error_mode)
  # Compute the end time - if the process crosses this limit we
  # consider it timed out.
  if timeout is None: end_time = None
  else: end_time = time.time() + timeout
  timed_out = False
  # Repeatedly check the exit code from the process in a
  # loop and keep track of whether or not it times out.
  exit_code = None
  sleep_time = INITIAL_SLEEP_TIME

  while exit_code is None:
    if (not end_time is None) and (time.time() >= end_time):
      # Kill the process and wait for it to exit.
      KillTimedOutProcess(context, process.pid)
      exit_code = process.wait()
      timed_out = True
    else:
      exit_code = process.poll()
      # Exponential backoff capped at MAX_SLEEP_TIME to avoid busy-waiting.
      time.sleep(sleep_time)
      sleep_time = sleep_time * SLEEP_TIME_FACTOR
      if sleep_time > MAX_SLEEP_TIME:
        sleep_time = MAX_SLEEP_TIME
  return (process, exit_code, timed_out)
735
736
def PrintError(message):
  """Write `message` to stderr, followed by a newline.

  (Parameter renamed from `str`, which shadowed the builtin.)
  """
  sys.stderr.write(message)
  sys.stderr.write('\n')
740
741
def CheckedUnlink(name):
  """Delete file `name`, retrying on Windows EACCES; other errors are logged."""
  while True:
    try:
      os.unlink(name)
    except OSError as e:
      # On Windows unlink() fails if another process (typically a virus scanner
      # or the indexing service) has the file open. Those processes keep a
      # file open for a short time only, so yield and try again; it'll succeed.
      if sys.platform == 'win32' and e.errno == errno.EACCES:
        time.sleep(0)
        continue
      PrintError("os.unlink() " + str(e))
    break
755
def Execute(args, context, timeout=None, env=None, disable_core_files=False, stdin=None):
  """Run `args` as a subprocess, capturing stdout/stderr via temp files.

  `env` entries extend (and override) the inherited environment; NODE_PATH
  and NODE_REPL_EXTERNAL_MODULE are always removed so they cannot influence
  the test.  Returns a CommandOutput.
  """
  (fd_out, outname) = tempfile.mkstemp()
  (fd_err, errname) = tempfile.mkstemp()

  if env is None:
    env = {}
  env_copy = os.environ.copy()

  # Remove NODE_PATH
  if "NODE_PATH" in env_copy:
    del env_copy["NODE_PATH"]

  # Remove NODE_REPL_EXTERNAL_MODULE
  if "NODE_REPL_EXTERNAL_MODULE" in env_copy:
    del env_copy["NODE_REPL_EXTERNAL_MODULE"]

  # Extend environment
  for key, value in env.items():
    env_copy[key] = value

  preexec_fn = None

  if disable_core_files and not utils.IsWindows():
    def disableCoreFiles():
      # Runs in the child just before exec: forbid core dumps.
      import resource
      resource.setrlimit(resource.RLIMIT_CORE, (0,0))
    preexec_fn = disableCoreFiles

  (process, exit_code, timed_out) = RunProcess(
    context,
    timeout,
    args = args,
    stdin = stdin,
    stdout = fd_out,
    stderr = fd_err,
    env = env_copy,
    preexec_fn = preexec_fn
  )
  os.close(fd_out)
  os.close(fd_err)
  # Use context managers so the file objects are closed promptly instead of
  # leaking until garbage collection (the old code never closed them).
  with open(outname, encoding='utf8') as out_file:
    output = out_file.read()
  with open(errname, encoding='utf8') as err_file:
    errors = err_file.read()
  CheckedUnlink(outname)
  CheckedUnlink(errname)

  return CommandOutput(exit_code, timed_out, output, errors)
802
803
def CarCdr(path):
  """Split a sequence into (head, tail); (None, []) for an empty sequence."""
  if path:
    return (path[0], path[1:])
  return (None, [ ])
809
810
class TestConfiguration(object):
  """Per-suite configuration rooted at a directory with a .status file."""

  def __init__(self, context, root, section):
    self.context = context
    self.root = root
    self.section = section

  def Contains(self, path, file):
    """True when every pattern in `path` matches the start of `file`."""
    if len(path) > len(file):
      return False
    return all(pattern.match(NormalizePath(component))
               for pattern, component in zip(path, file))

  def GetTestStatus(self, sections, defs):
    # Read <root>/<section>.status if present; it lists expected outcomes.
    status_file = join(self.root, '%s.status' % self.section)
    if exists(status_file):
      ReadConfigurationInto(status_file, sections, defs)
829
830
class TestSuite(object):
  """Named collection of tests; base class for test repositories."""

  def __init__(self, name):
    self.name = name

  def GetName(self):
    return self.name
838
839
class TestRepository(TestSuite):
  """A test suite backed by a directory containing a testcfg.py module."""

  def __init__(self, path):
    normalized_path = abspath(path)
    super(TestRepository, self).__init__(basename(normalized_path))
    self.path = normalized_path
    self.is_loaded = False            # config is loaded lazily, at most once
    self.config = None

  def GetConfiguration(self, context):
    """Load (once) and return this suite's configuration from testcfg.py."""
    if self.is_loaded:
      return self.config
    self.is_loaded = True

    module = get_module('testcfg', self.path)
    self.config = module.GetConfiguration(context, self.path)
    # Fold extra node arguments into the suite's flag list.
    if hasattr(self.config, 'additional_flags'):
      self.config.additional_flags += context.node_args
    else:
      self.config.additional_flags = context.node_args
    return self.config

  def GetBuildRequirements(self, path, context):
    return self.GetConfiguration(context).GetBuildRequirements()

  def AddTestsToList(self, result, current_path, path, context, arch, mode):
    tests = self.GetConfiguration(context).ListTests(current_path, path,
                                                     arch, mode)
    result += tests
    # --repeat support: append deep copies so repeated runs don't share state.
    for i in range(1, context.repeat):
      result += copy.deepcopy(tests)

  def GetTestStatus(self, context, sections, defs):
    self.GetConfiguration(context).GetTestStatus(sections, defs)
874
875
class LiteralTestSuite(TestSuite):
  """The root suite: a fixed list of TestRepository objects."""

  def __init__(self, tests_repos, test_root):
    super(LiteralTestSuite, self).__init__('root')
    self.tests_repos = tests_repos
    self.test_root = test_root

  def GetBuildRequirements(self, path, context):
    # Only repositories whose name matches the head of `path` contribute.
    (name, rest) = CarCdr(path)
    result = [ ]
    for test in self.tests_repos:
      if not name or name.match(test.GetName()):
        result += test.GetBuildRequirements(rest, context)
    return result

  def ListTests(self, current_path, path, context, arch, mode):
    """Collect matching test cases from every repository, sorted by name."""
    (name, rest) = CarCdr(path)
    result = [ ]
    for test in self.tests_repos:
      test_name = test.GetName()
      if not name or name.match(test_name):
        full_path = current_path + [test_name]
        test.AddTestsToList(result, full_path, path, context, arch, mode)
    result.sort(key=lambda x: x.GetName())
    return result

  def GetTestStatus(self, context, sections, defs):
    # Just read the test configuration from root_path/root.status.
    root = TestConfiguration(context, self.test_root, 'root')
    root.GetTestStatus(sections, defs)
    for tests_repos in self.tests_repos:
      tests_repos.GetTestStatus(context, sections, defs)
907
908
# Multipliers applied to the base --timeout, keyed by architecture then build
# mode; consumed by Context.GetTimeout().
TIMEOUT_SCALEFACTOR = {
    'arm'       : { 'debug' :  8, 'release' : 3 }, # The ARM buildbots are slow.
    'riscv64'   : { 'debug' :  8, 'release' : 3 }, # The riscv devices are slow.
    'ia32'      : { 'debug' :  4, 'release' : 1 },
    'ppc'       : { 'debug' :  4, 'release' : 1 },
    's390'      : { 'debug' :  4, 'release' : 1 } }
915
916
class Context(object):
  """Run-wide settings shared by all test cases."""

  def __init__(self, workspace, verbose, vm, args, expect_fail,
               timeout, processor, suppress_dialogs,
               store_unexpected_output, repeat, abort_on_timeout):
    self.workspace = workspace
    self.verbose = verbose
    self.vm = vm                      # explicit node binary path, or None
    self.node_args = args             # extra flags passed to the node binary
    self.expect_fail = expect_fail    # True when tests are expected to fail
    self.timeout = timeout            # base timeout (scaled in GetTimeout)
    self.processor = processor        # callable that post-processes a command
    self.suppress_dialogs = suppress_dialogs
    self.store_unexpected_output = store_unexpected_output
    self.repeat = repeat              # how many times each test is listed
    self.abort_on_timeout = abort_on_timeout
    self.v8_enable_inspector = True
    self.node_has_crypto = True

  def GetVm(self, arch, mode):
    """Return the path of the node binary to test for (arch, mode)."""
    if self.vm is not None:
      return self.vm
    if arch == 'none':
      name = 'out/Debug/node' if mode == 'debug' else 'out/Release/node'
    else:
      name = 'out/%s.%s/node' % (arch, mode)

    # Currently GYP does not support output_dir for MSVS.
    # http://code.google.com/p/gyp/issues/detail?id=40
    # It will put the builds into Release/node.exe or Debug/node.exe
    if utils.IsWindows():
      if not exists(name + '.exe'):
        name = name.replace('out/', '')
      name = os.path.abspath(name + '.exe')

    if not exists(name):
      raise ValueError('Could not find executable. Should be ' + name)

    return name

  def GetTimeout(self, mode, section=''):
    """Return the timeout for one test, scaled by architecture and section."""
    # NOTE(review): ARCH_GUESS is defined elsewhere in this file; an
    # architecture missing from TIMEOUT_SCALEFACTOR would raise KeyError here.
    timeout = self.timeout * TIMEOUT_SCALEFACTOR[ARCH_GUESS or 'ia32'][mode]
    if section == 'pummel' or section == 'benchmark':
      timeout = timeout * 6
    # We run all WPT from one subset in the same process using workers.
    # As the number of the tests grow, it can take longer to run some of the
    # subsets, but it's still overall faster than running them in different
    # processes.
    elif section == 'wpt':
      timeout = timeout * 12
    return timeout
968
def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode, measure_flakiness):
  """Run the cases on the selected progress indicator; returns its result dict."""
  indicator_factory = PROGRESS_INDICATORS[progress]
  indicator = indicator_factory(cases_to_run, flaky_tests_mode, measure_flakiness)
  return indicator.Run(tasks)
972
973# -------------------------------------------
974# --- T e s t   C o n f i g u r a t i o n ---
975# -------------------------------------------
976
977
# Outcome and modifier keywords recognized in .status files; RUN, SKIP,
# DONTCARE and KEEP_RETRYING also double as --flaky-tests modes.
RUN = 'run'
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
FLAKY = 'flaky'
DONTCARE = 'dontcare'
KEEP_RETRYING = 'keep_retrying'
989
class Expression(object):
  """Base class for nodes of a parsed .status condition expression.

  Subclasses implement Evaluate() (boolean value) and/or GetOutcomes()
  (set of outcome strings)."""
  pass
992
993
class Constant(Expression):
  """A literal truth value ('true' / 'false' in a condition)."""

  def __init__(self, value):
    self.value = value

  def Evaluate(self, env, defs):
    # Constants ignore the environment and definitions entirely.
    return self.value
1001
1002
class Variable(Expression):
  """A '$name' reference, resolved against the evaluation environment."""

  def __init__(self, name):
    self.name = name

  def GetOutcomes(self, env, defs):
    # An unknown variable contributes nothing.
    if self.name not in env:
      return set()
    return set([env[self.name]])
1011
1012
class Outcome(Expression):
  """A bare identifier: either a 'def'-defined sub-expression to expand,
  or a literal outcome name."""

  def __init__(self, name):
    self.name = name

  def GetOutcomes(self, env, defs):
    expansion = defs.get(self.name)
    if expansion is not None:
      return expansion.GetOutcomes(env, defs)
    return set([self.name])
1023
1024
class Operation(Expression):
  """A binary expression node: '&&', '||' (or ','), '==' or 'if'."""

  def __init__(self, left, op, right):
    self.left = left
    self.op = op
    self.right = right

  def Evaluate(self, env, defs):
    """Boolean value of this node."""
    op = self.op
    if op in ('||', ','):
      return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
    if op == 'if':
      # A bare conditional never evaluates to true as a boolean.
      return False
    if op == '==':
      left_set = self.left.GetOutcomes(env, defs)
      right_set = self.right.GetOutcomes(env, defs)
      return bool(left_set & right_set)
    assert op == '&&'
    return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)

  def GetOutcomes(self, env, defs):
    """Set of outcomes this node produces."""
    op = self.op
    if op in ('||', ','):
      return self.left.GetOutcomes(env, defs) | self.right.GetOutcomes(env, defs)
    if op == 'if':
      # Outcomes apply only when the guard holds.
      if self.right.Evaluate(env, defs):
        return self.left.GetOutcomes(env, defs)
      return set()
    assert op == '&&'
    return self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
1055
1056
def IsAlpha(string):
  """Return True if every character of *string* is a letter, digit or '_'.

  The empty string vacuously qualifies, matching the original loop's
  behavior.  (The parameter was renamed from 'str', which shadowed the
  builtin; callers pass it positionally.)
  """
  return all(ch.isalpha() or ch.isdigit() or ch == '_' for ch in string)
1062
1063
class Tokenizer(object):
  """A simple string tokenizer that chops expressions into variables,
  parens and operators"""

  def __init__(self, expr):
    self.index = 0        # cursor into expr
    self.expr = expr
    self.length = len(expr)
    self.tokens = None    # populated by Tokenize()

  def Current(self, length = 1):
    """Return the next *length* characters, or "" when fewer remain."""
    if not self.HasMore(length): return ""
    return self.expr[self.index:self.index+length]

  def HasMore(self, length = 1):
    # Bug fix: the previous check (`index < self.length + (length - 1)`)
    # could claim two characters remained when only one did, letting
    # Current(2) return a single trailing character.  Require `length`
    # whole characters.  Equivalent for length == 1.
    return self.index + length <= self.length

  def Advance(self, count = 1):
    self.index = self.index + count

  def AddToken(self, token):
    self.tokens.append(token)

  def SkipSpaces(self):
    while self.HasMore() and self.Current().isspace():
      self.Advance()

  def Tokenize(self):
    """Split the expression into a token list; returns None if malformed."""
    self.tokens = [ ]
    while self.HasMore():
      self.SkipSpaces()
      if not self.HasMore():
        # Bug fix: trailing whitespace previously made the whole
        # expression malformed (returned None); just end the stream.
        break
      if self.Current() == '(':
        self.AddToken('(')
        self.Advance()
      elif self.Current() == ')':
        self.AddToken(')')
        self.Advance()
      elif self.Current() == '$':
        self.AddToken('$')
        self.Advance()
      elif self.Current() == ',':
        self.AddToken(',')
        self.Advance()
      elif IsAlpha(self.Current()):
        buf = ""
        while self.HasMore() and IsAlpha(self.Current()):
          buf += self.Current()
          self.Advance()
        self.AddToken(buf)
      elif self.Current(2) == '&&':
        self.AddToken('&&')
        self.Advance(2)
      elif self.Current(2) == '||':
        self.AddToken('||')
        self.Advance(2)
      elif self.Current(2) == '==':
        self.AddToken('==')
        self.Advance(2)
      else:
        # Unrecognized character: the expression is malformed.
        return None
    return self.tokens
1127
1128
class Scanner(object):
  """A simple scanner that can serve out tokens from a given list"""

  def __init__(self, tokens):
    self.tokens = tokens
    self.length = len(tokens)
    self.index = 0

  def HasMore(self):
    """True while unconsumed tokens remain."""
    return self.index < self.length

  def Current(self):
    """The token under the cursor; does not advance."""
    return self.tokens[self.index]

  def Advance(self):
    """Move the cursor one token forward."""
    self.index += 1
1145
1146
def ParseAtomicExpression(scan):
  """Parse one atom: 'true'/'false', an outcome word, a '$variable', or a
  parenthesized sub-expression.  Returns None on a parse error."""
  token = scan.Current()
  if token == "true":
    scan.Advance()
    return Constant(True)
  if token == "false":
    scan.Advance()
    return Constant(False)
  if IsAlpha(token):
    scan.Advance()
    return Outcome(token.lower())
  if token == '$':
    scan.Advance()
    name = scan.Current()
    if not IsAlpha(name):
      return None
    scan.Advance()
    return Variable(name.lower())
  if token == '(':
    scan.Advance()
    inner = ParseLogicalExpression(scan)
    # Require a matching close paren.
    if (not inner) or (scan.Current() != ')'):
      return None
    scan.Advance()
    return inner
  return None
1174
1175
BINARIES = ['==']
def ParseOperatorExpression(scan):
  """Parse '==' chains; the right operand recurses, so '==' associates right."""
  left = ParseAtomicExpression(scan)
  if not left:
    return None
  while scan.HasMore() and scan.Current() in BINARIES:
    operator = scan.Current()
    scan.Advance()
    right = ParseOperatorExpression(scan)
    if not right:
      return None
    left = Operation(left, operator, right)
  return left
1188
1189
def ParseConditionalExpression(scan):
  """Parse '<expr> if <guard>' clauses (left-associative)."""
  left = ParseOperatorExpression(scan)
  if not left:
    return None
  while scan.HasMore() and scan.Current() == 'if':
    scan.Advance()
    guard = ParseOperatorExpression(scan)
    if not guard:
      return None
    left = Operation(left, 'if', guard)
  return left
1200
1201
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
  """Parse '&&', '||' and ',' chains (left-associative)."""
  left = ParseConditionalExpression(scan)
  if not left:
    return None
  while scan.HasMore() and scan.Current() in LOGICALS:
    operator = scan.Current()
    scan.Advance()
    right = ParseConditionalExpression(scan)
    if not right:
      return None
    left = Operation(left, operator, right)
  return left
1214
1215
def ParseCondition(expr):
  """Parses a logical expression into an Expression object"""
  def report_malformed():
    # All failure modes report the same diagnostic and yield None.
    print("Malformed expression: '%s'" % expr)
    return None
  tokens = Tokenizer(expr).Tokenize()
  if not tokens:
    return report_malformed()
  scan = Scanner(tokens)
  ast = ParseLogicalExpression(scan)
  if not ast:
    return report_malformed()
  if scan.HasMore():
    # Leftover tokens mean the expression did not parse completely.
    return report_malformed()
  return ast
1231
1232
class Configuration(object):
  """The parsed contents of a configuration file"""

  def __init__(self, sections, defs):
    self.sections = sections
    self.defs = defs

  def ClassifyTests(self, cases, env):
    """Attach the expected-outcome set to each case.

    Returns (cases, unused_rules): the cases (each with .outcomes set) and
    the set of rules from active sections that matched no case.
    """
    sections = [s for s in self.sections
                if s.condition.Evaluate(env, self.defs)]
    # Flatten without functools.reduce: reduce is not a builtin on
    # Python 3, so the old `reduce(list.__add__, ...)` was fragile.
    all_rules = [rule for section in sections for rule in section.rules]
    unused_rules = set(all_rules)
    result = []
    for case in cases:
      matches = [r for r in all_rules if r.Contains(case.path)]
      outcomes = set()
      for rule in matches:
        outcomes |= rule.GetOutcomes(env, self.defs)
      unused_rules.difference_update(matches)
      case.outcomes = outcomes or set([PASS])
      # slow tests may also just pass.
      if SLOW in case.outcomes:
        case.outcomes.add(PASS)
      result.append(case)
    return result, unused_rules
1256
1257
class Section(object):
  """A section of the configuration file.  Sections are enabled or
  disabled prior to running the tests, based on their conditions"""

  def __init__(self, condition):
    # condition: an Expression gating whether this section's rules apply.
    self.condition = condition
    self.rules = []

  def AddRule(self, rule):
    """Append *rule* to this section."""
    self.rules.append(rule)
1268
1269
class Rule(object):
  """A single rule that specifies the expected outcome for a single
  test."""

  def __init__(self, raw_path, path, value):
    self.raw_path = raw_path   # the path text as written in the file
    self.path = path           # list of Pattern components
    self.value = value         # Expression yielding the outcome set

  def GetOutcomes(self, env, defs):
    """The outcomes this rule assigns, per its value expression."""
    return self.value.GetOutcomes(env, defs)

  def Contains(self, path):
    """True when every pattern component matches the corresponding leading
    component of *path* (which may have extra trailing components)."""
    if len(path) < len(self.path):
      return False
    return all(pattern.match(component)
               for pattern, component in zip(self.path, path))
1289
1290
# Line shapes recognized by ReadConfigurationInto:
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')  # [condition] section header
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')  # test-path : outcome-expr
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')  # def name = expression
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w_.\-/]+)$')  # prefix path
1295
1296
def ReadConfigurationInto(path, sections, defs):
  """Parse the .status file at *path*.

  Appends Section objects to *sections* and 'def' expressions into the
  *defs* dict.  Returns False when an outcome expression fails to parse
  (earlier results may already have been appended); raises Exception on
  an unrecognizable line.
  """
  current_section = Section(Constant(True))
  sections.append(current_section)
  prefix = []
  for line in utils.ReadLinesFrom(path):
    header_match = HEADER_PATTERN.match(line)
    if header_match:
      condition_str = header_match.group(1).strip()
      condition = ParseCondition(condition_str)
      new_section = Section(condition)
      sections.append(new_section)
      current_section = new_section
      continue
    rule_match = RULE_PATTERN.match(line)
    if rule_match:
      # Renamed from 'path', which shadowed the file-path parameter above.
      rule_path = prefix + SplitPath(rule_match.group(1).strip())
      value_str = rule_match.group(2).strip()
      value = ParseCondition(value_str)
      if not value:
        return False
      current_section.AddRule(Rule(rule_match.group(1), rule_path, value))
      continue
    def_match = DEF_PATTERN.match(line)
    if def_match:
      name = def_match.group(1).lower()
      value = ParseCondition(def_match.group(2).strip())
      if not value:
        return False
      defs[name] = value
      continue
    prefix_match = PREFIX_PATTERN.match(line)
    if prefix_match:
      prefix = SplitPath(prefix_match.group(1).strip())
      continue
    raise Exception("Malformed line: '%s'." % line)
1332
1333
1334# ---------------
1335# --- M a i n ---
1336# ---------------
1337
1338
1339ARCH_GUESS = utils.GuessArchitecture()
1340
1341
def BuildOptions():
  """Construct and return the optparse.OptionParser with every command-line
  flag this runner accepts; parsing and validation happen in ProcessOptions."""
  result = optparse.OptionParser()
  result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
      default='release')
  result.add_option("-v", "--verbose", help="Verbose output",
      default=False, action="store_true")
  result.add_option('--logfile', dest='logfile',
      help='write test output to file. NOTE: this only applies the tap progress indicator')
  result.add_option("-p", "--progress",
      help="The style of progress indicator (%s)" % ", ".join(PROGRESS_INDICATORS.keys()),
      choices=list(PROGRESS_INDICATORS.keys()), default="mono")
  result.add_option("--report", help="Print a summary of the tests to be run",
      default=False, action="store_true")
  result.add_option("-s", "--suite", help="A test suite",
      default=[], action="append")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
      default=120, type="int")
  result.add_option("--arch", help='The architecture to run tests for',
      default='none')
  result.add_option("--snapshot", help="Run the tests with snapshot turned on",
      default=False, action="store_true")
  result.add_option("--special-command", default=None)
  result.add_option("--node-args", dest="node_args", help="Args to pass through to Node",
      default=[], action="append")
  result.add_option("--expect-fail", dest="expect_fail",
      help="Expect test cases to fail", default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
      default=False, action="store_true")
  result.add_option("--worker", help="Run parallel tests inside a worker context",
      default=False, action="store_true")
  result.add_option("--check-deopts", help="Check tests for permanent deoptimizations",
      default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
      default=False, action="store_true")
  result.add_option("--flaky-tests",
      help="Regard tests marked as flaky (run|skip|dontcare|keep_retrying)",
      default="run")
  result.add_option("--measure-flakiness",
      help="When a test fails, re-run it x number of times",
      default=0, type="int")
  result.add_option("--skip-tests",
      help="Tests that should not be executed (comma-separated)",
      default="")
  result.add_option("--warn-unused", help="Report unused rules",
      default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run, 0=use number of cores",
      default=0, type="int")
  result.add_option("-J", help="For legacy compatibility, has no effect",
      default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
      default=False, action="store_true")
  result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
        dest="suppress_dialogs", default=True, action="store_true")
  result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
        dest="suppress_dialogs", action="store_false")
  result.add_option("--shell", help="Path to node executable", default=None)
  result.add_option("--store-unexpected-output",
      help="Store the temporary JS files from tests that fails",
      dest="store_unexpected_output", default=True, action="store_true")
  result.add_option("--no-store-unexpected-output",
      help="Deletes the temporary JS files from tests that fails",
      dest="store_unexpected_output", action="store_false")
  result.add_option("-r", "--run",
      help="Divide the tests in m groups (interleaved) and run tests from group n (--run=n,m with n < m)",
      default="")
  result.add_option('--temp-dir',
      help='Optional path to change directory used for tests', default=False)
  result.add_option('--test-root',
      help='Optional path to change test directory', dest='test_root', default=None)
  result.add_option('--repeat',
      help='Number of times to repeat given tests',
      default=1, type="int")
  result.add_option('--abort-on-timeout',
      help='Send SIGABRT instead of SIGTERM to kill processes that time out',
      default=False, action="store_true", dest="abort_on_timeout")
  result.add_option("--type",
      help="Type of build (simple, fips, coverage)",
      default=None)
  return result
1421
1422
def ProcessOptions(options):
  """Normalize and validate parsed options in place.

  Returns False (after printing a diagnostic) when any option is invalid.
  Also publishes the module-level VERBOSE flag.
  """
  global VERBOSE
  VERBOSE = options.verbose
  options.arch = options.arch.split(',')
  options.mode = options.mode.split(',')
  options.run = options.run.split(',')
  # Split at commas and filter out all the empty strings.
  options.skip_tests = [test for test in options.skip_tests.split(',') if test]
  if options.run == [""]:
    options.run = None
  elif len(options.run) != 2:
    print("The run argument must be two comma-separated integers.")
    return False
  else:
    try:
      group, total = [int(part) for part in options.run]
    except ValueError:
      print("Could not parse the integers from the run argument.")
      return False
    options.run = [group, total]
    if group < 0 or total < 0:
      print("The run argument cannot have negative integers.")
      return False
    if group >= total:
      print("The test group to run (n) must be smaller than number of groups (m).")
      return False
  if options.j == 0:
    # inherit JOBS from environment if provided. some virtualised systems
    # tends to exaggerate the number of available cpus/cores.
    env_jobs = os.environ.get('JOBS')
    options.j = multiprocessing.cpu_count() if env_jobs is None else int(env_jobs)
  elif options.J:
    # If someone uses -j and legacy -J, let them know that we will be respecting
    # -j and ignoring -J, which is the opposite of what we used to do before -J
    # became a legacy no-op.
    print('Warning: Legacy -J option is ignored. Using the -j option.')
  if options.flaky_tests not in [RUN, SKIP, DONTCARE, KEEP_RETRYING]:
    print("Unknown flaky-tests mode %s" % options.flaky_tests)
    return False
  return True
1462
1463
# Summary block printed by the --report option; %-formatted with a dict of
# counts computed in Main().
REPORT_TEMPLATE = """\
Total: %(total)i tests
 * %(skipped)4d tests will be skipped
 * %(pass)4d tests are expected to pass
 * %(fail_ok)4d tests are expected to fail that we won't fix
 * %(fail)4d tests are expected to fail that we should fix\
"""
1471
1472
class Pattern(object):
  """A single path-component pattern where '*' matches any run of
  characters; compiled lazily to an anchored regex on first use."""

  def __init__(self, pattern):
    self.pattern = pattern
    self.compiled = None  # compiled regex, cached by match()

  def match(self, target):
    """Return a regex match object when *target* matches, else None.

    (Parameter renamed from 'str', which shadowed the builtin; callers
    pass it positionally.)
    """
    if not self.compiled:
      pattern = "^" + self.pattern.replace('*', '.*') + "$"
      self.compiled = re.compile(pattern)
    return self.compiled.match(target)

  def __str__(self):
    return self.pattern
1487
1488
def SplitPath(path_arg):
  """Split a '/'-separated path into Pattern components, dropping empties."""
  pieces = (piece.strip() for piece in path_arg.split('/'))
  return [Pattern(piece) for piece in pieces if piece]
1492
def NormalizePath(path, prefix='test/'):
  """Canonicalize a test path: forward slashes, no leading *prefix*, and
  no trailing '.js' / '.mjs' extension."""
  prefix = prefix.replace('\\', '/')
  path = path.replace('\\', '/')
  if path.startswith(prefix):
    path = path[len(prefix):]
  for extension in ('.js', '.mjs'):
    if path.endswith(extension):
      path = path[:-len(extension)]
      break
  return path
1504
def GetSpecialCommandProcessor(value):
  """Build the function that maps a test command through --special-command.

  Without a value (or without an '@' placeholder) the identity function is
  returned; otherwise the words before '@' are prepended and the words
  after it appended to each command.
  """
  if value and '@' in value:
    prefix, _, suffix = value.partition('@')
    prefix_args = unquote(prefix).split()
    suffix_args = unquote(suffix).split()
    def ExpandCommand(args):
      return prefix_args + args + suffix_args
    return ExpandCommand
  def ExpandCommand(args):
    return args
  return ExpandCommand
1517
def GetSuites(test_root):
  """List the subdirectories of *test_root* that contain a testcfg.py."""
  def IsSuite(path):
    return isdir(path) and exists(join(path, 'testcfg.py'))
  return [entry for entry in os.listdir(test_root)
          if IsSuite(join(test_root, entry))]
1522
1523
def FormatTime(d):
  """Format a duration of *d* seconds as 'MM:SS.mmm'."""
  millis = round(d * 1000) % 1000
  return "%s%03i" % (time.strftime("%M:%S.", time.gmtime(d)), millis)
1527
1528
def FormatTimedelta(td):
  """Format a timedelta as 'MM:SS.mmm' via FormatTime."""
  if hasattr(td, 'total_seconds'):
    seconds = td.total_seconds()
  else: # python2.6 compat
    seconds = td.seconds + (td.microseconds / 10.0**6)
  return FormatTime(seconds)
1535
1536
def PrintCrashed(code):
  """Describe a crashed test exit; on POSIX the negated code is the signal."""
  if utils.IsWindows():
    return "CRASHED"
  return "CRASHED (Signal: %d)" % -code
1542
1543
# these suites represent special cases that should not be run as part of the
# default JavaScript test-run, e.g., internet/ requires a network connection,
# addons/ requires compilation.
# (Keep the list alphabetically sorted.)
IGNORED_SUITES = [
  'addons',
  'benchmark',
  'doctool',
  'embedding',
  'internet',
  'js-native-api',
  'node-api',
  'pummel',
  'tick-processor',
  'v8-updates'
]
1559
1560
def ArgsToTestPaths(test_root, args, suites):
  """Translate CLI test arguments into SplitPath'd path patterns.

  With no args (or the literal 'default'), every suite not listed in
  IGNORED_SUITES is included.  A bare subsystem name (letters and dashes
  only, not itself a suite) expands to the glob '*/test*-<name>-*'.
  """
  if not args or 'default' in args:
    default_suites = [suite for suite in suites if suite not in IGNORED_SUITES]
    args = [arg for arg in args if arg != 'default'] + default_suites
  subsystem_regex = re.compile(r'^[a-zA-Z-]*$')
  def is_subsystem(arg):
    return subsystem_regex.match(arg) and (arg not in suites)
  mapped_args = ["*/test*-%s-*" % arg if is_subsystem(arg) else arg
                 for arg in args]
  return [SplitPath(NormalizePath(arg)) for arg in mapped_args]
1570
1571
def get_env_type(vm, options_type, context):
  """Determine the 'type' env value: an explicit --type wins; otherwise probe
  the binary's OpenSSL version to detect a FIPS build."""
  if options_type is not None:
    return options_type
  # 'simple' is the default value for 'env_type'.
  ssl_ver = Execute([vm, '-p', 'process.versions.openssl'], context).stdout
  return 'fips' if 'fips' in ssl_ver else 'simple'
1582
1583
def get_asan_state():
  """Report whether the ASAN environment variable is set: 'on' or 'off'."""
  return "off" if os.environ.get('ASAN') is None else "on"
1586
1587
def Main():
  """Entry point: parse options, discover and classify tests, run them.

  Returns the process exit code (0 on success, 1 on failure or bad usage).
  """
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1

  # Force UTF-8 output so test names/labels print cleanly everywhere.
  stream = sys.stdout
  try:
    sys.stdout.reconfigure(encoding='utf8')
  except AttributeError:
    # Python < 3.7 does not have reconfigure
    stream = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
  ch = logging.StreamHandler(stream)
  logger.addHandler(ch)
  logger.setLevel(logging.INFO)
  if options.logfile:
    fh = logging.FileHandler(options.logfile, encoding='utf-8', mode='w')
    logger.addHandler(fh)

  # The workspace is the repository root (this script lives in tools/).
  workspace = abspath(join(dirname(sys.argv[0]), '..'))
  test_root = join(workspace, 'test')
  if options.test_root is not None:
    test_root = options.test_root
  suites = GetSuites(test_root)
  repositories = [TestRepository(join(test_root, name)) for name in suites]
  repositories += [TestRepository(a) for a in options.suite]

  root = LiteralTestSuite(repositories, test_root)
  paths = ArgsToTestPaths(test_root, args, suites)

  # Check for --valgrind option. If enabled, we overwrite the special
  # command flag with a command that uses the run-valgrind.py script.
  if options.valgrind:
    run_valgrind = join(workspace, "tools", "run-valgrind.py")
    options.special_command = "python -u " + run_valgrind + " @"

  if options.check_deopts:
    options.node_args.append("--trace-opt")
    options.node_args.append("--trace-file-names")
    # --always-opt is needed because many tests do not run long enough for the
    # optimizer to kick in, so this flag will force it to run.
    options.node_args.append("--always-opt")
    options.progress = "deopts"

  if options.worker:
    run_worker = join(workspace, "tools", "run-worker.js")
    options.node_args.append(run_worker)

  processor = GetSpecialCommandProcessor(options.special_command)

  context = Context(workspace,
                    VERBOSE,
                    options.shell,
                    options.node_args,
                    options.expect_fail,
                    options.timeout,
                    processor,
                    options.suppress_dialogs,
                    options.store_unexpected_output,
                    options.repeat,
                    options.abort_on_timeout)

  # Get status for tests
  sections = [ ]
  defs = { }
  root.GetTestStatus(context, sections, defs)
  config = Configuration(sections, defs)

  # List the tests
  all_cases = [ ]
  all_unused = [ ]
  unclassified_tests = [ ]
  globally_unused_rules = None
  for path in paths:
    for arch in options.arch:
      for mode in options.mode:
        vm = context.GetVm(arch, mode)
        if not exists(vm):
          print("Can't find shell executable: '%s'" % vm)
          continue
        # Ask the binary itself for its architecture; skip this combination
        # if the probe fails.
        archEngineContext = Execute([vm, "-p", "process.arch"], context)
        vmArch = archEngineContext.stdout.rstrip()
        if archEngineContext.exit_code != 0 or vmArch == "undefined":
          print("Can't determine the arch of: '%s'" % vm)
          print(archEngineContext.stderr.rstrip())
          continue
        env = {
          'mode': mode,
          'system': utils.GuessOS(),
          'arch': vmArch,
          'type': get_env_type(vm, options.type, context),
          'asan': get_asan_state(),
        }
        test_list = root.ListTests([], path, context, arch, mode)
        unclassified_tests += test_list
        cases, unused_rules = config.ClassifyTests(test_list, env)
        # A rule counts as globally unused only if no arch/mode pass used it.
        if globally_unused_rules is None:
          globally_unused_rules = set(unused_rules)
        else:
          globally_unused_rules = (
              globally_unused_rules.intersection(unused_rules))
        all_cases += cases
        all_unused.append(unused_rules)

  # We want to skip the inspector tests if node was built without the inspector.
  # NOTE(review): 'vm' here is whatever the last loop iteration left behind;
  # if the loops above never ran, this raises NameError — confirm intent.
  has_inspector = Execute([vm,
      '-p', 'process.features.inspector'], context)
  if has_inspector.stdout.rstrip() == 'false':
    context.v8_enable_inspector = False

  has_crypto = Execute([vm,
      '-p', 'process.versions.openssl'], context)
  if has_crypto.stdout.rstrip() == 'undefined':
    context.node_has_crypto = False

  if options.cat:
    # --cat: print each selected test's source (deduplicated) and exit.
    visited = set()
    for test in unclassified_tests:
      key = tuple(test.path)
      if key in visited:
        continue
      visited.add(key)
      print("--- begin source: %s ---" % test.GetLabel())
      source = test.GetSource().strip()
      print(source)
      print("--- end source: %s ---" % test.GetLabel())
    return 0

  if options.warn_unused:
    for rule in globally_unused_rules:
      print("Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path]))

  # Tests read NODE_TEST_DIR; an existing directory is fine (EEXIST ignored).
  tempdir = os.environ.get('NODE_TEST_DIR') or options.temp_dir
  if tempdir:
    os.environ['NODE_TEST_DIR'] = tempdir
    try:
      os.makedirs(tempdir)
    except OSError as exception:
      if exception.errno != errno.EEXIST:
        print("Could not create the temporary directory", options.temp_dir)
        sys.exit(1)

  def should_keep(case):
    # Drop cases matching --skip-tests, marked SKIP, or flaky/slow when
    # --flaky-tests=skip.
    if any((s in case.file) for s in options.skip_tests):
      return False
    elif SKIP in case.outcomes:
      return False
    elif (options.flaky_tests == SKIP) and (set([SLOW, FLAKY]) & case.outcomes):
      return False
    else:
      return True

  cases_to_run = [
    test_case for test_case in all_cases if should_keep(test_case)
  ]

  if options.report:
    print(REPORT_TEMPLATE % {
      'total': len(all_cases),
      'skipped': len(all_cases) - len(cases_to_run),
      'pass': len([t for t in cases_to_run if PASS in t.outcomes]),
      'fail_ok': len([t for t in cases_to_run if t.outcomes == set([FAIL, OKAY])]),
      'fail': len([t for t in cases_to_run if t.outcomes == set([FAIL])])
    })

  if options.run is not None:
    # Must ensure the list of tests is sorted before selecting, to avoid
    # silent errors if this file is changed to list the tests in a way that
    # can be different in different machines
    cases_to_run.sort(key=lambda c: (c.arch, c.mode, c.file))
    # --run=n,m: take every m-th case starting at index n (interleaved split).
    cases_to_run = [ cases_to_run[i] for i
                     in range(options.run[0],
                               len(cases_to_run),
                               options.run[1]) ]
  if len(cases_to_run) == 0:
    print("No tests to run.")
    return 1
  else:
    try:
      start = time.time()
      result = RunTestCases(cases_to_run, options.progress, options.j, options.flaky_tests, options.measure_flakiness)
      exitcode = 0 if result['allPassed'] else 1
      duration = time.time() - start
    except KeyboardInterrupt:
      print("Interrupted")
      return 1

  if options.time:
    # Write the times to stderr to make it easy to separate from the
    # test output.
    print()
    sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
    timed_tests = [ t for t in cases_to_run if not t.duration is None ]
    # NOTE(review): ascending sort + [:20] lists the 20 *fastest* tests;
    # if the slowest were intended, this needs reverse=True — confirm.
    timed_tests.sort(key=lambda x: x.duration)
    for i, entry in enumerate(timed_tests[:20], start=1):
      t = FormatTimedelta(entry.duration)
      sys.stderr.write("%4i (%s) %s\n" % (i, t, entry.GetLabel()))

  if result['allPassed']:
    print("\nAll tests passed.")
  else:
    print("\nFailed tests:")
    for failure in result['failed']:
      print(EscapeCommand(failure.command))

  return exitcode
1795
1796
# Script entry point: propagate Main()'s return value as the exit code.
if __name__ == '__main__':
  sys.exit(Main())
1799