• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2#
3# [VPYTHON:BEGIN]
4# python_version: "3.8"
5# [VPYTHON:END]
6#
7# Copyright 2017, The Android Open Source Project
8#
9# Licensed under the Apache License, Version 2.0 (the "License");
10# you may not use this file except in compliance with the License.
11# You may obtain a copy of the License at
12#
13#     http://www.apache.org/licenses/LICENSE-2.0
14#
15# Unless required by applicable law or agreed to in writing, software
16# distributed under the License is distributed on an "AS IS" BASIS,
17# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18# See the License for the specific language governing permissions and
19# limitations under the License.
20
21"""ART Run-Test TestRunner
22
23The testrunner runs the ART run-tests by simply invoking the script.
24It fetches the list of eligible tests from art/test directory, and list of
25disabled tests from art/test/knownfailures.json. It runs the tests by
26invoking art/test/run-test script and checks the exit value to decide if the
27test passed or failed.
28
29Before invoking the script, first build all the tests dependencies.
30There are two major build targets for building target and host tests
31dependencies:
321) test-art-host-run-test
332) test-art-target-run-test
34
35There are various options to invoke the script which are:
36-t: Either the test name as in art/test or the test name including the variant
37    information. Eg, "-t 001-HelloWorld",
38    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-no-jvmti-001-HelloWorld32"
39-j: Number of thread workers to be used. Eg - "-j64"
40--dry-run: Instead of running the test name, just print its name.
41--verbose
42-b / --build-dependencies: to build the dependencies before running the test
43
44To specify any specific variants for the test, use --<<variant-name>>.
45For eg, for compiler type as optimizing, use --optimizing.
46
47
48In the end, the script will print the failed and skipped tests if any.
49
50"""
51import argparse
52import collections
53
54# b/140161314 diagnostics.
55try:
56  import concurrent.futures
57except Exception:
58  import sys
59  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
60  sys.stdout.flush()
61  raise
62
63import csv
64import datetime
65import fnmatch
66import itertools
67import json
68import multiprocessing
69import os
70import re
71import shlex
72import shutil
73import signal
74import subprocess
75import sys
76import tempfile
77import threading
78import time
79
80import env
81from target_config import target_config
82from device_config import device_config
83from typing import Dict, Set, List
84from functools import lru_cache
85from pathlib import Path
86
# TODO: make it adjustable per tests and for buildbots
#
# Note: this needs to be larger than run-test timeouts, as long as this script
#       does not push the value to run-test. run-test is somewhat complicated:
#                      base: 25m  (large for ASAN)
#        + timeout handling:  2m
#        +   gcstress extra: 20m
#        -----------------------
#                            47m
timeout = 3600 # 60 minutes

if env.ART_TEST_RUN_ON_ARM_FVP:
  # Increase timeout to 600 minutes due to the emulation overhead on FVP.
  timeout = 36000

# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
# that has key as the test name (like 001-HelloWorld), and value as set of
# variants that the test is disabled for.
DISABLED_TEST_CONTAINER = {}

# The Dict contains the list of all possible variants for a given type. For example,
# for key TARGET, the value would be target and host. The list is used to parse
# the test name given as the argument to run.
VARIANT_TYPE_DICT: Dict[str, Set[str]] = {}

# The set of all variant sets that are incompatible and will always be skipped.
NONFUNCTIONAL_VARIANT_SETS = set()

# The set contains all the variants of each type.
TOTAL_VARIANTS_SET: Set[str] = set()

# The colors are used in the output. When a test passes, COLOR_PASS is used,
# and so on. They are cleared in setup_test_env() when stdout is not a tty.
COLOR_ERROR = '\033[91m'
COLOR_PASS = '\033[92m'
COLOR_SKIP = '\033[93m'
COLOR_NORMAL = '\033[0m'

# The set contains the list of all the possible run tests that are in art/test
# directory.
RUN_TEST_SET = set()

# Accumulated by run_test(): (test_name, failure_info) tuples for failures.
failed_tests = []
# Accumulated by run_test(): names of tests skipped via knownfailures.json.
skipped_tests = []

# Flags
n_thread = 0            # 0 means "auto-detect" (resolved in setup_test_env()).
total_test_count = 0    # Computed in run_tests() from tests x variants.
verbose = False
dry_run = False
ignore_skips = False
build = False
dist = False
gdb = False
gdb_arg = ''
dump_cfg = ''
gdb_dex2oat = False
gdb_dex2oat_args = ''
csv_result = None       # Open file object for CSV output, or None if disabled.
csv_writer = None       # csv.writer over csv_result, created by setup_csv_result().
runtime_option = ''
with_agent: List[str] = []
run_test_option: List[str] = []
dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
run_all_configs = False

# Dict containing extra arguments
extra_arguments: Dict[str, List[str]] = { "host" : [], "target" : [] }

# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
_user_input_variants: collections.defaultdict = collections.defaultdict(set)
160
161
class ChildProcessTracker(object):
  """Registry of live child processes so kill_all() can terminate them all.

  wait() registers a process for the duration of its communicate() call;
  kill_all() kills every registered process group and marks the tracker so
  that any process registered afterwards is killed immediately.
  """

  def __init__(self):
    # Maps pid -> subprocess.Popen. Set to None once kill_all() has run.
    self.procs = {}
    self.mutex = threading.Lock()

  def wait(self, proc, timeout):
    """Waits on the given subprocess and makes it available to kill_all meanwhile.

    Args:
      proc: The subprocess.Popen object to wait on.
      timeout: Timeout passed on to proc.communicate.

    Returns: A tuple of the process stdout output and its return value.
    """
    with self.mutex:
      if self.procs is None:
        # kill_all has already been called; kill the whole process group now.
        os.killpg(proc.pid, signal.SIGKILL)
      else:
        self.procs[proc.pid] = proc
    try:
      stdout_data = proc.communicate(timeout=timeout)[0]
      exit_code = proc.wait()
      return stdout_data, exit_code
    finally:
      with self.mutex:
        if self.procs is not None:
          del self.procs[proc.pid]

  def kill_all(self):
    """Kills all currently running processes and any future ones."""
    with self.mutex:
      for pid in self.procs:
        os.killpg(pid, signal.SIGKILL)
      # Make future wait() calls kill their processes immediately.
      self.procs = None

child_process_tracker = ChildProcessTracker()
200
def setup_csv_result():
  """Initializes the CSV writer over csv_result and emits the header row."""
  global csv_writer
  csv_writer = csv.writer(csv_result)
  header = ['target', 'run', 'prebuild', 'compiler', 'relocate', 'trace', 'gc',
            'jni', 'image', 'debuggable', 'jvmti', 'test', 'address_size',
            'result']
  csv_writer.writerow(header)
208
209
def send_csv_result(test, result):
  """
  Write a line into the CSV results file if one is available.
  """
  if csv_writer is None:
    return
  csv_writer.writerow(extract_test_name(test) + [result])
216
def close_csv_file():
  """Flushes and closes the CSV output file, clearing the module references."""
  global csv_result
  global csv_writer
  if csv_result is None:
    return
  csv_writer = None
  csv_result.flush()
  csv_result.close()
  csv_result = None
225
def gather_test_info():
  """The method gathers test information about the test to be run which includes
  generating the list of total tests from the art/test directory and the list
  of disabled test. It also maps various variants to types.
  """
  global TOTAL_VARIANTS_SET
  # TODO: Avoid duplication of the variant names in different lists.
  VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
  VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
  VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image'}
  VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
  VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
  VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
  VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
  VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
  VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                                'field-stress', 'step-stress'}
  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
                                   'optimizing', 'speed-profile', 'baseline'}

  # Flatten every per-type variant set into the single global set.
  for variants in VARIANT_TYPE_DICT.values():
    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(variants)

  # Run-test directories are the numerically-prefixed entries in art/test.
  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
  for entry in os.listdir(test_dir):
    if fnmatch.fnmatch(entry, '[0-9]*'):
      RUN_TEST_SET.add(entry)
255
256
def setup_test_env():
  """The method sets default value for the various variants of the tests if they
  are already not set.

  Also resolves the worker-thread count (n_thread), fetches per-device extra
  arguments, and disables ANSI colors when stdout is not a terminal.
  """
  if env.ART_TEST_BISECTION:
    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
    env.ART_TEST_RUN_TEST_PREBUILD = False
    # Bisection search writes to standard output.
    env.ART_TEST_QUIET = False

  global _user_input_variants
  global run_all_configs
  # These are the default variant-options we will use if nothing in the group is specified.
  default_variants = {
      'target': {'host', 'target'},
      'prebuild': {'prebuild'},
      'jvmti': { 'no-jvmti'},
      'compiler': {'optimizing',
                   'jit',
                   'interpreter',
                   'interp-ac',
                   'speed-profile'},
      'relocate': {'no-relocate'},
      'trace': {'ntrace'},
      'gc': {'cms'},
      'jni': {'checkjni'},
      'image': {'picimage'},
      'debuggable': {'ndebuggable'},
      'run': {'debug'},
      # address_sizes_target depends on the target so it is dealt with below.
  }
  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
  # want to pick up if we pass --all.
  # Note: the keys() view stays bound to the dict object above even after
  # default_variants is re-bound to VARIANT_TYPE_DICT below, so only the
  # default variant types are iterated while values may come from the full dict.
  default_variants_keys = default_variants.keys()
  if run_all_configs:
    default_variants = VARIANT_TYPE_DICT

  # Fill in defaults only for variant groups the user left unspecified.
  for key in default_variants_keys:
    if not _user_input_variants[key]:
      _user_input_variants[key] = default_variants[key]

  _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
  if not _user_input_variants['address_sizes']:
    # No explicit address sizes requested: use the per-platform phony suffixes,
    # optionally adding the secondary architecture.
    _user_input_variants['address_sizes_target']['target'].add(
        env.ART_PHONY_TEST_TARGET_SUFFIX)
    _user_input_variants['address_sizes_target']['host'].add(
        env.ART_PHONY_TEST_HOST_SUFFIX)
    if env.ART_TEST_RUN_TEST_2ND_ARCH:
      _user_input_variants['address_sizes_target']['host'].add(
          env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
      _user_input_variants['address_sizes_target']['target'].add(
          env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
  else:
    _user_input_variants['address_sizes_target']['host'] = _user_input_variants['address_sizes']
    _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']

  global n_thread
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
    if n_thread == 0:
      # Use only part of the cores since fully loading the device tends to lead to timeouts.
      fraction = 1.0 if env.ART_TEST_ON_VM else 0.75
      n_thread = max(1, int(get_target_cpu_count() * fraction))
  else:
    device_name = "host"
    if n_thread == 0:
      n_thread = get_host_cpu_count()
  print_text("Concurrency: {} ({})\n".format(n_thread, device_name))

  global extra_arguments
  for target in _user_input_variants['target']:
    extra_arguments[target] = find_extra_device_arguments(target)

  # Strip ANSI color codes when output is redirected (not a tty).
  if not sys.stdout.isatty():
    global COLOR_ERROR
    global COLOR_PASS
    global COLOR_SKIP
    global COLOR_NORMAL
    COLOR_ERROR = ''
    COLOR_PASS = ''
    COLOR_SKIP = ''
    COLOR_NORMAL = ''
339
def find_extra_device_arguments(target):
  """
  Gets any extra arguments from the device_config.

  For 'target' the lookup key is the actual device name; for 'host'/'jvm'
  the target string itself is the key. Falls back to an empty argument list.
  """
  device_name = get_device_name() if target == 'target' else target
  fallback = { 'run-test-args' : [] }
  return device_config.get(device_name, fallback)['run-test-args']
348
def get_device_name():
  """
  Gets the value of ro.product.name from remote device (unless running on a VM).

  Returns:
    The device product name, "target" during a Soong build, the VM's
    `uname -a` output when running on a VM, or "UNKNOWN_TARGET" when adb fails.
  """
  if env.ART_TEST_RUN_FROM_SOONG:
    return "target"  # We can't use adb during build.
  if env.ART_TEST_ON_VM:
    # NOTE(review): this Popen is never wait()ed on, so the ssh child is left
    # to be reaped implicitly — confirm this is intentional.
    return subprocess.Popen(f"{env.ART_SSH_CMD} uname -a".split(),
                            stdout = subprocess.PIPE,
                            universal_newlines=True).stdout.read().strip()

  proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
                          stderr=subprocess.STDOUT,
                          stdout = subprocess.PIPE,
                          universal_newlines=True)
  # only wait 2 seconds.
  timeout_val = 2

  if env.ART_TEST_RUN_ON_ARM_FVP:
    # Increase timeout to 200 seconds due to the emulation overhead on FVP.
    timeout_val = 200

  output = proc.communicate(timeout = timeout_val)[0]
  # A zero exit status means getprop succeeded.
  success = not proc.wait()
  if success:
    return output.strip()
  else:
    print_text("Unable to determine device type!\n")
    print_text("Continuing anyway.\n")
    return "UNKNOWN_TARGET"
379
def run_tests(tests):
  """This method generates variants of the tests to be run and executes them.

  Args:
    tests: The set of tests to be run.

  Side effects: updates total_test_count, submits one run_test job per
  (test, variant, address-size) combination to a thread pool, and prints
  progress as futures complete.
  """
  args_all = []

  # jvm does not run with all these combinations,
  # or at least it doesn't make sense for most of them.
  # TODO: support some jvm variants like jvmti ?
  target_input_variants = _user_input_variants['target']
  uncombinated_target_input_variants = []
  if 'jvm' in target_input_variants:
    _user_input_variants['target'].remove('jvm')
    uncombinated_target_input_variants.append('jvm')

  global total_test_count
  total_test_count = len(tests)
  # Total = tests x product of every non-target, non-address-size variant
  # group, then x the number of (target, address-size) combinations.
  if target_input_variants:
    for variant_type in VARIANT_TYPE_DICT:
      if not (variant_type == 'target' or 'address_sizes' in variant_type):
        total_test_count *= len(_user_input_variants[variant_type])
  target_address_combinations = 0
  for target in target_input_variants:
    for address_size in _user_input_variants['address_sizes_target'][target]:
      target_address_combinations += 1
  target_address_combinations += len(uncombinated_target_input_variants)
  total_test_count *= target_address_combinations

  # Flags below apply to every submitted test invocation.
  if env.ART_TEST_WITH_STRACE:
    args_all += ['--strace']

  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
    args_all += ['--always-clean']

  if env.ART_TEST_BISECTION:
    args_all += ['--bisection-search']

  if gdb:
    args_all += ['--gdb']
    if gdb_arg:
      args_all += ['--gdb-arg', gdb_arg]

  if dump_cfg:
    args_all += ['--dump-cfg', dump_cfg]
  if gdb_dex2oat:
    args_all += ['--gdb-dex2oat']
    if gdb_dex2oat_args:
      args_all += ['--gdb-dex2oat-args', f'{gdb_dex2oat_args}']

  args_all += run_test_option

  if runtime_option:
    for opt in runtime_option:
      args_all += ['--runtime-option', opt]
  if with_agent:
    for opt in with_agent:
      args_all += ['--with-agent', opt]

  if dex2oat_jobs != -1:
    args_all += ['--dex2oat-jobs', str(dex2oat_jobs)]

  def iter_config(tests, input_variants, user_input_variants):
    """Returns the cartesian product of tests x targets x all variant groups."""
    config = itertools.product(tests, input_variants, user_input_variants['run'],
                                 user_input_variants['prebuild'], user_input_variants['compiler'],
                                 user_input_variants['relocate'], user_input_variants['trace'],
                                 user_input_variants['gc'], user_input_variants['jni'],
                                 user_input_variants['image'],
                                 user_input_variants['debuggable'], user_input_variants['jvmti'])
    return config

  # [--host, --target] combines with all the other user input variants.
  config = iter_config(tests, target_input_variants, _user_input_variants)
  # [--jvm] currently combines with nothing else. most of the extra flags we'd insert
  # would be unrecognizable by the 'java' binary, so avoid inserting any extra flags for now.
  uncombinated_config = iter_config(tests, uncombinated_target_input_variants, { 'run': [''],
      'prebuild': [''], 'compiler': [''],
      'relocate': [''], 'trace': [''],
      'gc': [''], 'jni': [''],
      'image': [''],
      'debuggable': [''], 'jvmti': ['']})

  def start_combination(executor, config_tuple, global_options, address_size):
      """Builds the run-test command line for one combination and submits it."""
      test, target, run, prebuild, compiler, relocate, trace, gc, \
      jni, image, debuggable, jvmti = config_tuple

      # NB The order of components here should match the order of
      # components in the regex parser in parse_test_name.
      test_name = 'test-art-'
      test_name += target + '-run-test-'
      test_name += run + '-'
      test_name += prebuild + '-'
      test_name += compiler + '-'
      test_name += relocate + '-'
      test_name += trace + '-'
      test_name += gc + '-'
      test_name += jni + '-'
      test_name += image + '-'
      test_name += debuggable + '-'
      test_name += jvmti + '-'
      test_name += test
      test_name += address_size

      variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
                     image, debuggable, jvmti, address_size}

      args_test = global_options.copy()

      if target == 'host':
        args_test += ['--host']
      elif target == 'jvm':
        args_test += ['--jvm']

      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
      # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
      # for target tests.
      if target == 'target':
        if env.ART_TEST_CHROOT:
          args_test += ['--chroot', env.ART_TEST_CHROOT]
        if env.ART_TEST_ANDROID_ROOT:
          args_test += ['--android-root', env.ART_TEST_ANDROID_ROOT]
        if env.ART_TEST_ANDROID_I18N_ROOT:
            args_test += ['--android-i18n-root', env.ART_TEST_ANDROID_I18N_ROOT]
        if env.ART_TEST_ANDROID_ART_ROOT:
          args_test += ['--android-art-root', env.ART_TEST_ANDROID_ART_ROOT]
        if env.ART_TEST_ANDROID_TZDATA_ROOT:
          args_test += ['--android-tzdata-root', env.ART_TEST_ANDROID_TZDATA_ROOT]

      if run == 'ndebug':
        args_test += ['-O']

      if prebuild == 'prebuild':
        args_test += ['--prebuild']
      elif prebuild == 'no-prebuild':
        args_test += ['--no-prebuild']

      # Map each compiler variant to its run-test flag(s).
      if compiler == 'optimizing':
        args_test += ['--optimizing']
      elif compiler == 'interpreter':
        args_test += ['--interpreter']
      elif compiler == 'interp-ac':
        args_test += ['--switch-interpreter', '--verify-soft-fail']
      elif compiler == 'jit':
        args_test += ['--jit']
      elif compiler == 'jit-on-first-use':
        args_test += ['--jit', '--runtime-option', '-Xjitthreshold:0']
      elif compiler == 'speed-profile':
        args_test += ['--random-profile']
      elif compiler == 'baseline':
        args_test += ['--baseline']

      if relocate == 'relocate':
        args_test += ['--relocate']
      elif relocate == 'no-relocate':
        args_test += ['--no-relocate']

      if trace == 'trace':
        args_test += ['--trace']
      elif trace == 'stream':
        args_test += ['--trace', '--stream']

      if gc == 'gcverify':
        args_test += ['--gcverify']
      elif gc == 'gcstress':
        args_test += ['--gcstress']

      if jni == 'forcecopy':
        args_test += ['--runtime-option', '-Xjniopts:forcecopy']
      elif jni == 'checkjni':
        args_test += ['--runtime-option', '-Xcheck:jni']

      if image == 'no-image':
        args_test += ['--no-image']

      if debuggable == 'debuggable':
        args_test += ['--debuggable', '--runtime-option', '-Xopaque-jni-ids:true']

      if jvmti == 'jvmti-stress':
        args_test += ['--jvmti-trace-stress', '--jvmti-redefine-stress', '--jvmti-field-stress']
      elif jvmti == 'field-stress':
        args_test += ['--jvmti-field-stress']
      elif jvmti == 'trace-stress':
        args_test += ['--jvmti-trace-stress']
      elif jvmti == 'redefine-stress':
        args_test += ['--jvmti-redefine-stress']
      elif jvmti == 'step-stress':
        args_test += ['--jvmti-step-stress']

      if address_size == '64':
        args_test += ['--64']

      # Run the run-test script using the prebuilt python.
      python3_bin = env.ANDROID_BUILD_TOP + "/prebuilts/build-tools/path/linux-x86/python3"
      run_test_sh = str(Path(__file__).parent.parent / 'run-test')
      if not os.path.exists(python3_bin):
        python3_bin = sys.executable  # Fallback to current python if we are in a sandbox.
      args_test = [python3_bin, run_test_sh] + args_test + extra_arguments[target] + [test]
      return executor.submit(run_test, args_test, test, variant_set, test_name)

  global n_thread
  with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
    test_futures = []
    for config_tuple in config:
      target = config_tuple[1]
      for address_size in _user_input_variants['address_sizes_target'][target]:
        test_futures.append(start_combination(executor, config_tuple, args_all, address_size))

    for config_tuple in uncombinated_config:
      test_futures.append(
          start_combination(executor, config_tuple, args_all, ""))  # no address size

    try:
      tests_done = 0
      for test_future in concurrent.futures.as_completed(f for f in test_futures if f):
        (test, status, failure_info, test_time) = test_future.result()
        tests_done += 1
        print_test_info(tests_done, test, status, failure_info, test_time)
        # Without ART_TEST_KEEP_GOING, a single failure cancels the remaining
        # queued (not-yet-started) futures and stops consuming results.
        if failure_info and not env.ART_TEST_KEEP_GOING:
          for f in test_futures:
            f.cancel()
          break
    except KeyboardInterrupt:
      for f in test_futures:
        f.cancel()
      child_process_tracker.kill_all()
    executor.shutdown(True)
607
608def _popen(**kwargs):
609  if sys.version_info.major == 3 and sys.version_info.minor >= 6:
610    return subprocess.Popen(encoding=sys.stdout.encoding, **kwargs)
611  return subprocess.Popen(**kwargs)
612
def run_test(args, test, test_variant, test_name):
  """Runs the test.

  It invokes art/test/run-test script to run the test. The output of the script
  is checked, and if it ends with "Succeeded!", it assumes that the tests
  passed, otherwise, put it in the list of failed test. Before actually running
  the test, it also checks if the test is placed in the list of disabled tests,
  and if yes, it skips running it, and adds the test in the list of skipped
  tests.

  Args:
    args: The command to be used to invoke the script
    test: The name of the test without the variant information.
    test_variant: The set of variant for the test.
    test_name: The name of the test along with the variants.

  Returns: a tuple of testname, status, optional failure info, and test time.
  """
  try:
    command = ' '.join(args)

    if is_test_disabled(test, test_variant):
      test_skipped = True
      test_time = datetime.timedelta()
    else:
      test_skipped = False
      test_start_time = time.monotonic()
      if verbose:
        print_text("Starting %s at %s\n" % (test_name, test_start_time))
      environ = dict(os.environ)
      environ["FULL_TEST_NAME"] = test_name
      # Under gdb, keep the child's stdout attached to the terminal so the
      # debugger is interactive; otherwise capture it for failure reporting.
      if gdb or gdb_dex2oat:
        proc = _popen(
          args=args,
          env=environ,
          stderr=subprocess.STDOUT,
          universal_newlines=True,
          start_new_session=True
        )
      else:
        proc = _popen(
          args=args,
          env=environ,
          stderr=subprocess.STDOUT,
          stdout = subprocess.PIPE,
          universal_newlines=True,
          start_new_session=True,
        )
      # Register with the tracker so KeyboardInterrupt can kill the process group.
      script_output, return_value = child_process_tracker.wait(proc, timeout)
      test_passed = not return_value
      test_time_seconds = time.monotonic() - test_start_time
      test_time = datetime.timedelta(seconds=test_time_seconds)

    if not test_skipped:
      if test_passed:
        return (test_name, 'PASS', None, test_time)
      else:
        failed_tests.append((test_name, str(command) + "\n" + script_output))
        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
    elif not dry_run:
      skipped_tests.append(test_name)
      return (test_name, 'SKIP', None, test_time)
    else:
      # In --dry-run mode disabled tests are reported as passing.
      return (test_name, 'PASS', None, test_time)
  except subprocess.TimeoutExpired as e:
    if verbose:
      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
    test_time_seconds = time.monotonic() - test_start_time
    test_time = datetime.timedelta(seconds=test_time_seconds)
    failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))

    # HACK(b/142039427): Print extra backtraces on timeout.
    if "-target-" in test_name and not env.ART_TEST_ON_VM:
      for i in range(8):
        # The last two characters of test_name are the address size suffix.
        proc_name = "dalvikvm" + test_name[-2:]
        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
        for pid in pidof.stdout.decode("ascii").split():
          if i >= 4:
            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
            subprocess.run(["adb", "shell", "debuggerd", pid])
            time.sleep(10)
          task_dir = "/proc/%s/task" % pid
          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
          for tid in tids.stdout.decode("ascii").split():
            for status in ["stat", "status"]:
              filename = "%s/%s/%s" % (task_dir, tid, status)
              print_text("Content of %s\n" % (filename))
              subprocess.run(["adb", "shell", "cat", filename])
        time.sleep(60)

    # The python documentation states that it is necessary to actually kill the process.
    os.killpg(proc.pid, signal.SIGKILL)
    # NOTE(review): communicate() returns a (stdout, stderr) tuple here, not a
    # string; the value is unused below, so this only drains the pipes.
    script_output = proc.communicate()

    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
  except Exception as e:
    failed_tests.append((test_name, str(e)))
    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())
711
@lru_cache
def get_console_width(default=100):
  """Returns the terminal width in columns, or `default` when unknown.

  NB: The command may fail if we are running under 'nohup'.
  """
  result = subprocess.run(['stty', 'size'], capture_output=True)
  if result.returncode != 0:
    return default
  return int(result.stdout.decode("utf8").split()[1])
717
def print_test_info(test_count, test_name, result, failed_test_info="",
                    test_time=datetime.timedelta()):
  """Print the continuous test information

  If verbose is set to True, it continuously prints test status information
  on a new line.
  If verbose is set to False, it keeps on erasing test
  information by overriding it with the latest test information. Also,
  in this case it strictly makes sure that the information length doesn't
  exceed the console width. It does so by shortening the test_name.

  When a test fails, it prints the output of the run-test script and
  command used to invoke the script. It doesn't override the failing
  test information in either of the cases.
  """

  info = ''
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    info = '\r' + ' ' * get_console_width() + '\r'
  try:
    percent = (test_count * 100) / total_test_count
    progress_info = ('[ %d%% %d/%d ]') % (
      percent,
      test_count,
      total_test_count)
    if test_time.total_seconds() != 0 and verbose:
      info += '(%s)' % str(test_time)


    if result == 'FAIL' or result == 'TIMEOUT':
      # Failures always end with a newline so they are never overwritten.
      if not verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL)
      else:
        info += ('%s %s %s\n%s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL,
          failed_test_info)
    else:
      result_text = ''
      if result == 'PASS':
        result_text += COLOR_PASS + 'PASS' + COLOR_NORMAL
      elif result == 'SKIP':
        result_text += COLOR_SKIP + 'SKIP' + COLOR_NORMAL

      if verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          result_text)
      else:
        # Truncate the test name from the left ("...suffix") so the whole
        # status line fits within the console width.
        total_output_length = 2 # Two spaces
        total_output_length += len(progress_info)
        total_output_length += len(result)
        allowed_test_length = get_console_width() - total_output_length
        test_name_len = len(test_name)
        if allowed_test_length < test_name_len:
          test_name = ('...%s') % (
            test_name[-(allowed_test_length - 3):])
        info += ('%s %s %s') % (
          progress_info,
          test_name,
          result_text)
    send_csv_result(test_name, result)
    print_text(info)
  except Exception as e:
    # Reporting must never crash the runner; record the problem as a failure.
    print_text(('%s\n%s\n') % (test_name, str(e)))
    failed_tests.append(test_name)
792
def verify_knownfailure_entry(entry):
  """Validates the shape of one knownfailures.json entry.

  Args:
    entry: A dict parsed from art/test/knownfailures.json.

  Raises:
    ValueError: If a field name is unknown or a field value has an
      unsupported type.
  """
  supported_field = {
      'tests' : (list, str),
      'test_patterns' : (list,),
      'description' : (list, str),
      'bug' : (str,),
      'variant' : (str,),
      'devices': (list, str),
      'env_vars' : (dict,),
  }
  for field in entry:
    # Previously an unknown field crashed with a bare KeyError on the lookup
    # below; report it with the same ValueError style as a bad field type.
    if field not in supported_field:
      raise ValueError('%s is not a supported field\n%s' % (
          field,
          str(entry)))
    field_type = type(entry[field])
    if field_type not in supported_field[field]:
      raise ValueError('%s is not supported type for %s\n%s' % (
          str(field_type),
          field,
          str(entry)))
810
def get_disabled_test_info(device_name):
  """Generate set of known failures.

  It parses the art/test/knownfailures.json file to generate the list of
  disabled tests.

  Args:
    device_name: ro.product.name of the device under test, or None.

  Returns:
    The method returns a dict of tests mapped to the variants list
    for which the test should not be run.
  """
  known_failures_file = Path(__file__).parent.parent / 'knownfailures.json'
  with open(known_failures_file) as known_failures_json:
    entries = json.loads(known_failures_json.read())

  disabled_test_info = {}
  for entry in entries:
    verify_knownfailure_entry(entry)

    tests = entry.get('tests', [])
    if isinstance(tests, str):
      tests = [tests]

    patterns = entry.get("test_patterns", [])
    if not isinstance(patterns, list):
      raise ValueError("test_patterns is not a list in %s" % entry)
    # Expand regex patterns against the full run-test set.
    tests += [f for f in RUN_TEST_SET
              if any(re.match(pat, f) is not None for pat in patterns)]

    variants = parse_variants(entry.get('variant'))

    # Treat a '"devices": "<foo>"' equivalent to 'target' variant if
    # "foo" is present in "devices".
    device_names = entry.get('devices', [])
    if isinstance(device_names, str):
      device_names = [device_names]
    if device_names:
      if device_name not in device_names:
        # Skip adding test info as device_name is not present in "devices" entry.
        continue
      variants.add('target')

    if not check_env_vars(entry.get('env_vars')):
      continue

    for test in tests:
      if test not in RUN_TEST_SET:
        if env.ART_TEST_RUN_FROM_SOONG:
          continue  # Soong can see only sub-set of the tests within the shard.
        raise ValueError('%s is not a valid run-test' % (test))
      if test in disabled_test_info:
        disabled_test_info[test] = disabled_test_info[test].union(variants)
      else:
        disabled_test_info[test] = variants

  return disabled_test_info
865
def gather_disabled_test_info():
  """Populate DISABLED_TEST_CONTAINER from knownfailures.json.

  The device name is queried only when target tests were requested, since
  it is needed solely to match "devices" entries.
  """
  global DISABLED_TEST_CONTAINER
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
  else:
    device_name = None
  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)
870
def check_env_vars(env_vars):
  """Checks if the env variables are set as required to run the test.

  Args:
    env_vars: Mapping of env variable names to required values, or a falsy
      value when the entry imposes no env requirements.
  Returns:
    True if all the env variables are set as required, otherwise False.
  """
  if not env_vars:
    # No requirements means the check trivially passes.
    return True
  return all(env.get_env(name) == required
             for name, required in env_vars.items())
884
885
def is_test_disabled(test, variant_set):
  """Checks if the test along with the variant_set is disabled.

  Args:
    test: The name of the test as in art/test directory.
    variant_set: Variants to be used for the test.
  Returns:
    True, if the test is disabled.
  """
  # In dry-run mode no test actually executes.
  if dry_run:
    return True
  if test in env.EXTRA_DISABLED_TESTS:
    return True
  if ignore_skips:
    return False
  # Disabled when any recorded variant combination for this test is fully
  # contained in the requested variant set.
  for disabling_combo in DISABLED_TEST_CONTAINER.get(test, {}):
    if all(variant in variant_set for variant in disabling_combo):
      return True
  # Some variant combinations are inherently non-functional; skip them too.
  return any(bad_combo.issubset(variant_set)
             for bad_combo in NONFUNCTIONAL_VARIANT_SETS)
914
915
def parse_variants(variants):
  """Parse variants fetched from art/test/knownfailures.json.

  The value is an '|'-separated list of combinations; each combination is
  an '&'-separated list of variant names (whitespace around names is
  ignored), e.g. "interpreter & 32 | optimizing".

  Args:
    variants: The variant string, or a falsy value meaning "all variants".
  Returns:
    A set of frozensets, one frozenset per AND-combination of variants.
  Raises:
    ValueError: If a name is not a member of TOTAL_VARIANTS_SET.
  """
  if not variants:
    # No restriction given: disable for every variant individually.
    variants = '|'.join(TOTAL_VARIANTS_SET)
  variant_list = set()
  for or_variant in variants.split('|'):
    variant = set()
    for and_variant in or_variant.split('&'):
      and_variant = and_variant.strip()
      if and_variant not in TOTAL_VARIANTS_SET:
        raise ValueError('%s is not a valid variant' % (
            and_variant))
      variant.add(and_variant)
    variant_list.add(frozenset(variant))
  return variant_list
938
def print_text(output, error=False):
  """Write output to stdout and flush immediately.

  Non-error output is suppressed while running from Soong, to keep the
  build quiet.
  """
  if error or not env.ART_TEST_RUN_FROM_SOONG:
    sys.stdout.write(output)
    sys.stdout.flush()
944
def print_analysis():
  """Print the run summary: pass ratio, then skipped and failed tests."""
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    eraser_text = '\r' + ' ' * get_console_width() + '\r'
    print_text(eraser_text)

  # Prints information about the total tests run.
  # E.g., "2/38 (5%) tests passed".
  passed_test_count = total_test_count - len(skipped_tests) - len(failed_tests)
  passed_test_information = ('%d/%d (%d%%) %s passed.\n') % (
      passed_test_count,
      total_test_count,
      (passed_test_count*100)/total_test_count,
      # Singular only for exactly one: "0 tests", "1 test", "2 tests".
      'test' if passed_test_count == 1 else 'tests')
  print_text(passed_test_information)

  # Prints the list of skipped tests, if any.
  if skipped_tests:
    print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
    for test in skipped_tests:
      print_text(test + '\n')
    print_text('\n')

  # Prints the list of failed tests, if any.
  if failed_tests:
    print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n', error=True)
    for test_info in failed_tests:
      print_text(('%s\n%s\n' % (test_info[0], test_info[1])), error=True)
    print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
    for failed_test in sorted([test_info[0] for test_info in failed_tests]):
      print_text(('%s\n' % (failed_test)))
978
# Lazily-built, cached regex for fully-qualified run-test names.
test_name_matcher = None
def extract_test_name(test_name):
  """Parses the test name and returns all the parts"""
  global test_name_matcher
  if test_name_matcher is None:
    # Build one capture group per variant dimension, in canonical order,
    # followed by the test name and the address-size suffix.
    parts = ['^test-art-']
    parts.append('(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-')
    parts.append('run-test-')
    for variant_type in ('run', 'prebuild', 'compiler', 'relocate', 'trace',
                         'gc', 'jni', 'image', 'debuggable', 'jvmti'):
      parts.append('(' + '|'.join(VARIANT_TYPE_DICT[variant_type]) + ')-')
    parts.append('(' + '|'.join(RUN_TEST_SET) + ')')
    parts.append('(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$')
    test_name_matcher = re.compile(''.join(parts))
  match = test_name_matcher.match(test_name)
  if match:
    return list(match.groups())
  raise ValueError(test_name + " is not a valid test")
1004
def parse_test_name(test_name):
  """Parses the testname provided by the user.
  It supports two types of test_name:
  1) Like 001-HelloWorld. In this case, it will just verify if the test actually
  exists and if it does, it returns the testname.
  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-ndebuggable-no-jvmti-001-HelloWorld32
  In this case, it will parse all the variants and check if they are placed
  correctly. If yes, it will set the various VARIANT_TYPES to use the
  variants required to run the test. Again, it returns the test_name
  without the variant information like 001-HelloWorld.
  """
  # A plain (prefix of a) test name matches one or more run-tests directly.
  matching_tests = {test for test in RUN_TEST_SET if test.startswith(test_name)}
  if matching_tests:
    return matching_tests

  # Otherwise it must be a fully-qualified name; record each parsed variant
  # under its dimension, in the same order extract_test_name captures them.
  parsed = extract_test_name(test_name)
  ordered_types = ('target', 'run', 'prebuild', 'compiler', 'relocate',
                   'trace', 'gc', 'jni', 'image', 'debuggable', 'jvmti')
  for variant_type, value in zip(ordered_types, parsed):
    _user_input_variants[variant_type].add(value)
  # parsed[11] is the bare test name; parsed[12] is the address-size suffix.
  _user_input_variants['address_sizes'].add(parsed[12])
  return {parsed[11]}
1037
1038
def get_target_cpu_count():
  """Return the number of CPUs present on the target device.

  Reads /sys/devices/system/cpu/present on the device (via adb, or ssh
  when running on a VM). The file holds a range such as "0-7", or just
  "0" on a single-CPU device.

  Returns:
    The CPU count as an int.
  Raises:
    ValueError: If the CPU information can not be obtained or parsed.
  """
  if env.ART_TEST_ON_VM:
    command = f"{env.ART_SSH_CMD} cat /sys/devices/system/cpu/present"
  else:
    command = 'adb shell cat /sys/devices/system/cpu/present'
  # subprocess.run waits for the child, avoiding the zombie process that a
  # bare Popen + stdout.read() left behind.
  cpu_info = subprocess.run(command.split(), stdout=subprocess.PIPE).stdout
  if isinstance(cpu_info, bytes):
    cpu_info = cpu_info.decode('utf-8')
  # Accept "first-last" ranges and the single-CPU case with no '-'.
  match = re.match(r'\d+(?:-(\d+))?\s*$', cpu_info)
  if match:
    last_index = int(match.group(1)) if match.group(1) else 0
    return last_index + 1  # Add one to convert from "last-index" to "count"
  else:
    raise ValueError('Unable to predict the concurrency for the target. '
                     'Is device connected?')
1055
1056
def get_host_cpu_count():
  """Return the number of CPUs available for running host tests."""
  return multiprocessing.cpu_count()
1059
1060
def parse_option():
  """Parse command-line options and configure the test run.

  Side effects: sets the module-level option globals declared below,
  fills _user_input_variants from the per-variant flags, and extends
  env.EXTRA_DISABLED_TESTS with --skip arguments.

  Returns:
    The set of test names to run; RUN_TEST_SET when none were given.
  """
  global verbose
  global dry_run
  global ignore_skips
  global n_thread
  global build
  global dist
  global gdb
  global gdb_arg
  global dump_cfg
  global gdb_dex2oat
  global gdb_dex2oat_args
  global runtime_option
  global run_test_option
  global timeout
  global dex2oat_jobs
  global run_all_configs
  global with_agent
  global csv_result

  parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
  parser.add_argument('tests', action='extend', nargs="*", help='name(s) of the test(s)')
  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)'
      ' (deprecated: use positional arguments at the end without any option instead)')
  global_group = parser.add_argument_group('Global options',
                                           'Options that affect all tests being run')
  global_group.add_argument('-j', type=int, dest='n_thread', help="""Number of CPUs to use.
                            Defaults to half of CPUs on target and all CPUs on host.""")
  global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
  global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
  global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                            help="Skip the given test in all circumstances.")
  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                            help="""Don't skip any run-test configurations listed in
                            knownfailures.json.""")
  global_group.add_argument('--no-build-dependencies',
                            action='store_false', dest='build',
                            help="""Don't build dependencies under any circumstances. This is the
                            behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.""")
  global_group.add_argument('-b', '--build-dependencies',
                            action='store_true', dest='build',
                            help="""Build dependencies under all circumstances. By default we will
                            not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.""")
  global_group.add_argument('--dist',
                            action='store_true', dest='dist',
                            help="""If dependencies are to be built, pass `dist` to the build
                            command line. You may want to also set the DIST_DIR environment
                            variable when using this flag.""")
  global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
  global_group.add_argument('--gdb', action='store_true', dest='gdb')
  global_group.add_argument('--gdb-arg', dest='gdb_arg')
  global_group.add_argument('--dump-cfg', dest='dump_cfg',
                            help="""Dump the CFG to the specified host path.
                            Example \"--dump-cfg <full-path>/graph.cfg\".""")
  global_group.add_argument('--gdb-dex2oat', action='store_true', dest='gdb_dex2oat')
  global_group.add_argument('--gdb-dex2oat-args', dest='gdb_dex2oat_args')
  global_group.add_argument('--run-test-option', action='append', dest='run_test_option',
                            default=[],
                            help="""Pass an option, unaltered, to the run-test script.
                            This should be enclosed in single-quotes to allow for spaces. The option
                            will be split using shlex.split() prior to invoking run-test.
                            Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\".""")
  global_group.add_argument('--with-agent', action='append', dest='with_agent',
                            help="""Pass an agent to be attached to the runtime""")
  global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                            help="""Pass an option to the runtime. Runtime options
                            starting with a '-' must be separated by a '=', for
                            example '--runtime-option=-Xjitthreshold:0'.""")
  global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs',
                            help='Number of dex2oat jobs')
  global_group.add_argument('-a', '--all', action='store_true', dest='run_all',
                            help="Run all the possible configurations for the input test set")
  global_group.add_argument('--csv-results', action='store', dest='csv_result', default=None,
                            type=argparse.FileType('w'), help='Store a CSV record of all results.')
  # One option group per variant dimension, with an --all-<type> meta-flag
  # plus one boolean flag per variant.
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    var_group = parser.add_argument_group(
        '{}-type Options'.format(variant_type),
        "Options that control the '{}' variants.".format(variant_type))
    var_group.add_argument('--all-' + variant_type,
                           action='store_true',
                           dest='all_' + variant_type,
                           help='Enable all variants of ' + variant_type)
    for variant in variant_set:
      flag = '--' + variant
      var_group.add_argument(flag, action='store_true', dest=variant)

  options = vars(parser.parse_args())
  if options['csv_result'] is not None:
    csv_result = options['csv_result']
    setup_csv_result()
  # Handle the --all-<type> meta-options
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    if options['all_' + variant_type]:
      for variant in variant_set:
        options[variant] = True

  tests = None
  env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
  if options['tests']:
    tests = set()
    for test_name in options['tests']:
      tests |= parse_test_name(test_name)

  # Record every explicitly requested variant under its dimension.
  for variant_type in VARIANT_TYPE_DICT:
    for variant in VARIANT_TYPE_DICT[variant_type]:
      if options.get(variant):
        _user_input_variants[variant_type].add(variant)

  if options['verbose']:
    verbose = True
  if options['n_thread']:
    n_thread = max(1, options['n_thread'])
  ignore_skips = options['ignore_skips']
  if options['dry_run']:
    dry_run = True
    verbose = True
  build = options['build']
  dist = options['dist']
  if options['gdb']:
    # gdb needs a single test process to attach to.
    n_thread = 1
    gdb = True
    if options['gdb_arg']:
      gdb_arg = options['gdb_arg']
  if options['dump_cfg']:
    dump_cfg = options['dump_cfg']
  if options['gdb_dex2oat']:
    n_thread = 1
    gdb_dex2oat = True
    if options['gdb_dex2oat_args']:
      gdb_dex2oat_args = options['gdb_dex2oat_args']
  runtime_option = options['runtime_option']
  with_agent = options['with_agent']
  run_test_option = sum(map(shlex.split, options['run_test_option']), [])

  timeout = options['timeout']
  if options['dex2oat_jobs']:
    dex2oat_jobs = options['dex2oat_jobs']
  if options['run_all']:
    run_all_configs = True

  return tests or RUN_TEST_SET
1203
def main():
  """Entry point: gather tests, optionally build dependencies, run tests."""
  gather_test_info()
  tests = parse_option()
  setup_test_env()
  gather_disabled_test_info()
  if build:
    build_targets = []
    # Build only the needed shards (depending on the selected tests).
    shards = {re.search(r"(\d\d)-", t).group(1) for t in tests}
    if any("hiddenapi" in t for t in tests):
      shards.add("HiddenApi")  # Include special HiddenApi shard.
    for mode in ['host', 'target', 'jvm']:
      if mode not in _user_input_variants['target']:
        continue
      build_targets.append('test-art-{}-run-test-dependencies'.format(mode))
      if len(shards) >= 100:
        build_targets.append("art-run-test-{}-data".format(mode))  # Build all.
      else:
        build_targets.extend(
            "art-run-test-{}-data-shard{}".format(mode, s) for s in shards)
    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
    build_command += ' D8='
    if dist:
      build_command += ' dist'
    build_command += ' ' + ' '.join(build_targets)
    print_text('Build command: %s\n' % build_command)
    if subprocess.call(build_command.split()):
      # Debugging for b/62653020
      if env.DIST_DIR:
        shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
      sys.exit(1)

  run_tests(tests)

  print_analysis()
  close_csv_file()

  sys.exit(0 if not failed_tests else 1)
1241
# Script entry point: run the testrunner when invoked directly.
if __name__ == '__main__':
  main()
1244