• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2#
3# [VPYTHON:BEGIN]
4# python_version: "3.8"
5# [VPYTHON:END]
6#
7# Copyright 2017, The Android Open Source Project
8#
9# Licensed under the Apache License, Version 2.0 (the "License");
10# you may not use this file except in compliance with the License.
11# You may obtain a copy of the License at
12#
13#     http://www.apache.org/licenses/LICENSE-2.0
14#
15# Unless required by applicable law or agreed to in writing, software
16# distributed under the License is distributed on an "AS IS" BASIS,
17# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18# See the License for the specific language governing permissions and
19# limitations under the License.
20
21"""ART Run-Test TestRunner
22
23The testrunner runs the ART run-tests by simply invoking the script.
24It fetches the list of eligible tests from art/test directory, and list of
25disabled tests from art/test/knownfailures.json. It runs the tests by
26invoking art/test/run-test script and checks the exit value to decide if the
27test passed or failed.
28
29Before invoking the script, first build all the tests dependencies.
30There are two major build targets for building target and host tests
31dependencies:
321) test-art-host-run-test
332) test-art-target-run-test
34
35There are various options to invoke the script which are:
36-t: Either the test name as in art/test or the test name including the variant
37    information. Eg, "-t 001-HelloWorld",
38    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32"
39-j: Number of thread workers to be used. Eg - "-j64"
40--dry-run: Instead of running the test name, just print its name.
41--verbose
42-b / --build-dependencies: to build the dependencies before running the test
43
44To specify a particular variant for the test, use --<variant-name>.
45For example, to select the optimizing compiler, use --optimizing.
46
47
48In the end, the script will print the failed and skipped tests if any.
49
50"""
51import argparse
52import collections
53
54# b/140161314 diagnostics.
55try:
56  import concurrent.futures
57except Exception:
58  import sys
59  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
60  sys.stdout.flush()
61  raise
62
63import csv
64import datetime
65import fnmatch
66import itertools
67import json
68import multiprocessing
69import os
70import re
71import shlex
72import shutil
73import signal
74import subprocess
75import sys
76import tempfile
77import threading
78import time
79
80import env
81from target_config import target_config
82from device_config import device_config
83from typing import Dict, Set, List
84from functools import lru_cache
85
# TODO: make it adjustable per tests and for buildbots
#
# Note: this needs to be larger than run-test timeouts, as long as this script
#       does not push the value to run-test. run-test is somewhat complicated:
#                      base: 25m  (large for ASAN)
#        + timeout handling:  2m
#        +   gcstress extra: 20m
#        -----------------------
#                            47m
# Hard per-test timeout in seconds, enforced by this script (see run_test),
# not by the run-test script itself.
timeout = 3600 # 60 minutes

if env.ART_TEST_RUN_ON_ARM_FVP:
  # Increase timeout to 600 minutes due to the emulation overhead on FVP.
  timeout = 36000
100
# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
# that has key as the test name (like 001-HelloWorld), and value as set of
# variants that the test is disabled for.
DISABLED_TEST_CONTAINER = {}

# The Dict contains the list of all possible variants for a given type. For example,
# for key TARGET, the value would be target and host. The list is used to parse
# the test name given as the argument to run.
VARIANT_TYPE_DICT: Dict[str, Set[str]] = {}

# The set of all variant sets that are incompatible and will always be skipped.
NONFUNCTIONAL_VARIANT_SETS = set()

# The set contains all the variants of each type.
TOTAL_VARIANTS_SET: Set[str] = set()

# The colors are used in the output. When a test passes, COLOR_PASS is used,
# and so on. They are reset to empty strings in setup_test_env() when stdout
# is not a terminal.
COLOR_ERROR = '\033[91m'
COLOR_PASS = '\033[92m'
COLOR_SKIP = '\033[93m'
COLOR_NORMAL = '\033[0m'

# The set contains the list of all the possible run tests that are in art/test
# directory.
RUN_TEST_SET = set()

# Accumulated (test_name, failure_info) pairs and skipped test names,
# appended to by the worker threads via run_test().
failed_tests = []
skipped_tests = []

# Flags (set from command-line options elsewhere in this file).
n_thread = 0          # Worker-thread count; 0 means auto-detect in setup_test_env().
total_test_count = 0  # Total number of test/variant combinations to run.
verbose = False
dry_run = False
ignore_skips = False
build = False
dist = False
gdb = False
gdb_arg = ''
dump_cfg = ''
gdb_dex2oat = False
gdb_dex2oat_args = ''
csv_result = None     # Open file object for CSV output, or None when disabled.
csv_writer = None     # csv.writer over csv_result, or None when disabled.
runtime_option = ''
with_agent: List[str] = []
run_test_option: List[str] = []
dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
run_all_configs = False

# Dict containing extra arguments
extra_arguments: Dict[str, List[str]] = { "host" : [], "target" : [] }

# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
_user_input_variants: collections.defaultdict = collections.defaultdict(set)
160
class ChildProcessTracker(object):
  """Keeps track of forked child processes to be able to kill them."""

  def __init__(self):
    # Dict from pid to subprocess.Popen object; becomes None permanently
    # after kill_all() so that later wait() calls kill immediately.
    self.procs = {}             # dict from pid to subprocess.Popen object
    self.mutex = threading.Lock()  # Guards self.procs.

  def wait(self, proc, timeout):
    """Waits on the given subprocess and makes it available to kill_all meanwhile.

    Args:
      proc: The subprocess.Popen object to wait on.
      timeout: Timeout passed on to proc.communicate.

    Returns: A tuple of the process stdout output and its return value.
    """
    with self.mutex:
      if self.procs is not None:
        self.procs[proc.pid] = proc
      else:
        os.killpg(proc.pid, signal.SIGKILL) # kill_all has already been called.
    try:
      # communicate() may raise subprocess.TimeoutExpired; the finally block
      # still unregisters the pid in that case.
      output = proc.communicate(timeout=timeout)[0]
      return_value = proc.wait()
      return output, return_value
    finally:
      with self.mutex:
        if self.procs is not None:
          del self.procs[proc.pid]

  def kill_all(self):
    """Kills all currently running processes and any future ones."""
    with self.mutex:
      # Kill the whole process group so grandchildren die too (the workers
      # start their children with start_new_session=True).
      for pid in self.procs:
        os.killpg(pid, signal.SIGKILL)
      self.procs = None # Make future wait() calls kill their processes immediately.

# Singleton tracker shared by all worker threads.
child_process_tracker = ChildProcessTracker()
199
def setup_csv_result():
  """Sets up the CSV writer and emits the header row.

  No-op when CSV output was not requested (csv_result is None), mirroring the
  guards in send_csv_result() and close_csv_file(); previously this would
  crash on csv.writer(None).
  """
  global csv_writer
  if csv_result is None:
    return  # CSV output not requested.
  csv_writer = csv.writer(csv_result)
  # Write the header.
  csv_writer.writerow(['target', 'run', 'prebuild', 'compiler', 'relocate', 'trace', 'gc',
                       'jni', 'image', 'debuggable', 'jvmti', 'test', 'address_size', 'result'])
207
208
def send_csv_result(test, result):
  """Appends one result row to the CSV output, if CSV output is enabled."""
  if csv_writer is None:
    return
  csv_writer.writerow(extract_test_name(test) + [result])
215
def close_csv_file():
  """Flushes and closes the CSV output file and clears both CSV globals."""
  global csv_result
  global csv_writer
  if csv_result is None:
    return
  csv_writer = None
  csv_result.flush()
  csv_result.close()
  csv_result = None
224
def gather_test_info():
  """Gathers information about the tests to be run.

  Fills VARIANT_TYPE_DICT with every known variant value per variant type,
  accumulates all of them into TOTAL_VARIANTS_SET, and populates RUN_TEST_SET
  with every run-test found under art/test.
  """
  global TOTAL_VARIANTS_SET
  # TODO: Avoid duplication of the variant names in different lists.
  VARIANT_TYPE_DICT.update({
      'run': {'ndebug', 'debug'},
      'target': {'target', 'host', 'jvm'},
      'trace': {'trace', 'ntrace', 'stream'},
      'image': {'picimage', 'no-image'},
      'debuggable': {'ndebuggable', 'debuggable'},
      'gc': {'gcstress', 'gcverify', 'cms'},
      'prebuild': {'no-prebuild', 'prebuild'},
      'relocate': {'relocate', 'no-relocate'},
      'jni': {'jni', 'forcecopy', 'checkjni'},
      'address_sizes': {'64', '32'},
      'jvmti': {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                'field-stress', 'step-stress'},
      'compiler': {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
                   'optimizing', 'speed-profile', 'baseline'},
  })

  for variants in VARIANT_TYPE_DICT.values():
    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(variants)

  # Run-tests are the entries under art/test whose names start with a digit
  # (e.g. 001-HelloWorld).
  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
  for entry in os.listdir(test_dir):
    if fnmatch.fnmatch(entry, '[0-9]*'):
      RUN_TEST_SET.add(entry)
254
255
def setup_test_env():
  """The method sets default value for the various variants of the tests if they
  are already not set.

  Also derives the worker-thread count, fetches per-device extra arguments,
  and disables ANSI colors when stdout is not a terminal.
  """
  if env.ART_TEST_BISECTION:
    # Bisection implies no-prebuild and, below, forces sequential execution.
    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
    env.ART_TEST_RUN_TEST_PREBUILD = False
    # Bisection search writes to standard output.
    env.ART_TEST_QUIET = False

  global _user_input_variants
  global run_all_configs
  # These are the default variant-options we will use if nothing in the group is specified.
  default_variants = {
      'target': {'host', 'target'},
      'prebuild': {'prebuild'},
      'jvmti': { 'no-jvmti'},
      'compiler': {'optimizing',
                   'jit',
                   'interpreter',
                   'interp-ac',
                   'speed-profile'},
      'relocate': {'no-relocate'},
      'trace': {'ntrace'},
      'gc': {'cms'},
      'jni': {'checkjni'},
      'image': {'picimage'},
      'debuggable': {'ndebuggable'},
      'run': {'debug'},
      # address_sizes_target depends on the target so it is dealt with below.
  }
  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
  # want to pick up if we pass --all.
  default_variants_keys = default_variants.keys()
  if run_all_configs:
    default_variants = VARIANT_TYPE_DICT

  # Only fill in groups the user left empty; explicit choices win.
  for key in default_variants_keys:
    if not _user_input_variants[key]:
      _user_input_variants[key] = default_variants[key]

  # Map each target kind ('host'/'target') to the address sizes to test.
  _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
  if not _user_input_variants['address_sizes']:
    _user_input_variants['address_sizes_target']['target'].add(
        env.ART_PHONY_TEST_TARGET_SUFFIX)
    _user_input_variants['address_sizes_target']['host'].add(
        env.ART_PHONY_TEST_HOST_SUFFIX)
    if env.ART_TEST_RUN_TEST_2ND_ARCH:
      _user_input_variants['address_sizes_target']['host'].add(
          env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
      _user_input_variants['address_sizes_target']['target'].add(
          env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
  else:
    # The user picked explicit sizes; apply them to both host and target.
    _user_input_variants['address_sizes_target']['host'] = _user_input_variants['address_sizes']
    _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']

  global n_thread
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
    if n_thread == 0:
      # Use only part of the cores since fully loading the device tends to lead to timeouts.
      fraction = 1.0 if env.ART_TEST_ON_VM else 0.75
      n_thread = max(1, int(get_target_cpu_count() * fraction))
      if device_name == 'fugu':
        n_thread = 1
  else:
    device_name = "host"
    if n_thread == 0:
      n_thread = get_host_cpu_count()
  print_text("Concurrency: {} ({})\n".format(n_thread, device_name))

  global extra_arguments
  for target in _user_input_variants['target']:
    extra_arguments[target] = find_extra_device_arguments(target)

  # Disable colored output when not writing to a terminal (e.g. log files).
  if not sys.stdout.isatty():
    global COLOR_ERROR
    global COLOR_PASS
    global COLOR_SKIP
    global COLOR_NORMAL
    COLOR_ERROR = ''
    COLOR_PASS = ''
    COLOR_SKIP = ''
    COLOR_NORMAL = ''
340
def find_extra_device_arguments(target):
  """Returns any extra run-test arguments configured for this device.

  Looks up the device (or 'host'/'jvm' for non-target runs) in device_config;
  defaults to no extra arguments when the device has no entry.
  """
  device_name = get_device_name() if target == 'target' else target
  config_entry = device_config.get(device_name, { 'run-test-args' : [] })
  return config_entry['run-test-args']
349
def get_device_name():
  """
  Gets the value of ro.product.name from remote device (unless running on a VM).

  On a VM, returns the VM's `uname -a` output instead. Falls back to
  "UNKNOWN_TARGET" when adb cannot determine the device type.
  """
  if env.ART_TEST_ON_VM:
    return subprocess.Popen(f"{env.ART_SSH_CMD} uname -a".split(),
                            stdout = subprocess.PIPE,
                            universal_newlines=True).stdout.read().strip()

  proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
                          stderr=subprocess.STDOUT,
                          stdout = subprocess.PIPE,
                          universal_newlines=True)
  # only wait 2 seconds.
  timeout_val = 2

  if env.ART_TEST_RUN_ON_ARM_FVP:
    # Increase timeout to 200 seconds due to the emulation overhead on FVP.
    timeout_val = 200

  # NOTE(review): communicate() raises subprocess.TimeoutExpired (uncaught
  # here) if adb hangs beyond timeout_val — presumably intentional so the
  # runner fails fast; confirm with callers.
  output = proc.communicate(timeout = timeout_val)[0]
  success = not proc.wait()
  if success:
    return output.strip()
  else:
    print_text("Unable to determine device type!\n")
    print_text("Continuing anyway.\n")
    return "UNKNOWN_TARGET"
378
def run_tests(tests):
  """This method generates variants of the tests to be run and executes them.

  Builds the cartesian product of the requested variants, computes
  total_test_count, constructs the run-test command line for each
  combination, and executes them on a thread pool of n_thread workers.

  Args:
    tests: The set of tests to be run.
  """
  args_all = []

  # jvm does not run with all these combinations,
  # or at least it doesn't make sense for most of them.
  # TODO: support some jvm variants like jvmti ?
  target_input_variants = _user_input_variants['target']
  uncombinated_target_input_variants = []
  if 'jvm' in target_input_variants:
    _user_input_variants['target'].remove('jvm')
    uncombinated_target_input_variants.append('jvm')

  global total_test_count
  total_test_count = len(tests)
  # Every variant group except 'target' and the address sizes multiplies the
  # combination count; target/address-size pairs are counted separately below.
  if target_input_variants:
    for variant_type in VARIANT_TYPE_DICT:
      if not (variant_type == 'target' or 'address_sizes' in variant_type):
        total_test_count *= len(_user_input_variants[variant_type])
  target_address_combinations = 0
  for target in target_input_variants:
    for address_size in _user_input_variants['address_sizes_target'][target]:
      target_address_combinations += 1
  # jvm runs once per test with no address-size suffix.
  target_address_combinations += len(uncombinated_target_input_variants)
  total_test_count *= target_address_combinations

  # Global flags shared by every combination.
  if env.ART_TEST_WITH_STRACE:
    args_all += ['--strace']

  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
    args_all += ['--always-clean']

  if env.ART_TEST_BISECTION:
    args_all += ['--bisection-search']

  if gdb:
    args_all += ['--gdb']
    if gdb_arg:
      args_all += ['--gdb-arg', gdb_arg]

  if dump_cfg:
    args_all += ['--dump-cfg', dump_cfg]
  if gdb_dex2oat:
    args_all += ['--gdb-dex2oat']
    if gdb_dex2oat_args:
      args_all += ['--gdb-dex2oat-args', f'{gdb_dex2oat_args}']

  args_all += run_test_option

  if runtime_option:
    for opt in runtime_option:
      args_all += ['--runtime-option', opt]
  if with_agent:
    for opt in with_agent:
      args_all += ['--with-agent', opt]

  if dex2oat_jobs != -1:
    args_all += ['--dex2oat-jobs', str(dex2oat_jobs)]

  def iter_config(tests, input_variants, user_input_variants):
    # Cartesian product over every variant group for the given targets.
    config = itertools.product(tests, input_variants, user_input_variants['run'],
                                 user_input_variants['prebuild'], user_input_variants['compiler'],
                                 user_input_variants['relocate'], user_input_variants['trace'],
                                 user_input_variants['gc'], user_input_variants['jni'],
                                 user_input_variants['image'],
                                 user_input_variants['debuggable'], user_input_variants['jvmti'])
    return config

  # [--host, --target] combines with all the other user input variants.
  config = iter_config(tests, target_input_variants, _user_input_variants)
  # [--jvm] currently combines with nothing else. most of the extra flags we'd insert
  # would be unrecognizable by the 'java' binary, so avoid inserting any extra flags for now.
  uncombinated_config = iter_config(tests, uncombinated_target_input_variants, { 'run': [''],
      'prebuild': [''], 'compiler': [''],
      'relocate': [''], 'trace': [''],
      'gc': [''], 'jni': [''],
      'image': [''],
      'debuggable': [''], 'jvmti': ['']})

  def start_combination(executor, config_tuple, global_options, address_size):
      # Builds the full test name and argument list for one variant
      # combination and submits it to the executor. Returns the Future.
      test, target, run, prebuild, compiler, relocate, trace, gc, \
      jni, image, debuggable, jvmti = config_tuple

      # NB The order of components here should match the order of
      # components in the regex parser in parse_test_name.
      test_name = 'test-art-'
      test_name += target + '-run-test-'
      test_name += run + '-'
      test_name += prebuild + '-'
      test_name += compiler + '-'
      test_name += relocate + '-'
      test_name += trace + '-'
      test_name += gc + '-'
      test_name += jni + '-'
      test_name += image + '-'
      test_name += debuggable + '-'
      test_name += jvmti + '-'
      test_name += test
      test_name += address_size

      # Used by run_test() to match against DISABLED_TEST_CONTAINER.
      variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
                     image, debuggable, jvmti, address_size}

      args_test = global_options.copy()

      if target == 'host':
        args_test += ['--host']
      elif target == 'jvm':
        args_test += ['--jvm']

      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
      # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
      # for target tests.
      if target == 'target':
        if env.ART_TEST_CHROOT:
          args_test += ['--chroot', env.ART_TEST_CHROOT]
        if env.ART_TEST_ANDROID_ROOT:
          args_test += ['--android-root', env.ART_TEST_ANDROID_ROOT]
        if env.ART_TEST_ANDROID_I18N_ROOT:
            args_test += ['--android-i18n-root', env.ART_TEST_ANDROID_I18N_ROOT]
        if env.ART_TEST_ANDROID_ART_ROOT:
          args_test += ['--android-art-root', env.ART_TEST_ANDROID_ART_ROOT]
        if env.ART_TEST_ANDROID_TZDATA_ROOT:
          args_test += ['--android-tzdata-root', env.ART_TEST_ANDROID_TZDATA_ROOT]

      # Translate each variant value into its run-test command-line flag(s).
      if run == 'ndebug':
        args_test += ['-O']

      if prebuild == 'prebuild':
        args_test += ['--prebuild']
      elif prebuild == 'no-prebuild':
        args_test += ['--no-prebuild']

      if compiler == 'optimizing':
        args_test += ['--optimizing']
      elif compiler == 'interpreter':
        args_test += ['--interpreter']
      elif compiler == 'interp-ac':
        args_test += ['--switch-interpreter', '--verify-soft-fail']
      elif compiler == 'jit':
        args_test += ['--jit']
      elif compiler == 'jit-on-first-use':
        args_test += ['--jit', '--runtime-option', '-Xjitthreshold:0']
      elif compiler == 'speed-profile':
        args_test += ['--random-profile']
      elif compiler == 'baseline':
        args_test += ['--baseline']

      if relocate == 'relocate':
        args_test += ['--relocate']
      elif relocate == 'no-relocate':
        args_test += ['--no-relocate']

      if trace == 'trace':
        args_test += ['--trace']
      elif trace == 'stream':
        args_test += ['--trace', '--stream']

      if gc == 'gcverify':
        args_test += ['--gcverify']
      elif gc == 'gcstress':
        args_test += ['--gcstress']

      if jni == 'forcecopy':
        args_test += ['--runtime-option', '-Xjniopts:forcecopy']
      elif jni == 'checkjni':
        args_test += ['--runtime-option', '-Xcheck:jni']

      if image == 'no-image':
        args_test += ['--no-image']

      if debuggable == 'debuggable':
        args_test += ['--debuggable', '--runtime-option', '-Xopaque-jni-ids:true']

      if jvmti == 'jvmti-stress':
        args_test += ['--jvmti-trace-stress', '--jvmti-redefine-stress', '--jvmti-field-stress']
      elif jvmti == 'field-stress':
        args_test += ['--jvmti-field-stress']
      elif jvmti == 'trace-stress':
        args_test += ['--jvmti-trace-stress']
      elif jvmti == 'redefine-stress':
        args_test += ['--jvmti-redefine-stress']
      elif jvmti == 'step-stress':
        args_test += ['--jvmti-step-stress']

      if address_size == '64':
        args_test += ['--64']

      # b/36039166: Note that the path lengths must kept reasonably short.
      temp_path = tempfile.mkdtemp(dir=env.ART_HOST_TEST_DIR)
      args_test = ['--temp-path', temp_path] + args_test

      # Run the run-test script using the prebuilt python.
      python3_bin = env.ANDROID_BUILD_TOP + "/prebuilts/build-tools/path/linux-x86/python3"
      run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
      args_test = [python3_bin, run_test_sh] + args_test + extra_arguments[target] + [test]
      return executor.submit(run_test, args_test, test, variant_set, test_name)

  global n_thread
  with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
    test_futures = []
    for config_tuple in config:
      target = config_tuple[1]
      for address_size in _user_input_variants['address_sizes_target'][target]:
        test_futures.append(start_combination(executor, config_tuple, args_all, address_size))

    for config_tuple in uncombinated_config:
      test_futures.append(
          start_combination(executor, config_tuple, args_all, ""))  # no address size

    try:
      tests_done = 0
      # Print results as they complete; stop early on the first failure
      # unless ART_TEST_KEEP_GOING is set.
      for test_future in concurrent.futures.as_completed(f for f in test_futures if f):
        (test, status, failure_info, test_time) = test_future.result()
        tests_done += 1
        print_test_info(tests_done, test, status, failure_info, test_time)
        if failure_info and not env.ART_TEST_KEEP_GOING:
          for f in test_futures:
            f.cancel()
          break
    except KeyboardInterrupt:
      # Cancel pending work and kill already-running children on Ctrl-C.
      for f in test_futures:
        f.cancel()
      child_process_tracker.kill_all()
    executor.shutdown(True)
608
609def _popen(**kwargs):
610  if sys.version_info.major == 3 and sys.version_info.minor >= 6:
611    return subprocess.Popen(encoding=sys.stdout.encoding, **kwargs)
612  return subprocess.Popen(**kwargs)
613
def run_test(args, test, test_variant, test_name):
  """Runs the test.

  It invokes art/test/run-test script to run the test. The output of the script
  is checked, and if it ends with "Succeeded!", it assumes that the tests
  passed, otherwise, put it in the list of failed test. Before actually running
  the test, it also checks if the test is placed in the list of disabled tests,
  and if yes, it skips running it, and adds the test in the list of skipped
  tests.

  Args:
    args: The command to be used to invoke the script
    test: The name of the test without the variant information.
    test_variant: The set of variant for the test.
    test_name: The name of the test along with the variants.

  Returns: a tuple of testname, status, optional failure info, and test time.
  """
  try:
    command = ' '.join(args)

    if is_test_disabled(test, test_variant):
      test_skipped = True
      test_time = datetime.timedelta()
    else:
      test_skipped = False
      test_start_time = time.monotonic()
      if verbose:
        print_text("Starting %s at %s\n" % (test_name, test_start_time))
      environ = dict(os.environ)
      environ["FULL_TEST_NAME"] = test_name
      # Under gdb the child needs the console, so its stdout is not captured.
      # start_new_session=True puts the child in its own process group so a
      # timeout can kill the whole group (see os.killpg below).
      if gdb or gdb_dex2oat:
        proc = _popen(
          args=args,
          env=environ,
          stderr=subprocess.STDOUT,
          universal_newlines=True,
          start_new_session=True
        )
      else:
        proc = _popen(
          args=args,
          env=environ,
          stderr=subprocess.STDOUT,
          stdout = subprocess.PIPE,
          universal_newlines=True,
          start_new_session=True,
        )
      # wait() registers the child with the tracker so KeyboardInterrupt
      # handling can kill it; raises subprocess.TimeoutExpired on timeout.
      script_output, return_value = child_process_tracker.wait(proc, timeout)
      test_passed = not return_value
      test_time_seconds = time.monotonic() - test_start_time
      test_time = datetime.timedelta(seconds=test_time_seconds)

    if not test_skipped:
      if test_passed:
        return (test_name, 'PASS', None, test_time)
      else:
        failed_tests.append((test_name, str(command) + "\n" + script_output))
        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
    elif not dry_run:
      skipped_tests.append(test_name)
      return (test_name, 'SKIP', None, test_time)
    else:
      # In dry-run mode a disabled test is reported as passing.
      return (test_name, 'PASS', None, test_time)
  except subprocess.TimeoutExpired as e:
    if verbose:
      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
    test_time_seconds = time.monotonic() - test_start_time
    test_time = datetime.timedelta(seconds=test_time_seconds)
    failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))

    # HACK(b/142039427): Print extra backtraces on timeout.
    if "-target-" in test_name and not env.ART_TEST_ON_VM:
      for i in range(8):
        # The dalvikvm binary name carries the address-size suffix
        # (e.g. dalvikvm64), taken from the last two test-name characters.
        proc_name = "dalvikvm" + test_name[-2:]
        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
        for pid in pidof.stdout.decode("ascii").split():
          if i >= 4:
            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
            subprocess.run(["adb", "shell", "debuggerd", pid])
            time.sleep(10)
          task_dir = "/proc/%s/task" % pid
          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
          for tid in tids.stdout.decode("ascii").split():
            for status in ["stat", "status"]:
              filename = "%s/%s/%s" % (task_dir, tid, status)
              print_text("Content of %s\n" % (filename))
              subprocess.run(["adb", "shell", "cat", filename])
        time.sleep(60)

    # The python documentation states that it is necessary to actually kill the process.
    os.killpg(proc.pid, signal.SIGKILL)
    script_output = proc.communicate()

    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
  except Exception as e:
    failed_tests.append((test_name, str(e)))
    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())
712
@lru_cache
def get_console_width(default=100):
  """Returns the console width in characters, or `default` when unknown.

  Shells out to `stty size` (which reads the controlling terminal via stdin).
  The command may be missing, fail (e.g. under 'nohup' or with no tty), or —
  previously uncaught — exit 0 with output that does not contain a column
  count; all of those now fall back to `default` instead of crashing the
  result printer.
  """
  try:
    proc = subprocess.run(['stty', 'size'], capture_output=True)
  except OSError:
    return default  # 'stty' binary not available.
  if proc.returncode != 0:
    return default
  try:
    return int(proc.stdout.decode("utf8").split()[1])
  except (IndexError, ValueError):
    return default  # Unexpected 'stty size' output format.
718
def print_test_info(test_count, test_name, result, failed_test_info="",
                    test_time=datetime.timedelta()):
  """Print the continuous test information

  If verbose is set to True, it continuously prints test status information
  on a new line.
  If verbose is set to False, it keeps on erasing test
  information by overriding it with the latest test information. Also,
  in this case it strictly makes sure that the information length doesn't
  exceed the console width. It does so by shortening the test_name.

  When a test fails, it prints the output of the run-test script and
  command used to invoke the script. It doesn't override the failing
  test information in either of the cases.
  """

  info = ''
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    info = '\r' + ' ' * get_console_width() + '\r'
  try:
    percent = (test_count * 100) / total_test_count
    progress_info = ('[ %d%% %d/%d ]') % (
      percent,
      test_count,
      total_test_count)
    if test_time.total_seconds() != 0 and verbose:
      info += '(%s)' % str(test_time)


    if result == 'FAIL' or result == 'TIMEOUT':
      # Failures always end with a newline so they are never overwritten.
      if not verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL)
      else:
        info += ('%s %s %s\n%s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL,
          failed_test_info)
    else:
      result_text = ''
      if result == 'PASS':
        result_text += COLOR_PASS + 'PASS' + COLOR_NORMAL
      elif result == 'SKIP':
        result_text += COLOR_SKIP + 'SKIP' + COLOR_NORMAL

      if verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          result_text)
      else:
        # Truncate the test name from the left so the status line fits the
        # console; no trailing newline so the next line can overwrite it.
        total_output_length = 2 # Two spaces
        total_output_length += len(progress_info)
        total_output_length += len(result)
        allowed_test_length = get_console_width() - total_output_length
        test_name_len = len(test_name)
        if allowed_test_length < test_name_len:
          test_name = ('...%s') % (
            test_name[-(allowed_test_length - 3):])
        info += ('%s %s %s') % (
          progress_info,
          test_name,
          result_text)
    send_csv_result(test_name, result)
    print_text(info)
  except Exception as e:
    # Printing must never take down the runner; record the test as failed.
    print_text(('%s\n%s\n') % (test_name, str(e)))
    failed_tests.append(test_name)
793
def verify_knownfailure_entry(entry):
  """Validates the field names and value types of one knownfailures.json entry.

  Args:
    entry: A dict parsed from art/test/knownfailures.json.

  Raises:
    ValueError: If the entry contains an unknown field (previously this
      surfaced as a bare KeyError with no context), or a field whose value
      has an unsupported type.
  """
  supported_field = {
      'tests' : (list, str),
      'test_patterns' : (list,),
      'description' : (list, str),
      'bug' : (str,),
      'variant' : (str,),
      'devices': (list, str),
      'env_vars' : (dict,),
  }
  for field, value in entry.items():
    if field not in supported_field:
      raise ValueError('%s is not a supported field\n%s' % (field, str(entry)))
    # isinstance (rather than exact type membership) also accepts subclasses,
    # e.g. collections.OrderedDict for 'env_vars'.
    if not isinstance(value, supported_field[field]):
      raise ValueError('%s is not supported type for %s\n%s' % (
          str(type(value)),
          field,
          str(entry)))
811
def get_disabled_test_info(device_name):
  """Generate set of known failures.

  It parses the art/test/knownfailures.json file to generate the list of
  disabled tests.

  Args:
    device_name: The ro.product.name of the device under test, or None when
      not running on a target device; used to match "devices" entries.

  Returns:
    The method returns a dict of tests mapped to the variants list
    for which the test should not be run.
  """
  known_failures_file = env.ANDROID_BUILD_TOP + '/art/test/knownfailures.json'
  with open(known_failures_file) as known_failures_json:
    known_failures_info = json.loads(known_failures_json.read())

  disabled_test_info = {}
  for failure in known_failures_info:
    verify_knownfailure_entry(failure)
    # 'tests' may be a single name or a list of names.
    tests = failure.get('tests', [])
    if isinstance(tests, str):
      tests = [tests]
    patterns = failure.get("test_patterns", [])
    if (not isinstance(patterns, list)):
      raise ValueError("test_patterns is not a list in %s" % failure)

    # Expand regex patterns against the full run-test set.
    tests += [f for f in RUN_TEST_SET if any(re.match(pat, f) is not None for pat in patterns)]
    variants = parse_variants(failure.get('variant'))

    # Treat a '"devices": "<foo>"' equivalent to 'target' variant if
    # "foo" is present in "devices".
    device_names = failure.get('devices', [])
    if isinstance(device_names, str):
      device_names = [device_names]
    if len(device_names) != 0:
      if device_name in device_names:
        variants.add('target')
      else:
        # Skip adding test info as device_name is not present in "devices" entry.
        continue

    env_vars = failure.get('env_vars')

    # Only honor the entry when its env-var preconditions hold.
    if check_env_vars(env_vars):
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (
              test))
        # Merge with any variants already disabled for this test.
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          # NOTE(review): this stores the same set object for every test of
          # this entry; later union() calls replace rather than mutate it,
          # so the aliasing appears benign — confirm before mutating in place.
          disabled_test_info[test] = variants

  return disabled_test_info
864
def gather_disabled_test_info():
  """Populates DISABLED_TEST_CONTAINER from knownfailures.json.

  The device name is only looked up when 'target' tests were requested,
  since it requires talking to the device.
  """
  global DISABLED_TEST_CONTAINER
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
  else:
    device_name = None
  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)
869
def check_env_vars(env_vars):
  """Checks if the env variables are set as required to run the test.

  Args:
    env_vars: A dict of required environment variable names to values,
      or a falsy value when no requirements exist.

  Returns:
    True if all the env variables are set as required, otherwise False.
  """
  if not env_vars:
    return True
  return all(env.get_env(name) == required
             for name, required in env_vars.items())
883
884
def is_test_disabled(test, variant_set):
  """Checks if the test along with the variant_set is disabled.

  Args:
    test: The name of the test as in art/test directory.
    variant_set: Variants to be used for the test.
  Returns:
    True, if the test is disabled.
  """
  # A dry run reports every test as disabled so nothing executes.
  if dry_run:
    return True
  if test in env.EXTRA_DISABLED_TESTS:
    return True
  # --no-skips overrides every knownfailures.json entry.
  if ignore_skips:
    return False
  # Disabled when any known-failure variant combination for this test is
  # fully contained in the requested variant set.
  for disabled_combo in DISABLED_TEST_CONTAINER.get(test, {}):
    if all(variant in variant_set for variant in disabled_combo):
      return True
  # Also disabled when the variant set includes a nonfunctional combination.
  return any(bad_combo.issubset(variant_set)
             for bad_combo in NONFUNCTIONAL_VARIANT_SETS)
913
914
def parse_variants(variants):
  """Parses a variant string from art/test/knownfailures.json.

  The string is a '|'-separated list of alternatives; each alternative is
  a '&'-separated combination of variant names. A falsy input means the
  entry applies to every known variant.

  Returns:
    A set of frozensets, one frozenset per variant combination.

  Raises:
    ValueError: If a variant name is not in TOTAL_VARIANTS_SET.
  """
  if not variants:
    # No restriction given: expand to every known variant.
    variants = '|'.join(TOTAL_VARIANTS_SET)
  variant_list = set()
  for alternative in variants.split('|'):
    combination = set()
    for name in alternative.split('&'):
      name = name.strip()
      if name not in TOTAL_VARIANTS_SET:
        raise ValueError('%s is not a valid variant' % (
            name))
      combination.add(name)
    variant_list.add(frozenset(combination))
  return variant_list
937
def print_text(output):
  """Writes 'output' to stdout and flushes so progress appears immediately."""
  stream = sys.stdout
  stream.write(output)
  stream.flush()
941
def print_analysis():
  """Prints the final summary: pass ratio, skipped tests and failures."""
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    eraser_text = '\r' + ' ' * get_console_width() + '\r'
    print_text(eraser_text)

  # Prints information about the total tests run.
  # E.g., "2/38 (5%) tests passed".
  passed_test_count = total_test_count - len(skipped_tests) - len(failed_tests)
  # Guard against ZeroDivisionError when no tests were selected at all.
  passed_test_percentage = (
      (passed_test_count * 100) / total_test_count if total_test_count else 0)
  passed_test_information = ('%d/%d (%d%%) %s passed.\n') % (
      passed_test_count,
      total_test_count,
      passed_test_percentage,
      'tests' if passed_test_count > 1 else 'test')
  print_text(passed_test_information)

  # Prints the list of skipped tests, if any.
  if skipped_tests:
    print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
    for test in skipped_tests:
      print_text(test + '\n')
    print_text('\n')

  # Prints the list of failed tests, if any.
  # NOTE(review): entries are indexed as (test_name, error_text) pairs here;
  # confirm every code path appends pairs rather than bare test names.
  if failed_tests:
    print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n')
    for test_info in failed_tests:
      print_text(('%s\n%s\n' % (test_info[0], test_info[1])))
    print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
    for failed_test in sorted([test_info[0] for test_info in failed_tests]):
      print_text(('%s\n' % (failed_test)))
975
976test_name_matcher = None
977def extract_test_name(test_name):
978  """Parses the test name and returns all the parts"""
979  global test_name_matcher
980  if test_name_matcher is None:
981    regex = '^test-art-'
982    regex += '(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-'
983    regex += 'run-test-'
984    regex += '(' + '|'.join(VARIANT_TYPE_DICT['run']) + ')-'
985    regex += '(' + '|'.join(VARIANT_TYPE_DICT['prebuild']) + ')-'
986    regex += '(' + '|'.join(VARIANT_TYPE_DICT['compiler']) + ')-'
987    regex += '(' + '|'.join(VARIANT_TYPE_DICT['relocate']) + ')-'
988    regex += '(' + '|'.join(VARIANT_TYPE_DICT['trace']) + ')-'
989    regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
990    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
991    regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
992    regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
993    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jvmti']) + ')-'
994    regex += '(' + '|'.join(RUN_TEST_SET) + ')'
995    regex += '(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$'
996    test_name_matcher = re.compile(regex)
997  match = test_name_matcher.match(test_name)
998  if match:
999    return list(match.group(i) for i in range(1,15))
1000  raise ValueError(test_name + " is not a valid test")
1001
def parse_test_name(test_name):
  """Parses the testname provided by the user.
  It supports two types of test_name:
  1) Like 001-HelloWorld. In this case, it will just verify if the test actually
  exists and if it does, it returns the testname.
  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-pointer-ids-picimage-ndebuggable-001-HelloWorld32
  In this case, it will parse all the variants and check if they are placed
  correctly. If yes, it will set the various VARIANT_TYPES to use the
  variants required to run the test. Again, it returns the test_name
  without the variant information like 001-HelloWorld.
  """
  # Prefix match against known tests handles the short form.
  matching_tests = {test for test in RUN_TEST_SET
                    if test.startswith(test_name)}
  if matching_tests:
    return matching_tests

  # Long form: decompose the name and record each requested variant.
  parsed = extract_test_name(test_name)
  variant_keys = ('target', 'run', 'prebuild', 'compiler', 'relocate',
                  'trace', 'gc', 'jni', 'image', 'debuggable', 'jvmti')
  for key, value in zip(variant_keys, parsed):
    _user_input_variants[key].add(value)
  _user_input_variants['address_sizes'].add(parsed[12])
  # parsed[11] is the bare test name, e.g. '001-HelloWorld'.
  return {parsed[11]}
1034
1035
def get_target_cpu_count():
  """Returns the number of CPUs on the target device.

  Reads /sys/devices/system/cpu/present (e.g. "0-7") over adb or ssh and
  converts the last CPU index into a count.

  Raises:
    ValueError: If the CPU information cannot be read or parsed.
  """
  if env.ART_TEST_ON_VM:
    command = f"{env.ART_SSH_CMD} cat /sys/devices/system/cpu/present"
  else:
    command = 'adb shell cat /sys/devices/system/cpu/present'
  cpu_info_proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
  # communicate() reads all output AND waits for the child to exit,
  # avoiding the zombie process that stdout.read() alone leaves behind.
  cpu_info, _ = cpu_info_proc.communicate()
  if isinstance(cpu_info, bytes):
    cpu_info = cpu_info.decode('utf-8')
  cpu_info_regex = r'\d*-(\d*)'
  match = re.match(cpu_info_regex, cpu_info)
  if match:
    return int(match.group(1)) + 1  # Add one to convert from "last-index" to "count"
  else:
    raise ValueError('Unable to predict the concurrency for the target. '
                     'Is device connected?')
1052
1053
def get_host_cpu_count():
  """Returns the number of CPUs available on the host machine."""
  return multiprocessing.cpu_count()
1056
1057
def parse_option():
  """Parses command-line options and applies them to the module globals.

  Builds the argparse parser (global options plus one generated group per
  variant type), parses sys.argv, and mutates the module-level settings
  (verbose, n_thread, build, timeout, ...) accordingly.

  Returns:
    The set of tests named on the command line, or the full RUN_TEST_SET
    when no test was specified.
  """
  global verbose
  global dry_run
  global ignore_skips
  global n_thread
  global build
  global dist
  global gdb
  global gdb_arg
  global dump_cfg
  global gdb_dex2oat
  global gdb_dex2oat_args
  global runtime_option
  global run_test_option
  global timeout
  global dex2oat_jobs
  global run_all_configs
  global with_agent
  global csv_result

  parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
  parser.add_argument('tests', action='extend', nargs="*", help='name(s) of the test(s)')
  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)'
      ' (deprecated: use positional arguments at the end without any option instead)')
  global_group = parser.add_argument_group('Global options',
                                           'Options that affect all tests being run')
  global_group.add_argument('-j', type=int, dest='n_thread', help="""Number of CPUs to use.
                            Defaults to half of CPUs on target and all CPUs on host.""")
  global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
  global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
  global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                            help="Skip the given test in all circumstances.")
  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                            help="""Don't skip any run-test configurations listed in
                            knownfailures.json.""")
  global_group.add_argument('--no-build-dependencies',
                            action='store_false', dest='build',
                            help="""Don't build dependencies under any circumstances. This is the
                            behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.""")
  global_group.add_argument('-b', '--build-dependencies',
                            action='store_true', dest='build',
                            help="""Build dependencies under all circumstances. By default we will
                            not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.""")
  global_group.add_argument('--dist',
                            action='store_true', dest='dist',
                            help="""If dependencies are to be built, pass `dist` to the build
                            command line. You may want to also set the DIST_DIR environment
                            variable when using this flag.""")
  global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
  global_group.add_argument('--gdb', action='store_true', dest='gdb')
  global_group.add_argument('--gdb-arg', dest='gdb_arg')
  global_group.add_argument('--dump-cfg', dest='dump_cfg',
                            help="""Dump the CFG to the specified host path.
                            Example \"--dump-cfg <full-path>/graph.cfg\".""")
  global_group.add_argument('--gdb-dex2oat', action='store_true', dest='gdb_dex2oat')
  global_group.add_argument('--gdb-dex2oat-args', dest='gdb_dex2oat_args')
  global_group.add_argument('--run-test-option', action='append', dest='run_test_option',
                            default=[],
                            help="""Pass an option, unaltered, to the run-test script.
                            This should be enclosed in single-quotes to allow for spaces. The option
                            will be split using shlex.split() prior to invoking run-test.
                            Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\".""")
  global_group.add_argument('--with-agent', action='append', dest='with_agent',
                            help="""Pass an agent to be attached to the runtime""")
  global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                            help="""Pass an option to the runtime. Runtime options
                            starting with a '-' must be separated by a '=', for
                            example '--runtime-option=-Xjitthreshold:0'.""")
  global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs',
                            help='Number of dex2oat jobs')
  global_group.add_argument('-a', '--all', action='store_true', dest='run_all',
                            help="Run all the possible configurations for the input test set")
  global_group.add_argument('--csv-results', action='store', dest='csv_result', default=None,
                            type=argparse.FileType('w'), help='Store a CSV record of all results.')
  # One option group per variant type, with an --all-<type> meta-flag plus
  # one boolean flag per variant.
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    var_group = parser.add_argument_group(
        '{}-type Options'.format(variant_type),
        "Options that control the '{}' variants.".format(variant_type))
    var_group.add_argument('--all-' + variant_type,
                           action='store_true',
                           dest='all_' + variant_type,
                           help='Enable all variants of ' + variant_type)
    for variant in variant_set:
      flag = '--' + variant
      var_group.add_argument(flag, action='store_true', dest=variant)

  options = vars(parser.parse_args())
  if options['csv_result'] is not None:
    csv_result = options['csv_result']
    setup_csv_result()
  # Handle the --all-<type> meta-options
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    if options['all_' + variant_type]:
      for variant in variant_set:
        options[variant] = True

  tests = None
  env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
  if options['tests']:
    tests = set()
    for test_name in options['tests']:
      tests |= parse_test_name(test_name)

  # Record every explicitly-requested variant.
  for variant_type in VARIANT_TYPE_DICT:
    for variant in VARIANT_TYPE_DICT[variant_type]:
      if options.get(variant):
        _user_input_variants[variant_type].add(variant)

  if options['verbose']:
    verbose = True
  if options['n_thread']:
    n_thread = max(1, options['n_thread'])
  ignore_skips = options['ignore_skips']
  if options['dry_run']:
    dry_run = True
    verbose = True
  build = options['build']
  dist = options['dist']
  if options['gdb']:
    # gdb sessions are interactive; force a single worker thread.
    n_thread = 1
    gdb = True
    if options['gdb_arg']:
      gdb_arg = options['gdb_arg']
  if options['dump_cfg']:
    dump_cfg = options['dump_cfg']
  if options['gdb_dex2oat']:
    n_thread = 1
    gdb_dex2oat = True
    if options['gdb_dex2oat_args']:
      gdb_dex2oat_args = options['gdb_dex2oat_args']
  runtime_option = options['runtime_option']
  with_agent = options['with_agent']
  run_test_option = sum(map(shlex.split, options['run_test_option']), [])

  timeout = options['timeout']
  if options['dex2oat_jobs']:
    dex2oat_jobs = options['dex2oat_jobs']
  if options['run_all']:
    run_all_configs = True

  return tests or RUN_TEST_SET
1200
def main():
  """Entry point: builds test dependencies (if requested) and runs the tests.

  Exits the process with status 1 when the build or any test fails,
  0 otherwise.
  """
  gather_test_info()
  tests = parse_option()
  setup_test_env()
  gather_disabled_test_info()
  if build:
    build_targets = []
    # Build only the needed shards (depending on the selected tests).
    # Test names carry a numeric prefix, e.g. "001-HelloWorld"; the raw
    # string avoids an invalid-escape warning for '\d' in modern Python.
    shards = set(re.search(r"(\d\d)-", t).group(1) for t in tests)
    if any("hiddenapi" in t for t in tests):
      shards.add("HiddenApi")  # Include special HiddenApi shard.
    for mode in ['host', 'target', 'jvm']:
      if mode in _user_input_variants['target']:
        build_targets += ['test-art-{}-run-test-dependencies'.format(mode)]
        if len(shards) >= 100:
          build_targets += ["art-run-test-{}-data".format(mode)]  # Build all.
        else:
          build_targets += ["art-run-test-{}-data-shard{}".format(mode, s) for s in shards]
    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
    build_command += ' D8='
    if dist:
      build_command += ' dist'
    build_command += ' ' + ' '.join(build_targets)
    print_text('Build command: %s\n' % build_command)
    if subprocess.call(build_command.split()):
      # Debugging for b/62653020
      if env.DIST_DIR:
        shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
      sys.exit(1)

  run_tests(tests)

  print_analysis()
  close_csv_file()

  exit_code = 0 if len(failed_tests) == 0 else 1
  sys.exit(exit_code)
1238
# Script entry point: run the testrunner only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
  main()
1241