#!/usr/bin/env python3
#
# [VPYTHON:BEGIN]
# python_version: "3.8"
# [VPYTHON:END]
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

21"""ART Run-Test TestRunner
22
23The testrunner runs the ART run-tests by simply invoking the script.
24It fetches the list of eligible tests from art/test directory, and list of
25disabled tests from art/test/knownfailures.json. It runs the tests by
26invoking art/test/run-test script and checks the exit value to decide if the
27test passed or failed.
28
29Before invoking the script, first build all the tests dependencies.
30There are two major build targets for building target and host tests
31dependencies:
321) test-art-host-run-test
332) test-art-target-run-test
34
35There are various options to invoke the script which are:
36-t: Either the test name as in art/test or the test name including the variant
37    information. Eg, "-t 001-HelloWorld",
38    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32"
39-j: Number of thread workers to be used. Eg - "-j64"
40--dry-run: Instead of running the test name, just print its name.
41--verbose
42-b / --build-dependencies: to build the dependencies before running the test
43
44To specify any specific variants for the test, use --<<variant-name>>.
45For eg, for compiler type as optimizing, use --optimizing.
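
An illustrative invocation (the script path and the particular flag combination
shown here are just an example of using the options above):
  ./testrunner.py --host --optimizing -j16 001-HelloWorld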


At the end, the script prints the failed and skipped tests, if any.

"""
import argparse
import collections

# b/140161314 diagnostics.
try:
  import concurrent.futures
except Exception:
  import sys
  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
  sys.stdout.flush()
  raise

import contextlib
import csv
import datetime
import fnmatch
import itertools
import json
import multiprocessing
import os
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time

import env
from target_config import target_config
from device_config import device_config

# TODO: make it adjustable per test and for buildbots
#
# Note: this needs to be larger than run-test timeouts, as long as this script
#       does not push the value to run-test. run-test is somewhat complicated:
#                      base: 25m  (large for ASAN)
#        + timeout handling:  2m
#        +   gcstress extra: 20m
#        -----------------------
#                            47m
timeout = 3600 # 60 minutes

if env.ART_TEST_RUN_ON_ARM_FVP:
  # Increase timeout to 600 minutes due to the emulation overhead on FVP.
  timeout = 36000

# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a
# map keyed by test name (like 001-HelloWorld), whose value is the set of
# variant combinations for which the test is disabled.
DISABLED_TEST_CONTAINER = {}

# The dict contains the set of all possible variants for a given variant type.
# For example, for the key 'target', the value is {'target', 'host', 'jvm'}.
# The dict is used to parse the test name given as an argument to run.
VARIANT_TYPE_DICT = {}

# The set of all variant sets that are incompatible and will always be skipped.
NONFUNCTIONAL_VARIANT_SETS = set()

# The set contains all the variants of each type.
TOTAL_VARIANTS_SET = set()

# The colors are used in the output. When a test passes, COLOR_PASS is used,
# and so on.
COLOR_ERROR = '\033[91m'
COLOR_PASS = '\033[92m'
COLOR_SKIP = '\033[93m'
COLOR_NORMAL = '\033[0m'

# The set contains all the possible run tests that are in the art/test
# directory.
RUN_TEST_SET = set()

failed_tests = []
skipped_tests = []

# Flags
n_thread = 0
total_test_count = 0
verbose = False
dry_run = False
ignore_skips = False
build = False
dist = False
gdb = False
gdb_arg = ''
dump_cfg = ''
gdb_dex2oat = False
gdb_dex2oat_args = ''
csv_result = None
csv_writer = None
runtime_option = ''
with_agent = []
zipapex_loc = None
run_test_option = []
dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
run_all_configs = False

# Dict containing extra arguments
extra_arguments = { "host" : [], "target" : [] }

# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
_user_input_variants = collections.defaultdict(set)


class ChildProcessTracker(object):
  """Keeps track of forked child processes to be able to kill them."""

  def __init__(self):
    self.procs = {}             # dict from pid to subprocess.Popen object
    self.mutex = threading.Lock()

  def wait(self, proc, timeout):
    """Waits on the given subprocess and makes it available to kill_all meanwhile.

    Args:
      proc: The subprocess.Popen object to wait on.
      timeout: Timeout passed on to proc.communicate.

    Returns: A tuple of the process stdout output and its return value.
    """
    with self.mutex:
      if self.procs is not None:
        self.procs[proc.pid] = proc
      else:
        os.killpg(proc.pid, signal.SIGKILL) # kill_all has already been called.
    try:
      output = proc.communicate(timeout=timeout)[0]
      return_value = proc.wait()
      return output, return_value
    finally:
      with self.mutex:
        if self.procs is not None:
          del self.procs[proc.pid]

  def kill_all(self):
    """Kills all currently running processes and any future ones."""
    with self.mutex:
      for pid in self.procs:
        os.killpg(pid, signal.SIGKILL)
      self.procs = None # Make future wait() calls kill their processes immediately.

child_process_tracker = ChildProcessTracker()

def setup_csv_result():
  """Set up the CSV output if required."""
  global csv_writer
  csv_writer = csv.writer(csv_result)
  # Write the header.
  csv_writer.writerow(['target', 'run', 'prebuild', 'compiler', 'relocate', 'trace', 'gc',
                       'jni', 'image', 'debuggable', 'jvmti', 'cdex_level', 'test', 'address_size', 'result'])


def send_csv_result(test, result):
  """
  Write a line into the CSV results file if one is available.
  """
  if csv_writer is not None:
    csv_writer.writerow(extract_test_name(test) + [result])

def close_csv_file():
  global csv_result
  global csv_writer
  if csv_result is not None:
    csv_writer = None
    csv_result.flush()
    csv_result.close()
    csv_result = None

def gather_test_info():
  """Gathers information about the tests to be run: the full list of run tests
  from the art/test directory and the mapping of variant names to their types
  (the disabled-test list is gathered separately by gather_disabled_test_info).
  """
  global TOTAL_VARIANTS_SET
  # TODO: Avoid duplication of the variant names in different lists.
  VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
  VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
  VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image'}
  VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
  VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
  VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
  VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
  VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
  VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
  VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                                'field-stress', 'step-stress'}
  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
                                   'optimizing', 'regalloc_gc',
                                   'speed-profile', 'baseline'}

  # Regalloc_GC cannot work with prebuild.
  NONFUNCTIONAL_VARIANT_SETS.add(frozenset({'regalloc_gc', 'prebuild'}))

  for v_type in VARIANT_TYPE_DICT:
    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))

  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
  for f in os.listdir(test_dir):
    if fnmatch.fnmatch(f, '[0-9]*'):
      RUN_TEST_SET.add(f)

def setup_test_env():
  """Sets default values for the various test variants if they are not
  already set.
  """
  if env.ART_TEST_BISECTION:
    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
    env.ART_TEST_RUN_TEST_PREBUILD = False
    # Bisection search writes to standard output.
    env.ART_TEST_QUIET = False

  global _user_input_variants
  global run_all_configs
  # These are the default variant-options we will use if nothing in the group is specified.
  default_variants = {
      'target': {'host', 'target'},
      'prebuild': {'prebuild'},
      'cdex_level': {'cdex-fast'},
      'jvmti': { 'no-jvmti'},
      'compiler': {'optimizing',
                   'jit',
                   'interpreter',
                   'interp-ac',
                   'speed-profile'},
      'relocate': {'no-relocate'},
      'trace': {'ntrace'},
      'gc': {'cms'},
      'jni': {'checkjni'},
      'image': {'picimage'},
      'debuggable': {'ndebuggable'},
      'run': {'debug'},
      # address_sizes_target depends on the target so it is dealt with below.
  }
  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
  # want to pick up if we pass --all.
  default_variants_keys = default_variants.keys()
  if run_all_configs:
    default_variants = VARIANT_TYPE_DICT

  for key in default_variants_keys:
    if not _user_input_variants[key]:
      _user_input_variants[key] = default_variants[key]

  _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
  if not _user_input_variants['address_sizes']:
    _user_input_variants['address_sizes_target']['target'].add(
        env.ART_PHONY_TEST_TARGET_SUFFIX)
    _user_input_variants['address_sizes_target']['host'].add(
        env.ART_PHONY_TEST_HOST_SUFFIX)
    if env.ART_TEST_RUN_TEST_2ND_ARCH:
      _user_input_variants['address_sizes_target']['host'].add(
          env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
      _user_input_variants['address_sizes_target']['target'].add(
          env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
  else:
    _user_input_variants['address_sizes_target']['host'] = _user_input_variants['address_sizes']
    _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']

  global n_thread
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
    if n_thread == 0:
      # Use only part of the cores since fully loading the device tends to lead to timeouts.
      n_thread = max(1, int(get_target_cpu_count() * 0.75))
      if device_name == 'fugu':
        n_thread = 1
  else:
    device_name = "host"
    if n_thread == 0:
      n_thread = get_host_cpu_count()
  print_text("Concurrency: {} ({})\n".format(n_thread, device_name))

  global extra_arguments
  for target in _user_input_variants['target']:
    extra_arguments[target] = find_extra_device_arguments(target)

  if not sys.stdout.isatty():
    global COLOR_ERROR
    global COLOR_PASS
    global COLOR_SKIP
    global COLOR_NORMAL
    COLOR_ERROR = ''
    COLOR_PASS = ''
    COLOR_SKIP = ''
    COLOR_NORMAL = ''

def find_extra_device_arguments(target):
  """
  Gets any extra arguments from the device_config.
  """
  device_name = target
  if target == 'target':
    device_name = get_device_name()
  return device_config.get(device_name, { 'run-test-args' : [] })['run-test-args']

def get_device_name():
  """
  Gets the value of ro.product.name from remote device (unless running on a VM).
  """
  if env.ART_TEST_ON_VM:
    return subprocess.Popen(f"{env.ART_SSH_CMD} uname -a".split(),
                            stdout = subprocess.PIPE,
                            universal_newlines=True).stdout.read().strip()

  proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
                          stderr=subprocess.STDOUT,
                          stdout = subprocess.PIPE,
                          universal_newlines=True)
  # only wait 2 seconds.
  timeout_val = 2

  if env.ART_TEST_RUN_ON_ARM_FVP:
    # Increase timeout to 200 seconds due to the emulation overhead on FVP.
    timeout_val = 200

  output = proc.communicate(timeout = timeout_val)[0]
  success = not proc.wait()
  if success:
    return output.strip()
  else:
    print_text("Unable to determine device type!\n")
    print_text("Continuing anyway.\n")
    return "UNKNOWN_TARGET"

def run_tests(tests):
  """This method generates variants of the tests to be run and executes them.

  Args:
    tests: The set of tests to be run.
  """
  options_all = ''

  # jvm does not run with all these combinations,
  # or at least it doesn't make sense for most of them.
  # TODO: support some jvm variants like jvmti ?
  target_input_variants = _user_input_variants['target']
  uncombinated_target_input_variants = []
  if 'jvm' in target_input_variants:
    _user_input_variants['target'].remove('jvm')
    uncombinated_target_input_variants.append('jvm')

  global total_test_count
  total_test_count = len(tests)
  if target_input_variants:
    for variant_type in VARIANT_TYPE_DICT:
      if not (variant_type == 'target' or 'address_sizes' in variant_type):
        total_test_count *= len(_user_input_variants[variant_type])
  target_address_combinations = 0
  for target in target_input_variants:
    for address_size in _user_input_variants['address_sizes_target'][target]:
      target_address_combinations += 1
  target_address_combinations += len(uncombinated_target_input_variants)
  total_test_count *= target_address_combinations

  if env.ART_TEST_WITH_STRACE:
    options_all += ' --strace'

  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
    options_all += ' --always-clean'

  if env.ART_TEST_BISECTION:
    options_all += ' --bisection-search'

  if gdb:
    options_all += ' --gdb'
    if gdb_arg:
      options_all += ' --gdb-arg ' + gdb_arg

  if dump_cfg:
    options_all += ' --dump-cfg ' + dump_cfg
  if gdb_dex2oat:
    options_all += ' --gdb-dex2oat'
    if gdb_dex2oat_args:
      options_all += ' --gdb-dex2oat-args ' + gdb_dex2oat_args

  options_all += ' ' + ' '.join(run_test_option)

  if runtime_option:
    for opt in runtime_option:
      options_all += ' --runtime-option ' + opt
  if with_agent:
    for opt in with_agent:
      options_all += ' --with-agent ' + opt

  if dex2oat_jobs != -1:
    options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)

  def iter_config(tests, input_variants, user_input_variants):
    config = itertools.product(tests, input_variants, user_input_variants['run'],
                                 user_input_variants['prebuild'], user_input_variants['compiler'],
                                 user_input_variants['relocate'], user_input_variants['trace'],
                                 user_input_variants['gc'], user_input_variants['jni'],
                                 user_input_variants['image'],
                                 user_input_variants['debuggable'], user_input_variants['jvmti'],
                                 user_input_variants['cdex_level'])
    return config

  # [--host, --target] combines with all the other user input variants.
  config = iter_config(tests, target_input_variants, _user_input_variants)
  # [--jvm] currently combines with nothing else. most of the extra flags we'd insert
  # would be unrecognizable by the 'java' binary, so avoid inserting any extra flags for now.
  uncombinated_config = iter_config(tests, uncombinated_target_input_variants, { 'run': [''],
      'prebuild': [''], 'compiler': [''],
      'relocate': [''], 'trace': [''],
      'gc': [''], 'jni': [''],
      'image': [''],
      'debuggable': [''], 'jvmti': [''],
      'cdex_level': ['']})

  def start_combination(executor, config_tuple, global_options, address_size):
      test, target, run, prebuild, compiler, relocate, trace, gc, \
      jni, image, debuggable, jvmti, cdex_level = config_tuple

      # NB The order of components here should match the order of components
      # in the regex parser in extract_test_name (used by parse_test_name).
      test_name = 'test-art-'
      test_name += target + '-run-test-'
      test_name += run + '-'
      test_name += prebuild + '-'
      test_name += compiler + '-'
      test_name += relocate + '-'
      test_name += trace + '-'
      test_name += gc + '-'
      test_name += jni + '-'
      test_name += image + '-'
      test_name += debuggable + '-'
      test_name += jvmti + '-'
      test_name += cdex_level + '-'
      test_name += test
      test_name += address_size

      variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
                     image, debuggable, jvmti, cdex_level, address_size}

      options_test = global_options

      if target == 'host':
        options_test += ' --host'
      elif target == 'jvm':
        options_test += ' --jvm'

      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
      # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
      # for target tests.
      if target == 'target':
        if env.ART_TEST_CHROOT:
          options_test += ' --chroot ' + env.ART_TEST_CHROOT
        if env.ART_TEST_ANDROID_ROOT:
          options_test += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
        if env.ART_TEST_ANDROID_I18N_ROOT:
          options_test += ' --android-i18n-root ' + env.ART_TEST_ANDROID_I18N_ROOT
        if env.ART_TEST_ANDROID_ART_ROOT:
          options_test += ' --android-art-root ' + env.ART_TEST_ANDROID_ART_ROOT
        if env.ART_TEST_ANDROID_TZDATA_ROOT:
          options_test += ' --android-tzdata-root ' + env.ART_TEST_ANDROID_TZDATA_ROOT

      if run == 'ndebug':
        options_test += ' -O'

      if prebuild == 'prebuild':
        options_test += ' --prebuild'
      elif prebuild == 'no-prebuild':
        options_test += ' --no-prebuild'

      if cdex_level:
        # Add option and remove the cdex- prefix.
        options_test += ' --compact-dex-level ' + cdex_level.replace('cdex-','')

      if compiler == 'optimizing':
        options_test += ' --optimizing'
      elif compiler == 'regalloc_gc':
        options_test += ' --optimizing -Xcompiler-option --register-allocation-strategy=graph-color'
      elif compiler == 'interpreter':
        options_test += ' --interpreter'
      elif compiler == 'interp-ac':
        options_test += ' --interpreter --verify-soft-fail'
      elif compiler == 'jit':
        options_test += ' --jit'
      elif compiler == 'jit-on-first-use':
        options_test += ' --jit --runtime-option -Xjitthreshold:0'
      elif compiler == 'speed-profile':
        options_test += ' --random-profile'
      elif compiler == 'baseline':
        options_test += ' --baseline'

      if relocate == 'relocate':
        options_test += ' --relocate'
      elif relocate == 'no-relocate':
        options_test += ' --no-relocate'

      if trace == 'trace':
        options_test += ' --trace'
      elif trace == 'stream':
        options_test += ' --trace --stream'

      if gc == 'gcverify':
        options_test += ' --gcverify'
      elif gc == 'gcstress':
        options_test += ' --gcstress'

      if jni == 'forcecopy':
        options_test += ' --runtime-option -Xjniopts:forcecopy'
      elif jni == 'checkjni':
        options_test += ' --runtime-option -Xcheck:jni'

      if image == 'no-image':
        options_test += ' --no-image'

      if debuggable == 'debuggable':
        options_test += ' --debuggable --runtime-option -Xopaque-jni-ids:true'

      if jvmti == 'jvmti-stress':
        options_test += ' --jvmti-trace-stress --jvmti-redefine-stress --jvmti-field-stress'
      elif jvmti == 'field-stress':
        options_test += ' --jvmti-field-stress'
      elif jvmti == 'trace-stress':
        options_test += ' --jvmti-trace-stress'
      elif jvmti == 'redefine-stress':
        options_test += ' --jvmti-redefine-stress'
      elif jvmti == 'step-stress':
        options_test += ' --jvmti-step-stress'

      if address_size == '64':
        options_test += ' --64'

      # b/36039166: Note that the path lengths must be kept reasonably short.
      temp_path = tempfile.mkdtemp(dir=env.ART_HOST_TEST_DIR)
      options_test = '--temp-path {} '.format(temp_path) + options_test

      # Run the run-test script using the prebuilt python.
      python3_bin = env.ANDROID_BUILD_TOP + "/prebuilts/build-tools/path/linux-x86/python3"
      run_test_sh = python3_bin + ' ' + env.ANDROID_BUILD_TOP + '/art/test/run-test'
      command = ' '.join((run_test_sh, options_test, ' '.join(extra_arguments[target]), test))
      return executor.submit(run_test, command, test, variant_set, test_name)

  # Use a context-manager to handle cleaning up the extracted zipapex if needed.
  with handle_zipapex(zipapex_loc) as zipapex_opt:
    options_all += zipapex_opt
    global n_thread
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
      test_futures = []
      for config_tuple in config:
        target = config_tuple[1]
        for address_size in _user_input_variants['address_sizes_target'][target]:
          test_futures.append(start_combination(executor, config_tuple, options_all, address_size))

      for config_tuple in uncombinated_config:
        test_futures.append(
            start_combination(executor, config_tuple, options_all, ""))  # no address size

      try:
        tests_done = 0
        for test_future in concurrent.futures.as_completed(f for f in test_futures if f):
          (test, status, failure_info, test_time) = test_future.result()
          tests_done += 1
          print_test_info(tests_done, test, status, failure_info, test_time)
          if failure_info and not env.ART_TEST_KEEP_GOING:
            for f in test_futures:
              f.cancel()
            break
      except KeyboardInterrupt:
        for f in test_futures:
          f.cancel()
        child_process_tracker.kill_all()
      executor.shutdown(True)

@contextlib.contextmanager
def handle_zipapex(ziploc):
  """Extracts the zipapex (if present) and handles cleanup.

  If we are running out of a zipapex we want to unzip it once and have all the tests use the same
  extracted contents. This extracts the files and handles cleanup if needed. It returns the
  required extra arguments to pass to the run-test.
  """
  if ziploc is not None:
    with tempfile.TemporaryDirectory() as tmpdir:
      subprocess.check_call(["unzip", "-qq", ziploc, "apex_payload.zip", "-d", tmpdir])
      subprocess.check_call(
        ["unzip", "-qq", os.path.join(tmpdir, "apex_payload.zip"), "-d", tmpdir])
      yield " --runtime-extracted-zipapex " + tmpdir
  else:
    yield ""

def _popen(**kwargs):
  if sys.version_info.major == 3 and sys.version_info.minor >= 6:
    return subprocess.Popen(encoding=sys.stdout.encoding, **kwargs)
  return subprocess.Popen(**kwargs)

def run_test(command, test, test_variant, test_name):
  """Runs the test.

  It invokes the art/test/run-test script to run the test. The exit value of
  the script is checked: if it is zero, the test is considered to have passed;
  otherwise the test is added to the list of failed tests. Before actually
  running the test, it also checks if the test is in the list of disabled
  tests, and if so, it skips running it and adds the test to the list of
  skipped tests.

  Args:
    command: The command to be used to invoke the script.
    test: The name of the test without the variant information.
    test_variant: The set of variants for the test.
    test_name: The name of the test along with the variants.

  Returns: a tuple of test name, status, optional failure info, and test time.
  """
  try:
    if is_test_disabled(test, test_variant):
      test_skipped = True
      test_time = datetime.timedelta()
    else:
      test_skipped = False
      test_start_time = time.monotonic()
      if verbose:
        print_text("Starting %s at %s\n" % (test_name, test_start_time))
      # Use a local name so that the imported env module is not shadowed; it is
      # still needed in the timeout handler below (env.ART_TEST_ON_VM).
      test_env = dict(os.environ)
      test_env["FULL_TEST_NAME"] = test_name
      if gdb or gdb_dex2oat:
        proc = _popen(
          args=command.split(),
          env=test_env,
          stderr=subprocess.STDOUT,
          universal_newlines=True,
          start_new_session=True
        )
      else:
        proc = _popen(
          args=command.split(),
          env=test_env,
          stderr=subprocess.STDOUT,
          stdout = subprocess.PIPE,
          universal_newlines=True,
          start_new_session=True,
        )
      script_output, return_value = child_process_tracker.wait(proc, timeout)
      test_passed = not return_value
      test_time_seconds = time.monotonic() - test_start_time
      test_time = datetime.timedelta(seconds=test_time_seconds)

    if not test_skipped:
      if test_passed:
        return (test_name, 'PASS', None, test_time)
      else:
        failed_tests.append((test_name, str(command) + "\n" + script_output))
        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
    elif not dry_run:
      skipped_tests.append(test_name)
      return (test_name, 'SKIP', None, test_time)
    else:
      return (test_name, 'PASS', None, test_time)
  except subprocess.TimeoutExpired as e:
    if verbose:
      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
    test_time_seconds = time.monotonic() - test_start_time
    test_time = datetime.timedelta(seconds=test_time_seconds)
    failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))

    # HACK(b/142039427): Print extra backtraces on timeout.
    if "-target-" in test_name and not env.ART_TEST_ON_VM:
      for i in range(8):
        proc_name = "dalvikvm" + test_name[-2:]
        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
        for pid in pidof.stdout.decode("ascii").split():
          if i >= 4:
            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
            subprocess.run(["adb", "shell", "debuggerd", pid])
            time.sleep(10)
          task_dir = "/proc/%s/task" % pid
          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
          for tid in tids.stdout.decode("ascii").split():
            for status in ["stat", "status"]:
              filename = "%s/%s/%s" % (task_dir, tid, status)
              print_text("Content of %s\n" % (filename))
              subprocess.run(["adb", "shell", "cat", filename])
        time.sleep(60)

    # The python documentation states that it is necessary to actually kill the process.
    os.killpg(proc.pid, signal.SIGKILL)
    script_output = proc.communicate()

    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
  except Exception as e:
    failed_tests.append((test_name, str(e)))
    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())

def print_test_info(test_count, test_name, result, failed_test_info="",
                    test_time=datetime.timedelta()):
  """Print the continuous test information

  If verbose is set to True, it continuously prints test status information
  on a new line.
  If verbose is set to False, it keeps erasing the previous test
  information by overwriting it with the latest test information. Also,
  in this case it strictly makes sure that the information length doesn't
  exceed the console width. It does so by shortening the test_name.

  When a test fails, it prints the output of the run-test script and the
  command used to invoke the script. It doesn't overwrite the failing
  test information in either of the cases.
  """

  info = ''
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overwriting the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    info = '\r' + ' ' * console_width + '\r'
  try:
    percent = (test_count * 100) / total_test_count
    progress_info = ('[ %d%% %d/%d ]') % (
      percent,
      test_count,
      total_test_count)
    if test_time.total_seconds() != 0 and verbose:
      info += '(%s)' % str(test_time)


    if result == 'FAIL' or result == 'TIMEOUT':
      if not verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL)
      else:
        info += ('%s %s %s\n%s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL,
          failed_test_info)
    else:
      result_text = ''
      if result == 'PASS':
        result_text += COLOR_PASS + 'PASS' + COLOR_NORMAL
      elif result == 'SKIP':
        result_text += COLOR_SKIP + 'SKIP' + COLOR_NORMAL

      if verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          result_text)
      else:
        total_output_length = 2 # Two spaces
        total_output_length += len(progress_info)
        total_output_length += len(result)
        allowed_test_length = console_width - total_output_length
        test_name_len = len(test_name)
        if allowed_test_length < test_name_len:
          test_name = ('...%s') % (
            test_name[-(allowed_test_length - 3):])
        info += ('%s %s %s') % (
          progress_info,
          test_name,
          result_text)
    send_csv_result(test_name, result)
    print_text(info)
  except Exception as e:
    print_text(('%s\n%s\n') % (test_name, str(e)))
    failed_tests.append(test_name)

def verify_knownfailure_entry(entry):
  supported_field = {
      'tests' : (list, str),
      'test_patterns' : (list,),
      'description' : (list, str),
      'bug' : (str,),
      'variant' : (str,),
      'devices': (list, str),
      'env_vars' : (dict,),
      'zipapex' : (bool,),
  }
  for field in entry:
    field_type = type(entry[field])
    if field_type not in supported_field[field]:
      raise ValueError('%s is not a supported type for %s\n%s' % (
          str(field_type),
          field,
          str(entry)))

def get_disabled_test_info(device_name):
  """Generate set of known failures.

  It parses the art/test/knownfailures.json file to generate the list of
  disabled tests.

  Returns:
    A dict mapping each test to the set of variant combinations for which
    the test should not be run.
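
  An illustrative knownfailures.json entry (the field names match those checked
  by verify_knownfailure_entry; the values here are made up):
      {
          "tests": ["001-HelloWorld"],
          "variant": "gcstress & target",
          "bug": "b/NNNNNN",
          "description": ["Example reason why the test is disabled."]
      }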
849  """
850  known_failures_file = env.ANDROID_BUILD_TOP + '/art/test/knownfailures.json'
851  with open(known_failures_file) as known_failures_json:
852    known_failures_info = json.loads(known_failures_json.read())
853
854  disabled_test_info = {}
855  for failure in known_failures_info:
856    verify_knownfailure_entry(failure)
857    tests = failure.get('tests', [])
858    if isinstance(tests, str):
859      tests = [tests]
860    patterns = failure.get("test_patterns", [])
861    if (not isinstance(patterns, list)):
862      raise ValueError("test_patterns is not a list in %s" % failure)
863
864    tests += [f for f in RUN_TEST_SET if any(re.match(pat, f) is not None for pat in patterns)]
865    variants = parse_variants(failure.get('variant'))
866
867    # Treat a '"devices": "<foo>"' equivalent to 'target' variant if
868    # "foo" is present in "devices".
869    device_names = failure.get('devices', [])
870    if isinstance(device_names, str):
871      device_names = [device_names]
872    if len(device_names) != 0:
873      if device_name in device_names:
874        variants.add('target')
875      else:
876        # Skip adding test info as device_name is not present in "devices" entry.
877        continue
878
879    env_vars = failure.get('env_vars')
880
881    if check_env_vars(env_vars):
882      for test in tests:
883        if test not in RUN_TEST_SET:
884          raise ValueError('%s is not a valid run-test' % (
885              test))
886        if test in disabled_test_info:
887          disabled_test_info[test] = disabled_test_info[test].union(variants)
888        else:
889          disabled_test_info[test] = variants
890
891    zipapex_disable = failure.get("zipapex", False)
892    if zipapex_disable and zipapex_loc is not None:
893      for test in tests:
894        if test not in RUN_TEST_SET:
895          raise ValueError('%s is not a valid run-test' % (test))
896        if test in disabled_test_info:
897          disabled_test_info[test] = disabled_test_info[test].union(variants)
898        else:
899          disabled_test_info[test] = variants
900
901  return disabled_test_info
902
def gather_disabled_test_info():
  global DISABLED_TEST_CONTAINER
  device_name = get_device_name() if 'target' in _user_input_variants['target'] else None
  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)

def check_env_vars(env_vars):
  """Checks if the env variables are set as required to run the test.

  Returns:
    True if all the env variables are set as required, otherwise False.
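
  For example (illustrative), env_vars == {'SOME_ART_TEST_VAR': 'true'} means
  the entry applies only when env.get_env('SOME_ART_TEST_VAR') returns 'true'.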
913  """
914
915  if not env_vars:
916    return True
917  for key in env_vars:
918    if env.get_env(key) != env_vars.get(key):
919      return False
920  return True
921
922
923def is_test_disabled(test, variant_set):
924  """Checks if the test along with the variant_set is disabled.
925
926  Args:
927    test: The name of the test as in art/test directory.
928    variant_set: Variants to be used for the test.
929  Returns:
930    True, if the test is disabled.
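
  For example (illustrative): if DISABLED_TEST_CONTAINER maps the test to
  {frozenset({'gcstress', 'target'})}, the test is reported as disabled only
  when both 'gcstress' and 'target' are present in variant_set.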
931  """
932  if dry_run:
933    return True
934  if test in env.EXTRA_DISABLED_TESTS:
935    return True
936  if ignore_skips:
937    return False
938  variants_list = DISABLED_TEST_CONTAINER.get(test, {})
939  for variants in variants_list:
940    variants_present = True
941    for variant in variants:
942      if variant not in variant_set:
943        variants_present = False
944        break
945    if variants_present:
946      return True
947  for bad_combo in NONFUNCTIONAL_VARIANT_SETS:
948    if bad_combo.issubset(variant_set):
949      return True
950  return False
951
952
def parse_variants(variants):
  """Parse variants fetched from art/test/knownfailures.json.
955  """
956  if not variants:
957    variants = ''
958    for variant in TOTAL_VARIANTS_SET:
959      variants += variant
960      variants += '|'
961    variants = variants[:-1]
962  variant_list = set()
963  or_variants = variants.split('|')
964  for or_variant in or_variants:
965    and_variants = or_variant.split('&')
966    variant = set()
967    for and_variant in and_variants:
968      and_variant = and_variant.strip()
969      if and_variant not in TOTAL_VARIANTS_SET:
970        raise ValueError('%s is not a valid variant' % (
971            and_variant))
972      variant.add(and_variant)
973    variant_list.add(frozenset(variant))
974  return variant_list
975
def print_text(output):
  sys.stdout.write(output)
  sys.stdout.flush()

def print_analysis():
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overwriting the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    eraser_text = '\r' + ' ' * console_width + '\r'
    print_text(eraser_text)

  # Prints information about the total tests run.
  # E.g., "2/38 (5%) tests passed".
  passed_test_count = total_test_count - len(skipped_tests) - len(failed_tests)
  passed_test_information = ('%d/%d (%d%%) %s passed.\n') % (
      passed_test_count,
      total_test_count,
      (passed_test_count*100)/total_test_count,
      'tests' if passed_test_count > 1 else 'test')
  print_text(passed_test_information)

  # Prints the list of skipped tests, if any.
  if skipped_tests:
    print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
    for test in skipped_tests:
      print_text(test + '\n')
    print_text('\n')

  # Prints the list of failed tests, if any.
  if failed_tests:
    print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n')
    for test_info in failed_tests:
      print_text(('%s\n%s\n' % (test_info[0], test_info[1])))
    print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
    for failed_test in sorted([test_info[0] for test_info in failed_tests]):
      print_text(('%s\n' % (failed_test)))

test_name_matcher = None
def extract_test_name(test_name):
  """Parses the test name and returns all the parts"""
  global test_name_matcher
  if test_name_matcher is None:
    regex = '^test-art-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-'
    regex += 'run-test-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['run']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['prebuild']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['compiler']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['relocate']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['trace']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jvmti']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['cdex_level']) + ')-'
    regex += '(' + '|'.join(RUN_TEST_SET) + ')'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$'
    test_name_matcher = re.compile(regex)
  match = test_name_matcher.match(test_name)
  if match:
    return list(match.group(i) for i in range(1,15))
  raise ValueError(test_name + " is not a valid test")

def parse_test_name(test_name):
  """Parses the test name provided by the user.
  It supports two types of test_name:
  1) Like 001-HelloWorld. In this case, it returns the set of tests in
  art/test whose names start with the given name.
  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-pointer-ids-picimage-ndebuggable-001-HelloWorld32
  In this case, it parses all the variants and checks that they are placed
  correctly. If so, it sets the various variant types to use the variants
  required to run the test and returns the test name without the variant
  information, like 001-HelloWorld.
  """
  test_set = set()
  for test in RUN_TEST_SET:
    if test.startswith(test_name):
      test_set.add(test)
  if test_set:
    return test_set

  parsed = extract_test_name(test_name)
  _user_input_variants['target'].add(parsed[0])
  _user_input_variants['run'].add(parsed[1])
  _user_input_variants['prebuild'].add(parsed[2])
  _user_input_variants['compiler'].add(parsed[3])
  _user_input_variants['relocate'].add(parsed[4])
  _user_input_variants['trace'].add(parsed[5])
  _user_input_variants['gc'].add(parsed[6])
  _user_input_variants['jni'].add(parsed[7])
  _user_input_variants['image'].add(parsed[8])
  _user_input_variants['debuggable'].add(parsed[9])
  _user_input_variants['jvmti'].add(parsed[10])
  _user_input_variants['cdex_level'].add(parsed[11])
  _user_input_variants['address_sizes'].add(parsed[13])
  return {parsed[12]}


def get_target_cpu_count():
  if env.ART_TEST_ON_VM:
    command = f"{env.ART_SSH_CMD} cat /sys/devices/system/cpu/present"
  else:
    command = 'adb shell cat /sys/devices/system/cpu/present'
  cpu_info_proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
  cpu_info = cpu_info_proc.stdout.read()
  if type(cpu_info) is bytes:
    cpu_info = cpu_info.decode('utf-8')
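  # The "present" file contains a range such as "0-7" (illustrative), meaning
  # CPUs 0 through 7 are present, i.e. 8 cores in total.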
  cpu_info_regex = r'\d*-(\d*)'
  match = re.match(cpu_info_regex, cpu_info)
  if match:
    return int(match.group(1)) + 1  # Add one to convert from "last-index" to "count"
  else:
    raise ValueError('Unable to predict the concurrency for the target. '
                     'Is the device connected?')


def get_host_cpu_count():
  return multiprocessing.cpu_count()


def parse_option():
  global verbose
  global dry_run
  global ignore_skips
  global n_thread
  global build
  global dist
  global gdb
  global gdb_arg
  global dump_cfg
  global gdb_dex2oat
  global gdb_dex2oat_args
  global runtime_option
  global run_test_option
  global timeout
  global dex2oat_jobs
  global run_all_configs
  global with_agent
  global zipapex_loc
  global csv_result

  parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
  parser.add_argument('tests', action='extend', nargs="*", help='name(s) of the test(s)')
  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)'
      ' (deprecated: use positional arguments at the end without any option instead)')
  global_group = parser.add_argument_group('Global options',
                                           'Options that affect all tests being run')
  global_group.add_argument('-j', type=int, dest='n_thread', help="""Number of CPUs to use.
                            Defaults to about 3/4 of the CPUs on target and all CPUs on host.""")
  global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
  global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
  global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                            help="Skip the given test in all circumstances.")
  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                            help="""Don't skip any run-test configurations listed in
                            knownfailures.json.""")
  global_group.add_argument('--no-build-dependencies',
                            action='store_false', dest='build',
                            help="""Don't build dependencies under any circumstances. This is the
                            behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.""")
  global_group.add_argument('-b', '--build-dependencies',
                            action='store_true', dest='build',
                            help="""Build dependencies under all circumstances. By default we will
                            not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.""")
  global_group.add_argument('--dist',
                            action='store_true', dest='dist',
                            help="""If dependencies are to be built, pass `dist` to the build
                            command line. You may want to also set the DIST_DIR environment
                            variable when using this flag.""")
  global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
  global_group.add_argument('--gdb', action='store_true', dest='gdb')
  global_group.add_argument('--gdb-arg', dest='gdb_arg')
  global_group.add_argument('--dump-cfg', dest='dump_cfg',
                            help="""Dump the CFG to the specified host path.
                            Example \"--dump-cfg <full-path>/graph.cfg\".""")
  global_group.add_argument('--gdb-dex2oat', action='store_true', dest='gdb_dex2oat')
  global_group.add_argument('--gdb-dex2oat-args', dest='gdb_dex2oat_args')
  global_group.add_argument('--run-test-option', action='append', dest='run_test_option',
                            default=[],
                            help="""Pass an option, unaltered, to the run-test script.
                            This should be enclosed in single-quotes to allow for spaces. The option
                            will be split using shlex.split() prior to invoking run-test.
                            Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\".""")
  global_group.add_argument('--with-agent', action='append', dest='with_agent',
                            help="""Pass an agent to be attached to the runtime""")
  global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                            help="""Pass an option to the runtime. Runtime options
                            starting with a '-' must be separated by a '=', for
                            example '--runtime-option=-Xjitthreshold:0'.""")
  global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs',
                            help='Number of dex2oat jobs')
  global_group.add_argument('--runtime-zipapex', dest='runtime_zipapex', default=None,
                            help='Location for runtime zipapex.')
  global_group.add_argument('-a', '--all', action='store_true', dest='run_all',
                            help="Run all the possible configurations for the input test set")
  global_group.add_argument('--csv-results', action='store', dest='csv_result', default=None,
                            type=argparse.FileType('w'), help='Store a CSV record of all results.')
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    var_group = parser.add_argument_group(
        '{}-type Options'.format(variant_type),
        "Options that control the '{}' variants.".format(variant_type))
    var_group.add_argument('--all-' + variant_type,
                           action='store_true',
                           dest='all_' + variant_type,
                           help='Enable all variants of ' + variant_type)
    for variant in variant_set:
      flag = '--' + variant
      var_group.add_argument(flag, action='store_true', dest=variant)

  options = vars(parser.parse_args())
  if options['csv_result'] is not None:
    csv_result = options['csv_result']
    setup_csv_result()
  # Handle the --all-<type> meta-options
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    if options['all_' + variant_type]:
      for variant in variant_set:
        options[variant] = True

  tests = None
  env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
  if options['tests']:
    tests = set()
    for test_name in options['tests']:
      tests |= parse_test_name(test_name)

  for variant_type in VARIANT_TYPE_DICT:
    for variant in VARIANT_TYPE_DICT[variant_type]:
      if options.get(variant):
        _user_input_variants[variant_type].add(variant)

  if options['verbose']:
    verbose = True
  if options['n_thread']:
    n_thread = max(1, options['n_thread'])
  ignore_skips = options['ignore_skips']
  if options['dry_run']:
    dry_run = True
    verbose = True
  build = options['build']
  dist = options['dist']
  if options['gdb']:
    n_thread = 1
    gdb = True
    if options['gdb_arg']:
      gdb_arg = options['gdb_arg']
  if options['dump_cfg']:
    dump_cfg = options['dump_cfg']
  if options['gdb_dex2oat']:
    n_thread = 1
    gdb_dex2oat = True
    if options['gdb_dex2oat_args']:
      gdb_dex2oat_args = options['gdb_dex2oat_args']
  runtime_option = options['runtime_option']
  with_agent = options['with_agent']
  run_test_option = sum(map(shlex.split, options['run_test_option']), [])
  zipapex_loc = options['runtime_zipapex']

  timeout = options['timeout']
  if options['dex2oat_jobs']:
    dex2oat_jobs = options['dex2oat_jobs']
  if options['run_all']:
    run_all_configs = True

  return tests or RUN_TEST_SET

def main():
  gather_test_info()
  tests = parse_option()
  setup_test_env()
  gather_disabled_test_info()
  if build:
    build_targets = []
    # Build only the needed shards (depending on the selected tests).
    shards = set(re.search(r"(\d\d)-", t).group(1) for t in tests)
    if any("hiddenapi" in t for t in tests):
      shards.add("HiddenApi")  # Include special HiddenApi shard.
    for mode in ['host', 'target', 'jvm']:
      if mode in _user_input_variants['target']:
        build_targets += ['test-art-{}-run-test-dependencies'.format(mode)]
        if len(shards) >= 100:
          build_targets += ["art-run-test-{}-data".format(mode)]  # Build all.
        else:
          build_targets += ["art-run-test-{}-data-shard{}".format(mode, s) for s in shards]
    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
    build_command += ' D8='
    if dist:
      build_command += ' dist'
    build_command += ' ' + ' '.join(build_targets)
    print_text('Build command: %s\n' % build_command)
    if subprocess.call(build_command.split()):
      # Debugging for b/62653020
      if env.DIST_DIR:
        shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
      sys.exit(1)

  run_tests(tests)

  print_analysis()
  close_csv_file()

  exit_code = 0 if len(failed_tests) == 0 else 1
  sys.exit(exit_code)

if __name__ == '__main__':
  main()