#!/usr/bin/env python3
#
# [VPYTHON:BEGIN]
# python_version: "3.8"
# [VPYTHON:END]
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

21"""ART Run-Test TestRunner
22
23The testrunner runs the ART run-tests by simply invoking the script.
24It fetches the list of eligible tests from art/test directory, and list of
25disabled tests from art/test/knownfailures.json. It runs the tests by
26invoking art/test/run-test script and checks the exit value to decide if the
27test passed or failed.
28
29Before invoking the script, first build all the tests dependencies.
30There are two major build targets for building target and host tests
31dependencies:
321) test-art-host-run-test
332) test-art-target-run-test
34
35There are various options to invoke the script which are:
36-t: Either the test name as in art/test or the test name including the variant
37    information. Eg, "-t 001-HelloWorld",
38    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32"
39-j: Number of thread workers to be used. Eg - "-j64"
40--dry-run: Instead of running the test name, just print its name.
41--verbose
42-b / --build-dependencies: to build the dependencies before running the test
43
44To specify any specific variants for the test, use --<<variant-name>>.
45For eg, for compiler type as optimizing, use --optimizing.
46
47
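
Example invocations (illustrative only; the script name and the chosen test
and variants below are just examples):

  ./testrunner.py --host --optimizing -t 001-HelloWorld
  ./testrunner.py -j64 --target -t 001-HelloWorld --dry-run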

At the end, the script prints the failed and skipped tests, if any.
"""
import argparse
import collections

# b/140161314 diagnostics.
try:
  import concurrent.futures
except Exception:
  import sys
  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
  sys.stdout.flush()
  raise

import contextlib
import csv
import datetime
import fnmatch
import itertools
import json
import multiprocessing
import os
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time

import env
from target_config import target_config
from device_config import device_config

# TODO: make it adjustable per test and for buildbots
#
# Note: this needs to be larger than run-test timeouts, as long as this script
#       does not push the value to run-test. run-test is somewhat complicated:
#                      base: 25m  (large for ASAN)
#        + timeout handling:  2m
#        +   gcstress extra: 20m
#        -----------------------
#                            47m
timeout = 3600 # 60 minutes

if env.ART_TEST_RUN_ON_ARM_FVP:
  # Increase timeout to 600 minutes due to the emulation overhead on FVP.
  timeout = 36000

# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
# whose keys are test names (like 001-HelloWorld) and whose values are the sets
# of variants for which the test is disabled.
DISABLED_TEST_CONTAINER = {}
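# For illustration (hypothetical entry), after gather_disabled_test_info() it
# may look like:
#   {'001-HelloWorld': {frozenset({'target', 'gcstress'})}}
# meaning 001-HelloWorld is skipped whenever both 'target' and 'gcstress' are
# part of the variant set being run.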

# The dict maps each variant type to the set of all possible variants of that
# type. For example, for key 'target' the values are 'target', 'host' and 'jvm'.
# It is used to parse the test name given as the argument to run.
VARIANT_TYPE_DICT = {}

# The set of all variant sets that are incompatible and will always be skipped.
NONFUNCTIONAL_VARIANT_SETS = set()

# The set contains all the variants of each type.
TOTAL_VARIANTS_SET = set()

# The colors are used in the output. When a test passes, COLOR_PASS is used,
# and so on.
COLOR_ERROR = '\033[91m'
COLOR_PASS = '\033[92m'
COLOR_SKIP = '\033[93m'
COLOR_NORMAL = '\033[0m'

# The set contains the list of all the possible run tests that are in art/test
# directory.
RUN_TEST_SET = set()

failed_tests = []
skipped_tests = []

# Flags
n_thread = 0
total_test_count = 0
verbose = False
dry_run = False
ignore_skips = False
build = False
dist = False
gdb = False
gdb_arg = ''
dump_cfg = ''
gdb_dex2oat = False
gdb_dex2oat_args = ''
csv_result = None
csv_writer = None
runtime_option = ''
with_agent = []
zipapex_loc = None
run_test_option = []
dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
run_all_configs = False

# Dict containing extra arguments
extra_arguments = { "host" : [], "target" : [] }

# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
_user_input_variants = collections.defaultdict(set)
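# For illustration (hypothetical contents): after parsing "--host --jit",
# _user_input_variants would contain {'target': {'host'}, 'compiler': {'jit'}}
# (plus whatever defaults setup_test_env() later fills in).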


class ChildProcessTracker(object):
  """Keeps track of forked child processes to be able to kill them."""

  def __init__(self):
    self.procs = {}             # dict from pid to subprocess.Popen object
    self.mutex = threading.Lock()

  def wait(self, proc, timeout):
    """Waits on the given subprocess and makes it available to kill_all meanwhile.

    Args:
      proc: The subprocess.Popen object to wait on.
      timeout: Timeout passed on to proc.communicate.

    Returns: A tuple of the process stdout output and its return value.
    """
    with self.mutex:
      if self.procs is not None:
        self.procs[proc.pid] = proc
      else:
        os.killpg(proc.pid, signal.SIGKILL) # kill_all has already been called.
    try:
      output = proc.communicate(timeout=timeout)[0]
      return_value = proc.wait()
      return output, return_value
    finally:
      with self.mutex:
        if self.procs is not None:
          del self.procs[proc.pid]

  def kill_all(self):
    """Kills all currently running processes and any future ones."""
    with self.mutex:
      for pid in self.procs:
        os.killpg(pid, signal.SIGKILL)
      self.procs = None # Make future wait() calls kill their processes immediately.

child_process_tracker = ChildProcessTracker()
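# Usage pattern (as used further below in this file): worker threads register
# their subprocess via child_process_tracker.wait(proc, timeout), while the
# main thread calls child_process_tracker.kill_all() on KeyboardInterrupt so
# that no run-test children outlive the runner.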

def setup_csv_result():
  """Set up the CSV output if required."""
  global csv_writer
  csv_writer = csv.writer(csv_result)
  # Write the header.
  csv_writer.writerow(['target', 'run', 'prebuild', 'compiler', 'relocate', 'trace', 'gc',
                       'jni', 'image', 'debuggable', 'jvmti', 'cdex_level', 'test', 'address_size', 'result'])


def send_csv_result(test, result):
  """
  Write a line into the CSV results file if one is available.
  """
  if csv_writer is not None:
    csv_writer.writerow(extract_test_name(test) + [result])

def close_csv_file():
  global csv_result
  global csv_writer
  if csv_result is not None:
    csv_writer = None
    csv_result.flush()
    csv_result.close()
    csv_result = None

def gather_test_info():
  """Gathers information about the tests to be run.

  This includes generating the list of all tests from the art/test directory
  and the list of disabled tests. It also maps the various variants to their
  types.
  """
  global TOTAL_VARIANTS_SET
  # TODO: Avoid duplication of the variant names in different lists.
  VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
  VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
  VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image'}
  VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
  VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
  VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
  VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
  VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
  VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
  VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                                'field-stress', 'step-stress'}
  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
                                   'optimizing', 'regalloc_gc',
                                   'speed-profile', 'baseline'}

  # Regalloc_GC cannot work with prebuild.
  NONFUNCTIONAL_VARIANT_SETS.add(frozenset({'regalloc_gc', 'prebuild'}))

  for v_type in VARIANT_TYPE_DICT:
    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))

  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
  for f in os.listdir(test_dir):
    if fnmatch.fnmatch(f, '[0-9]*'):
      RUN_TEST_SET.add(f)


def setup_test_env():
  """Sets default values for the various test variants if they are not
  already set.
  """
  if env.ART_TEST_BISECTION:
    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
    env.ART_TEST_RUN_TEST_PREBUILD = False
    # Bisection search writes to standard output.
    env.ART_TEST_QUIET = False

  global _user_input_variants
  global run_all_configs
  # These are the default variant-options we will use if nothing in the group is specified.
  default_variants = {
      'target': {'host', 'target'},
      'prebuild': {'prebuild'},
      'cdex_level': {'cdex-fast'},
      'jvmti': { 'no-jvmti'},
      'compiler': {'optimizing',
                   'jit',
                   'interpreter',
                   'interp-ac',
                   'speed-profile'},
      'relocate': {'no-relocate'},
      'trace': {'ntrace'},
      'gc': {'cms'},
      'jni': {'checkjni'},
      'image': {'picimage'},
      'debuggable': {'ndebuggable'},
      'run': {'debug'},
      # address_sizes_target depends on the target so it is dealt with below.
  }
  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
  # want to pick up if we pass --all.
  default_variants_keys = default_variants.keys()
  if run_all_configs:
    default_variants = VARIANT_TYPE_DICT

  for key in default_variants_keys:
    if not _user_input_variants[key]:
      _user_input_variants[key] = default_variants[key]

  _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
  if not _user_input_variants['address_sizes']:
    _user_input_variants['address_sizes_target']['target'].add(
        env.ART_PHONY_TEST_TARGET_SUFFIX)
    _user_input_variants['address_sizes_target']['host'].add(
        env.ART_PHONY_TEST_HOST_SUFFIX)
    if env.ART_TEST_RUN_TEST_2ND_ARCH:
      _user_input_variants['address_sizes_target']['host'].add(
          env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
      _user_input_variants['address_sizes_target']['target'].add(
          env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
  else:
    _user_input_variants['address_sizes_target']['host'] = _user_input_variants['address_sizes']
    _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']

  global n_thread
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
    if n_thread == 0:
      # Use only part of the cores since fully loading the device tends to lead to timeouts.
      n_thread = max(1, int(get_target_cpu_count() * 0.75))
      if device_name == 'fugu':
        n_thread = 1
  else:
    device_name = "host"
    if n_thread == 0:
      n_thread = get_host_cpu_count()
  print_text("Concurrency: {} ({})\n".format(n_thread, device_name))

  global extra_arguments
  for target in _user_input_variants['target']:
    extra_arguments[target] = find_extra_device_arguments(target)

  if not sys.stdout.isatty():
    global COLOR_ERROR
    global COLOR_PASS
    global COLOR_SKIP
    global COLOR_NORMAL
    COLOR_ERROR = ''
    COLOR_PASS = ''
    COLOR_SKIP = ''
    COLOR_NORMAL = ''

def find_extra_device_arguments(target):
  """
  Gets any extra arguments from the device_config.
  """
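  # For illustration (hypothetical entry): a device_config of the form
  #   {'some-device-name': {'run-test-args': ['--64']}}
  # would make every target run on 'some-device-name' receive the extra
  # '--64' argument when run-test is invoked.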
  device_name = target
  if target == 'target':
    device_name = get_device_name()
  return device_config.get(device_name, { 'run-test-args' : [] })['run-test-args']

def get_device_name():
  """
  Gets the value of ro.product.name from remote device.
  """
  proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
                          stderr=subprocess.STDOUT,
                          stdout = subprocess.PIPE,
                          universal_newlines=True)
  # only wait 2 seconds.
  timeout_val = 2

  if env.ART_TEST_RUN_ON_ARM_FVP:
    # Increase timeout to 200 seconds due to the emulation overhead on FVP.
    timeout_val = 200

  output = proc.communicate(timeout = timeout_val)[0]
  success = not proc.wait()
  if success:
    return output.strip()
  else:
    print_text("Unable to determine device type!\n")
    print_text("Continuing anyway.\n")
    return "UNKNOWN_TARGET"

def run_tests(tests):
  """This method generates variants of the tests to be run and executes them.

  Args:
    tests: The set of tests to be run.
  """
  options_all = ''

  # jvm does not run with all these combinations,
  # or at least it doesn't make sense for most of them.
  # TODO: support some jvm variants like jvmti ?
  target_input_variants = _user_input_variants['target']
  uncombinated_target_input_variants = []
  if 'jvm' in target_input_variants:
    _user_input_variants['target'].remove('jvm')
    uncombinated_target_input_variants.append('jvm')

  global total_test_count
  total_test_count = len(tests)
  if target_input_variants:
    for variant_type in VARIANT_TYPE_DICT:
      if not (variant_type == 'target' or 'address_sizes' in variant_type):
        total_test_count *= len(_user_input_variants[variant_type])
  target_address_combinations = 0
  for target in target_input_variants:
    for address_size in _user_input_variants['address_sizes_target'][target]:
      target_address_combinations += 1
  target_address_combinations += len(uncombinated_target_input_variants)
  total_test_count *= target_address_combinations

  if env.ART_TEST_WITH_STRACE:
    options_all += ' --strace'

  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
    options_all += ' --always-clean'

  if env.ART_TEST_BISECTION:
    options_all += ' --bisection-search'

  if gdb:
    options_all += ' --gdb'
    if gdb_arg:
      options_all += ' --gdb-arg ' + gdb_arg

  if dump_cfg:
    options_all += ' --dump-cfg ' + dump_cfg
  if gdb_dex2oat:
    options_all += ' --gdb-dex2oat'
    if gdb_dex2oat_args:
      options_all += ' --gdb-dex2oat-args ' + gdb_dex2oat_args

  options_all += ' ' + ' '.join(run_test_option)

  if runtime_option:
    for opt in runtime_option:
      options_all += ' --runtime-option ' + opt
  if with_agent:
    for opt in with_agent:
      options_all += ' --with-agent ' + opt

  if dex2oat_jobs != -1:
    options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)

  def iter_config(tests, input_variants, user_input_variants):
    config = itertools.product(tests, input_variants, user_input_variants['run'],
                                 user_input_variants['prebuild'], user_input_variants['compiler'],
                                 user_input_variants['relocate'], user_input_variants['trace'],
                                 user_input_variants['gc'], user_input_variants['jni'],
                                 user_input_variants['image'],
                                 user_input_variants['debuggable'], user_input_variants['jvmti'],
                                 user_input_variants['cdex_level'])
    return config

  # [--host, --target] combines with all the other user input variants.
  config = iter_config(tests, target_input_variants, _user_input_variants)
  # [--jvm] currently combines with nothing else. most of the extra flags we'd insert
  # would be unrecognizable by the 'java' binary, so avoid inserting any extra flags for now.
  uncombinated_config = iter_config(tests, uncombinated_target_input_variants, { 'run': [''],
      'prebuild': [''], 'compiler': [''],
      'relocate': [''], 'trace': [''],
      'gc': [''], 'jni': [''],
      'image': [''],
      'debuggable': [''], 'jvmti': [''],
      'cdex_level': ['']})

  def start_combination(executor, config_tuple, global_options, address_size):
      test, target, run, prebuild, compiler, relocate, trace, gc, \
      jni, image, debuggable, jvmti, cdex_level = config_tuple

      # NB The order of components here should match the order of
      # components in the regex parser in parse_test_name.
      test_name = 'test-art-'
      test_name += target + '-run-test-'
      test_name += run + '-'
      test_name += prebuild + '-'
      test_name += compiler + '-'
      test_name += relocate + '-'
      test_name += trace + '-'
      test_name += gc + '-'
      test_name += jni + '-'
      test_name += image + '-'
      test_name += debuggable + '-'
      test_name += jvmti + '-'
      test_name += cdex_level + '-'
      test_name += test
      test_name += address_size

      variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
                     image, debuggable, jvmti, cdex_level, address_size}

      options_test = global_options

      if target == 'host':
        options_test += ' --host'
      elif target == 'jvm':
        options_test += ' --jvm'

      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
      # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
      # for target tests.
      if target == 'target':
        if env.ART_TEST_CHROOT:
          options_test += ' --chroot ' + env.ART_TEST_CHROOT
        if env.ART_TEST_ANDROID_ROOT:
          options_test += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
        if env.ART_TEST_ANDROID_I18N_ROOT:
          options_test += ' --android-i18n-root ' + env.ART_TEST_ANDROID_I18N_ROOT
        if env.ART_TEST_ANDROID_ART_ROOT:
          options_test += ' --android-art-root ' + env.ART_TEST_ANDROID_ART_ROOT
        if env.ART_TEST_ANDROID_TZDATA_ROOT:
          options_test += ' --android-tzdata-root ' + env.ART_TEST_ANDROID_TZDATA_ROOT

      if run == 'ndebug':
        options_test += ' -O'

      if prebuild == 'prebuild':
        options_test += ' --prebuild'
      elif prebuild == 'no-prebuild':
        options_test += ' --no-prebuild'

      if cdex_level:
        # Add option and remove the cdex- prefix.
        options_test += ' --compact-dex-level ' + cdex_level.replace('cdex-','')

      if compiler == 'optimizing':
        options_test += ' --optimizing'
      elif compiler == 'regalloc_gc':
        options_test += ' --optimizing -Xcompiler-option --register-allocation-strategy=graph-color'
      elif compiler == 'interpreter':
        options_test += ' --interpreter'
      elif compiler == 'interp-ac':
        options_test += ' --interpreter --verify-soft-fail'
      elif compiler == 'jit':
        options_test += ' --jit'
      elif compiler == 'jit-on-first-use':
        options_test += ' --jit --runtime-option -Xjitthreshold:0'
      elif compiler == 'speed-profile':
        options_test += ' --random-profile'
      elif compiler == 'baseline':
        options_test += ' --baseline'

      if relocate == 'relocate':
        options_test += ' --relocate'
      elif relocate == 'no-relocate':
        options_test += ' --no-relocate'

      if trace == 'trace':
        options_test += ' --trace'
      elif trace == 'stream':
        options_test += ' --trace --stream'

      if gc == 'gcverify':
        options_test += ' --gcverify'
      elif gc == 'gcstress':
        options_test += ' --gcstress'

      if jni == 'forcecopy':
        options_test += ' --runtime-option -Xjniopts:forcecopy'
      elif jni == 'checkjni':
        options_test += ' --runtime-option -Xcheck:jni'

      if image == 'no-image':
        options_test += ' --no-image'

      if debuggable == 'debuggable':
        options_test += ' --debuggable --runtime-option -Xopaque-jni-ids:true'

      if jvmti == 'jvmti-stress':
        options_test += ' --jvmti-trace-stress --jvmti-redefine-stress --jvmti-field-stress'
      elif jvmti == 'field-stress':
        options_test += ' --jvmti-field-stress'
      elif jvmti == 'trace-stress':
        options_test += ' --jvmti-trace-stress'
      elif jvmti == 'redefine-stress':
        options_test += ' --jvmti-redefine-stress'
      elif jvmti == 'step-stress':
        options_test += ' --jvmti-step-stress'

      if address_size == '64':
        options_test += ' --64'

      # b/36039166: Note that the path lengths must be kept reasonably short.
      temp_path = tempfile.mkdtemp(dir=env.ART_HOST_TEST_DIR)
      options_test = '--temp-path {} '.format(temp_path) + options_test

      run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
      command = ' '.join((run_test_sh, options_test, ' '.join(extra_arguments[target]), test))
      return executor.submit(run_test, command, test, variant_set, test_name)

  # Use a context-manager to handle cleaning up the extracted zipapex if needed.
  with handle_zipapex(zipapex_loc) as zipapex_opt:
    options_all += zipapex_opt
    global n_thread
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
      test_futures = []
      for config_tuple in config:
        target = config_tuple[1]
        for address_size in _user_input_variants['address_sizes_target'][target]:
          test_futures.append(start_combination(executor, config_tuple, options_all, address_size))

      for config_tuple in uncombinated_config:
        test_futures.append(
            start_combination(executor, config_tuple, options_all, ""))  # no address size

      try:
        tests_done = 0
        for test_future in concurrent.futures.as_completed(f for f in test_futures if f):
          (test, status, failure_info, test_time) = test_future.result()
          tests_done += 1
          print_test_info(tests_done, test, status, failure_info, test_time)
          if failure_info and not env.ART_TEST_KEEP_GOING:
            for f in test_futures:
              f.cancel()
            break
      except KeyboardInterrupt:
        for f in test_futures:
          f.cancel()
        child_process_tracker.kill_all()
      executor.shutdown(True)

@contextlib.contextmanager
def handle_zipapex(ziploc):
  """Extracts the zipapex (if present) and handles cleanup.

  If we are running out of a zipapex we want to unzip it once and have all the tests use the same
  extracted contents. This extracts the files and handles cleanup if needed. It returns the
  required extra arguments to pass to the run-test.
  """
  if ziploc is not None:
    with tempfile.TemporaryDirectory() as tmpdir:
      subprocess.check_call(["unzip", "-qq", ziploc, "apex_payload.zip", "-d", tmpdir])
      subprocess.check_call(
        ["unzip", "-qq", os.path.join(tmpdir, "apex_payload.zip"), "-d", tmpdir])
      yield " --runtime-extracted-zipapex " + tmpdir
  else:
    yield ""

def _popen(**kwargs):
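  # The 'encoding' keyword argument of subprocess.Popen is only available on
  # Python 3.6+, so fall back to the plain constructor on older interpreters.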
  if sys.version_info.major == 3 and sys.version_info.minor >= 6:
    return subprocess.Popen(encoding=sys.stdout.encoding, **kwargs)
  return subprocess.Popen(**kwargs)

def run_test(command, test, test_variant, test_name):
  """Runs the test.

  It invokes the art/test/run-test script to run the test. The exit status of
  the script is checked: if it is zero the test is considered passed,
  otherwise the test is added to the list of failed tests. Before actually
  running the test, it also checks if the test is placed in the list of
  disabled tests, and if so, it skips running it and adds the test to the
  list of skipped tests.

  Args:
    command: The command to be used to invoke the script.
    test: The name of the test without the variant information.
    test_variant: The set of variants for the test.
    test_name: The name of the test along with the variants.

  Returns: a tuple of test name, status, optional failure info, and test time.
  """
  try:
    if is_test_disabled(test, test_variant):
      test_skipped = True
      test_time = datetime.timedelta()
    else:
      test_skipped = False
      test_start_time = time.monotonic()
      if verbose:
        print_text("Starting %s at %s\n" % (test_name, test_start_time))
      if gdb or gdb_dex2oat:
        proc = _popen(
          args=command.split(),
          stderr=subprocess.STDOUT,
          universal_newlines=True,
          start_new_session=True
        )
      else:
        proc = _popen(
          args=command.split(),
          stderr=subprocess.STDOUT,
          stdout = subprocess.PIPE,
          universal_newlines=True,
          start_new_session=True,
        )
      script_output, return_value = child_process_tracker.wait(proc, timeout)
      test_passed = not return_value
      test_time_seconds = time.monotonic() - test_start_time
      test_time = datetime.timedelta(seconds=test_time_seconds)

    if not test_skipped:
      if test_passed:
        return (test_name, 'PASS', None, test_time)
      else:
        failed_tests.append((test_name, str(command) + "\n" + script_output))
        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
    elif not dry_run:
      skipped_tests.append(test_name)
      return (test_name, 'SKIP', None, test_time)
    else:
      return (test_name, 'PASS', None, test_time)
  except subprocess.TimeoutExpired as e:
    if verbose:
      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
    test_time_seconds = time.monotonic() - test_start_time
    test_time = datetime.timedelta(seconds=test_time_seconds)
    failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))

    # HACK(b/142039427): Print extra backtraces on timeout.
    if "-target-" in test_name:
      for i in range(8):
        proc_name = "dalvikvm" + test_name[-2:]
        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
        for pid in pidof.stdout.decode("ascii").split():
          if i >= 4:
            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
            subprocess.run(["adb", "shell", "debuggerd", pid])
            time.sleep(10)
          task_dir = "/proc/%s/task" % pid
          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
          for tid in tids.stdout.decode("ascii").split():
            for status in ["stat", "status"]:
              filename = "%s/%s/%s" % (task_dir, tid, status)
              print_text("Content of %s\n" % (filename))
              subprocess.run(["adb", "shell", "cat", filename])
        time.sleep(60)

    # The python documentation states that it is necessary to actually kill the process.
    os.killpg(proc.pid, signal.SIGKILL)
    script_output = proc.communicate()

    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
  except Exception as e:
    failed_tests.append((test_name, str(e)))
    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())

def print_test_info(test_count, test_name, result, failed_test_info="",
                    test_time=datetime.timedelta()):
  """Prints the continuous test information.

  If verbose is set to True, it continuously prints test status information
  on a new line.
  If verbose is set to False, it keeps erasing the previous test information
  by overwriting it with the latest test information. Also, in this case it
  strictly makes sure that the information length doesn't exceed the console
  width. It does so by shortening the test_name.

  When a test fails, it prints the output of the run-test script and the
  command used to invoke the script. It doesn't overwrite the failing
  test information in either of the cases.
  """

  info = ''
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    info = '\r' + ' ' * console_width + '\r'
  try:
    percent = (test_count * 100) / total_test_count
    progress_info = ('[ %d%% %d/%d ]') % (
      percent,
      test_count,
      total_test_count)
    if test_time.total_seconds() != 0 and verbose:
      info += '(%s)' % str(test_time)


    if result == 'FAIL' or result == 'TIMEOUT':
      if not verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL)
      else:
        info += ('%s %s %s\n%s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL,
          failed_test_info)
    else:
      result_text = ''
      if result == 'PASS':
        result_text += COLOR_PASS + 'PASS' + COLOR_NORMAL
      elif result == 'SKIP':
        result_text += COLOR_SKIP + 'SKIP' + COLOR_NORMAL

      if verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          result_text)
      else:
        total_output_length = 2 # Two spaces
        total_output_length += len(progress_info)
        total_output_length += len(result)
        allowed_test_length = console_width - total_output_length
        test_name_len = len(test_name)
        if allowed_test_length < test_name_len:
          test_name = ('...%s') % (
            test_name[-(allowed_test_length - 3):])
        info += ('%s %s %s') % (
          progress_info,
          test_name,
          result_text)
    send_csv_result(test_name, result)
    print_text(info)
  except Exception as e:
    print_text(('%s\n%s\n') % (test_name, str(e)))
    failed_tests.append(test_name)

def verify_knownfailure_entry(entry):
  supported_field = {
      'tests' : (list, str),
      'test_patterns' : (list,),
      'description' : (list, str),
      'bug' : (str,),
      'variant' : (str,),
      'devices': (list, str),
      'env_vars' : (dict,),
      'zipapex' : (bool,),
  }
  for field in entry:
    field_type = type(entry[field])
    if field_type not in supported_field[field]:
      raise ValueError('%s is not supported type for %s\n%s' % (
          str(field_type),
          field,
          str(entry)))

def get_disabled_test_info(device_name):
  """Generates the set of known failures.

  It parses the art/test/knownfailures.json file to generate the list of
  disabled tests.

  Returns:
    A dict mapping each test to the set of variants for which the test
    should not be run.
  """
  known_failures_file = env.ANDROID_BUILD_TOP + '/art/test/knownfailures.json'
  with open(known_failures_file) as known_failures_json:
    known_failures_info = json.loads(known_failures_json.read())

  disabled_test_info = {}
  for failure in known_failures_info:
    verify_knownfailure_entry(failure)
    tests = failure.get('tests', [])
    if isinstance(tests, str):
      tests = [tests]
    patterns = failure.get("test_patterns", [])
    if not isinstance(patterns, list):
      raise ValueError("test_patterns is not a list in %s" % failure)

    tests += [f for f in RUN_TEST_SET if any(re.match(pat, f) is not None for pat in patterns)]
    variants = parse_variants(failure.get('variant'))

    # Treat a '"devices": "<foo>"' entry as equivalent to the 'target' variant
    # when the current device is listed in "devices".
    device_names = failure.get('devices', [])
    if isinstance(device_names, str):
      device_names = [device_names]
    if len(device_names) != 0:
      if device_name in device_names:
        variants.add('target')
      else:
        # Skip adding test info as device_name is not present in "devices" entry.
        continue

    env_vars = failure.get('env_vars')

    if check_env_vars(env_vars):
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (
              test))
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

    zipapex_disable = failure.get("zipapex", False)
    if zipapex_disable and zipapex_loc is not None:
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (test))
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

  return disabled_test_info

def gather_disabled_test_info():
  global DISABLED_TEST_CONTAINER
  device_name = get_device_name() if 'target' in _user_input_variants['target'] else None
  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)

def check_env_vars(env_vars):
  """Checks if the env variables are set as required to run the test.

  Returns:
    True if all the env variables are set as required, otherwise False.
  """

  if not env_vars:
    return True
  for key in env_vars:
    if env.get_env(key) != env_vars.get(key):
      return False
  return True


def is_test_disabled(test, variant_set):
  """Checks if the test along with the variant_set is disabled.

  Args:
    test: The name of the test as in art/test directory.
    variant_set: Variants to be used for the test.
  Returns:
    True, if the test is disabled.
  """
  if dry_run:
    return True
  if test in env.EXTRA_DISABLED_TESTS:
    return True
  if ignore_skips:
    return False
  variants_list = DISABLED_TEST_CONTAINER.get(test, {})
  for variants in variants_list:
    variants_present = True
    for variant in variants:
      if variant not in variant_set:
        variants_present = False
        break
    if variants_present:
      return True
  for bad_combo in NONFUNCTIONAL_VARIANT_SETS:
    if bad_combo.issubset(variant_set):
      return True
  return False


def parse_variants(variants):
  """Parse variants fetched from art/test/knownfailures.json.
  """
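  # For illustration (hypothetical variant string): the value
  #   "gcstress & debuggable | trace"
  # parses to {frozenset({'gcstress', 'debuggable'}), frozenset({'trace'})},
  # i.e. an OR of AND-groups of variant names.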
  if not variants:
    variants = ''
    for variant in TOTAL_VARIANTS_SET:
      variants += variant
      variants += '|'
    variants = variants[:-1]
  variant_list = set()
  or_variants = variants.split('|')
  for or_variant in or_variants:
    and_variants = or_variant.split('&')
    variant = set()
    for and_variant in and_variants:
      and_variant = and_variant.strip()
      if and_variant not in TOTAL_VARIANTS_SET:
        raise ValueError('%s is not a valid variant' % (
            and_variant))
      variant.add(and_variant)
    variant_list.add(frozenset(variant))
  return variant_list

def print_text(output):
  sys.stdout.write(output)
  sys.stdout.flush()

def print_analysis():
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    eraser_text = '\r' + ' ' * console_width + '\r'
    print_text(eraser_text)

  # Prints information about the total tests run.
  # E.g., "2/38 (5%) tests passed".
  passed_test_count = total_test_count - len(skipped_tests) - len(failed_tests)
  passed_test_information = ('%d/%d (%d%%) %s passed.\n') % (
      passed_test_count,
      total_test_count,
      (passed_test_count*100)/total_test_count,
      'tests' if passed_test_count > 1 else 'test')
  print_text(passed_test_information)

  # Prints the list of skipped tests, if any.
  if skipped_tests:
    print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
    for test in skipped_tests:
      print_text(test + '\n')
    print_text('\n')

  # Prints the list of failed tests, if any.
  if failed_tests:
    print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n')
    for test_info in failed_tests:
      print_text(('%s\n%s\n' % (test_info[0], test_info[1])))
    print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
    for failed_test in sorted([test_info[0] for test_info in failed_tests]):
      print_text(('%s\n' % (failed_test)))

test_name_matcher = None
def extract_test_name(test_name):
  """Parses the test name and returns all the parts"""
  global test_name_matcher
  if test_name_matcher is None:
    regex = '^test-art-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-'
    regex += 'run-test-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['run']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['prebuild']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['compiler']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['relocate']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['trace']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jvmti']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['cdex_level']) + ')-'
    regex += '(' + '|'.join(RUN_TEST_SET) + ')'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$'
    test_name_matcher = re.compile(regex)
  match = test_name_matcher.match(test_name)
  if match:
    return list(match.group(i) for i in range(1,15))
  raise ValueError(test_name + " is not a valid test")

def parse_test_name(test_name):
  """Parses the test name provided by the user.

  It supports two types of test_name:
  1) Like 001-HelloWorld. In this case, it returns all run-tests whose names
  start with the given prefix.
  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-pointer-ids-picimage-ndebuggable-001-HelloWorld32
  In this case, it parses all the variants and checks whether they are placed
  correctly. If so, it sets the various variant types to use the variants
  required to run the test and returns the test name without the variant
  information, like 001-HelloWorld.
  """
  test_set = set()
  for test in RUN_TEST_SET:
    if test.startswith(test_name):
      test_set.add(test)
  if test_set:
    return test_set

  parsed = extract_test_name(test_name)
  _user_input_variants['target'].add(parsed[0])
  _user_input_variants['run'].add(parsed[1])
  _user_input_variants['prebuild'].add(parsed[2])
  _user_input_variants['compiler'].add(parsed[3])
  _user_input_variants['relocate'].add(parsed[4])
  _user_input_variants['trace'].add(parsed[5])
  _user_input_variants['gc'].add(parsed[6])
  _user_input_variants['jni'].add(parsed[7])
  _user_input_variants['image'].add(parsed[8])
  _user_input_variants['debuggable'].add(parsed[9])
  _user_input_variants['jvmti'].add(parsed[10])
  _user_input_variants['cdex_level'].add(parsed[11])
  _user_input_variants['address_sizes'].add(parsed[13])
  return {parsed[12]}


def get_target_cpu_count():
  adb_command = 'adb shell cat /sys/devices/system/cpu/present'
  cpu_info_proc = subprocess.Popen(adb_command.split(), stdout=subprocess.PIPE)
  cpu_info = cpu_info_proc.stdout.read()
  if type(cpu_info) is bytes:
    cpu_info = cpu_info.decode('utf-8')
  cpu_info_regex = r'\d*-(\d*)'
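  # For illustration: /sys/devices/system/cpu/present typically contains a
  # range such as "0-7", in which case the regex above yields 8 CPUs.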
  match = re.match(cpu_info_regex, cpu_info)
  if match:
    return int(match.group(1)) + 1  # Add one to convert from "last-index" to "count"
  else:
    raise ValueError('Unable to predict the concurrency for the target. '
                     'Is device connected?')


def get_host_cpu_count():
  return multiprocessing.cpu_count()


def parse_option():
  global verbose
  global dry_run
  global ignore_skips
  global n_thread
  global build
  global dist
  global gdb
  global gdb_arg
  global dump_cfg
  global gdb_dex2oat
  global gdb_dex2oat_args
  global runtime_option
  global run_test_option
  global timeout
  global dex2oat_jobs
  global run_all_configs
  global with_agent
  global zipapex_loc
  global csv_result

  parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
  global_group = parser.add_argument_group('Global options',
                                           'Options that affect all tests being run')
  global_group.add_argument('-j', type=int, dest='n_thread', help="""Number of CPUs to use.
                            Defaults to 3/4 of the CPUs on target and all CPUs on host.""")
  global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
  global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
  global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                            help="Skip the given test in all circumstances.")
  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                            help="""Don't skip any run-test configurations listed in
                            knownfailures.json.""")
  global_group.add_argument('--no-build-dependencies',
                            action='store_false', dest='build',
                            help="""Don't build dependencies under any circumstances. This is the
                            behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.""")
  global_group.add_argument('-b', '--build-dependencies',
                            action='store_true', dest='build',
                            help="""Build dependencies under all circumstances. By default we will
                            not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.""")
  global_group.add_argument('--dist',
                            action='store_true', dest='dist',
                            help="""If dependencies are to be built, pass `dist` to the build
                            command line. You may want to also set the DIST_DIR environment
                            variable when using this flag.""")
  global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
  global_group.add_argument('--gdb', action='store_true', dest='gdb')
  global_group.add_argument('--gdb-arg', dest='gdb_arg')
  global_group.add_argument('--dump-cfg', dest='dump_cfg',
                            help="""Dump the CFG to the specified host path.
                            Example \"--dump-cfg <full-path>/graph.cfg\".""")
  global_group.add_argument('--gdb-dex2oat', action='store_true', dest='gdb_dex2oat')
  global_group.add_argument('--gdb-dex2oat-args', dest='gdb_dex2oat_args')
  global_group.add_argument('--run-test-option', action='append', dest='run_test_option',
                            default=[],
                            help="""Pass an option, unaltered, to the run-test script.
                            This should be enclosed in single-quotes to allow for spaces. The option
                            will be split using shlex.split() prior to invoking run-test.
                            Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\".""")
  global_group.add_argument('--with-agent', action='append', dest='with_agent',
                            help="""Pass an agent to be attached to the runtime""")
  global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                            help="""Pass an option to the runtime. Runtime options
                            starting with a '-' must be separated by a '=', for
                            example '--runtime-option=-Xjitthreshold:0'.""")
  global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs',
                            help='Number of dex2oat jobs')
  global_group.add_argument('--runtime-zipapex', dest='runtime_zipapex', default=None,
                            help='Location for runtime zipapex.')
  global_group.add_argument('-a', '--all', action='store_true', dest='run_all',
                            help="Run all the possible configurations for the input test set")
  global_group.add_argument('--csv-results', action='store', dest='csv_result', default=None,
                            type=argparse.FileType('w'), help='Store a CSV record of all results.')
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    var_group = parser.add_argument_group(
        '{}-type Options'.format(variant_type),
        "Options that control the '{}' variants.".format(variant_type))
    var_group.add_argument('--all-' + variant_type,
                           action='store_true',
                           dest='all_' + variant_type,
                           help='Enable all variants of ' + variant_type)
    for variant in variant_set:
      flag = '--' + variant
      var_group.add_argument(flag, action='store_true', dest=variant)

  options = vars(parser.parse_args())
  if options['csv_result'] is not None:
    csv_result = options['csv_result']
    setup_csv_result()
  # Handle the --all-<type> meta-options
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    if options['all_' + variant_type]:
      for variant in variant_set:
        options[variant] = True

  tests = None
  env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
  if options['tests']:
    tests = set()
    for test_name in options['tests']:
      tests |= parse_test_name(test_name)

  for variant_type in VARIANT_TYPE_DICT:
    for variant in VARIANT_TYPE_DICT[variant_type]:
      if options.get(variant):
        _user_input_variants[variant_type].add(variant)

  if options['verbose']:
    verbose = True
  if options['n_thread']:
    n_thread = max(1, options['n_thread'])
  ignore_skips = options['ignore_skips']
  if options['dry_run']:
    dry_run = True
    verbose = True
  build = options['build']
  dist = options['dist']
  if options['gdb']:
    n_thread = 1
    gdb = True
    if options['gdb_arg']:
      gdb_arg = options['gdb_arg']
  if options['dump_cfg']:
    dump_cfg = options['dump_cfg']
  if options['gdb_dex2oat']:
    n_thread = 1
    gdb_dex2oat = True
    if options['gdb_dex2oat_args']:
      gdb_dex2oat_args = options['gdb_dex2oat_args']
  runtime_option = options['runtime_option']
  with_agent = options['with_agent']
  run_test_option = sum(map(shlex.split, options['run_test_option']), [])
  zipapex_loc = options['runtime_zipapex']

  timeout = options['timeout']
  if options['dex2oat_jobs']:
    dex2oat_jobs = options['dex2oat_jobs']
  if options['run_all']:
    run_all_configs = True

  return tests

def main():
  gather_test_info()
  user_requested_tests = parse_option()
  setup_test_env()
  gather_disabled_test_info()
  if build:
    build_targets = ''
    if 'host' in _user_input_variants['target']:
      build_targets += 'test-art-host-run-test-dependencies '
    if 'target' in _user_input_variants['target']:
      build_targets += 'test-art-target-run-test-dependencies '
    if 'jvm' in _user_input_variants['target']:
      build_targets += 'test-art-host-run-test-dependencies '
    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
    build_command += ' DX='
    if dist:
      build_command += ' dist'
    build_command += ' ' + build_targets
    print_text('Build command: %s\n' % build_command)
    if subprocess.call(build_command.split()):
      # Debugging for b/62653020
      if env.DIST_DIR:
        shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
      sys.exit(1)

  if user_requested_tests:
    run_tests(user_requested_tests)
  else:
    run_tests(RUN_TEST_SET)

  print_analysis()
  close_csv_file()

  exit_code = 0 if len(failed_tests) == 0 else 1
  sys.exit(exit_code)

if __name__ == '__main__':
  main()