#!/usr/bin/env python3
#
# [VPYTHON:BEGIN]
# python_version: "3.8"
# [VPYTHON:END]
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""ART Run-Test TestRunner

The testrunner runs the ART run-tests by simply invoking this script.
It fetches the list of eligible tests from the art/test directory and the
list of disabled tests from art/test/knownfailures.json. It runs the tests by
invoking the art/test/run-test script and checks the exit value to decide
whether a test passed or failed.

Before invoking the script, first build all the test dependencies.
There are two major build targets for building the target and host test
dependencies:
1) test-art-host-run-test
2) test-art-target-run-test

The script accepts the following options, among others:
-t: Either the test name as in art/test or the test name including the variant
    information. E.g., "-t 001-HelloWorld" or
    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-no-jvmti-cdex-fast-001-HelloWorld32"
-j: Number of worker threads to be used. E.g., "-j64"
--dry-run: Instead of running each test, just print its name.
--verbose: Print status information for every test as it runs.
-b / --build-dependencies: Build the dependencies before running the tests.

To request specific variants for the tests, use --<variant-name>.
For example, to select the optimizing compiler, use --optimizing.


In the end, the script will print the failed and skipped tests, if any.
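
A minimal usage sketch (illustrative invocations only; the script's location
in the source tree and the flag combinations shown are assumptions, not a
complete reference):

  ./testrunner.py -t 001-HelloWorld --host --optimizing -j8
  ./testrunner.py --target --jit --64 --verbose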

"""
import argparse
import collections

# b/140161314 diagnostics.
try:
  import concurrent.futures
except Exception:
  import sys
  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
  sys.stdout.flush()
  raise

import contextlib
import csv
import datetime
import fnmatch
import itertools
import json
import multiprocessing
import os
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time

import env
from target_config import target_config
from device_config import device_config

# TODO: make it adjustable per test and for buildbots
#
# Note: this needs to be larger than run-test timeouts, as long as this script
#       does not push the value to run-test. run-test is somewhat complicated:
#                      base: 25m  (large for ASAN)
#        + timeout handling:  2m
#        +   gcstress extra: 20m
#        -----------------------
#                            47m
timeout = 3600 # 60 minutes

if env.ART_TEST_RUN_ON_ARM_FVP:
  # Increase timeout to 600 minutes due to the emulation overhead on FVP.
  timeout = 36000

# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a
# map keyed by the test name (like 001-HelloWorld), whose value is the set of
# variant combinations for which the test is disabled.
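# An illustrative (hypothetical) example of its shape:
#   {'001-HelloWorld': {frozenset({'target', 'gcstress'})}}
# would disable 001-HelloWorld whenever both the 'target' and 'gcstress'
# variants are selected for a run.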
DISABLED_TEST_CONTAINER = {}

# The dict maps each variant type to the set of all possible variants of that
# type. For example, for the key 'target', the value contains 'target', 'host'
# and 'jvm'. It is used to parse the test name given as the argument to run.
VARIANT_TYPE_DICT = {}

# The set of all variant sets that are incompatible and will always be skipped.
NONFUNCTIONAL_VARIANT_SETS = set()

# The set contains all the variants of each type.
TOTAL_VARIANTS_SET = set()

# The colors are used in the output. When a test passes, COLOR_PASS is used,
# and so on.
COLOR_ERROR = '\033[91m'
COLOR_PASS = '\033[92m'
COLOR_SKIP = '\033[93m'
COLOR_NORMAL = '\033[0m'

# The set contains the list of all the possible run tests that are in art/test
# directory.
RUN_TEST_SET = set()

failed_tests = []
skipped_tests = []

# Flags
n_thread = 0
total_test_count = 0
verbose = False
dry_run = False
ignore_skips = False
build = False
dist = False
gdb = False
gdb_arg = ''
csv_result = None
csv_writer = None
runtime_option = ''
with_agent = []
zipapex_loc = None
run_test_option = []
dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
run_all_configs = False

# Dict containing extra arguments
extra_arguments = { "host" : [], "target" : [] }

# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
_user_input_variants = collections.defaultdict(set)


class ChildProcessTracker(object):
  """Keeps track of forked child processes to be able to kill them."""
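  # Note: children must be spawned with start_new_session=True (as run_test
  # does below) so that os.killpg() can address each child's process group.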

  def __init__(self):
    self.procs = {}             # dict from pid to subprocess.Popen object
    self.mutex = threading.Lock()

  def wait(self, proc, timeout):
    """Waits on the given subprocess and makes it available to kill_all meanwhile.

    Args:
      proc: The subprocess.Popen object to wait on.
      timeout: Timeout passed on to proc.communicate.

    Returns: A tuple of the process stdout output and its return value.
    """
    with self.mutex:
      if self.procs is not None:
        self.procs[proc.pid] = proc
      else:
        os.killpg(proc.pid, signal.SIGKILL) # kill_all has already been called.
    try:
      output = proc.communicate(timeout=timeout)[0]
      return_value = proc.wait()
      return output, return_value
    finally:
      with self.mutex:
        if self.procs is not None:
          del self.procs[proc.pid]

  def kill_all(self):
    """Kills all currently running processes and any future ones."""
    with self.mutex:
      for pid in self.procs:
        os.killpg(pid, signal.SIGKILL)
      self.procs = None # Make future wait() calls kill their processes immediately.

child_process_tracker = ChildProcessTracker()


def setup_csv_result():
  """Set up the CSV output if required."""
  global csv_writer
  csv_writer = csv.writer(csv_result)
  # Write the header.
  csv_writer.writerow(['target', 'run', 'prebuild', 'compiler', 'relocate', 'trace', 'gc',
                       'jni', 'image', 'debuggable', 'jvmti', 'cdex_level', 'test', 'address_size', 'result'])


def send_csv_result(test, result):
  """
  Write a line into the CSV results file if one is available.
  """
  if csv_writer is not None:
    csv_writer.writerow(extract_test_name(test) + [result])

def close_csv_file():
  global csv_result
  global csv_writer
  if csv_result is not None:
    csv_writer = None
    csv_result.flush()
    csv_result.close()
    csv_result = None

def gather_test_info():
  """Gathers information about the tests to be run, which includes
  generating the list of all tests from the art/test directory and the list
  of disabled tests. It also maps the various variants to their types.
  """
  global TOTAL_VARIANTS_SET
  # TODO: Avoid duplication of the variant names in different lists.
  VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
  VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
  VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image'}
  VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
  VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
  VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
  VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
  VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
  VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
  VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                                'field-stress', 'step-stress'}
  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
                                   'optimizing', 'regalloc_gc',
                                   'speed-profile', 'baseline'}

  # Regalloc_GC cannot work with prebuild.
  NONFUNCTIONAL_VARIANT_SETS.add(frozenset({'regalloc_gc', 'prebuild'}))

  for v_type in VARIANT_TYPE_DICT:
    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))

  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
  for f in os.listdir(test_dir):
    if fnmatch.fnmatch(f, '[0-9]*'):
      RUN_TEST_SET.add(f)


def setup_test_env():
  """Sets default values for the various test variants if they are not
  already set.
  """
  if env.ART_TEST_BISECTION:
    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
    env.ART_TEST_RUN_TEST_PREBUILD = False
    # Bisection search writes to standard output.
    env.ART_TEST_QUIET = False

  global _user_input_variants
  global run_all_configs
  # These are the default variant-options we will use if nothing in the group is specified.
  default_variants = {
      'target': {'host', 'target'},
      'prebuild': {'prebuild'},
      'cdex_level': {'cdex-fast'},
      'jvmti': { 'no-jvmti'},
      'compiler': {'optimizing',
                   'jit',
                   'interpreter',
                   'interp-ac',
                   'speed-profile'},
      'relocate': {'no-relocate'},
      'trace': {'ntrace'},
      'gc': {'cms'},
      'jni': {'checkjni'},
      'image': {'picimage'},
      'debuggable': {'ndebuggable'},
      'run': {'debug'},
      # address_sizes_target depends on the target so it is dealt with below.
  }
  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
  # want to pick up if we pass --all.
  default_variants_keys = default_variants.keys()
  if run_all_configs:
    default_variants = VARIANT_TYPE_DICT

  for key in default_variants_keys:
    if not _user_input_variants[key]:
      _user_input_variants[key] = default_variants[key]

  _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
  if not _user_input_variants['address_sizes']:
    _user_input_variants['address_sizes_target']['target'].add(
        env.ART_PHONY_TEST_TARGET_SUFFIX)
    _user_input_variants['address_sizes_target']['host'].add(
        env.ART_PHONY_TEST_HOST_SUFFIX)
    if env.ART_TEST_RUN_TEST_2ND_ARCH:
      _user_input_variants['address_sizes_target']['host'].add(
          env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
      _user_input_variants['address_sizes_target']['target'].add(
          env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
  else:
    _user_input_variants['address_sizes_target']['host'] = _user_input_variants['address_sizes']
    _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']

  global n_thread
  if 'target' in _user_input_variants['target']:
    device_name = get_device_name()
    if n_thread == 0:
      # Use only half of the cores since fully loading the device tends to lead to timeouts.
      n_thread = get_target_cpu_count() // 2
      if device_name == 'fugu':
        n_thread = 1
  else:
    device_name = "host"
    if n_thread == 0:
      n_thread = get_host_cpu_count()
  print_text("Concurrency: {} ({})\n".format(n_thread, device_name))

  global extra_arguments
  for target in _user_input_variants['target']:
    extra_arguments[target] = find_extra_device_arguments(target)

  if not sys.stdout.isatty():
    global COLOR_ERROR
    global COLOR_PASS
    global COLOR_SKIP
    global COLOR_NORMAL
    COLOR_ERROR = ''
    COLOR_PASS = ''
    COLOR_SKIP = ''
    COLOR_NORMAL = ''

def find_extra_device_arguments(target):
  """
  Gets any extra arguments from the device_config.
  """
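  # device_config is expected to map a device name (the ro.product.name value)
  # to a dict with a 'run-test-args' list; unknown devices fall back to an
  # empty argument list.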
  device_name = target
  if target == 'target':
    device_name = get_device_name()
  return device_config.get(device_name, { 'run-test-args' : [] })['run-test-args']

def get_device_name():
  """
  Gets the value of ro.product.name from the remote device.
  """
  proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
                          stderr=subprocess.STDOUT,
                          stdout = subprocess.PIPE,
                          universal_newlines=True)
  # only wait 2 seconds.
  timeout_val = 2

  if env.ART_TEST_RUN_ON_ARM_FVP:
    # Increase timeout to 200 seconds due to the emulation overhead on FVP.
    timeout_val = 200

  output = proc.communicate(timeout = timeout_val)[0]
  success = not proc.wait()
  if success:
    return output.strip()
  else:
    print_text("Unable to determine device type!\n")
    print_text("Continuing anyway.\n")
    return "UNKNOWN_TARGET"

def run_tests(tests):
  """This method generates variants of the tests to be run and executes them.

  Args:
    tests: The set of tests to be run.
  """
  options_all = ''

  # jvm does not run with all these combinations,
  # or at least it doesn't make sense for most of them.
  # TODO: support some jvm variants like jvmti ?
  target_input_variants = _user_input_variants['target']
  uncombinated_target_input_variants = []
  if 'jvm' in target_input_variants:
    _user_input_variants['target'].remove('jvm')
    uncombinated_target_input_variants.append('jvm')

  global total_test_count
  total_test_count = len(tests)
  if target_input_variants:
    for variant_type in VARIANT_TYPE_DICT:
      if not (variant_type == 'target' or 'address_sizes' in variant_type):
        total_test_count *= len(_user_input_variants[variant_type])
  target_address_combinations = 0
  for target in target_input_variants:
    for address_size in _user_input_variants['address_sizes_target'][target]:
      target_address_combinations += 1
  target_address_combinations += len(uncombinated_target_input_variants)
  total_test_count *= target_address_combinations

  if env.ART_TEST_WITH_STRACE:
    options_all += ' --strace'

  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
    options_all += ' --always-clean'

  if env.ART_TEST_BISECTION:
    options_all += ' --bisection-search'

  if gdb:
    options_all += ' --gdb'
    if gdb_arg:
      options_all += ' --gdb-arg ' + gdb_arg

  options_all += ' ' + ' '.join(run_test_option)

  if runtime_option:
    for opt in runtime_option:
      options_all += ' --runtime-option ' + opt
  if with_agent:
    for opt in with_agent:
      options_all += ' --with-agent ' + opt

  if dex2oat_jobs != -1:
    options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)

  def iter_config(tests, input_variants, user_input_variants):
    config = itertools.product(tests, input_variants, user_input_variants['run'],
                                 user_input_variants['prebuild'], user_input_variants['compiler'],
                                 user_input_variants['relocate'], user_input_variants['trace'],
                                 user_input_variants['gc'], user_input_variants['jni'],
                                 user_input_variants['image'],
                                 user_input_variants['debuggable'], user_input_variants['jvmti'],
                                 user_input_variants['cdex_level'])
    return config

  # [--host, --target] combines with all the other user input variants.
  config = iter_config(tests, target_input_variants, _user_input_variants)
  # [--jvm] currently combines with nothing else. most of the extra flags we'd insert
  # would be unrecognizable by the 'java' binary, so avoid inserting any extra flags for now.
  uncombinated_config = iter_config(tests, uncombinated_target_input_variants, { 'run': [''],
      'prebuild': [''], 'compiler': [''],
      'relocate': [''], 'trace': [''],
      'gc': [''], 'jni': [''],
      'image': [''],
      'debuggable': [''], 'jvmti': [''],
      'cdex_level': ['']})

  def start_combination(executor, config_tuple, global_options, address_size):
      test, target, run, prebuild, compiler, relocate, trace, gc, \
      jni, image, debuggable, jvmti, cdex_level = config_tuple

      # NB The order of components here should match the order of
      # components in the regex parser in parse_test_name.
      test_name = 'test-art-'
      test_name += target + '-run-test-'
      test_name += run + '-'
      test_name += prebuild + '-'
      test_name += compiler + '-'
      test_name += relocate + '-'
      test_name += trace + '-'
      test_name += gc + '-'
      test_name += jni + '-'
      test_name += image + '-'
      test_name += debuggable + '-'
      test_name += jvmti + '-'
      test_name += cdex_level + '-'
      test_name += test
      test_name += address_size

      variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
                     image, debuggable, jvmti, cdex_level, address_size}

      options_test = global_options

      if target == 'host':
        options_test += ' --host'
      elif target == 'jvm':
        options_test += ' --jvm'

      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
      # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
      # for target tests.
      if target == 'target':
        if env.ART_TEST_CHROOT:
          options_test += ' --chroot ' + env.ART_TEST_CHROOT
        if env.ART_TEST_ANDROID_ROOT:
          options_test += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
        if env.ART_TEST_ANDROID_I18N_ROOT:
          options_test += ' --android-i18n-root ' + env.ART_TEST_ANDROID_I18N_ROOT
        if env.ART_TEST_ANDROID_ART_ROOT:
          options_test += ' --android-art-root ' + env.ART_TEST_ANDROID_ART_ROOT
        if env.ART_TEST_ANDROID_TZDATA_ROOT:
          options_test += ' --android-tzdata-root ' + env.ART_TEST_ANDROID_TZDATA_ROOT

      if run == 'ndebug':
        options_test += ' -O'

      if prebuild == 'prebuild':
        options_test += ' --prebuild'
      elif prebuild == 'no-prebuild':
        options_test += ' --no-prebuild'

      if cdex_level:
        # Add option and remove the cdex- prefix.
        options_test += ' --compact-dex-level ' + cdex_level.replace('cdex-','')

      if compiler == 'optimizing':
        options_test += ' --optimizing'
      elif compiler == 'regalloc_gc':
        options_test += ' --optimizing -Xcompiler-option --register-allocation-strategy=graph-color'
      elif compiler == 'interpreter':
        options_test += ' --interpreter'
      elif compiler == 'interp-ac':
        options_test += ' --interpreter --verify-soft-fail'
      elif compiler == 'jit':
        options_test += ' --jit'
      elif compiler == 'jit-on-first-use':
        options_test += ' --jit --runtime-option -Xjitthreshold:0'
      elif compiler == 'speed-profile':
        options_test += ' --random-profile'
      elif compiler == 'baseline':
        options_test += ' --baseline'

      if relocate == 'relocate':
        options_test += ' --relocate'
      elif relocate == 'no-relocate':
        options_test += ' --no-relocate'

      if trace == 'trace':
        options_test += ' --trace'
      elif trace == 'stream':
        options_test += ' --trace --stream'

      if gc == 'gcverify':
        options_test += ' --gcverify'
      elif gc == 'gcstress':
        options_test += ' --gcstress'

      if jni == 'forcecopy':
        options_test += ' --runtime-option -Xjniopts:forcecopy'
      elif jni == 'checkjni':
        options_test += ' --runtime-option -Xcheck:jni'

      if image == 'no-image':
        options_test += ' --no-image'

      if debuggable == 'debuggable':
        options_test += ' --debuggable --runtime-option -Xopaque-jni-ids:true'

      if jvmti == 'jvmti-stress':
        options_test += ' --jvmti-trace-stress --jvmti-redefine-stress --jvmti-field-stress'
      elif jvmti == 'field-stress':
        options_test += ' --jvmti-field-stress'
      elif jvmti == 'trace-stress':
        options_test += ' --jvmti-trace-stress'
      elif jvmti == 'redefine-stress':
        options_test += ' --jvmti-redefine-stress'
      elif jvmti == 'step-stress':
        options_test += ' --jvmti-step-stress'

      if address_size == '64':
        options_test += ' --64'

      # TODO(http://36039166): This is a temporary solution to
      # fix build breakages.
      options_test = (' --output-path %s') % (
          tempfile.mkdtemp(dir=env.ART_HOST_TEST_DIR)) + options_test

      run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
      command = ' '.join((run_test_sh, options_test, ' '.join(extra_arguments[target]), test))
      return executor.submit(run_test, command, test, variant_set, test_name)

  #  Use a context-manager to handle cleaning up the extracted zipapex if needed.
  with handle_zipapex(zipapex_loc) as zipapex_opt:
    options_all += zipapex_opt
    global n_thread
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
      test_futures = []
      for config_tuple in config:
        target = config_tuple[1]
        for address_size in _user_input_variants['address_sizes_target'][target]:
          test_futures.append(start_combination(executor, config_tuple, options_all, address_size))

      for config_tuple in uncombinated_config:
        test_futures.append(
            start_combination(executor, config_tuple, options_all, ""))  # no address size

      try:
        tests_done = 0
        for test_future in concurrent.futures.as_completed(test_futures):
          (test, status, failure_info, test_time) = test_future.result()
          tests_done += 1
          print_test_info(tests_done, test, status, failure_info, test_time)
          if failure_info and not env.ART_TEST_KEEP_GOING:
            for f in test_futures:
              f.cancel()
            break
      except KeyboardInterrupt:
        for f in test_futures:
          f.cancel()
        child_process_tracker.kill_all()
      executor.shutdown(True)

@contextlib.contextmanager
def handle_zipapex(ziploc):
  """Extracts the zipapex (if present) and handles cleanup.

  If we are running out of a zipapex we want to unzip it once and have all the tests use the same
  extracted contents. This extracts the files and handles cleanup if needed. It returns the
  required extra arguments to pass to the run-test.
  """
  if ziploc is not None:
    with tempfile.TemporaryDirectory() as tmpdir:
      subprocess.check_call(["unzip", "-qq", ziploc, "apex_payload.zip", "-d", tmpdir])
      subprocess.check_call(
        ["unzip", "-qq", os.path.join(tmpdir, "apex_payload.zip"), "-d", tmpdir])
      yield " --runtime-extracted-zipapex " + tmpdir
  else:
    yield ""

def _popen(**kwargs):
  if sys.version_info.major == 3 and sys.version_info.minor >= 6:
    return subprocess.Popen(encoding=sys.stdout.encoding, **kwargs)
  return subprocess.Popen(**kwargs)

def run_test(command, test, test_variant, test_name):
  """Runs the test.

  It invokes the art/test/run-test script to run the test. The exit value of
  the script is checked, and if it indicates success, the test is assumed to
  have passed; otherwise the test is put in the list of failed tests. Before
  actually running the test, it also checks if the test is placed in the list
  of disabled tests, and if yes, it skips running it, and adds the test to the
  list of skipped tests.

  Args:
    command: The command to be used to invoke the script.
    test: The name of the test without the variant information.
    test_variant: The set of variants for the test.
    test_name: The name of the test along with the variants.

  Returns: a tuple of the test name, status, optional failure info, and test time.
  """
  try:
    if is_test_disabled(test, test_variant):
      test_skipped = True
      test_time = datetime.timedelta()
    else:
      test_skipped = False
      test_start_time = time.monotonic()
      if verbose:
        print_text("Starting %s at %s\n" % (test_name, test_start_time))
      if gdb:
        proc = _popen(
          args=command.split(),
          stderr=subprocess.STDOUT,
          universal_newlines=True,
          start_new_session=True
        )
      else:
        proc = _popen(
          args=command.split(),
          stderr=subprocess.STDOUT,
          stdout = subprocess.PIPE,
          universal_newlines=True,
          start_new_session=True,
        )
      script_output, return_value = child_process_tracker.wait(proc, timeout)
      test_passed = not return_value
      test_time_seconds = time.monotonic() - test_start_time
      test_time = datetime.timedelta(seconds=test_time_seconds)

    if not test_skipped:
      if test_passed:
        return (test_name, 'PASS', None, test_time)
      else:
        failed_tests.append((test_name, str(command) + "\n" + script_output))
        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
    elif not dry_run:
      skipped_tests.append(test_name)
      return (test_name, 'SKIP', None, test_time)
    else:
      return (test_name, 'PASS', None, test_time)
  except subprocess.TimeoutExpired as e:
    if verbose:
      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
    test_time_seconds = time.monotonic() - test_start_time
    test_time = datetime.timedelta(seconds=test_time_seconds)
    failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))

    # HACK(b/142039427): Print extra backtraces on timeout.
    if "-target-" in test_name:
      for i in range(8):
        proc_name = "dalvikvm" + test_name[-2:]
        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
        for pid in pidof.stdout.decode("ascii").split():
          if i >= 4:
            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
            subprocess.run(["adb", "shell", "debuggerd", pid])
            time.sleep(10)
          task_dir = "/proc/%s/task" % pid
          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
          for tid in tids.stdout.decode("ascii").split():
            for status in ["stat", "status"]:
              filename = "%s/%s/%s" % (task_dir, tid, status)
              print_text("Content of %s\n" % (filename))
              subprocess.run(["adb", "shell", "cat", filename])
        time.sleep(60)

    # The python documentation states that it is necessary to actually kill the process.
    os.killpg(proc.pid, signal.SIGKILL)
    script_output = proc.communicate()

    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
  except Exception as e:
    failed_tests.append((test_name, str(e)))
    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())

def print_test_info(test_count, test_name, result, failed_test_info="",
                    test_time=datetime.timedelta()):
  """Print the continuous test information.

  If verbose is set to True, it continuously prints test status information
  on a new line.
  If verbose is set to False, it keeps erasing test
  information by overriding it with the latest test information. Also,
  in this case it strictly makes sure that the information length doesn't
  exceed the console width. It does so by shortening the test_name.

  When a test fails, it prints the output of the run-test script and
  the command used to invoke the script. It doesn't override the failing
  test information in either of the cases.
  """

  info = ''
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    info = '\r' + ' ' * console_width + '\r'
  try:
    percent = (test_count * 100) / total_test_count
    progress_info = ('[ %d%% %d/%d ]') % (
      percent,
      test_count,
      total_test_count)
    if test_time.total_seconds() != 0 and verbose:
      info += '(%s)' % str(test_time)


    if result == 'FAIL' or result == 'TIMEOUT':
      if not verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL)
      else:
        info += ('%s %s %s\n%s\n') % (
          progress_info,
          test_name,
          COLOR_ERROR + result + COLOR_NORMAL,
          failed_test_info)
    else:
      result_text = ''
      if result == 'PASS':
        result_text += COLOR_PASS + 'PASS' + COLOR_NORMAL
      elif result == 'SKIP':
        result_text += COLOR_SKIP + 'SKIP' + COLOR_NORMAL

      if verbose:
        info += ('%s %s %s\n') % (
          progress_info,
          test_name,
          result_text)
      else:
        total_output_length = 2 # Two spaces
        total_output_length += len(progress_info)
        total_output_length += len(result)
        allowed_test_length = console_width - total_output_length
        test_name_len = len(test_name)
        if allowed_test_length < test_name_len:
          test_name = ('...%s') % (
            test_name[-(allowed_test_length - 3):])
        info += ('%s %s %s') % (
          progress_info,
          test_name,
          result_text)
    send_csv_result(test_name, result)
    print_text(info)
  except Exception as e:
    print_text(('%s\n%s\n') % (test_name, str(e)))
    failed_tests.append(test_name)

def verify_knownfailure_entry(entry):
  supported_field = {
      'tests' : (list, str),
      'test_patterns' : (list,),
      'description' : (list, str),
      'bug' : (str,),
      'variant' : (str,),
      'devices': (list, str),
      'env_vars' : (dict,),
      'zipapex' : (bool,),
  }
  for field in entry:
    field_type = type(entry[field])
    if field_type not in supported_field[field]:
      raise ValueError('%s is not a supported type for %s\n%s' % (
          str(field_type),
          field,
          str(entry)))

def get_disabled_test_info(device_name):
  """Generate the set of known failures.

  It parses the art/test/knownfailures.json file to generate the list of
  disabled tests.

  Returns:
    A dict mapping each test to the list of variant combinations for which
    the test should not be run.
  """
  known_failures_file = env.ANDROID_BUILD_TOP + '/art/test/knownfailures.json'
  with open(known_failures_file) as known_failures_json:
    known_failures_info = json.loads(known_failures_json.read())

  disabled_test_info = {}
  for failure in known_failures_info:
    verify_knownfailure_entry(failure)
    tests = failure.get('tests', [])
    if isinstance(tests, str):
      tests = [tests]
    patterns = failure.get("test_patterns", [])
    if not isinstance(patterns, list):
      raise ValueError("test_patterns is not a list in %s" % failure)

    tests += [f for f in RUN_TEST_SET if any(re.match(pat, f) is not None for pat in patterns)]
    variants = parse_variants(failure.get('variant'))

    # Treat a '"devices": "<foo>"' entry as equivalent to the 'target' variant
    # when the current device is listed in "devices"; otherwise skip the entry.
    device_names = failure.get('devices', [])
    if isinstance(device_names, str):
      device_names = [device_names]
    if len(device_names) != 0:
      if device_name in device_names:
        variants.add('target')
      else:
        # Skip adding test info as device_name is not present in "devices" entry.
        continue

    env_vars = failure.get('env_vars')

    if check_env_vars(env_vars):
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (
              test))
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

    zipapex_disable = failure.get("zipapex", False)
    if zipapex_disable and zipapex_loc is not None:
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (test))
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

  return disabled_test_info

def gather_disabled_test_info():
  global DISABLED_TEST_CONTAINER
  device_name = get_device_name() if 'target' in _user_input_variants['target'] else None
  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)

def check_env_vars(env_vars):
  """Checks if the env variables are set as required to run the test.

  Returns:
    True if all the env variables are set as required, otherwise False.
  """

  if not env_vars:
    return True
  for key in env_vars:
    if env.get_env(key) != env_vars.get(key):
      return False
  return True


def is_test_disabled(test, variant_set):
  """Checks if the test along with the variant_set is disabled.

  Args:
    test: The name of the test as in art/test directory.
    variant_set: Variants to be used for the test.
  Returns:
    True, if the test is disabled.
  """
  if dry_run:
    return True
  if test in env.EXTRA_DISABLED_TESTS:
    return True
  if ignore_skips:
    return False
  variants_list = DISABLED_TEST_CONTAINER.get(test, {})
  for variants in variants_list:
    variants_present = True
    for variant in variants:
      if variant not in variant_set:
        variants_present = False
        break
    if variants_present:
      return True
  for bad_combo in NONFUNCTIONAL_VARIANT_SETS:
    if bad_combo.issubset(variant_set):
      return True
  return False


def parse_variants(variants):
  """Parse variants fetched from art/test/knownfailures.json.
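
  As an illustration (not an entry taken from knownfailures.json itself), the
  string "gcstress & debug | trace" is parsed into
  {frozenset({'gcstress', 'debug'}), frozenset({'trace'})}: '|' separates
  alternative variant combinations and '&' joins variants that must all be
  present for a combination to apply.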
  """
  if not variants:
    variants = ''
    for variant in TOTAL_VARIANTS_SET:
      variants += variant
      variants += '|'
    variants = variants[:-1]
  variant_list = set()
  or_variants = variants.split('|')
  for or_variant in or_variants:
    and_variants = or_variant.split('&')
    variant = set()
    for and_variant in and_variants:
      and_variant = and_variant.strip()
      if and_variant not in TOTAL_VARIANTS_SET:
        raise ValueError('%s is not a valid variant' % (
            and_variant))
      variant.add(and_variant)
    variant_list.add(frozenset(variant))
  return variant_list

def print_text(output):
  sys.stdout.write(output)
  sys.stdout.flush()

def print_analysis():
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    eraser_text = '\r' + ' ' * console_width + '\r'
    print_text(eraser_text)

  # Prints information about the total tests run.
  # E.g., "2/38 (5%) tests passed".
  passed_test_count = total_test_count - len(skipped_tests) - len(failed_tests)
  passed_test_information = ('%d/%d (%d%%) %s passed.\n') % (
      passed_test_count,
      total_test_count,
      (passed_test_count*100)/total_test_count,
      'tests' if passed_test_count > 1 else 'test')
  print_text(passed_test_information)

  # Prints the list of skipped tests, if any.
  if skipped_tests:
    print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
    for test in skipped_tests:
      print_text(test + '\n')
    print_text('\n')

  # Prints the list of failed tests, if any.
  if failed_tests:
    print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n')
    for test_info in failed_tests:
      print_text(('%s\n%s\n' % (test_info[0], test_info[1])))
    print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
    for failed_test in sorted([test_info[0] for test_info in failed_tests]):
      print_text(('%s\n' % (failed_test)))

test_name_matcher = None
def extract_test_name(test_name):
  """Parses the test name and returns all the parts"""
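  # Note: the returned list holds the 14 groups captured by the regex below,
  # in order: target, run, prebuild, compiler, relocate, trace, gc, jni,
  # image, debuggable, jvmti, cdex_level, test and address size.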
  global test_name_matcher
  if test_name_matcher is None:
    regex = '^test-art-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-'
    regex += 'run-test-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['run']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['prebuild']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['compiler']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['relocate']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['trace']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jvmti']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['cdex_level']) + ')-'
    regex += '(' + '|'.join(RUN_TEST_SET) + ')'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$'
    test_name_matcher = re.compile(regex)
  match = test_name_matcher.match(test_name)
  if match:
    return list(match.group(i) for i in range(1,15))
  raise ValueError(test_name + " is not a valid test")

def parse_test_name(test_name):
  """Parses the test name provided by the user.
  It supports two types of test_name:
  1) Like 001-HelloWorld. In this case, it will just verify if the test actually
  exists and, if it does, return the test name.
  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-ndebuggable-no-jvmti-cdex-fast-001-HelloWorld32
  In this case, it will parse all the variants and check if they are placed
  correctly. If yes, it will set the corresponding user-input variant types to
  the variants required to run the test. Again, it returns the test_name
  without the variant information, like 001-HelloWorld.
  """
  test_set = set()
  for test in RUN_TEST_SET:
    if test.startswith(test_name):
      test_set.add(test)
  if test_set:
    return test_set

  parsed = extract_test_name(test_name)
  _user_input_variants['target'].add(parsed[0])
  _user_input_variants['run'].add(parsed[1])
  _user_input_variants['prebuild'].add(parsed[2])
  _user_input_variants['compiler'].add(parsed[3])
  _user_input_variants['relocate'].add(parsed[4])
  _user_input_variants['trace'].add(parsed[5])
  _user_input_variants['gc'].add(parsed[6])
  _user_input_variants['jni'].add(parsed[7])
  _user_input_variants['image'].add(parsed[8])
  _user_input_variants['debuggable'].add(parsed[9])
  _user_input_variants['jvmti'].add(parsed[10])
  _user_input_variants['cdex_level'].add(parsed[11])
  _user_input_variants['address_sizes'].add(parsed[13])
  return {parsed[12]}


def get_target_cpu_count():
  adb_command = 'adb shell cat /sys/devices/system/cpu/present'
  cpu_info_proc = subprocess.Popen(adb_command.split(), stdout=subprocess.PIPE)
  cpu_info = cpu_info_proc.stdout.read()
  if type(cpu_info) is bytes:
    cpu_info = cpu_info.decode('utf-8')
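  # The 'present' file typically contains a range such as '0-7' (an assumption
  # about common devices); take the last index and add one to get a CPU count.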
  cpu_info_regex = r'\d*-(\d*)'
  match = re.match(cpu_info_regex, cpu_info)
  if match:
    return int(match.group(1)) + 1  # Add one to convert from "last-index" to "count"
  else:
    raise ValueError('Unable to predict the concurrency for the target. '
                     'Is the device connected?')


def get_host_cpu_count():
  return multiprocessing.cpu_count()


def parse_option():
  global verbose
  global dry_run
  global ignore_skips
  global n_thread
  global build
  global dist
  global gdb
  global gdb_arg
  global runtime_option
  global run_test_option
  global timeout
  global dex2oat_jobs
  global run_all_configs
  global with_agent
  global zipapex_loc
  global csv_result

  parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
  global_group = parser.add_argument_group('Global options',
                                           'Options that affect all tests being run')
  global_group.add_argument('-j', type=int, dest='n_thread', help="""Number of CPUs to use.
                            Defaults to half of CPUs on target and all CPUs on host.""")
  global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
  global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
  global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                            help="Skip the given test in all circumstances.")
  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                            help="""Don't skip any run-test configurations listed in
                            knownfailures.json.""")
  global_group.add_argument('--no-build-dependencies',
                            action='store_false', dest='build',
                            help="""Don't build dependencies under any circumstances. This is the
                            behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.""")
  global_group.add_argument('-b', '--build-dependencies',
                            action='store_true', dest='build',
                            help="""Build dependencies under all circumstances. By default we will
                            not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.""")
  global_group.add_argument('--dist',
                            action='store_true', dest='dist',
                            help="""If dependencies are to be built, pass `dist` to the build
                            command line. You may want to also set the DIST_DIR environment
                            variable when using this flag.""")
  global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
  global_group.add_argument('--gdb', action='store_true', dest='gdb')
  global_group.add_argument('--gdb-arg', dest='gdb_arg')
  global_group.add_argument('--run-test-option', action='append', dest='run_test_option',
                            default=[],
                            help="""Pass an option, unaltered, to the run-test script.
                            This should be enclosed in single-quotes to allow for spaces. The option
                            will be split using shlex.split() prior to invoking run-test.
                            Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\"""")
  global_group.add_argument('--with-agent', action='append', dest='with_agent',
                            help="""Pass an agent to be attached to the runtime""")
  global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
                            help="""Pass an option to the runtime. Runtime options
                            starting with a '-' must be separated by a '=', for
                            example '--runtime-option=-Xjitthreshold:0'.""")
  global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs',
                            help='Number of dex2oat jobs')
  global_group.add_argument('--runtime-zipapex', dest='runtime_zipapex', default=None,
                            help='Location for runtime zipapex.')
  global_group.add_argument('-a', '--all', action='store_true', dest='run_all',
                            help="Run all the possible configurations for the input test set")
  global_group.add_argument('--csv-results', action='store', dest='csv_result', default=None,
                            type=argparse.FileType('w'), help='Store a CSV record of all results.')
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    var_group = parser.add_argument_group(
        '{}-type Options'.format(variant_type),
        "Options that control the '{}' variants.".format(variant_type))
    var_group.add_argument('--all-' + variant_type,
                           action='store_true',
                           dest='all_' + variant_type,
                           help='Enable all variants of ' + variant_type)
    for variant in variant_set:
      flag = '--' + variant
      var_group.add_argument(flag, action='store_true', dest=variant)

  options = vars(parser.parse_args())
  if options['csv_result'] is not None:
    csv_result = options['csv_result']
    setup_csv_result()
  # Handle the --all-<type> meta-options
  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
    if options['all_' + variant_type]:
      for variant in variant_set:
        options[variant] = True

  tests = None
  env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
  if options['tests']:
    tests = set()
    for test_name in options['tests']:
      tests |= parse_test_name(test_name)

  for variant_type in VARIANT_TYPE_DICT:
    for variant in VARIANT_TYPE_DICT[variant_type]:
      if options.get(variant):
        _user_input_variants[variant_type].add(variant)

  if options['verbose']:
    verbose = True
  if options['n_thread']:
    n_thread = max(1, options['n_thread'])
  ignore_skips = options['ignore_skips']
  if options['dry_run']:
    dry_run = True
    verbose = True
  build = options['build']
  dist = options['dist']
  if options['gdb']:
    n_thread = 1
    gdb = True
    if options['gdb_arg']:
      gdb_arg = options['gdb_arg']
  runtime_option = options['runtime_option']
  with_agent = options['with_agent']
  run_test_option = sum(map(shlex.split, options['run_test_option']), [])
  zipapex_loc = options['runtime_zipapex']

  timeout = options['timeout']
  if options['dex2oat_jobs']:
    dex2oat_jobs = options['dex2oat_jobs']
  if options['run_all']:
    run_all_configs = True

  return tests

def main():
  gather_test_info()
  user_requested_tests = parse_option()
  setup_test_env()
  gather_disabled_test_info()
  if build:
    build_targets = ''
    if 'host' in _user_input_variants['target']:
      build_targets += 'test-art-host-run-test-dependencies '
    if 'target' in _user_input_variants['target']:
      build_targets += 'test-art-target-run-test-dependencies '
    if 'jvm' in _user_input_variants['target']:
      build_targets += 'test-art-host-run-test-dependencies '
    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
    build_command += ' DX='
    if dist:
      build_command += ' dist'
    build_command += ' ' + build_targets
    print_text('Build command: %s\n' % build_command)
    if subprocess.call(build_command.split()):
      # Debugging for b/62653020
      if env.DIST_DIR:
        shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
      sys.exit(1)

  if user_requested_tests:
    run_tests(user_requested_tests)
  else:
    run_tests(RUN_TEST_SET)

  print_analysis()
  close_csv_file()

  exit_code = 0 if len(failed_tests) == 0 else 1
  sys.exit(exit_code)

if __name__ == '__main__':
  main()