• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2# -*- coding: utf-8 -*-
3#
4# Copyright 2019 The Chromium OS Authors. All rights reserved.
5# Use of this source code is governed by a BSD-style license that can be
6# found in the LICENSE file.
7
8"""Runs tests for the given input files.
9
10Tries its best to autodetect all tests based on path name without being *too*
11aggressive.
12
13In short, there's a small set of directories in which, if you make any change,
14all of the tests in those directories get run. Additionally, if you change a
15python file named foo, it'll run foo_test.py or foo_unittest.py if either of
16those exist.
17
18All tests are run in parallel.
19"""
20
21# NOTE: An alternative mentioned on the initial CL for this
22# https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1516414
23# is pytest. It looks like that brings some complexity (and makes use outside
24# of the chroot a bit more obnoxious?), but might be worth exploring if this
25# starts to grow quite complex on its own.
26
27from __future__ import print_function
28
29import argparse
30import collections
31import contextlib
32import multiprocessing.pool
33import os
34import pipes
35import subprocess
36import sys
37
38TestSpec = collections.namedtuple('TestSpec', ['directory', 'command'])
39
40# List of python scripts that are not test with relative path to
41# toolchain-utils.
42non_test_py_files = {
43    'debug_info_test/debug_info_test.py',
44}
45
46
47def _make_relative_to_toolchain_utils(toolchain_utils, path):
48  """Cleans & makes a path relative to toolchain_utils.
49
50  Raises if that path isn't under toolchain_utils.
51  """
52  # abspath has the nice property that it removes any markers like './'.
53  as_abs = os.path.abspath(path)
54  result = os.path.relpath(as_abs, start=toolchain_utils)
55
56  if result.startswith('../'):
57    raise ValueError('Non toolchain-utils directory found: %s' % result)
58  return result
59
60
61def _filter_python_tests(test_files, toolchain_utils):
62  """Returns all files that are real python tests."""
63  python_tests = []
64  for test_file in test_files:
65    rel_path = _make_relative_to_toolchain_utils(toolchain_utils, test_file)
66    if rel_path not in non_test_py_files:
67      python_tests.append(_python_test_to_spec(test_file))
68    else:
69      print('## %s ... NON_TEST_PY_FILE' % rel_path)
70  return python_tests
71
72
73def _gather_python_tests_in(rel_subdir, toolchain_utils):
74  """Returns all files that appear to be Python tests in a given directory."""
75  subdir = os.path.join(toolchain_utils, rel_subdir)
76  test_files = (
77      os.path.join(subdir, file_name)
78      for file_name in os.listdir(subdir)
79      if file_name.endswith('_test.py') or file_name.endswith('_unittest.py'))
80  return _filter_python_tests(test_files, toolchain_utils)
81
82
83def _run_test(test_spec):
84  """Runs a test."""
85  p = subprocess.Popen(
86      test_spec.command,
87      cwd=test_spec.directory,
88      stdin=open('/dev/null'),
89      stdout=subprocess.PIPE,
90      stderr=subprocess.STDOUT,
91      encoding='utf-8')
92  stdout, _ = p.communicate()
93  exit_code = p.wait()
94  return exit_code, stdout
95
96
97def _python_test_to_spec(test_file):
98  """Given a .py file, convert it to a TestSpec."""
99  # Run tests in the directory they exist in, since some of them are sensitive
100  # to that.
101  test_directory = os.path.dirname(os.path.abspath(test_file))
102  file_name = os.path.basename(test_file)
103
104  if os.access(test_file, os.X_OK):
105    command = ['./' + file_name]
106  else:
107    # Assume the user wanted py3.
108    command = ['python3', file_name]
109
110  return TestSpec(directory=test_directory, command=command)
111
112
113def _autodetect_python_tests_for(test_file, toolchain_utils):
114  """Given a test file, detect if there may be related tests."""
115  if not test_file.endswith('.py'):
116    return []
117
118  test_suffixes = ['_test.py', '_unittest.py']
119  if any(test_file.endswith(x) for x in test_suffixes):
120    test_files = [test_file]
121  else:
122    base = test_file[:-3]
123    candidates = (base + x for x in test_suffixes)
124    test_files = (x for x in candidates if os.path.exists(x))
125  return _filter_python_tests(test_files, toolchain_utils)
126
127
128def _run_test_scripts(all_tests, show_successful_output=False):
129  """Runs a list of TestSpecs. Returns whether all of them succeeded."""
130  with contextlib.closing(multiprocessing.pool.ThreadPool()) as pool:
131    results = [pool.apply_async(_run_test, (test,)) for test in all_tests]
132
133  failures = []
134  for i, (test, future) in enumerate(zip(all_tests, results)):
135    # Add a bit more spacing between outputs.
136    if show_successful_output and i:
137      print('\n')
138
139    pretty_test = ' '.join(pipes.quote(test_arg) for test_arg in test.command)
140    pretty_directory = os.path.relpath(test.directory)
141    if pretty_directory == '.':
142      test_message = pretty_test
143    else:
144      test_message = '%s in %s/' % (pretty_test, pretty_directory)
145
146    print('## %s ... ' % test_message, end='')
147    # Be sure that the users sees which test is running.
148    sys.stdout.flush()
149
150    exit_code, stdout = future.get()
151    if not exit_code:
152      print('PASS')
153    else:
154      print('FAIL')
155      failures.append(pretty_test)
156
157    if show_successful_output or exit_code:
158      sys.stdout.write(stdout)
159
160  if failures:
161    word = 'tests' if len(failures) > 1 else 'test'
162    print('%d %s failed: %s' % (len(failures), word, failures))
163
164  return not failures
165
166
167def _compress_list(l):
168  """Removes consecutive duplicate elements from |l|.
169
170  >>> _compress_list([])
171  []
172  >>> _compress_list([1, 1])
173  [1]
174  >>> _compress_list([1, 2, 1])
175  [1, 2, 1]
176  """
177  result = []
178  for e in l:
179    if result and result[-1] == e:
180      continue
181    result.append(e)
182  return result
183
184
185def _fix_python_path(toolchain_utils):
186  pypath = os.environ.get('PYTHONPATH', '')
187  if pypath:
188    pypath = ':' + pypath
189  os.environ['PYTHONPATH'] = toolchain_utils + pypath
190
191
192def _find_forced_subdir_python_tests(test_paths, toolchain_utils):
193  assert all(os.path.isabs(path) for path in test_paths)
194
195  # Directories under toolchain_utils for which any change will cause all tests
196  # in that directory to be rerun. Includes changes in subdirectories.
197  all_dirs = {
198      'crosperf',
199      'cros_utils',
200  }
201
202  relative_paths = [
203      _make_relative_to_toolchain_utils(toolchain_utils, path)
204      for path in test_paths
205  ]
206
207  gather_test_dirs = set()
208
209  for path in relative_paths:
210    top_level_dir = path.split('/')[0]
211    if top_level_dir in all_dirs:
212      gather_test_dirs.add(top_level_dir)
213
214  results = []
215  for d in sorted(gather_test_dirs):
216    results += _gather_python_tests_in(d, toolchain_utils)
217  return results
218
219
220def _find_go_tests(test_paths):
221  """Returns TestSpecs for the go folders of the given files"""
222  assert all(os.path.isabs(path) for path in test_paths)
223
224  dirs_with_gofiles = set(
225      os.path.dirname(p) for p in test_paths if p.endswith('.go'))
226  command = ['go', 'test', '-vet=all']
227  # Note: We sort the directories to be deterministic.
228  return [
229      TestSpec(directory=d, command=command) for d in sorted(dirs_with_gofiles)
230  ]
231
232
233def main(argv):
234  default_toolchain_utils = os.path.abspath(os.path.dirname(__file__))
235
236  parser = argparse.ArgumentParser(description=__doc__)
237  parser.add_argument(
238      '--show_all_output',
239      action='store_true',
240      help='show stdout of successful tests')
241  parser.add_argument(
242      '--toolchain_utils',
243      default=default_toolchain_utils,
244      help='directory of toolchain-utils. Often auto-detected')
245  parser.add_argument(
246      'file', nargs='*', help='a file that we should run tests for')
247  args = parser.parse_args(argv)
248
249  modified_files = [os.path.abspath(f) for f in args.file]
250  show_all_output = args.show_all_output
251  toolchain_utils = args.toolchain_utils
252
253  if not modified_files:
254    print('No files given. Exit.')
255    return 0
256
257  _fix_python_path(toolchain_utils)
258
259  tests_to_run = _find_forced_subdir_python_tests(modified_files,
260                                                  toolchain_utils)
261  for f in modified_files:
262    tests_to_run += _autodetect_python_tests_for(f, toolchain_utils)
263  tests_to_run += _find_go_tests(modified_files)
264
265  # TestSpecs have lists, so we can't use a set. We'd likely want to keep them
266  # sorted for determinism anyway.
267  tests_to_run.sort()
268  tests_to_run = _compress_list(tests_to_run)
269
270  success = _run_test_scripts(tests_to_run, show_all_output)
271  return 0 if success else 1
272
273
274if __name__ == '__main__':
275  sys.exit(main(sys.argv[1:]))
276