• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2# -*- coding: utf-8 -*-
3#
4# Copyright 2019 The Chromium OS Authors. All rights reserved.
5# Use of this source code is governed by a BSD-style license that can be
6# found in the LICENSE file.
7
8"""Runs tests for the given input files.
9
10Tries its best to autodetect all tests based on path name without being *too*
11aggressive.
12
13In short, there's a small set of directories in which, if you make any change,
14all of the tests in those directories get run. Additionally, if you change a
15python file named foo, it'll run foo_test.py or foo_unittest.py if either of
16those exist.
17
18All tests are run in parallel.
19"""
20
21# NOTE: An alternative mentioned on the initial CL for this
22# https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1516414
23# is pytest. It looks like that brings some complexity (and makes use outside
24# of the chroot a bit more obnoxious?), but might be worth exploring if this
25# starts to grow quite complex on its own.
26
27from __future__ import print_function
28
29import argparse
30import collections
31import contextlib
32import multiprocessing.pool
33import os
34import pipes
35import subprocess
36import sys
37
# A single test to run: |command| is the argv list to execute, and |directory|
# is the working directory to execute it in.
TestSpec = collections.namedtuple('TestSpec', ['directory', 'command'])

# Python files that match the *_test.py / *_unittest.py naming convention but
# are not actually tests; paths are relative to toolchain-utils.
non_test_py_files = {
    'debug_info_test/debug_info_test.py',
}
45
46
47def _make_relative_to_toolchain_utils(toolchain_utils, path):
48  """Cleans & makes a path relative to toolchain_utils.
49
50  Raises if that path isn't under toolchain_utils.
51  """
52  # abspath has the nice property that it removes any markers like './'.
53  as_abs = os.path.abspath(path)
54  result = os.path.relpath(as_abs, start=toolchain_utils)
55
56  if result.startswith('../'):
57    raise ValueError('Non toolchain-utils directory found: %s' % result)
58  return result
59
60
def _filter_python_tests(test_files, toolchain_utils):
  """Converts the given files to TestSpecs, dropping known non-tests.

  Files listed in |non_test_py_files| are skipped (with a note printed).
  """
  specs = []
  for path in test_files:
    relative = _make_relative_to_toolchain_utils(toolchain_utils, path)
    if relative in non_test_py_files:
      print('## %s ... NON_TEST_PY_FILE' % relative)
    else:
      specs.append(_python_test_to_spec(path))
  return specs
71
72
def _gather_python_tests_in(rel_subdir, toolchain_utils):
  """Returns all files that appear to be Python tests in a given directory."""
  subdir = os.path.join(toolchain_utils, rel_subdir)
  candidates = []
  for file_name in os.listdir(subdir):
    # str.endswith accepts a tuple of suffixes.
    if file_name.endswith(('_test.py', '_unittest.py')):
      candidates.append(os.path.join(subdir, file_name))
  return _filter_python_tests(candidates, toolchain_utils)
80
81
82def _run_test(test_spec):
83  """Runs a test."""
84  p = subprocess.Popen(test_spec.command,
85                       cwd=test_spec.directory,
86                       stdin=open('/dev/null'),
87                       stdout=subprocess.PIPE,
88                       stderr=subprocess.STDOUT,
89                       encoding='utf-8')
90  stdout, _ = p.communicate()
91  exit_code = p.wait()
92  return exit_code, stdout
93
94
def _python_test_to_spec(test_file):
  """Given a .py file, convert it to a TestSpec."""
  # Tests run from their own directory, since some are sensitive to cwd.
  abs_path = os.path.abspath(test_file)
  directory, file_name = os.path.split(abs_path)

  if os.access(test_file, os.X_OK):
    command = ['./' + file_name]
  else:
    # Non-executable test files are assumed to want python3.
    command = ['python3', file_name]

  return TestSpec(directory=directory, command=command)
109
110
def _autodetect_python_tests_for(test_file, toolchain_utils):
  """Given a test file, detect if there may be related tests."""
  if not test_file.endswith('.py'):
    return []

  prefixes = ('test_', 'unittest_')
  suffixes = ('_test.py', '_unittest.py')

  base_name = os.path.basename(test_file)
  # str.startswith/endswith accept tuples of alternatives.
  if base_name.startswith(prefixes) or base_name.endswith(suffixes):
    # The changed file is itself a test; just run it.
    matches = [test_file]
  else:
    # Look for sibling files named foo_test.py / foo_unittest.py /
    # test_foo.py / unittest_foo.py.
    stem = test_file[:-len('.py')]
    candidates = [stem + suffix for suffix in suffixes]
    directory = os.path.dirname(test_file)
    candidates.extend(
        os.path.join(directory, prefix + base_name) for prefix in prefixes)
    matches = [c for c in candidates if os.path.exists(c)]
  return _filter_python_tests(matches, toolchain_utils)
135
136
def _run_test_scripts(all_tests, show_successful_output=False):
  """Runs a list of TestSpecs. Returns whether all of them succeeded.

  Tests are started in parallel on a thread pool, but their results are
  printed in order so output stays readable.

  Args:
    all_tests: iterable of TestSpec to run.
    show_successful_output: if True, print stdout of passing tests too.

  Returns:
    True iff every test exited with code 0.
  """
  # closing() ensures the pool stops accepting work; the queued tasks keep
  # running, and we collect their results below via future.get().
  with contextlib.closing(multiprocessing.pool.ThreadPool()) as pool:
    results = [pool.apply_async(_run_test, (test, )) for test in all_tests]

  failures = []
  for i, (test, future) in enumerate(zip(all_tests, results)):
    # Add a bit more spacing between outputs.
    if show_successful_output and i:
      print('\n')

    # NOTE(review): pipes.quote is deprecated (removed in Python 3.13);
    # shlex.quote is the modern equivalent.
    pretty_test = ' '.join(pipes.quote(test_arg) for test_arg in test.command)
    pretty_directory = os.path.relpath(test.directory)
    if pretty_directory == '.':
      test_message = pretty_test
    else:
      test_message = '%s in %s/' % (pretty_test, pretty_directory)

    print('## %s ... ' % test_message, end='')
    # Be sure that the users sees which test is running.
    sys.stdout.flush()

    # Blocks until this particular test finishes.
    exit_code, stdout = future.get()
    if not exit_code:
      print('PASS')
    else:
      print('FAIL')
      failures.append(pretty_test)

    if show_successful_output or exit_code:
      sys.stdout.write(stdout)

  if failures:
    word = 'tests' if len(failures) > 1 else 'test'
    print('%d %s failed: %s' % (len(failures), word, failures))

  return not failures
174
175
176def _compress_list(l):
177  """Removes consecutive duplicate elements from |l|.
178
179  >>> _compress_list([])
180  []
181  >>> _compress_list([1, 1])
182  [1]
183  >>> _compress_list([1, 2, 1])
184  [1, 2, 1]
185  """
186  result = []
187  for e in l:
188    if result and result[-1] == e:
189      continue
190    result.append(e)
191  return result
192
193
194def _fix_python_path(toolchain_utils):
195  pypath = os.environ.get('PYTHONPATH', '')
196  if pypath:
197    pypath = ':' + pypath
198  os.environ['PYTHONPATH'] = toolchain_utils + pypath
199
200
def _find_forced_subdir_python_tests(test_paths, toolchain_utils):
  """Returns TestSpecs for whole directories touched by |test_paths|."""
  assert all(os.path.isabs(path) for path in test_paths)

  # Directories under toolchain_utils for which any change (including in
  # subdirectories) causes all tests in that directory to be rerun.
  force_dirs = {
      'crosperf',
      'cros_utils',
  }

  touched = set()
  for path in test_paths:
    relative = _make_relative_to_toolchain_utils(toolchain_utils, path)
    top_level_dir = relative.split('/')[0]
    if top_level_dir in force_dirs:
      touched.add(top_level_dir)

  # Sorted so the resulting test order is deterministic.
  specs = []
  for directory in sorted(touched):
    specs += _gather_python_tests_in(directory, toolchain_utils)
  return specs
227
228
def _find_go_tests(test_paths):
  """Returns TestSpecs for the go folders of the given files"""
  assert all(os.path.isabs(path) for path in test_paths)

  go_dirs = {os.path.dirname(p) for p in test_paths if p.endswith('.go')}
  command = ['go', 'test', '-vet=all']
  # Note: We sort the directories to be deterministic.
  return [TestSpec(directory=d, command=command) for d in sorted(go_dirs)]
240
241
def main(argv):
  """Parses |argv| and runs tests for the files it names."""
  default_toolchain_utils = os.path.abspath(os.path.dirname(__file__))

  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--show_all_output',
                      action='store_true',
                      help='show stdout of successful tests')
  parser.add_argument('--toolchain_utils',
                      default=default_toolchain_utils,
                      help='directory of toolchain-utils. Often auto-detected')
  parser.add_argument('file',
                      nargs='*',
                      help='a file that we should run tests for')
  opts = parser.parse_args(argv)

  modified_files = [os.path.abspath(f) for f in opts.file]
  if not modified_files:
    print('No files given. Exit.')
    return 0

  toolchain_utils = opts.toolchain_utils
  _fix_python_path(toolchain_utils)

  # Forced-directory tests first, then per-file autodetected ones, then go.
  tests_to_run = _find_forced_subdir_python_tests(modified_files,
                                                  toolchain_utils)
  for path in modified_files:
    tests_to_run += _autodetect_python_tests_for(path, toolchain_utils)
  tests_to_run += _find_go_tests(modified_files)

  # TestSpecs have lists, so we can't use a set; sort + compress to dedupe.
  # Sorting also keeps the run order deterministic.
  tests_to_run.sort()
  tests_to_run = _compress_list(tests_to_run)

  if _run_test_scripts(tests_to_run, opts.show_all_output):
    return 0
  return 1
280
281
# Only run tests when executed as a script, not when imported.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
284